Source code for langchain.schema.retriever
Source: lang/api.python.langchain.com/en/latest/_modules/langchain/schema/retriever.html

from __future__ import annotations

import asyncio
import warnings
from abc import ABC, abstractmethod
from functools import partial
from inspect import signature
from typing import TYPE_CHECKING, Any, Dict, List, Optional

from langchain.load.dump import dumpd
from langchain.schema.document import Document
from langchain.schema.runnable import RunnableConfig, RunnableSerializable

if TYPE_CHECKING:
    from langchain.callbacks.manager import (
        AsyncCallbackManagerForRetrieverRun,
        CallbackManagerForRetrieverRun,
        Callbacks,
    )


class BaseRetriever(RunnableSerializable[str, List[Document]], ABC):
    """Abstract base class for a Document retrieval system.

    A retrieval system is defined as something that can take string queries and
    return the most 'relevant' Documents from some source.

    Example:
        .. code-block:: python

            class TFIDFRetriever(BaseRetriever, BaseModel):
                vectorizer: Any
                docs: List[Document]
                tfidf_array: Any
                k: int = 4

                class Config:
                    arbitrary_types_allowed = True

                def get_relevant_documents(self, query: str) -> List[Document]:
                    from sklearn.metrics.pairwise import cosine_similarity

                    # Ip -- (n_docs,x), Op -- (n_docs,n_Feats)
                    query_vec = self.vectorizer.transform([query])
                    # Op -- (n_docs,1) -- Cosine Sim with each doc
                    results = cosine_similarity(self.tfidf_array, query_vec).reshape((-1,))
                    return [self.docs[i] for i in results.argsort()[-self.k :][::-1]]
    """  # noqa: E501

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    _new_arg_supported: bool = False
    _expects_other_args: bool = False
    tags: Optional[List[str]] = None
    """Optional list of tags associated with the retriever. Defaults to None.
    These tags will be associated with each call to this retriever,
    and passed as arguments to the handlers defined in `callbacks`.
    You can use these to eg identify a specific instance of a retriever with its
    use case.
    """
    metadata: Optional[Dict[str, Any]] = None
    """Optional metadata associated with the retriever. Defaults to None.
    This metadata will be associated with each call to this retriever,
    and passed as arguments to the handlers defined in `callbacks`.
    You can use these to eg identify a specific instance of a retriever with its
    use case.
    """

    def __init_subclass__(cls, **kwargs: Any) -> None:
        super().__init_subclass__(**kwargs)
        # Version upgrade for old retrievers that implemented the public
        # methods directly.
        if cls.get_relevant_documents != BaseRetriever.get_relevant_documents:
            warnings.warn(
                "Retrievers must implement abstract `_get_relevant_documents` method"
                " instead of `get_relevant_documents`",
                DeprecationWarning,
            )
            swap = cls.get_relevant_documents
            cls.get_relevant_documents = (  # type: ignore[assignment]
                BaseRetriever.get_relevant_documents
            )
            cls._get_relevant_documents = swap  # type: ignore[assignment]
        if (
            hasattr(cls, "aget_relevant_documents")
            and cls.aget_relevant_documents != BaseRetriever.aget_relevant_documents
        ):
            warnings.warn(
                "Retrievers must implement abstract `_aget_relevant_documents` method"
                " instead of `aget_relevant_documents`",
                DeprecationWarning,
            )
            aswap = cls.aget_relevant_documents
            cls.aget_relevant_documents = (  # type: ignore[assignment]
                BaseRetriever.aget_relevant_documents
            )
            cls._aget_relevant_documents = aswap  # type: ignore[assignment]
        parameters = signature(cls._get_relevant_documents).parameters
        cls._new_arg_supported = parameters.get("run_manager") is not None
        # If a V1 retriever broke the interface and expects additional arguments
        cls._expects_other_args = (
            len(set(parameters.keys()) - {"self", "query", "run_manager"}) > 0
        )

    def invoke(
        self, input: str, config: Optional[RunnableConfig] = None
    ) -> List[Document]:
        config = config or {}
        return self.get_relevant_documents(
            input,
            callbacks=config.get("callbacks"),
            tags=config.get("tags"),
            metadata=config.get("metadata"),
            run_name=config.get("run_name"),
        )

    async def ainvoke(
        self,
        input: str,
        config: Optional[RunnableConfig] = None,
        **kwargs: Optional[Any],
    ) -> List[Document]:
        config = config or {}
        return await self.aget_relevant_documents(
            input,
            callbacks=config.get("callbacks"),
            tags=config.get("tags"),
            metadata=config.get("metadata"),
            run_name=config.get("run_name"),
        )

    @abstractmethod
    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Get documents relevant to a query.

        Args:
            query: String to find relevant documents for
            run_manager: The callbacks handler to use
        Returns:
            List of relevant documents
        """

    async def _aget_relevant_documents(
        self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Asynchronously get documents relevant to a query.

        Args:
            query: String to find relevant documents for
            run_manager: The callbacks handler to use
        Returns:
            List of relevant documents
        """
        return await asyncio.get_running_loop().run_in_executor(
            None, partial(self._get_relevant_documents, run_manager=run_manager), query
        )

    def get_relevant_documents(
        self,
        query: str,
        *,
        callbacks: Callbacks = None,
        tags: Optional[List[str]] = None,
        metadata: Optional[Dict[str, Any]] = None,
        run_name: Optional[str] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Retrieve documents relevant to a query.

        Args:
            query: string to find relevant documents for
            callbacks: Callback manager or list of callbacks
            tags: Optional list of tags associated with the retriever.
                Defaults to None. These tags will be associated with each call
                to this retriever, and passed as arguments to the handlers
                defined in `callbacks`.
            metadata: Optional metadata associated with the retriever.
                Defaults to None. This metadata will be associated with each
                call to this retriever, and passed as arguments to the handlers
                defined in `callbacks`.
        Returns:
            List of relevant documents
        """
        from langchain.callbacks.manager import CallbackManager

        callback_manager = CallbackManager.configure(
            callbacks,
            None,
            verbose=kwargs.get("verbose", False),
            inheritable_tags=tags,
            local_tags=self.tags,
            inheritable_metadata=metadata,
            local_metadata=self.metadata,
        )
        run_manager = callback_manager.on_retriever_start(
            dumpd(self),
            query,
            name=run_name,
            **kwargs,
        )
        try:
            _kwargs = kwargs if self._expects_other_args else {}
            if self._new_arg_supported:
                result = self._get_relevant_documents(
                    query, run_manager=run_manager, **_kwargs
                )
            else:
                result = self._get_relevant_documents(query, **_kwargs)
        except Exception as e:
            run_manager.on_retriever_error(e)
            raise e
        else:
            run_manager.on_retriever_end(
                result,
                **kwargs,
            )
            return result

    async def aget_relevant_documents(
        self,
        query: str,
        *,
        callbacks: Callbacks = None,
        tags: Optional[List[str]] = None,
        metadata: Optional[Dict[str, Any]] = None,
        run_name: Optional[str] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Asynchronously get documents relevant to a query.

        Args:
            query: string to find relevant documents for
            callbacks: Callback manager or list of callbacks
            tags: Optional list of tags associated with the retriever.
                Defaults to None. These tags will be associated with each call
                to this retriever, and passed as arguments to the handlers
                defined in `callbacks`.
            metadata: Optional metadata associated with the retriever.
                Defaults to None. This metadata will be associated with each
                call to this retriever, and passed as arguments to the handlers
                defined in `callbacks`.
        Returns:
            List of relevant documents
        """
        from langchain.callbacks.manager import AsyncCallbackManager

        callback_manager = AsyncCallbackManager.configure(
            callbacks,
            None,
            verbose=kwargs.get("verbose", False),
            inheritable_tags=tags,
            local_tags=self.tags,
            inheritable_metadata=metadata,
            local_metadata=self.metadata,
        )
        run_manager = await callback_manager.on_retriever_start(
            dumpd(self),
            query,
            name=run_name,
            **kwargs,
        )
        try:
            _kwargs = kwargs if self._expects_other_args else {}
            if self._new_arg_supported:
                result = await self._aget_relevant_documents(
                    query, run_manager=run_manager, **_kwargs
                )
            else:
                result = await self._aget_relevant_documents(query, **_kwargs)
        except Exception as e:
            await run_manager.on_retriever_error(e)
            raise e
        else:
            await run_manager.on_retriever_end(
                result,
                **kwargs,
            )
            return result
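As the __init_subclass__ shim above enforces, new retrievers implement the private
`_get_relevant_documents` hook rather than the public method; callbacks, tags, and
the async fallback are handled by the base class. A minimal sketch of a custom
retriever under that contract (the class name and naive matching logic here are
illustrative, not part of langchain):

from typing import List

from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.schema import BaseRetriever, Document


class KeywordRetriever(BaseRetriever):
    """Toy retriever: returns documents whose text contains the query string."""

    documents: List[Document]
    k: int = 4

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        # Naive containment match; a real retriever would rank by a score.
        matches = [
            d for d in self.documents if query.lower() in d.page_content.lower()
        ]
        return matches[: self.k]


retriever = KeywordRetriever(
    documents=[Document(page_content="LangChain retrievers return Documents")]
)
print(retriever.get_relevant_documents("retrievers"))

Because only the sync hook is implemented, `aget_relevant_documents` still works:
the base class runs the sync version in an executor.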
Source code for langchain.schema.prompt
Source: lang/api.python.langchain.com/en/latest/_modules/langchain/schema/prompt.html

from __future__ import annotations

from abc import ABC, abstractmethod
from typing import List

from langchain.load.serializable import Serializable
from langchain.schema.messages import BaseMessage


class PromptValue(Serializable, ABC):
    """Base abstract class for inputs to any language model.

    PromptValues can be converted to both LLM (pure text-generation)
    inputs and ChatModel inputs.
    """

    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Return whether this class is serializable."""
        return True

    @abstractmethod
    def to_string(self) -> str:
        """Return prompt value as string."""

    @abstractmethod
    def to_messages(self) -> List[BaseMessage]:
        """Return prompt as a list of Messages."""
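A PromptValue subclass only needs the two conversions. A minimal sketch (the class
here is hypothetical; langchain's own StringPromptValue in langchain.prompts.base
is the real analogue):

from typing import List

from langchain.schema.messages import BaseMessage, HumanMessage
from langchain.schema.prompt import PromptValue


class SimpleStringPrompt(PromptValue):
    """Illustrative PromptValue wrapping a plain string."""

    text: str

    def to_string(self) -> str:
        # Pure text-generation models consume the raw string.
        return self.text

    def to_messages(self) -> List[BaseMessage]:
        # Chat models consume the same content as a single human message.
        return [HumanMessage(content=self.text)]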
Source code for langchain.schema.chat_history
Source: lang/api.python.langchain.com/en/latest/_modules/langchain/schema/chat_history.html

from __future__ import annotations

from abc import ABC, abstractmethod
from typing import List

from langchain.schema.messages import AIMessage, BaseMessage, HumanMessage


class BaseChatMessageHistory(ABC):
    """Abstract base class for storing chat message history.

    See `ChatMessageHistory` for default implementation.

    Example:
        .. code-block:: python

            class FileChatMessageHistory(BaseChatMessageHistory):
                storage_path: str
                session_id: str

                @property
                def messages(self):
                    with open(
                        os.path.join(self.storage_path, self.session_id),
                        encoding="utf-8",
                    ) as f:
                        messages = json.loads(f.read())
                    return messages_from_dict(messages)

                def add_message(self, message: BaseMessage) -> None:
                    messages = [_message_to_dict(m) for m in self.messages]
                    messages.append(_message_to_dict(message))
                    with open(
                        os.path.join(self.storage_path, self.session_id), "w"
                    ) as f:
                        json.dump(messages, f)

                def clear(self):
                    with open(
                        os.path.join(self.storage_path, self.session_id), "w"
                    ) as f:
                        f.write("[]")
    """

    messages: List[BaseMessage]
    """A list of Messages stored in-memory."""

    def add_user_message(self, message: str) -> None:
        """Convenience method for adding a human message string to the store.

        Args:
            message: The string contents of a human message.
        """
        self.add_message(HumanMessage(content=message))

    def add_ai_message(self, message: str) -> None:
        """Convenience method for adding an AI message string to the store.

        Args:
            message: The string contents of an AI message.
        """
        self.add_message(AIMessage(content=message))

    @abstractmethod
    def add_message(self, message: BaseMessage) -> None:
        """Add a Message object to the store.

        Args:
            message: A BaseMessage object to store.
        """
        raise NotImplementedError()

    @abstractmethod
    def clear(self) -> None:
        """Remove all messages from the store."""
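Only add_message and clear are abstract; the convenience methods come from the base
class. A minimal in-memory sketch (illustrative; langchain ships ChatMessageHistory
as the default implementation):

from typing import List

from langchain.schema.chat_history import BaseChatMessageHistory
from langchain.schema.messages import BaseMessage


class InMemoryChatMessageHistory(BaseChatMessageHistory):
    """Illustrative store backed by a plain Python list."""

    def __init__(self) -> None:
        self.messages: List[BaseMessage] = []

    def add_message(self, message: BaseMessage) -> None:
        self.messages.append(message)

    def clear(self) -> None:
        self.messages = []


history = InMemoryChatMessageHistory()
history.add_user_message("hi!")          # stored as a HumanMessage
history.add_ai_message("hello, human!")  # stored as an AIMessage
print([m.content for m in history.messages])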
Source code for langchain.schema.vectorstore
Source: lang/api.python.langchain.com/en/latest/_modules/langchain/schema/vectorstore.html

from __future__ import annotations

import asyncio
import logging
import math
import warnings
from abc import ABC, abstractmethod
from functools import partial
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    ClassVar,
    Collection,
    Dict,
    Iterable,
    List,
    Optional,
    Tuple,
    Type,
    TypeVar,
)

from langchain.pydantic_v1 import Field, root_validator
from langchain.schema import BaseRetriever
from langchain.schema.document import Document
from langchain.schema.embeddings import Embeddings

if TYPE_CHECKING:
    from langchain.callbacks.manager import (
        AsyncCallbackManagerForRetrieverRun,
        CallbackManagerForRetrieverRun,
    )

logger = logging.getLogger(__name__)

VST = TypeVar("VST", bound="VectorStore")


class VectorStore(ABC):
    """Interface for vector store."""

    @abstractmethod
    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.
            kwargs: vectorstore specific parameters

        Returns:
            List of ids from adding the texts into the vectorstore.
        """

    @property
    def embeddings(self) -> Optional[Embeddings]:
        """Access the query embedding object if available."""
        logger.debug(
            f"{Embeddings.__name__} is not implemented for {self.__class__.__name__}"
        )
        return None

    def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]:
        """Delete by vector ID or other criteria.

        Args:
            ids: List of ids to delete.
            **kwargs: Other keyword arguments that subclasses might use.

        Returns:
            Optional[bool]: True if deletion is successful,
            False otherwise, None if not implemented.
        """
        raise NotImplementedError("delete method must be implemented by subclass.")

    async def adelete(
        self, ids: Optional[List[str]] = None, **kwargs: Any
    ) -> Optional[bool]:
        """Delete by vector ID or other criteria.

        Args:
            ids: List of ids to delete.
            **kwargs: Other keyword arguments that subclasses might use.

        Returns:
            Optional[bool]: True if deletion is successful,
            False otherwise, None if not implemented.
        """
        raise NotImplementedError("delete method must be implemented by subclass.")

    async def aadd_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore."""
        return await asyncio.get_running_loop().run_in_executor(
            None, partial(self.add_texts, **kwargs), texts, metadatas
        )

    def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]:
        """Run more documents through the embeddings and add to the vectorstore.

        Args:
            documents (List[Document]): Documents to add to the vectorstore.

        Returns:
            List[str]: List of IDs of the added texts.
        """
        # TODO: Handle the case where the user doesn't provide ids on the Collection
        texts = [doc.page_content for doc in documents]
        metadatas = [doc.metadata for doc in documents]
        return self.add_texts(texts, metadatas, **kwargs)

    async def aadd_documents(
        self, documents: List[Document], **kwargs: Any
    ) -> List[str]:
        """Run more documents through the embeddings and add to the vectorstore.

        Args:
            documents (List[Document]): Documents to add to the vectorstore.

        Returns:
            List[str]: List of IDs of the added texts.
        """
        texts = [doc.page_content for doc in documents]
        metadatas = [doc.metadata for doc in documents]
        return await self.aadd_texts(texts, metadatas, **kwargs)

    def search(self, query: str, search_type: str, **kwargs: Any) -> List[Document]:
        """Return docs most similar to query using specified search type."""
        if search_type == "similarity":
            return self.similarity_search(query, **kwargs)
        elif search_type == "mmr":
            return self.max_marginal_relevance_search(query, **kwargs)
        else:
            raise ValueError(
                f"search_type of {search_type} not allowed. Expected "
                "search_type to be 'similarity' or 'mmr'."
            )

    async def asearch(
        self, query: str, search_type: str, **kwargs: Any
    ) -> List[Document]:
        """Return docs most similar to query using specified search type."""
        if search_type == "similarity":
            return await self.asimilarity_search(query, **kwargs)
        elif search_type == "mmr":
            return await self.amax_marginal_relevance_search(query, **kwargs)
        else:
            raise ValueError(
                f"search_type of {search_type} not allowed. Expected "
                "search_type to be 'similarity' or 'mmr'."
            )

    @abstractmethod
    def similarity_search(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Document]:
        """Return docs most similar to query."""

    @staticmethod
    def _euclidean_relevance_score_fn(distance: float) -> float:
        """Return a similarity score on a scale [0, 1]."""
        # The 'correct' relevance function
        # may differ depending on a few things, including:
        # - the distance / similarity metric used by the VectorStore
        # - the scale of your embeddings (OpenAI's are unit normed. Many
        #   others are not!)
        # - embedding dimensionality
        # - etc.
        # This function converts the euclidean norm of normalized embeddings
        # (0 is most similar, sqrt(2) most dissimilar)
        # to a similarity function (0 to 1)
        return 1.0 - distance / math.sqrt(2)

    @staticmethod
    def _cosine_relevance_score_fn(distance: float) -> float:
        """Normalize the distance to a score on a scale [0, 1]."""
        return 1.0 - distance

    @staticmethod
    def _max_inner_product_relevance_score_fn(distance: float) -> float:
        """Normalize the distance to a score on a scale [0, 1]."""
        if distance > 0:
            return 1.0 - distance
        return -1.0 * distance

    def _select_relevance_score_fn(self) -> Callable[[float], float]:
        """
        The 'correct' relevance function
        may differ depending on a few things, including:
        - the distance / similarity metric used by the VectorStore
        - the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
        - embedding dimensionality
        - etc.

        Vectorstores should define their own selection based method of relevance.
        """
        raise NotImplementedError

    def similarity_search_with_score(
        self, *args: Any, **kwargs: Any
    ) -> List[Tuple[Document, float]]:
        """Run similarity search with distance."""
        raise NotImplementedError

    async def asimilarity_search_with_score(
        self, *args: Any, **kwargs: Any
    ) -> List[Tuple[Document, float]]:
        """Run similarity search with distance asynchronously."""
        # This is a temporary workaround to make the similarity search
        # asynchronous. The proper solution is to make the similarity search
        # asynchronous in the vector store implementations.
        func = partial(self.similarity_search_with_score, *args, **kwargs)
        return await asyncio.get_event_loop().run_in_executor(None, func)

    def _similarity_search_with_relevance_scores(
        self,
        query: str,
        k: int = 4,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """
        Default similarity search with relevance scores. Modify if necessary
        in subclass.
        Return docs and relevance scores in the range [0, 1].

        0 is dissimilar, 1 is most similar.

        Args:
            query: input text
            k: Number of Documents to return. Defaults to 4.
            **kwargs: kwargs to be passed to similarity search. Should include:
                score_threshold: Optional, a floating point value between 0 to 1 to
                    filter the resulting set of retrieved docs

        Returns:
            List of Tuples of (doc, similarity_score)
        """
        relevance_score_fn = self._select_relevance_score_fn()
        docs_and_scores = self.similarity_search_with_score(query, k, **kwargs)
        return [(doc, relevance_score_fn(score)) for doc, score in docs_and_scores]

    async def _asimilarity_search_with_relevance_scores(
        self,
        query: str,
        k: int = 4,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """
        Default async similarity search with relevance scores. Modify if necessary
        in subclass.
        Return docs and relevance scores in the range [0, 1].

        0 is dissimilar, 1 is most similar.

        Args:
            query: input text
            k: Number of Documents to return. Defaults to 4.
            **kwargs: kwargs to be passed to similarity search. Should include:
                score_threshold: Optional, a floating point value between 0 to 1 to
                    filter the resulting set of retrieved docs

        Returns:
            List of Tuples of (doc, similarity_score)
        """
        relevance_score_fn = self._select_relevance_score_fn()
        docs_and_scores = await self.asimilarity_search_with_score(query, k, **kwargs)
        return [(doc, relevance_score_fn(score)) for doc, score in docs_and_scores]

    def similarity_search_with_relevance_scores(
        self,
        query: str,
        k: int = 4,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Return docs and relevance scores in the range [0, 1].

        0 is dissimilar, 1 is most similar.

        Args:
            query: input text
            k: Number of Documents to return. Defaults to 4.
            **kwargs: kwargs to be passed to similarity search. Should include:
                score_threshold: Optional, a floating point value between 0 to 1 to
                    filter the resulting set of retrieved docs

        Returns:
            List of Tuples of (doc, similarity_score)
        """
        score_threshold = kwargs.pop("score_threshold", None)

        docs_and_similarities = self._similarity_search_with_relevance_scores(
            query, k=k, **kwargs
        )
        if any(
            similarity < 0.0 or similarity > 1.0
            for _, similarity in docs_and_similarities
        ):
            warnings.warn(
                "Relevance scores must be between"
                f" 0 and 1, got {docs_and_similarities}"
            )

        if score_threshold is not None:
            docs_and_similarities = [
                (doc, similarity)
                for doc, similarity in docs_and_similarities
                if similarity >= score_threshold
            ]
            if len(docs_and_similarities) == 0:
                warnings.warn(
                    "No relevant docs were retrieved using the relevance score"
                    f" threshold {score_threshold}"
                )
        return docs_and_similarities

    async def asimilarity_search_with_relevance_scores(
        self,
        query: str,
        k: int = 4,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Return docs and relevance scores in the range [0, 1], asynchronously.

        0 is dissimilar, 1 is most similar.

        Args:
            query: input text
            k: Number of Documents to return. Defaults to 4.
            **kwargs: kwargs to be passed to similarity search. Should include:
                score_threshold: Optional, a floating point value between 0 to 1 to
                    filter the resulting set of retrieved docs

        Returns:
            List of Tuples of (doc, similarity_score)
        """
        score_threshold = kwargs.pop("score_threshold", None)

        docs_and_similarities = await self._asimilarity_search_with_relevance_scores(
            query, k=k, **kwargs
        )
        if any(
            similarity < 0.0 or similarity > 1.0
            for _, similarity in docs_and_similarities
        ):
            warnings.warn(
                "Relevance scores must be between"
                f" 0 and 1, got {docs_and_similarities}"
            )

        if score_threshold is not None:
            docs_and_similarities = [
                (doc, similarity)
                for doc, similarity in docs_and_similarities
                if similarity >= score_threshold
            ]
            if len(docs_and_similarities) == 0:
                warnings.warn(
                    "No relevant docs were retrieved using the relevance score"
                    f" threshold {score_threshold}"
                )
        return docs_and_similarities

    async def asimilarity_search(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Document]:
        """Return docs most similar to query."""
        # This is a temporary workaround to make the similarity search
        # asynchronous. The proper solution is to make the similarity search
        # asynchronous in the vector store implementations.
        func = partial(self.similarity_search, query, k=k, **kwargs)
        return await asyncio.get_event_loop().run_in_executor(None, func)

    def similarity_search_by_vector(
        self, embedding: List[float], k: int = 4, **kwargs: Any
    ) -> List[Document]:
        """Return docs most similar to embedding vector.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the query vector.
        """
        raise NotImplementedError

    async def asimilarity_search_by_vector(
        self, embedding: List[float], k: int = 4, **kwargs: Any
    ) -> List[Document]:
        """Return docs most similar to embedding vector."""
        # This is a temporary workaround to make the similarity search
        # asynchronous. The proper solution is to make the similarity search
        # asynchronous in the vector store implementations.
        func = partial(self.similarity_search_by_vector, embedding, k=k, **kwargs)
        return await asyncio.get_event_loop().run_in_executor(None, func)

    def max_marginal_relevance_search(
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND diversity
        among selected documents.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.

        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        raise NotImplementedError

    async def amax_marginal_relevance_search(
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance."""
        # This is a temporary workaround to make the similarity search
        # asynchronous. The proper solution is to make the similarity search
        # asynchronous in the vector store implementations.
        func = partial(
            self.max_marginal_relevance_search,
            query,
            k=k,
            fetch_k=fetch_k,
            lambda_mult=lambda_mult,
            **kwargs,
        )
        return await asyncio.get_event_loop().run_in_executor(None, func)

    def max_marginal_relevance_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND diversity
        among selected documents.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.

        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        raise NotImplementedError

    async def amax_marginal_relevance_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance."""
        raise NotImplementedError

    @classmethod
    def from_documents(
        cls: Type[VST],
        documents: List[Document],
        embedding: Embeddings,
        **kwargs: Any,
    ) -> VST:
        """Return VectorStore initialized from documents and embeddings."""
        texts = [d.page_content for d in documents]
        metadatas = [d.metadata for d in documents]
        return cls.from_texts(texts, embedding, metadatas=metadatas, **kwargs)

    @classmethod
    async def afrom_documents(
        cls: Type[VST],
        documents: List[Document],
        embedding: Embeddings,
        **kwargs: Any,
    ) -> VST:
        """Return VectorStore initialized from documents and embeddings."""
        texts = [d.page_content for d in documents]
        metadatas = [d.metadata for d in documents]
        return await cls.afrom_texts(texts, embedding, metadatas=metadatas, **kwargs)

    @classmethod
    @abstractmethod
    def from_texts(
        cls: Type[VST],
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> VST:
        """Return VectorStore initialized from texts and embeddings."""

    @classmethod
    async def afrom_texts(
        cls: Type[VST],
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> VST:
        """Return VectorStore initialized from texts and embeddings."""
        return await asyncio.get_running_loop().run_in_executor(
            None, partial(cls.from_texts, **kwargs), texts, embedding, metadatas
        )

    def _get_retriever_tags(self) -> List[str]:
        """Get tags for retriever."""
        tags = [self.__class__.__name__]
        if self.embeddings:
            tags.append(self.embeddings.__class__.__name__)
        return tags

    def as_retriever(self, **kwargs: Any) -> VectorStoreRetriever:
        """Return VectorStoreRetriever initialized from this VectorStore.

        Args:
            search_type (Optional[str]): Defines the type of search that
                the Retriever should perform.
                Can be "similarity" (default), "mmr", or
                "similarity_score_threshold".
            search_kwargs (Optional[Dict]): Keyword arguments to pass to the
                search function. Can include things like:
                    k: Amount of documents to return (Default: 4)
                    score_threshold: Minimum relevance threshold
                        for similarity_score_threshold
                    fetch_k: Amount of documents to pass to MMR algorithm
                        (Default: 20)
                    lambda_mult: Diversity of results returned by MMR;
                        1 for minimum diversity and 0 for maximum. (Default: 0.5)
                    filter: Filter by document metadata

        Returns:
            VectorStoreRetriever: Retriever class for VectorStore.

        Examples:

        .. code-block:: python

            # Retrieve more documents with higher diversity
            # Useful if your dataset has many similar documents
            docsearch.as_retriever(
                search_type="mmr",
                search_kwargs={'k': 6, 'lambda_mult': 0.25}
            )

            # Fetch more documents for the MMR algorithm to consider
            # But only return the top 5
            docsearch.as_retriever(
                search_type="mmr",
                search_kwargs={'k': 5, 'fetch_k': 50}
            )

            # Only retrieve documents that have a relevance score
            # Above a certain threshold
            docsearch.as_retriever(
                search_type="similarity_score_threshold",
                search_kwargs={'score_threshold': 0.8}
            )

            # Only get the single most similar document from the dataset
            docsearch.as_retriever(search_kwargs={'k': 1})

            # Use a filter to only retrieve documents from a specific paper
            docsearch.as_retriever(
                search_kwargs={'filter': {'paper_title':'GPT-4 Technical Report'}}
            )
        """
        tags = kwargs.pop("tags", None) or []
        tags.extend(self._get_retriever_tags())
        return VectorStoreRetriever(vectorstore=self, **kwargs, tags=tags)


class VectorStoreRetriever(BaseRetriever):
    """Base Retriever class for VectorStore."""

    vectorstore: VectorStore
    """VectorStore to use for retrieval."""
    search_type: str = "similarity"
    """Type of search to perform. Defaults to "similarity"."""
    search_kwargs: dict = Field(default_factory=dict)
    """Keyword arguments to pass to the search function."""
    allowed_search_types: ClassVar[Collection[str]] = (
        "similarity",
        "similarity_score_threshold",
        "mmr",
    )

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    @root_validator()
    def validate_search_type(cls, values: Dict) -> Dict:
        """Validate search type."""
        search_type = values["search_type"]
        if search_type not in cls.allowed_search_types:
            raise ValueError(
                f"search_type of {search_type} not allowed. Valid values are: "
                f"{cls.allowed_search_types}"
            )
        if search_type == "similarity_score_threshold":
            score_threshold = values["search_kwargs"].get("score_threshold")
            if (score_threshold is None) or (not isinstance(score_threshold, float)):
                raise ValueError(
                    "`score_threshold` is not specified with a float value (0~1) "
                    "in `search_kwargs`."
                )
        return values

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        if self.search_type == "similarity":
            docs = self.vectorstore.similarity_search(query, **self.search_kwargs)
        elif self.search_type == "similarity_score_threshold":
            docs_and_similarities = (
                self.vectorstore.similarity_search_with_relevance_scores(
                    query, **self.search_kwargs
                )
            )
            docs = [doc for doc, _ in docs_and_similarities]
        elif self.search_type == "mmr":
            docs = self.vectorstore.max_marginal_relevance_search(
                query, **self.search_kwargs
            )
        else:
            raise ValueError(f"search_type of {self.search_type} not allowed.")
        return docs

    async def _aget_relevant_documents(
        self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
    ) -> List[Document]:
        if self.search_type == "similarity":
            docs = await self.vectorstore.asimilarity_search(
                query, **self.search_kwargs
            )
        elif self.search_type == "similarity_score_threshold":
            docs_and_similarities = (
                await self.vectorstore.asimilarity_search_with_relevance_scores(
                    query, **self.search_kwargs
                )
            )
            docs = [doc for doc, _ in docs_and_similarities]
        elif self.search_type == "mmr":
            docs = await self.vectorstore.amax_marginal_relevance_search(
                query, **self.search_kwargs
            )
        else:
            raise ValueError(f"search_type of {self.search_type} not allowed.")
        return docs

    def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]:
        """Add documents to vectorstore."""
        return self.vectorstore.add_documents(documents, **kwargs)

    async def aadd_documents(
        self, documents: List[Document], **kwargs: Any
    ) -> List[str]:
        """Add documents to vectorstore."""
        return await self.vectorstore.aadd_documents(documents, **kwargs)
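Only add_texts, similarity_search, and from_texts are abstract; the async variants,
document helpers, and as_retriever all fall out of those. A toy in-memory sketch
under that contract (the class name and brute-force cosine scan are illustrative,
not a real langchain store):

import math
from typing import Any, Iterable, List, Optional

from langchain.schema.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore


class ListVectorStore(VectorStore):
    """Toy store keeping vectors in a Python list; no indexing or persistence."""

    def __init__(self, embedding: Embeddings) -> None:
        self._embedding = embedding
        self._vectors: List[List[float]] = []
        self._docs: List[Document] = []

    @property
    def embeddings(self) -> Optional[Embeddings]:
        return self._embedding

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        texts = list(texts)
        vectors = self._embedding.embed_documents(texts)
        ids = []
        for i, (text, vec) in enumerate(zip(texts, vectors)):
            meta = metadatas[i] if metadatas else {}
            self._vectors.append(vec)
            self._docs.append(Document(page_content=text, metadata=meta))
            ids.append(str(len(self._docs) - 1))
        return ids

    def similarity_search(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Document]:
        q = self._embedding.embed_query(query)

        def cosine(a: List[float], b: List[float]) -> float:
            dot = sum(x * y for x, y in zip(a, b))
            na = math.sqrt(sum(x * x for x in a))
            nb = math.sqrt(sum(x * x for x in b))
            return dot / (na * nb) if na and nb else 0.0

        # Brute-force scan; a real store would use an ANN index.
        scored = sorted(
            zip(self._docs, self._vectors),
            key=lambda pair: cosine(q, pair[1]),
            reverse=True,
        )
        return [doc for doc, _ in scored[:k]]

    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> "ListVectorStore":
        store = cls(embedding)
        store.add_texts(texts, metadatas)
        return store

With just these three methods, `store.as_retriever(search_kwargs={'k': 2})` already
yields a working VectorStoreRetriever, since similarity search is the default
search_type.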
Source code for langchain.schema.output_parser
Source: lang/api.python.langchain.com/en/latest/_modules/langchain/schema/output_parser.html

from __future__ import annotations

import asyncio
import functools
from abc import ABC, abstractmethod
from typing import (
    Any,
    AsyncIterator,
    Dict,
    Generic,
    Iterator,
    List,
    Optional,
    Type,
    TypeVar,
    Union,
)

from typing_extensions import get_args

from langchain.schema.messages import AnyMessage, BaseMessage, BaseMessageChunk
from langchain.schema.output import (
    ChatGeneration,
    ChatGenerationChunk,
    Generation,
    GenerationChunk,
)
from langchain.schema.prompt import PromptValue
from langchain.schema.runnable import RunnableConfig, RunnableSerializable

T = TypeVar("T")


class BaseLLMOutputParser(Generic[T], ABC):
    """Abstract base class for parsing the outputs of a model."""

    @abstractmethod
    def parse_result(self, result: List[Generation], *, partial: bool = False) -> T:
        """Parse a list of candidate model Generations into a specific format.

        Args:
            result: A list of Generations to be parsed. The Generations are assumed
                to be different candidate outputs for a single model input.

        Returns:
            Structured output.
        """

    async def aparse_result(
        self, result: List[Generation], *, partial: bool = False
    ) -> T:
        """Parse a list of candidate model Generations into a specific format.

        Args:
            result: A list of Generations to be parsed. The Generations are assumed
                to be different candidate outputs for a single model input.

        Returns:
            Structured output.
        """
        return await asyncio.get_running_loop().run_in_executor(
            None, self.parse_result, result
        )


class BaseGenerationOutputParser(
    BaseLLMOutputParser, RunnableSerializable[Union[str, BaseMessage], T]
):
    """Base class to parse the output of an LLM call."""

    @property
    def InputType(self) -> Any:
        return Union[str, AnyMessage]

    @property
    def OutputType(self) -> Type[T]:
        # even though mypy complains this isn't valid,
        # it is good enough for pydantic to build the schema from
        return T  # type: ignore[misc]

    def invoke(
        self, input: Union[str, BaseMessage], config: Optional[RunnableConfig] = None
    ) -> T:
        if isinstance(input, BaseMessage):
            return self._call_with_config(
                lambda inner_input: self.parse_result(
                    [ChatGeneration(message=inner_input)]
                ),
                input,
                config,
                run_type="parser",
            )
        else:
            return self._call_with_config(
                lambda inner_input: self.parse_result([Generation(text=inner_input)]),
                input,
                config,
                run_type="parser",
            )

    async def ainvoke(
        self,
        input: str | BaseMessage,
        config: Optional[RunnableConfig] = None,
        **kwargs: Optional[Any],
    ) -> T:
        if isinstance(input, BaseMessage):
            return await self._acall_with_config(
                lambda inner_input: self.aparse_result(
                    [ChatGeneration(message=inner_input)]
                ),
                input,
                config,
                run_type="parser",
            )
        else:
            return await self._acall_with_config(
                lambda inner_input: self.aparse_result([Generation(text=inner_input)]),
                input,
                config,
                run_type="parser",
            )


class BaseOutputParser(
    BaseLLMOutputParser, RunnableSerializable[Union[str, BaseMessage], T]
):
    """Base class to parse the output of an LLM call.

    Output parsers help structure language model responses.

    Example:
        .. code-block:: python

            class BooleanOutputParser(BaseOutputParser[bool]):
                true_val: str = "YES"
                false_val: str = "NO"

                def parse(self, text: str) -> bool:
                    cleaned_text = text.strip().upper()
                    if cleaned_text not in (self.true_val.upper(), self.false_val.upper()):
                        raise OutputParserException(
                            f"BooleanOutputParser expected output value to either be "
                            f"{self.true_val} or {self.false_val} (case-insensitive). "
                            f"Received {cleaned_text}."
                        )
                    return cleaned_text == self.true_val.upper()

                @property
                def _type(self) -> str:
                    return "boolean_output_parser"
    """  # noqa: E501

    @property
    def InputType(self) -> Any:
        return Union[str, AnyMessage]

    @property
    def OutputType(self) -> Type[T]:
        for cls in self.__class__.__orig_bases__:  # type: ignore[attr-defined]
            type_args = get_args(cls)
            if type_args and len(type_args) == 1:
                return type_args[0]

        raise TypeError(
            f"Runnable {self.__class__.__name__} doesn't have an inferable OutputType. "
            "Override the OutputType property to specify the output type."
        )

    def invoke(
        self, input: Union[str, BaseMessage], config: Optional[RunnableConfig] = None
    ) -> T:
        if isinstance(input, BaseMessage):
            return self._call_with_config(
                lambda inner_input: self.parse_result(
                    [ChatGeneration(message=inner_input)]
                ),
                input,
                config,
                run_type="parser",
            )
        else:
            return self._call_with_config(
                lambda inner_input: self.parse_result([Generation(text=inner_input)]),
                input,
                config,
                run_type="parser",
            )

    async def ainvoke(
        self,
        input: str | BaseMessage,
        config: Optional[RunnableConfig] = None,
        **kwargs: Optional[Any],
    ) -> T:
        if isinstance(input, BaseMessage):
            return await self._acall_with_config(
                lambda inner_input: self.aparse_result(
                    [ChatGeneration(message=inner_input)]
                ),
                input,
                config,
                run_type="parser",
            )
        else:
            return await self._acall_with_config(
                lambda inner_input: self.aparse_result([Generation(text=inner_input)]),
                input,
                config,
                run_type="parser",
            )

    def parse_result(self, result: List[Generation], *, partial: bool = False) -> T:
        """Parse a list of candidate model Generations into a specific format.

        The return value is parsed from only the first Generation in the result, which
        is assumed to be the highest-likelihood Generation.

        Args:
            result: A list of Generations to be parsed. The Generations are assumed
                to be different candidate outputs for a single model input.

        Returns:
            Structured output.
        """
        return self.parse(result[0].text)

    @abstractmethod
    def parse(self, text: str) -> T:
        """Parse a single string model output into some structure.

        Args:
            text: String output of a language model.

        Returns:
            Structured output.
        """

    async def aparse_result(
        self, result: List[Generation], *, partial: bool = False
    ) -> T:
        """Parse a list of candidate model Generations into a specific format.

        The return value is parsed from only the first Generation in the result, which
        is assumed to be the highest-likelihood Generation.

        Args:
            result: A list of Generations to be parsed. The Generations are assumed
                to be different candidate outputs for a single model input.

        Returns:
            Structured output.
        """
        return await asyncio.get_running_loop().run_in_executor(
            None, functools.partial(self.parse_result, partial=partial), result
        )

    async def aparse(self, text: str) -> T:
        """Parse a single string model output into some structure.

        Args:
            text: String output of a language model.

        Returns:
            Structured output.
        """
        return await asyncio.get_running_loop().run_in_executor(None, self.parse, text)

    # TODO: rename 'completion' -> 'text'.
    def parse_with_prompt(self, completion: str, prompt: PromptValue) -> Any:
        """Parse the output of an LLM call with the input prompt for context.

        The prompt is largely provided in the event the OutputParser wants
        to retry or fix the output in some way, and needs information from
        the prompt to do so.

        Args:
            completion: String output of a language model.
            prompt: Input PromptValue.

        Returns:
            Structured output
        """
        return self.parse(completion)

    def get_format_instructions(self) -> str:
        """Instructions on how the LLM output should be formatted."""
        raise NotImplementedError

    @property
    def _type(self) -> str:
        """Return the output parser type for serialization."""
        raise NotImplementedError(
            f"_type property is not implemented in class {self.__class__.__name__}."
            " This is required for serialization."
        )

    def dict(self, **kwargs: Any) -> Dict:
        """Return dictionary representation of output parser."""
        output_parser_dict = super().dict(**kwargs)
        try:
            output_parser_dict["_type"] = self._type
        except NotImplementedError:
            pass
        return output_parser_dict


class BaseTransformOutputParser(BaseOutputParser[T]):
    """Base class for an output parser that can handle streaming input."""

    def _transform(self, input: Iterator[Union[str, BaseMessage]]) -> Iterator[T]:
        for chunk in input:
            if isinstance(chunk, BaseMessage):
                yield self.parse_result([ChatGeneration(message=chunk)])
            else:
                yield self.parse_result([Generation(text=chunk)])

    async def _atransform(
        self, input: AsyncIterator[Union[str, BaseMessage]]
    ) -> AsyncIterator[T]:
        async for chunk in input:
            if isinstance(chunk, BaseMessage):
                yield self.parse_result([ChatGeneration(message=chunk)])
            else:
                yield self.parse_result([Generation(text=chunk)])

    def transform(
        self,
        input: Iterator[Union[str, BaseMessage]],
        config: Optional[RunnableConfig] = None,
        **kwargs: Any,
    ) -> Iterator[T]:
        yield from self._transform_stream_with_config(
            input, self._transform, config, run_type="parser"
        )

    async def atransform(
        self,
        input: AsyncIterator[Union[str, BaseMessage]],
        config: Optional[RunnableConfig] = None,
        **kwargs: Any,
    ) -> AsyncIterator[T]:
        async for chunk in self._atransform_stream_with_config(
            input, self._atransform, config, run_type="parser"
        ):
            yield chunk


class BaseCumulativeTransformOutputParser(BaseTransformOutputParser[T]):
    """Base class for an output parser that can handle streaming input."""

    diff: bool = False
    """In streaming mode, whether to yield diffs between the previous and current
    parsed output, or just the current parsed output.
    """

    def _diff(self, prev: Optional[T], next: T) -> T:
        """Convert parsed outputs into a diff format. The semantics of this are
        up to the output parser."""
        raise NotImplementedError()

    def _transform(self, input: Iterator[Union[str, BaseMessage]]) -> Iterator[Any]:
        prev_parsed = None
        acc_gen = None
        for chunk in input:
            if isinstance(chunk, BaseMessageChunk):
                chunk_gen: Generation = ChatGenerationChunk(message=chunk)
            elif isinstance(chunk, BaseMessage):
                chunk_gen = ChatGenerationChunk(
                    message=BaseMessageChunk(**chunk.dict())
                )
            else:
                chunk_gen = GenerationChunk(text=chunk)

            if acc_gen is None:
                acc_gen = chunk_gen
            else:
                acc_gen += chunk_gen

            parsed = self.parse_result([acc_gen], partial=True)
            if parsed is not None and parsed != prev_parsed:
                if self.diff:
                    yield self._diff(prev_parsed, parsed)
                else:
                    yield parsed
                prev_parsed = parsed

    async def _atransform(
        self, input: AsyncIterator[Union[str, BaseMessage]]
    ) -> AsyncIterator[T]:
        prev_parsed = None
        acc_gen = None
        async for chunk in input:
            if isinstance(chunk, BaseMessageChunk):
                chunk_gen: Generation = ChatGenerationChunk(message=chunk)
            elif isinstance(chunk, BaseMessage):
                chunk_gen = ChatGenerationChunk(
                    message=BaseMessageChunk(**chunk.dict())
                )
            else:
                chunk_gen = GenerationChunk(text=chunk)

            if acc_gen is None:
                acc_gen = chunk_gen
            else:
                acc_gen += chunk_gen

            parsed = self.parse_result([acc_gen], partial=True)
            if parsed is not None and parsed != prev_parsed:
                if self.diff:
                    yield self._diff(prev_parsed, parsed)
                else:
                    yield parsed
                prev_parsed = parsed


class StrOutputParser(BaseTransformOutputParser[str]):
    """OutputParser that parses LLMResult into the top likely string."""

    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Return whether this class is serializable."""
        return True

    @property
    def _type(self) -> str:
        """Return the output parser type for serialization."""
        return "default"

    def parse(self, text: str) -> str:
        """Returns the input text with no changes."""
        return text


# TODO: Deprecate
NoOpOutputParser = StrOutputParser


class OutputParserException(ValueError):
    """Exception that output parsers should raise to signify a parsing error.

    This exists to differentiate parsing errors from other code or execution errors
    that also may arise inside the output parser. OutputParserExceptions will be
    available to catch and handle in ways to fix the parsing error, while other
    errors will be raised.

    Args:
        error: The error that's being re-raised or an error message.
        observation: String explanation of error which can be passed to a
            model to try and remediate the issue.
        llm_output: String model output which is error-ing.
        send_to_llm: Whether to send the observation and llm_output back to an Agent
            after an OutputParserException has been raised. This gives the underlying
            model driving the agent the context that the previous output was
            improperly structured, in the hopes that it will update the output to
            the correct format.
    """

    def __init__(
        self,
        error: Any,
        observation: Optional[str] = None,
        llm_output: Optional[str] = None,
        send_to_llm: bool = False,
    ):
        super(OutputParserException, self).__init__(error)
        if send_to_llm:
            if observation is None or llm_output is None:
                raise ValueError(
                    "Arguments 'observation' & 'llm_output'"
                    " are required if 'send_to_llm' is True"
                )
        self.observation = observation
        self.llm_output = llm_output
        self.send_to_llm = send_to_llm
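For most parsers, subclassing BaseOutputParser and implementing parse is enough:
invoke, async support, and OutputType inference all come from the base classes. A
small sketch (the class name is hypothetical; langchain ships a similar
CommaSeparatedListOutputParser in langchain.output_parsers):

from typing import List

from langchain.schema.output_parser import BaseOutputParser


class CommaSeparatedListParser(BaseOutputParser[List[str]]):
    """Illustrative parser: splits model output on commas."""

    def parse(self, text: str) -> List[str]:
        # "a, b, c" -> ["a", "b", "c"]
        return [part.strip() for part in text.strip().split(",")]

    def get_format_instructions(self) -> str:
        return "Return a comma-separated list, e.g. `foo, bar, baz`."

    @property
    def _type(self) -> str:
        return "comma_separated_list_parser"


parser = CommaSeparatedListParser()
print(parser.invoke("red, green, blue"))  # ['red', 'green', 'blue']

Note that OutputType is inferred as List[str] from the `BaseOutputParser[List[str]]`
base via __orig_bases__, with no extra work in the subclass.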
cf75cb40b062-0
Source code for langchain.schema.language_model from __future__ import annotations from abc import ABC, abstractmethod from functools import lru_cache from typing import ( TYPE_CHECKING, Any, List, Optional, Sequence, Set, TypeVar, Union, ) from typing_extensions import TypeAlias from langchain.schema.messages import AnyMessage, BaseMessage, get_buffer_string from langchain.schema.output import LLMResult from langchain.schema.prompt import PromptValue from langchain.schema.runnable import RunnableSerializable from langchain.utils import get_pydantic_field_names if TYPE_CHECKING: from langchain.callbacks.manager import Callbacks @lru_cache(maxsize=None) # Cache the tokenizer def get_tokenizer() -> Any: try: from transformers import GPT2TokenizerFast except ImportError: raise ImportError( "Could not import transformers python package. " "This is needed in order to calculate get_token_ids. " "Please install it with `pip install transformers`." ) # create a GPT-2 tokenizer instance return GPT2TokenizerFast.from_pretrained("gpt2") def _get_token_ids_default_method(text: str) -> List[int]: """Encode the text into token IDs.""" # get the cached tokenizer tokenizer = get_tokenizer() # tokenize the text using the GPT-2 tokenizer return tokenizer.encode(text) LanguageModelInput = Union[PromptValue, str, List[BaseMessage]] LanguageModelOutput = TypeVar("LanguageModelOutput") [docs]class BaseLanguageModel( RunnableSerializable[LanguageModelInput, LanguageModelOutput], ABC ): """Abstract base class for interfacing with language models.
lang/api.python.langchain.com/en/latest/_modules/langchain/schema/language_model.html
cf75cb40b062-1
): """Abstract base class for interfacing with language models. All language model wrappers inherit from BaseLanguageModel. Exposes three main methods: - generate_prompt: generate language model outputs for a sequence of prompt values. A prompt value is a model input that can be converted to any language model input format (string or messages). - predict: pass in a single string to a language model and return a string prediction. - predict_messages: pass in a sequence of BaseMessages (corresponding to a single model call) to a language model and return a BaseMessage prediction. Each of these has an equivalent asynchronous method. """ @property def InputType(self) -> TypeAlias: """Get the input type for this runnable.""" from langchain.prompts.base import StringPromptValue from langchain.prompts.chat import ChatPromptValueConcrete # This is a version of LanguageModelInput which replaces the abstract # base class BaseMessage with a union of its subclasses, which makes # for a much better schema. return Union[ str, Union[StringPromptValue, ChatPromptValueConcrete], List[AnyMessage], ] [docs] @abstractmethod def generate_prompt( self, prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> LLMResult: """Pass a sequence of prompts to the model and return model generations. This method should make use of batched calls for models that expose a batched API. Use this method when you want to: 1. take advantage of batched calls,
lang/api.python.langchain.com/en/latest/_modules/langchain/schema/language_model.html
cf75cb40b062-2
                1. take advantage of batched calls,
                2. need more output from the model than just the top
                    generated value,
                3. are building chains that are agnostic to the underlying
                    language model type (e.g., pure text completion models vs
                    chat models).

        Args:
            prompts: List of PromptValues. A PromptValue is an object that can be
                converted to match the format of any language model (string for pure
                text generation models and BaseMessages for chat models).
            stop: Stop words to use when generating. Model output is cut off at the
                first occurrence of any of these substrings.
            callbacks: Callbacks to pass through. Used for executing additional
                functionality, such as logging or streaming, throughout generation.
            **kwargs: Arbitrary additional keyword arguments. These are usually passed
                to the model provider API call.

        Returns:
            An LLMResult, which contains a list of candidate Generations for each
                input prompt and additional model provider-specific output.
        """

    [docs] @abstractmethod
    async def agenerate_prompt(
        self,
        prompts: List[PromptValue],
        stop: Optional[List[str]] = None,
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> LLMResult:
        """Asynchronously pass a sequence of prompts and return model generations.

        This method should make use of batched calls for models that expose a batched
        API.

        Use this method when you want to:
            1. take advantage of batched calls,
            2. need more output from the model than just the top
                generated value,
            3. are building chains that are agnostic to the underlying
                language model type (e.g., pure text completion models vs
                chat models).

        Args:
            prompts: List of PromptValues. A PromptValue is an object that can be
                converted to match the format of any language model (string for pure
                text generation models and BaseMessages for chat models).
            stop: Stop words to use when generating. Model output is cut off at the
                first occurrence of any of these substrings.
            callbacks: Callbacks to pass through. Used for executing additional
                functionality, such as logging or streaming, throughout generation.
            **kwargs: Arbitrary additional keyword arguments. These are usually passed
                to the model provider API call.

        Returns:
            An LLMResult, which contains a list of candidate Generations for each
                input prompt and additional model provider-specific output.
        """

    [docs] @abstractmethod
    def predict(
        self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
    ) -> str:
        """Pass a single string input to the model and return a string prediction.

        Use this method when passing in raw text. If you want to pass in specific
        types of chat messages, use predict_messages.

        Args:
            text: String input to pass to the model.
            stop: Stop words to use when generating. Model output is cut off at the
                first occurrence of any of these substrings.
            **kwargs: Arbitrary additional keyword arguments. These are usually passed
                to the model provider API call.

        Returns:
            Top model prediction as a string.
        """

    [docs] @abstractmethod
    def predict_messages(
        self,
        messages: List[BaseMessage],
        *,
        stop: Optional[Sequence[str]] = None,
        **kwargs: Any,
    ) -> BaseMessage:
        """Pass a message sequence to the model and return a message prediction.

        Use this method when passing in chat messages. If you want to pass in raw
        text, use predict.

        Args:
            messages: A sequence of chat messages corresponding to a single model
                input.
            stop: Stop words to use when generating. Model output is cut off at the
                first occurrence of any of these substrings.
            **kwargs: Arbitrary additional keyword arguments. These are usually passed
                to the model provider API call.

        Returns:
            Top model prediction as a message.
        """

    [docs] @abstractmethod
    async def apredict(
        self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
    ) -> str:
        """Asynchronously pass a string to the model and return a string prediction.

        Use this method when calling pure text generation models and only the top
            candidate generation is needed.

        Args:
            text: String input to pass to the model.
            stop: Stop words to use when generating. Model output is cut off at the
                first occurrence of any of these substrings.
            **kwargs: Arbitrary additional keyword arguments. These are usually passed
                to the model provider API call.

        Returns:
            Top model prediction as a string.
        """

    [docs] @abstractmethod
    async def apredict_messages(
        self,
        messages: List[BaseMessage],
        *,
        stop: Optional[Sequence[str]] = None,
        **kwargs: Any,
    ) -> BaseMessage:
        """Asynchronously pass messages to the model and return a message prediction.

        Use this method when calling chat models and only the top
            candidate generation is needed.

        Args:
            messages: A sequence of chat messages corresponding to a single model
                input.
            stop: Stop words to use when generating. Model output is cut off at the
                first occurrence of any of these substrings.
            **kwargs: Arbitrary additional keyword arguments. These are usually passed
                to the model provider API call.

        Returns:
            Top model prediction as a message.
        """

    [docs] def get_token_ids(self, text: str) -> List[int]:
        """Return the ordered ids of the tokens in a text.

        Args:
            text: The string input to tokenize.

        Returns:
            A list of ids corresponding to the tokens in the text, in order they occur
                in the text.
        """
        return _get_token_ids_default_method(text)

    [docs] def get_num_tokens(self, text: str) -> int:
        """Get the number of tokens present in the text.

        Useful for checking if an input will fit in a model's context window.

        Args:
            text: The string input to tokenize.

        Returns:
            The integer number of tokens in the text.
        """
        return len(self.get_token_ids(text))

    [docs] def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
        """Get the number of tokens in the messages.

        Useful for checking if an input will fit in a model's context window.

        Args:
            messages: The message inputs to tokenize.

        Returns:
            The sum of the number of tokens across the messages.
        """
        return sum([self.get_num_tokens(get_buffer_string([m])) for m in messages])

    @classmethod
    def _all_required_field_names(cls) -> Set:
        """DEPRECATED: Kept for backwards compatibility.

        Use get_pydantic_field_names.
        """
        return get_pydantic_field_names(cls)
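The three token-counting helpers above share one code path: get_num_tokens and get_num_tokens_from_messages both reduce to get_token_ids, so a subclass only needs to override get_token_ids to make every count provider-accurate. A minimal sketch of using them to pre-check a context window (the helper name fits_context and the max_tokens budget are illustrative, not part of the library):

    from langchain.schema.language_model import BaseLanguageModel

    def fits_context(model: BaseLanguageModel, text: str, max_tokens: int) -> bool:
        # get_num_tokens delegates to get_token_ids, which subclasses may
        # override with a provider-specific tokenizer.
        return model.get_num_tokens(text) <= max_tokens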
Source code for langchain.schema.storage

from abc import ABC, abstractmethod
from typing import Generic, Iterator, List, Optional, Sequence, Tuple, TypeVar, Union

K = TypeVar("K")
V = TypeVar("V")

[docs]class BaseStore(Generic[K, V], ABC):
    """Abstract interface for a key-value store."""

    [docs] @abstractmethod
    def mget(self, keys: Sequence[K]) -> List[Optional[V]]:
        """Get the values associated with the given keys.

        Args:
            keys (Sequence[K]): A sequence of keys.

        Returns:
            A sequence of optional values associated with the keys.
            If a key is not found, the corresponding value will be None.
        """

    [docs] @abstractmethod
    def mset(self, key_value_pairs: Sequence[Tuple[K, V]]) -> None:
        """Set the values for the given keys.

        Args:
            key_value_pairs (Sequence[Tuple[K, V]]): A sequence of key-value pairs.
        """

    [docs] @abstractmethod
    def mdelete(self, keys: Sequence[K]) -> None:
        """Delete the given keys and their associated values.

        Args:
            keys (Sequence[K]): A sequence of keys to delete.
        """

    [docs] @abstractmethod
    def yield_keys(
        self, *, prefix: Optional[str] = None
    ) -> Union[Iterator[K], Iterator[str]]:
        """Get an iterator over keys that match the given prefix.

        Args:
            prefix (str): The prefix to match.

        Returns:
            Iterator[K | str]: An iterator over keys that match the given prefix.

            This method is allowed to return an iterator over either K or str
            depending on what makes more sense for the given store.
        """
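Because the four methods above are the entire contract, an implementation can be very small. A minimal in-memory sketch (the class name InMemoryStore is illustrative; it assumes string keys and a single process):

    from typing import Dict, Iterator, List, Optional, Sequence, Tuple

    from langchain.schema.storage import BaseStore

    class InMemoryStore(BaseStore[str, bytes]):
        """Dict-backed store for string keys and bytes values."""

        def __init__(self) -> None:
            self._data: Dict[str, bytes] = {}

        def mget(self, keys: Sequence[str]) -> List[Optional[bytes]]:
            # Missing keys yield None, per the interface contract.
            return [self._data.get(key) for key in keys]

        def mset(self, key_value_pairs: Sequence[Tuple[str, bytes]]) -> None:
            self._data.update(key_value_pairs)

        def mdelete(self, keys: Sequence[str]) -> None:
            for key in keys:
                self._data.pop(key, None)

        def yield_keys(self, *, prefix: Optional[str] = None) -> Iterator[str]:
            for key in self._data:
                if prefix is None or key.startswith(prefix):
                    yield key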
Source code for langchain.schema.embeddings

import asyncio
from abc import ABC, abstractmethod
from typing import List

[docs]class Embeddings(ABC):
    """Interface for embedding models."""

    [docs] @abstractmethod
    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed search docs."""

    [docs] @abstractmethod
    def embed_query(self, text: str) -> List[float]:
        """Embed query text."""

    [docs] async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
        """Asynchronously embed search docs."""
        return await asyncio.get_running_loop().run_in_executor(
            None, self.embed_documents, texts
        )

    [docs] async def aembed_query(self, text: str) -> List[float]:
        """Asynchronously embed query text."""
        return await asyncio.get_running_loop().run_in_executor(
            None, self.embed_query, text
        )
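Only the two sync methods are abstract; aembed_documents and aembed_query fall back to running them in an executor, so a subclass gets async support for free. A toy sketch of the interface (HashEmbeddings is a made-up name, and hash-derived vectors carry no semantic meaning; it only demonstrates the required shapes):

    import hashlib
    from typing import List

    from langchain.schema.embeddings import Embeddings

    class HashEmbeddings(Embeddings):
        """Deterministic toy embedder: hashes text into a fixed-size vector."""

        def __init__(self, size: int = 8) -> None:
            self.size = size

        def _embed(self, text: str) -> List[float]:
            digest = hashlib.sha256(text.encode("utf-8")).digest()
            # Map the first `size` hash bytes into floats in [0, 1).
            return [b / 256 for b in digest[: self.size]]

        def embed_documents(self, texts: List[str]) -> List[List[float]]:
            return [self._embed(t) for t in texts]

        def embed_query(self, text: str) -> List[float]:
            return self._embed(text)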
Source code for langchain.schema.memory

from __future__ import annotations

from abc import ABC, abstractmethod
from typing import Any, Dict, List

from langchain.load.serializable import Serializable

[docs]class BaseMemory(Serializable, ABC):
    """Abstract base class for memory in Chains.

    Memory refers to state in Chains. Memory can be used to store information about
        past executions of a Chain and inject that information into the inputs of
        future executions of the Chain. For example, for conversational Chains Memory
        can be used to store conversations and automatically add them to future model
        prompts so that the model has the necessary context to respond coherently to
        the latest input.

    Example:
        .. code-block:: python

            class SimpleMemory(BaseMemory):
                memories: Dict[str, Any] = dict()

                @property
                def memory_variables(self) -> List[str]:
                    return list(self.memories.keys())

                def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
                    return self.memories

                def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
                    pass

                def clear(self) -> None:
                    pass
    """  # noqa: E501

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    @property
    @abstractmethod
    def memory_variables(self) -> List[str]:
        """The string keys this memory class will add to chain inputs."""

    [docs] @abstractmethod
    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Return key-value pairs given the text input to the chain."""

    [docs] @abstractmethod
    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Save the context of this chain run to memory."""

    [docs] @abstractmethod
    def clear(self) -> None:
        """Clear memory contents."""
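A sketch of the lifecycle a Chain drives against this interface, reusing the SimpleMemory class from the docstring example above (the dict keys are illustrative): load_memory_variables injects stored state into the inputs before a call, and save_context records the exchange afterwards.

    memory = SimpleMemory(memories={"user_name": "Ada"})

    inputs = {"question": "What is my name?"}
    inputs.update(memory.load_memory_variables(inputs))  # inject stored state
    # ... run the chain or model on `inputs` here ...
    memory.save_context(inputs, {"answer": "Your name is Ada."})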
Source code for langchain.schema.messages

from __future__ import annotations

from typing import TYPE_CHECKING, Any, Dict, List, Sequence, Union

from typing_extensions import Literal

from langchain.load.serializable import Serializable
from langchain.pydantic_v1 import Extra, Field

if TYPE_CHECKING:
    from langchain.prompts.chat import ChatPromptTemplate

[docs]def get_buffer_string(
    messages: Sequence[BaseMessage], human_prefix: str = "Human", ai_prefix: str = "AI"
) -> str:
    """Convert a sequence of Messages to strings and concatenate them into one string.

    Args:
        messages: Messages to be converted to strings.
        human_prefix: The prefix to prepend to contents of HumanMessages.
        ai_prefix: The prefix to prepend to contents of AIMessages.

    Returns:
        A single string concatenation of all input messages.

    Example:
        .. code-block:: python

            from langchain.schema import AIMessage, HumanMessage

            messages = [
                HumanMessage(content="Hi, how are you?"),
                AIMessage(content="Good, how are you?"),
            ]
            get_buffer_string(messages)
            # -> "Human: Hi, how are you?\nAI: Good, how are you?"
    """
    string_messages = []
    for m in messages:
        if isinstance(m, HumanMessage):
            role = human_prefix
        elif isinstance(m, AIMessage):
            role = ai_prefix
        elif isinstance(m, SystemMessage):
            role = "System"
        elif isinstance(m, FunctionMessage):
            role = "Function"
        elif isinstance(m, ChatMessage):
            role = m.role
        else:
            raise ValueError(f"Got unsupported message type: {m}")
        message = f"{role}: {m.content}"
        if isinstance(m, AIMessage) and "function_call" in m.additional_kwargs:
            message += f"{m.additional_kwargs['function_call']}"
        string_messages.append(message)

    return "\n".join(string_messages)

[docs]class BaseMessage(Serializable):
    """The base abstract Message class.

    Messages are the inputs and outputs of ChatModels.
    """

    content: Union[str, List[Union[str, Dict]]]
    """The string contents of the message."""

    additional_kwargs: dict = Field(default_factory=dict)
    """Any additional information."""

    type: str

    class Config:
        extra = Extra.allow

    [docs] @classmethod
    def is_lc_serializable(cls) -> bool:
        """Return whether this class is serializable."""
        return True

    def __add__(self, other: Any) -> ChatPromptTemplate:
        from langchain.prompts.chat import ChatPromptTemplate

        prompt = ChatPromptTemplate(messages=[self])
        return prompt + other

[docs]def merge_content(
    first_content: Union[str, List[Union[str, Dict]]],
    second_content: Union[str, List[Union[str, Dict]]],
) -> Union[str, List[Union[str, Dict]]]:
    # If first chunk is a string
    if isinstance(first_content, str):
        # If the second chunk is also a string, then merge them naively
        if isinstance(second_content, str):
            return first_content + second_content
        # If the second chunk is a list, add the first chunk to the start of the list
        else:
            return_list: List[Union[str, Dict]] = [first_content]
            return return_list + second_content
    # If both are lists, merge them naively
    elif isinstance(second_content, List):
        return first_content + second_content
    # If the first content is a list, and the second content is a string
    else:
        # If the last element of the first content is a string
        # Add the second content to the last element
        if isinstance(first_content[-1], str):
            return first_content[:-1] + [first_content[-1] + second_content]
        else:
            # Otherwise, add the second content as a new element of the list
            return first_content + [second_content]

[docs]class BaseMessageChunk(BaseMessage):
    """A Message chunk, which can be concatenated with other Message chunks."""

    def _merge_kwargs_dict(
        self, left: Dict[str, Any], right: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Merge additional_kwargs from another BaseMessageChunk into this one."""
        merged = left.copy()
        for k, v in right.items():
            if k not in merged:
                merged[k] = v
            elif type(merged[k]) != type(v):
                raise ValueError(
                    f'additional_kwargs["{k}"] already exists in this message,'
                    " but with a different type."
                )
            elif isinstance(merged[k], str):
                merged[k] += v
            elif isinstance(merged[k], dict):
                merged[k] = self._merge_kwargs_dict(merged[k], v)
            else:
                raise ValueError(
                    f"Additional kwargs key {k} already exists in this message."
                )
        return merged

    def __add__(self, other: Any) -> BaseMessageChunk:  # type: ignore
        if isinstance(other, BaseMessageChunk):
            # If both are (subclasses of) BaseMessageChunk,
            # concat into a single BaseMessageChunk
            if isinstance(self, ChatMessageChunk):
                return self.__class__(
                    role=self.role,
                    content=merge_content(self.content, other.content),
                    additional_kwargs=self._merge_kwargs_dict(
                        self.additional_kwargs, other.additional_kwargs
                    ),
                )
            return self.__class__(
                content=merge_content(self.content, other.content),
                additional_kwargs=self._merge_kwargs_dict(
                    self.additional_kwargs, other.additional_kwargs
                ),
            )
        else:
            raise TypeError(
                'unsupported operand type(s) for +: "'
                f"{self.__class__.__name__}"
                f'" and "{other.__class__.__name__}"'
            )

[docs]class HumanMessage(BaseMessage):
    """A Message from a human."""

    example: bool = False
    """Whether this Message is being passed in to the model as part of an example
        conversation.
    """

    type: Literal["human"] = "human"

HumanMessage.update_forward_refs()

[docs]class HumanMessageChunk(HumanMessage, BaseMessageChunk):
    """A Human Message chunk."""

    # Ignoring mypy re-assignment here since we're overriding the value
    # to make sure that the chunk variant can be discriminated from the
    # non-chunk variant.
    type: Literal["HumanMessageChunk"] = "HumanMessageChunk"  # type: ignore[assignment] # noqa: E501

[docs]class AIMessage(BaseMessage):
    """A Message from an AI."""

    example: bool = False
    """Whether this Message is being passed in to the model as part of an example
        conversation.
    """

    type: Literal["ai"] = "ai"

AIMessage.update_forward_refs()

[docs]class AIMessageChunk(AIMessage, BaseMessageChunk):
    """A Message chunk from an AI."""

    # Ignoring mypy re-assignment here since we're overriding the value
    # to make sure that the chunk variant can be discriminated from the
    # non-chunk variant.
    type: Literal["AIMessageChunk"] = "AIMessageChunk"  # type: ignore[assignment] # noqa: E501

    def __add__(self, other: Any) -> BaseMessageChunk:  # type: ignore
        if isinstance(other, AIMessageChunk):
            if self.example != other.example:
                raise ValueError(
                    "Cannot concatenate AIMessageChunks with different example values."
                )

            return self.__class__(
                example=self.example,
                content=merge_content(self.content, other.content),
                additional_kwargs=self._merge_kwargs_dict(
                    self.additional_kwargs, other.additional_kwargs
                ),
            )

        return super().__add__(other)

[docs]class SystemMessage(BaseMessage):
    """A Message for priming AI behavior, usually passed in as the first of a sequence
    of input messages.
    """

    type: Literal["system"] = "system"

SystemMessage.update_forward_refs()

[docs]class SystemMessageChunk(SystemMessage, BaseMessageChunk):
    """A System Message chunk."""

    # Ignoring mypy re-assignment here since we're overriding the value
    # to make sure that the chunk variant can be discriminated from the
    # non-chunk variant.
    type: Literal["SystemMessageChunk"] = "SystemMessageChunk"  # type: ignore[assignment] # noqa: E501

[docs]class FunctionMessage(BaseMessage):
    """A Message for passing the result of executing a function back to a model."""

    name: str
    """The name of the function that was executed."""

    type: Literal["function"] = "function"

FunctionMessage.update_forward_refs()

[docs]class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
    """A Function Message chunk."""

    # Ignoring mypy re-assignment here since we're overriding the value
    # to make sure that the chunk variant can be discriminated from the
    # non-chunk variant.
    type: Literal["FunctionMessageChunk"] = "FunctionMessageChunk"  # type: ignore[assignment]

    def __add__(self, other: Any) -> BaseMessageChunk:  # type: ignore
        if isinstance(other, FunctionMessageChunk):
            if self.name != other.name:
                raise ValueError(
                    "Cannot concatenate FunctionMessageChunks with different names."
                )

            return self.__class__(
                name=self.name,
                content=merge_content(self.content, other.content),
                additional_kwargs=self._merge_kwargs_dict(
                    self.additional_kwargs, other.additional_kwargs
                ),
            )

        return super().__add__(other)

[docs]class ToolMessage(BaseMessage):
    """A Message for passing the result of executing a tool back to a model."""

    tool_call_id: str
    """Tool call that this message is responding to."""

    type: Literal["tool"] = "tool"

ToolMessage.update_forward_refs()

[docs]class ToolMessageChunk(ToolMessage, BaseMessageChunk):
    """A Tool Message chunk."""

    # Ignoring mypy re-assignment here since we're overriding the value
    # to make sure that the chunk variant can be discriminated from the
    # non-chunk variant.
    type: Literal["ToolMessageChunk"] = "ToolMessageChunk"  # type: ignore[assignment]

    def __add__(self, other: Any) -> BaseMessageChunk:  # type: ignore
        if isinstance(other, ToolMessageChunk):
            if self.tool_call_id != other.tool_call_id:
                raise ValueError(
                    "Cannot concatenate ToolMessageChunks with different names."
                )

            return self.__class__(
                tool_call_id=self.tool_call_id,
                content=merge_content(self.content, other.content),
                additional_kwargs=self._merge_kwargs_dict(
                    self.additional_kwargs, other.additional_kwargs
                ),
            )

        return super().__add__(other)

[docs]class ChatMessage(BaseMessage):
    """A Message that can be assigned an arbitrary speaker (i.e. role)."""

    role: str
    """The speaker / role of the Message."""

    type: Literal["chat"] = "chat"

ChatMessage.update_forward_refs()

[docs]class ChatMessageChunk(ChatMessage, BaseMessageChunk):
    """A Chat Message chunk."""

    # Ignoring mypy re-assignment here since we're overriding the value
    # to make sure that the chunk variant can be discriminated from the
    # non-chunk variant.
    type: Literal["ChatMessageChunk"] = "ChatMessageChunk"  # type: ignore

    def __add__(self, other: Any) -> BaseMessageChunk:  # type: ignore
        if isinstance(other, ChatMessageChunk):
            if self.role != other.role:
                raise ValueError(
                    "Cannot concatenate ChatMessageChunks with different roles."
                )

            return self.__class__(
                role=self.role,
                content=merge_content(self.content, other.content),
                additional_kwargs=self._merge_kwargs_dict(
                    self.additional_kwargs, other.additional_kwargs
                ),
            )

        return super().__add__(other)

AnyMessage = Union[
    AIMessage, HumanMessage, ChatMessage, SystemMessage, FunctionMessage, ToolMessage
]

def _message_to_dict(message: BaseMessage) -> dict:
    return {"type": message.type, "data": message.dict()}

[docs]def messages_to_dict(messages: Sequence[BaseMessage]) -> List[dict]:
    """Convert a sequence of Messages to a list of dictionaries.

    Args:
        messages: Sequence of messages (as BaseMessages) to convert.

    Returns:
        List of messages as dicts.
    """
    return [_message_to_dict(m) for m in messages]

def _message_from_dict(message: dict) -> BaseMessage:
    _type = message["type"]
    if _type == "human":
        return HumanMessage(**message["data"])
    elif _type == "ai":
        return AIMessage(**message["data"])
    elif _type == "system":
        return SystemMessage(**message["data"])
    elif _type == "chat":
        return ChatMessage(**message["data"])
    elif _type == "function":
        return FunctionMessage(**message["data"])
    elif _type == "tool":
        return ToolMessage(**message["data"])
    else:
        raise ValueError(f"Got unexpected message type: {_type}")

[docs]def messages_from_dict(messages: List[dict]) -> List[BaseMessage]:
    """Convert a sequence of messages from dicts to Message objects.

    Args:
        messages: Sequence of messages (as dicts) to convert.

    Returns:
        List of messages (BaseMessages).
    """
    return [_message_from_dict(m) for m in messages]
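Two behaviors from the classes above are worth a small sketch: message chunks accumulate with + during streaming (via merge_content and _merge_kwargs_dict), and whole conversations round-trip through plain dicts for persistence.

    from langchain.schema.messages import (
        AIMessage,
        AIMessageChunk,
        HumanMessage,
        messages_from_dict,
        messages_to_dict,
    )

    # Streaming: chunk variants concatenate, merging content and kwargs.
    chunk = AIMessageChunk(content="Hello") + AIMessageChunk(content=" world")
    assert chunk.content == "Hello world"

    # Persistence: serialize to dicts and reconstruct the same messages.
    history = [HumanMessage(content="Hi"), AIMessage(content="Hello!")]
    assert messages_from_dict(messages_to_dict(history)) == history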
Source code for langchain.schema.output

from __future__ import annotations

from copy import deepcopy
from typing import Any, Dict, List, Literal, Optional
from uuid import UUID

from langchain.load.serializable import Serializable
from langchain.pydantic_v1 import BaseModel, root_validator
from langchain.schema.messages import BaseMessage, BaseMessageChunk

[docs]class Generation(Serializable):
    """A single text generation output."""

    text: str
    """Generated text output."""

    generation_info: Optional[Dict[str, Any]] = None
    """Raw response from the provider. May include things like the reason for
    finishing or token log probabilities.
    """

    type: Literal["Generation"] = "Generation"
    """Type is used exclusively for serialization purposes."""
    # TODO: add log probs as separate attribute

    [docs] @classmethod
    def is_lc_serializable(cls) -> bool:
        """Return whether this class is serializable."""
        return True

[docs]class GenerationChunk(Generation):
    """A Generation chunk, which can be concatenated with other Generation chunks."""

    def __add__(self, other: GenerationChunk) -> GenerationChunk:
        if isinstance(other, GenerationChunk):
            generation_info = (
                {**(self.generation_info or {}), **(other.generation_info or {})}
                if self.generation_info is not None
                or other.generation_info is not None
                else None
            )
            return GenerationChunk(
                text=self.text + other.text,
                generation_info=generation_info,
            )
        else:
            raise TypeError(
                f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
            )

[docs]class ChatGeneration(Generation):
    """A single chat generation output."""

    text: str = ""
    """*SHOULD NOT BE SET DIRECTLY* The text contents of the output message."""

    message: BaseMessage
    """The message output by the chat model."""

    # Override type to be ChatGeneration, ignore mypy error as this is intentional
    type: Literal["ChatGeneration"] = "ChatGeneration"  # type: ignore[assignment]
    """Type is used exclusively for serialization purposes."""

    @root_validator
    def set_text(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Set the text attribute to be the contents of the message."""
        try:
            values["text"] = values["message"].content
        except (KeyError, AttributeError) as e:
            raise ValueError("Error while initializing ChatGeneration") from e
        return values

[docs]class ChatGenerationChunk(ChatGeneration):
    """A ChatGeneration chunk, which can be concatenated with other
    ChatGeneration chunks.

    Attributes:
        message: The message chunk output by the chat model.
    """

    message: BaseMessageChunk
    # Override type to be ChatGeneration, ignore mypy error as this is intentional
    type: Literal["ChatGenerationChunk"] = "ChatGenerationChunk"  # type: ignore[assignment] # noqa: E501
    """Type is used exclusively for serialization purposes."""

    def __add__(self, other: ChatGenerationChunk) -> ChatGenerationChunk:
        if isinstance(other, ChatGenerationChunk):
            generation_info = (
                {**(self.generation_info or {}), **(other.generation_info or {})}
                if self.generation_info is not None
                or other.generation_info is not None
                else None
            )
            return ChatGenerationChunk(
                message=self.message + other.message,
                generation_info=generation_info,
            )
        else:
            raise TypeError(
                f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
            )

[docs]class RunInfo(BaseModel):
    """Class that contains metadata for a single execution of a Chain or model."""

    run_id: UUID
    """A unique identifier for the model or chain run."""

[docs]class ChatResult(BaseModel):
    """Class that contains all results for a single chat model call."""

    generations: List[ChatGeneration]
    """List of the chat generations. This is a List because an input can have multiple
        candidate generations.
    """

    llm_output: Optional[dict] = None
    """For arbitrary LLM provider specific output."""

[docs]class LLMResult(BaseModel):
    """Class that contains all results for a batched LLM call."""

    generations: List[List[Generation]]
    """List of generated outputs. This is a List[List[]] because
    each input could have multiple candidate generations."""

    llm_output: Optional[dict] = None
    """Arbitrary LLM provider-specific output."""

    run: Optional[List[RunInfo]] = None
    """List of metadata info for model call for each input."""

    [docs] def flatten(self) -> List[LLMResult]:
        """Flatten generations into a single list.

        Unpack List[List[Generation]] -> List[LLMResult] where each returned LLMResult
            contains only a single Generation. If token usage information is available,
            it is kept only for the LLMResult corresponding to the top-choice
            Generation, to avoid over-counting of token usage downstream.

        Returns:
            List of LLMResults where each returned LLMResult contains a single
                Generation.
        """
        llm_results = []
        for i, gen_list in enumerate(self.generations):
            # Avoid double counting tokens in OpenAICallback
            if i == 0:
                llm_results.append(
                    LLMResult(
                        generations=[gen_list],
                        llm_output=self.llm_output,
                    )
                )
            else:
                if self.llm_output is not None:
                    llm_output = deepcopy(self.llm_output)
                    llm_output["token_usage"] = dict()
                else:
                    llm_output = None
                llm_results.append(
                    LLMResult(
                        generations=[gen_list],
                        llm_output=llm_output,
                    )
                )
        return llm_results

    def __eq__(self, other: object) -> bool:
        """Check for LLMResult equality by ignoring any metadata related to runs."""
        if not isinstance(other, LLMResult):
            return NotImplemented
        return (
            self.generations == other.generations
            and self.llm_output == other.llm_output
        )
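A short sketch of how these containers behave in practice: GenerationChunk supports + so streamed pieces can be accumulated, and flatten() splits one batched result into a single-generation LLMResult per input.

    from langchain.schema.output import Generation, GenerationChunk, LLMResult

    # Streaming: texts concatenate and generation_info dicts are merged.
    chunk = GenerationChunk(text="Hello") + GenerationChunk(
        text=" world", generation_info={"finish_reason": "stop"}
    )
    assert chunk.text == "Hello world"

    # A batched call with two inputs flattens into two LLMResults.
    batched = LLMResult(generations=[[Generation(text="a")], [Generation(text="b")]])
    for single in batched.flatten():
        assert len(single.generations) == 1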
Source code for langchain.schema.callbacks.manager

from __future__ import annotations

import asyncio
import functools
import logging
import os
import uuid
from concurrent.futures import ThreadPoolExecutor
from contextlib import asynccontextmanager, contextmanager
from contextvars import ContextVar
from typing import (
    TYPE_CHECKING,
    Any,
    AsyncGenerator,
    Coroutine,
    Dict,
    Generator,
    List,
    Optional,
    Sequence,
    Tuple,
    Type,
    TypeVar,
    Union,
    cast,
)
from uuid import UUID

from langsmith import utils as ls_utils
from langsmith.run_helpers import get_run_tree_context
from tenacity import RetryCallState

from langchain.schema import (
    AgentAction,
    AgentFinish,
    Document,
    LLMResult,
)
from langchain.schema.callbacks.base import (
    BaseCallbackHandler,
    BaseCallbackManager,
    Callbacks,
    ChainManagerMixin,
    LLMManagerMixin,
    RetrieverManagerMixin,
    RunManagerMixin,
    ToolManagerMixin,
)
from langchain.schema.callbacks.stdout import StdOutCallbackHandler
from langchain.schema.callbacks.tracers import run_collector
from langchain.schema.callbacks.tracers.langchain import (
    LangChainTracer,
)
from langchain.schema.callbacks.tracers.langchain_v1 import (
    LangChainTracerV1,
    TracerSessionV1,
)
from langchain.schema.callbacks.tracers.stdout import ConsoleCallbackHandler
from langchain.schema.messages import BaseMessage, get_buffer_string
from langchain.schema.output import ChatGenerationChunk, GenerationChunk

if TYPE_CHECKING:
    from langsmith import Client as LangSmithClient

logger = logging.getLogger(__name__)

tracing_callback_var: ContextVar[Optional[LangChainTracerV1]] = ContextVar(  # noqa: E501
    "tracing_callback", default=None
)

tracing_v2_callback_var: ContextVar[Optional[LangChainTracer]] = ContextVar(  # noqa: E501
    "tracing_callback_v2", default=None
)
run_collector_var: ContextVar[
    Optional[run_collector.RunCollectorCallbackHandler]
] = ContextVar(  # noqa: E501
    "run_collector", default=None
)

def _get_debug() -> bool:
    from langchain.globals import get_debug

    return get_debug()

[docs]@contextmanager
def tracing_enabled(
    session_name: str = "default",
) -> Generator[TracerSessionV1, None, None]:
    """Get the Deprecated LangChainTracer in a context manager.

    Args:
        session_name (str, optional): The name of the session.
            Defaults to "default".

    Returns:
        TracerSessionV1: The LangChainTracer session.

    Example:
        >>> with tracing_enabled() as session:
        ...     # Use the LangChainTracer session
    """
    cb = LangChainTracerV1()
    session = cast(TracerSessionV1, cb.load_session(session_name))
    try:
        tracing_callback_var.set(cb)
        yield session
    finally:
        tracing_callback_var.set(None)

[docs]@contextmanager
def tracing_v2_enabled(
    project_name: Optional[str] = None,
    *,
    example_id: Optional[Union[str, UUID]] = None,
    tags: Optional[List[str]] = None,
    client: Optional[LangSmithClient] = None,
) -> Generator[LangChainTracer, None, None]:
    """Instruct LangChain to log all runs in context to LangSmith.

    Args:
        project_name (str, optional): The name of the project.
            Defaults to "default".
        example_id (str or UUID, optional): The ID of the example.
            Defaults to None.
        tags (List[str], optional): The tags to add to the run.
            Defaults to None.

    Returns:
        None

    Example:
        >>> with tracing_v2_enabled():
        ...     # LangChain code will automatically be traced

        You can use this to fetch the LangSmith run URL:

        >>> with tracing_v2_enabled() as cb:
        ...     chain.invoke("foo")
        ...     run_url = cb.get_run_url()
    """
    if isinstance(example_id, str):
        example_id = UUID(example_id)
    cb = LangChainTracer(
        example_id=example_id,
        project_name=project_name,
        tags=tags,
        client=client,
    )
    try:
        tracing_v2_callback_var.set(cb)
        yield cb
    finally:
        tracing_v2_callback_var.set(None)

[docs]@contextmanager
def collect_runs() -> Generator[run_collector.RunCollectorCallbackHandler, None, None]:
    """Collect all run traces in context.

    Returns:
        run_collector.RunCollectorCallbackHandler: The run collector callback handler.

    Example:
        >>> with collect_runs() as runs_cb:
                chain.invoke("foo")
                run_id = runs_cb.traced_runs[0].id
    """
    cb = run_collector.RunCollectorCallbackHandler()
    run_collector_var.set(cb)
    yield cb
    run_collector_var.set(None)

def _get_trace_callbacks(
    project_name: Optional[str] = None,
    example_id: Optional[Union[str, UUID]] = None,
    callback_manager: Optional[Union[CallbackManager, AsyncCallbackManager]] = None,
) -> Callbacks:
    if _tracing_v2_is_enabled():
        project_name_ = project_name or _get_tracer_project()
        tracer = tracing_v2_callback_var.get() or LangChainTracer(
            project_name=project_name_,
            example_id=example_id,
        )
        if callback_manager is None:
            cb = cast(Callbacks, [tracer])
        else:
            if not any(
                isinstance(handler, LangChainTracer)
                for handler in callback_manager.handlers
            ):
                callback_manager.add_handler(tracer, True)
                # If it already has a LangChainTracer, we don't need to add another one.
                # this would likely mess up the trace hierarchy.
            cb = callback_manager
    else:
        cb = None
    return cb

[docs]@contextmanager
def trace_as_chain_group(
    group_name: str,
    callback_manager: Optional[CallbackManager] = None,
    *,
    inputs: Optional[Dict[str, Any]] = None,
    project_name: Optional[str] = None,
    example_id: Optional[Union[str, UUID]] = None,
    run_id: Optional[UUID] = None,
    tags: Optional[List[str]] = None,
) -> Generator[CallbackManagerForChainGroup, None, None]:
    """Get a callback manager for a chain group in a context manager.

    Useful for grouping different calls together as a single run even if
    they aren't composed in a single chain.

    Args:
        group_name (str): The name of the chain group.
        callback_manager (CallbackManager, optional): The callback manager to use.
        inputs (Dict[str, Any], optional): The inputs to the chain group.
        project_name (str, optional): The name of the project.
            Defaults to None.
        example_id (str or UUID, optional): The ID of the example.
            Defaults to None.
        run_id (UUID, optional): The ID of the run.
        tags (List[str], optional): The inheritable tags to apply to all runs.
            Defaults to None.

    Note: must have LANGCHAIN_TRACING_V2 env var set to true to see the trace in LangSmith.

    Returns:
        CallbackManagerForChainGroup: The callback manager for the chain group.

    Example:
        .. code-block:: python

            llm_input = "Foo"
            with trace_as_chain_group("group_name", inputs={"input": llm_input}) as manager:
                # Use the callback manager for the chain group
                res = llm.predict(llm_input, callbacks=manager)
                manager.on_chain_end({"output": res})
    """  # noqa: E501
    cb = _get_trace_callbacks(
        project_name, example_id, callback_manager=callback_manager
    )
    cm = CallbackManager.configure(
        inheritable_callbacks=cb,
        inheritable_tags=tags,
    )
    run_manager = cm.on_chain_start({"name": group_name}, inputs or {}, run_id=run_id)
    child_cm = run_manager.get_child()
    group_cm = CallbackManagerForChainGroup(
        child_cm.handlers,
        child_cm.inheritable_handlers,
        child_cm.parent_run_id,
        parent_run_manager=run_manager,
        tags=child_cm.tags,
        inheritable_tags=child_cm.inheritable_tags,
        metadata=child_cm.metadata,
        inheritable_metadata=child_cm.inheritable_metadata,
    )
    try:
        yield group_cm
    except Exception as e:
        if not group_cm.ended:
            run_manager.on_chain_error(e)
        raise e
    else:
        if not group_cm.ended:
            run_manager.on_chain_end({})

[docs]@asynccontextmanager
async def atrace_as_chain_group(
    group_name: str,
    callback_manager: Optional[AsyncCallbackManager] = None,
    *,
    inputs: Optional[Dict[str, Any]] = None,
    project_name: Optional[str] = None,
    example_id: Optional[Union[str, UUID]] = None,
    run_id: Optional[UUID] = None,
    tags: Optional[List[str]] = None,
) -> AsyncGenerator[AsyncCallbackManagerForChainGroup, None]:
    """Get an async callback manager for a chain group in a context manager.

    Useful for grouping different async calls together as a single run even if
    they aren't composed in a single chain.

    Args:
        group_name (str): The name of the chain group.
        callback_manager (AsyncCallbackManager, optional): The async callback manager to use,
            which manages tracing and other callback behavior.
        project_name (str, optional): The name of the project.
            Defaults to None.
        example_id (str or UUID, optional): The ID of the example.
            Defaults to None.
        run_id (UUID, optional): The ID of the run.
        tags (List[str], optional): The inheritable tags to apply to all runs.
            Defaults to None.

    Returns:
        AsyncCallbackManager: The async callback manager for the chain group.

    Note: must have LANGCHAIN_TRACING_V2 env var set to true to see the trace in LangSmith.

    Example:
        .. code-block:: python

            llm_input = "Foo"
            async with atrace_as_chain_group("group_name", inputs={"input": llm_input}) as manager:
                # Use the async callback manager for the chain group
                res = await llm.apredict(llm_input, callbacks=manager)
                await manager.on_chain_end({"output": res})
    """  # noqa: E501
    cb = _get_trace_callbacks(
        project_name, example_id, callback_manager=callback_manager
    )
    cm = AsyncCallbackManager.configure(inheritable_callbacks=cb, inheritable_tags=tags)

    run_manager = await cm.on_chain_start(
        {"name": group_name}, inputs or {}, run_id=run_id
    )
    child_cm = run_manager.get_child()
    group_cm = AsyncCallbackManagerForChainGroup(
        child_cm.handlers,
        child_cm.inheritable_handlers,
        child_cm.parent_run_id,
        parent_run_manager=run_manager,
        tags=child_cm.tags,
        inheritable_tags=child_cm.inheritable_tags,
        metadata=child_cm.metadata,
        inheritable_metadata=child_cm.inheritable_metadata,
    )
    try:
        yield group_cm
    except Exception as e:
        if not group_cm.ended:
            await run_manager.on_chain_error(e)
        raise e
    else:
        if not group_cm.ended:
            await run_manager.on_chain_end({})

[docs]def handle_event(
    handlers: List[BaseCallbackHandler],
    event_name: str,
    ignore_condition_name: Optional[str],
    *args: Any,
    **kwargs: Any,
) -> None:
    """Generic event handler for CallbackManager.

    Note: This function is used by langserve to handle events.

    Args:
        handlers: The list of handlers that will handle the event
        event_name: The name of the event (e.g., "on_llm_start")
        ignore_condition_name: Name of the attribute defined on handler
            that if True will cause the handler to be skipped for the given event
        *args: The arguments to pass to the event handler
        **kwargs: The keyword arguments to pass to the event handler
    """
    coros: List[Coroutine[Any, Any, Any]] = []

    try:
        message_strings: Optional[List[str]] = None
        for handler in handlers:
            try:
                if ignore_condition_name is None or not getattr(
                    handler, ignore_condition_name
                ):
                    event = getattr(handler, event_name)(*args, **kwargs)
                    if asyncio.iscoroutine(event):
                        coros.append(event)
            except NotImplementedError as e:
                if event_name == "on_chat_model_start":
                    if message_strings is None:
                        message_strings = [get_buffer_string(m) for m in args[1]]
                    handle_event(
                        [handler],
                        "on_llm_start",
                        "ignore_llm",
                        args[0],
                        message_strings,
                        *args[2:],
                        **kwargs,
                    )
                else:
                    handler_name = handler.__class__.__name__
                    logger.warning(
                        f"NotImplementedError in {handler_name}.{event_name}"
                        f" callback: {repr(e)}"
                    )
            except Exception as e:
                logger.warning(
                    f"Error in {handler.__class__.__name__}.{event_name} callback:"
                    f" {repr(e)}"
                )
                if handler.raise_error:
                    raise e
    finally:
        if coros:
            try:
                # Raises RuntimeError if there is no current event loop.
                asyncio.get_running_loop()
                loop_running = True
            except RuntimeError:
                loop_running = False

            if loop_running:
                # If we try to submit this coroutine to the running loop
                # we end up in a deadlock, as we'd have gotten here from a
                # running coroutine, which we cannot interrupt to run this one.
                # The solution is to create a new loop in a new thread.
                with ThreadPoolExecutor(1) as executor:
                    executor.submit(_run_coros, coros).result()
            else:
                _run_coros(coros)

def _run_coros(coros: List[Coroutine[Any, Any, Any]]) -> None:
    if hasattr(asyncio, "Runner"):  # Python 3.11+
        # Run the coroutines in a new event loop, taking care to
        # - install signal handlers
        # - run pending tasks scheduled by `coros`
        # - close asyncgens and executors
        # - close the loop
        with asyncio.Runner() as runner:
            # Run the coroutine, get the result
            for coro in coros:
                runner.run(coro)

            # Run pending tasks scheduled by coros until they are all done
            while pending := asyncio.all_tasks(runner.get_loop()):
                runner.run(asyncio.wait(pending))
    else:
        # Before Python 3.11 we need to run each coroutine in a new event loop
        # as the Runner api is not available.
        for coro in coros:
            asyncio.run(coro)

async def _ahandle_event_for_handler(
    handler: BaseCallbackHandler,
    event_name: str,
    ignore_condition_name: Optional[str],
    *args: Any,
    **kwargs: Any,
) -> None:
    try:
        if ignore_condition_name is None or not getattr(handler, ignore_condition_name):
            event = getattr(handler, event_name)
            if asyncio.iscoroutinefunction(event):
                await event(*args, **kwargs)
            else:
                if handler.run_inline:
                    event(*args, **kwargs)
                else:
                    await asyncio.get_event_loop().run_in_executor(
                        None, functools.partial(event, *args, **kwargs)
                    )
    except NotImplementedError as e:
        if event_name == "on_chat_model_start":
            message_strings = [get_buffer_string(m) for m in args[1]]
            await _ahandle_event_for_handler(
                handler,
                "on_llm_start",
                "ignore_llm",
                args[0],
                message_strings,
                *args[2:],
                **kwargs,
            )
        else:
            logger.warning(
                f"NotImplementedError in {handler.__class__.__name__}.{event_name}"
                f" callback: {repr(e)}"
            )
    except Exception as e:
        logger.warning(
            f"Error in {handler.__class__.__name__}.{event_name} callback:"
            f" {repr(e)}"
        )
        if handler.raise_error:
            raise e

[docs]async def ahandle_event(
    handlers: List[BaseCallbackHandler],
    event_name: str,
    ignore_condition_name: Optional[str],
    *args: Any,
    **kwargs: Any,
) -> None:
    """Generic event handler for AsyncCallbackManager.

    Note: This function is used by langserve to handle events.

    Args:
        handlers: The list of handlers that will handle the event
        event_name: The name of the event (e.g., "on_llm_start")
        ignore_condition_name: Name of the attribute defined on handler
            that if True will cause the handler to be skipped for the given event
        *args: The arguments to pass to the event handler
        **kwargs: The keyword arguments to pass to the event handler
    """
    for handler in [h for h in handlers if h.run_inline]:
        await _ahandle_event_for_handler(
            handler, event_name, ignore_condition_name, *args, **kwargs
        )
    await asyncio.gather(
        *(
            _ahandle_event_for_handler(
                handler, event_name, ignore_condition_name, *args, **kwargs
            )
            for handler in handlers
            if not handler.run_inline
        )
    )

BRM = TypeVar("BRM", bound="BaseRunManager")

[docs]class BaseRunManager(RunManagerMixin):
    """Base class for run manager (a bound callback manager)."""

    [docs] def __init__(
        self,
        *,
        run_id: UUID,
        handlers: List[BaseCallbackHandler],
        inheritable_handlers: List[BaseCallbackHandler],
        parent_run_id: Optional[UUID] = None,
        tags: Optional[List[str]] = None,
        inheritable_tags: Optional[List[str]] = None,
        metadata: Optional[Dict[str, Any]] = None,
        inheritable_metadata: Optional[Dict[str, Any]] = None,
    ) -> None:
        """Initialize the run manager.

        Args:
            run_id (UUID): The ID of the run.
            handlers (List[BaseCallbackHandler]): The list of handlers.
            inheritable_handlers (List[BaseCallbackHandler]):
                The list of inheritable handlers.
            parent_run_id (UUID, optional): The ID of the parent run.
                Defaults to None.
            tags (Optional[List[str]]): The list of tags.
            inheritable_tags (Optional[List[str]]): The list of inheritable tags.
            metadata (Optional[Dict[str, Any]]): The metadata.
            inheritable_metadata (Optional[Dict[str, Any]]): The inheritable metadata.
        """
        self.run_id = run_id
        self.handlers = handlers
        self.inheritable_handlers = inheritable_handlers
        self.parent_run_id = parent_run_id
        self.tags = tags or []
        self.inheritable_tags = inheritable_tags or []
        self.metadata = metadata or {}
        self.inheritable_metadata = inheritable_metadata or {}

    [docs] @classmethod
    def get_noop_manager(cls: Type[BRM]) -> BRM:
        """Return a manager that doesn't perform any operations.

        Returns:
            BaseRunManager: The noop manager.
        """
        return cls(
            run_id=uuid.uuid4(),
            handlers=[],
            inheritable_handlers=[],
            tags=[],
            inheritable_tags=[],
            metadata={},
            inheritable_metadata={},
        )

[docs]class RunManager(BaseRunManager):
    """Sync Run Manager."""

    [docs] def on_text(
        self,
        text: str,
        **kwargs: Any,
    ) -> Any:
        """Run when text is received.

        Args:
            text (str): The received text.

        Returns:
            Any: The result of the callback.
        """
        handle_event(
            self.handlers,
            "on_text",
            None,
            text,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            **kwargs,
        )

    [docs] def on_retry(
        self,
        retry_state: RetryCallState,
        **kwargs: Any,
    ) -> None:
        handle_event(
            self.handlers,
            "on_retry",
            "ignore_retry",
            retry_state,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            **kwargs,
        )

[docs]class ParentRunManager(RunManager):
    """Sync Parent Run Manager."""

    [docs] def get_child(self, tag: Optional[str] = None) -> CallbackManager:
        """Get a child callback manager.

        Args:
            tag (str, optional): The tag for the child callback manager.
                Defaults to None.

        Returns:
            CallbackManager: The child callback manager.
        """
        manager = CallbackManager(handlers=[], parent_run_id=self.run_id)
        manager.set_handlers(self.inheritable_handlers)
        manager.add_tags(self.inheritable_tags)
        manager.add_metadata(self.inheritable_metadata)
        if tag is not None:
            manager.add_tags([tag], False)
        return manager

[docs]class AsyncRunManager(BaseRunManager):
    """Async Run Manager."""

    [docs] async def on_text(
        self,
        text: str,
        **kwargs: Any,
    ) -> Any:
        """Run when text is received.

        Args:
            text (str): The received text.

        Returns:
            Any: The result of the callback.
        """
        await ahandle_event(
            self.handlers,
            "on_text",
            None,
            text,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            **kwargs,
        )

    [docs] async def on_retry(
        self,
        retry_state: RetryCallState,
        **kwargs: Any,
    ) -> None:
        await ahandle_event(
            self.handlers,
            "on_retry",
            "ignore_retry",
            retry_state,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            **kwargs,
        )

[docs]class AsyncParentRunManager(AsyncRunManager):
    """Async Parent Run Manager."""

    [docs] def get_child(self, tag: Optional[str] = None) -> AsyncCallbackManager:
        """Get a child callback manager.

        Args:
            tag (str, optional): The tag for the child callback manager.
                Defaults to None.

        Returns:
            AsyncCallbackManager: The child callback manager.
        """
        manager = AsyncCallbackManager(handlers=[], parent_run_id=self.run_id)
        manager.set_handlers(self.inheritable_handlers)
        manager.add_tags(self.inheritable_tags)
        manager.add_metadata(self.inheritable_metadata)
        if tag is not None:
            manager.add_tags([tag], False)
        return manager

[docs]class CallbackManagerForLLMRun(RunManager, LLMManagerMixin):
    """Callback manager for LLM run."""

    [docs] def on_llm_new_token(
        self,
        token: str,
        *,
        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
        **kwargs: Any,
    ) -> None:
        """Run when LLM generates a new token.

        Args:
            token (str): The new token.
        """
        handle_event(
            self.handlers,
            "on_llm_new_token",
            "ignore_llm",
            token=token,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            chunk=chunk,
            **kwargs,
        )

    [docs] def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Run when LLM ends running.

        Args:
            response (LLMResult): The LLM result.
        """
        handle_event(
            self.handlers,
            "on_llm_end",
            "ignore_llm",
            response,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            **kwargs,
        )

    [docs] def on_llm_error(
        self,
        error: BaseException,
        **kwargs: Any,
    ) -> None:
        """Run when LLM errors.

        Args:
            error (Exception or KeyboardInterrupt): The error.
        """
        handle_event(
            self.handlers,
            "on_llm_error",
            "ignore_llm",
            error,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            **kwargs,
        )

[docs]class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin):
    """Async callback manager for LLM run."""

    [docs] async def on_llm_new_token(
        self,
        token: str,
        *,
        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
        **kwargs: Any,
    ) -> None:
        """Run when LLM generates a new token.

        Args:
            token (str): The new token.
        """
        await ahandle_event(
            self.handlers,
            "on_llm_new_token",
            "ignore_llm",
            token,
            chunk=chunk,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            **kwargs,
        )

    [docs] async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Run when LLM ends running.

        Args:
            response (LLMResult): The LLM result.
        """
        await ahandle_event(
            self.handlers,
            "on_llm_end",
            "ignore_llm",
            response,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            **kwargs,
        )

    [docs] async def on_llm_error(
        self,
        error: BaseException,
        **kwargs: Any,
    ) -> None:
        """Run when LLM errors.

        Args:
            error (Exception or KeyboardInterrupt): The error.
        """
        await ahandle_event(
            self.handlers,
            "on_llm_error",
            "ignore_llm",
            error,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            **kwargs,
        )

[docs]class CallbackManagerForChainRun(ParentRunManager, ChainManagerMixin):
    """Callback manager for chain run."""

    [docs] def on_chain_end(self, outputs: Union[Dict[str, Any], Any], **kwargs: Any) -> None:
        """Run when chain ends running.

        Args:
            outputs (Union[Dict[str, Any], Any]): The outputs of the chain.
        """
        handle_event(
            self.handlers,
            "on_chain_end",
            "ignore_chain",
            outputs,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            **kwargs,
        )

    [docs] def on_chain_error(
        self,
        error: BaseException,
        **kwargs: Any,
    ) -> None:
        """Run when chain errors.

        Args:
            error (Exception or KeyboardInterrupt): The error.
        """
        handle_event(
            self.handlers,
            "on_chain_error",
            "ignore_chain",
            error,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            **kwargs,
        )

    [docs] def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
        """Run when agent action is received.

        Args:
            action (AgentAction): The agent action.

        Returns:
            Any: The result of the callback.
        """
        handle_event(
            self.handlers,
            "on_agent_action",
            "ignore_agent",
            action,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            **kwargs,
        )

    [docs] def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
        """Run when agent finish is received.

        Args:
            finish (AgentFinish): The agent finish.

        Returns:
            Any: The result of the callback.
        """
        handle_event(
            self.handlers,
            "on_agent_finish",
            "ignore_agent",
            finish,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            **kwargs,
        )

[docs]class AsyncCallbackManagerForChainRun(AsyncParentRunManager, ChainManagerMixin):
    """Async callback manager for chain run."""

    [docs] async def on_chain_end(
        self, outputs: Union[Dict[str, Any], Any], **kwargs: Any
    ) -> None:
        """Run when chain ends running.

        Args:
            outputs (Union[Dict[str, Any], Any]): The outputs of the chain.
        """
        await ahandle_event(
            self.handlers,
            "on_chain_end",
            "ignore_chain",
            outputs,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            **kwargs,
        )

    [docs] async def on_chain_error(
        self,
        error: BaseException,
        **kwargs: Any,
    ) -> None:
        """Run when chain errors.

        Args:
            error (Exception or KeyboardInterrupt): The error.
        """
        await ahandle_event(
            self.handlers,
            "on_chain_error",
            "ignore_chain",
            error,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            **kwargs,
        )

    [docs] async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
        """Run when agent action is received.

        Args:
            action (AgentAction): The agent action.

        Returns:
            Any: The result of the callback.
        """
        await ahandle_event(
            self.handlers,
            "on_agent_action",
            "ignore_agent",
            action,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            **kwargs,
        )

    [docs] async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
        """Run when agent finish is received.

        Args:
            finish (AgentFinish): The agent finish.

        Returns:
            Any: The result of the callback.
        """
        await ahandle_event(
            self.handlers,
            "on_agent_finish",
            "ignore_agent",
            finish,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            **kwargs,
        )

[docs]class CallbackManagerForToolRun(ParentRunManager, ToolManagerMixin):
    """Callback manager for tool run."""

    [docs] def on_tool_end(
        self,
        output: str,
        **kwargs: Any,
    ) -> None:
        """Run when tool ends running.

        Args:
            output (str): The output of the tool.
        """
        handle_event(
            self.handlers,
            "on_tool_end",
            "ignore_agent",
            output,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            **kwargs,
        )

    [docs] def on_tool_error(
        self,
        error: BaseException,
self, error: BaseException, **kwargs: Any, ) -> None: """Run when tool errors. Args: error (Exception or KeyboardInterrupt): The error. """ handle_event( self.handlers, "on_tool_error", "ignore_agent", error, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) [docs]class AsyncCallbackManagerForToolRun(AsyncParentRunManager, ToolManagerMixin): """Async callback manager for tool run.""" [docs] async def on_tool_end(self, output: str, **kwargs: Any) -> None: """Run when tool ends running. Args: output (str): The output of the tool. """ await ahandle_event( self.handlers, "on_tool_end", "ignore_agent", output, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) [docs] async def on_tool_error( self, error: BaseException, **kwargs: Any, ) -> None: """Run when tool errors. Args: error (Exception or KeyboardInterrupt): The error. """ await ahandle_event( self.handlers, "on_tool_error", "ignore_agent", error, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) [docs]class CallbackManagerForRetrieverRun(ParentRunManager, RetrieverManagerMixin):
lang/api.python.langchain.com/en/latest/_modules/langchain/schema/callbacks/manager.html
8cf9574238da-21
"""Callback manager for retriever run.""" [docs] def on_retriever_end( self, documents: Sequence[Document], **kwargs: Any, ) -> None: """Run when retriever ends running.""" handle_event( self.handlers, "on_retriever_end", "ignore_retriever", documents, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) [docs] def on_retriever_error( self, error: BaseException, **kwargs: Any, ) -> None: """Run when retriever errors.""" handle_event( self.handlers, "on_retriever_error", "ignore_retriever", error, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) [docs]class AsyncCallbackManagerForRetrieverRun( AsyncParentRunManager, RetrieverManagerMixin, ): """Async callback manager for retriever run.""" [docs] async def on_retriever_end( self, documents: Sequence[Document], **kwargs: Any ) -> None: """Run when retriever ends running.""" await ahandle_event( self.handlers, "on_retriever_end", "ignore_retriever", documents, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) [docs] async def on_retriever_error( self,
lang/api.python.langchain.com/en/latest/_modules/langchain/schema/callbacks/manager.html
8cf9574238da-22
) [docs] async def on_retriever_error( self, error: BaseException, **kwargs: Any, ) -> None: """Run when retriever errors.""" await ahandle_event( self.handlers, "on_retriever_error", "ignore_retriever", error, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) [docs]class CallbackManager(BaseCallbackManager): """Callback manager that handles callbacks from LangChain.""" [docs] def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any, ) -> List[CallbackManagerForLLMRun]: """Run when LLM starts running. Args: serialized (Dict[str, Any]): The serialized LLM. prompts (List[str]): The list of prompts. run_id (UUID, optional): The ID of the run. Defaults to None. Returns: List[CallbackManagerForLLMRun]: A callback manager for each prompt as an LLM run. """ managers = [] for prompt in prompts: run_id_ = uuid.uuid4() handle_event( self.handlers, "on_llm_start", "ignore_llm", serialized, [prompt], run_id=run_id_, parent_run_id=self.parent_run_id, tags=self.tags, metadata=self.metadata, **kwargs, ) managers.append( CallbackManagerForLLMRun( run_id=run_id_,
lang/api.python.langchain.com/en/latest/_modules/langchain/schema/callbacks/manager.html
8cf9574238da-23
CallbackManagerForLLMRun( run_id=run_id_, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, metadata=self.metadata, inheritable_metadata=self.inheritable_metadata, ) ) return managers [docs] def on_chat_model_start( self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs: Any, ) -> List[CallbackManagerForLLMRun]: """Run when LLM starts running. Args: serialized (Dict[str, Any]): The serialized LLM. messages (List[List[BaseMessage]]): The list of messages. run_id (UUID, optional): The ID of the run. Defaults to None. Returns: List[CallbackManagerForLLMRun]: A callback manager for each list of messages as an LLM run. """ managers = [] for message_list in messages: run_id_ = uuid.uuid4() handle_event( self.handlers, "on_chat_model_start", "ignore_chat_model", serialized, [message_list], run_id=run_id_, parent_run_id=self.parent_run_id, tags=self.tags, metadata=self.metadata, **kwargs, ) managers.append( CallbackManagerForLLMRun( run_id=run_id_, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags,
[docs]    def on_chain_start(
        self,
        serialized: Dict[str, Any],
        inputs: Union[Dict[str, Any], Any],
        run_id: Optional[UUID] = None,
        **kwargs: Any,
    ) -> CallbackManagerForChainRun:
        """Run when chain starts running.

        Args:
            serialized (Dict[str, Any]): The serialized chain.
            inputs (Union[Dict[str, Any], Any]): The inputs to the chain.
            run_id (UUID, optional): The ID of the run. Defaults to None.

        Returns:
            CallbackManagerForChainRun: The callback manager for the chain run.
        """
        if run_id is None:
            run_id = uuid.uuid4()
        handle_event(
            self.handlers,
            "on_chain_start",
            "ignore_chain",
            serialized,
            inputs,
            run_id=run_id,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            metadata=self.metadata,
            **kwargs,
        )
        return CallbackManagerForChainRun(
            run_id=run_id,
            handlers=self.handlers,
            inheritable_handlers=self.inheritable_handlers,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            inheritable_tags=self.inheritable_tags,
            metadata=self.metadata,
            inheritable_metadata=self.inheritable_metadata,
        )
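    # Illustrative sketch (not part of this module): a manual chain-run
    # lifecycle. get_child() returns a CallbackManager whose parent_run_id is
    # this run's run_id, which is how nested runs are linked to their parent.
    #
    #     cm = CallbackManager.configure()
    #     run = cm.on_chain_start({"name": "fake-chain"}, {"question": "hi"})
    #     child = run.get_child()  # hand this to nested LLM/tool calls
    #     run.on_chain_end({"answer": "ok"})  # or run.on_chain_error(exc)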
[docs]    def on_tool_start(
        self,
        serialized: Dict[str, Any],
        input_str: str,
        run_id: Optional[UUID] = None,
        parent_run_id: Optional[UUID] = None,
        **kwargs: Any,
    ) -> CallbackManagerForToolRun:
        """Run when tool starts running.

        Args:
            serialized (Dict[str, Any]): The serialized tool.
            input_str (str): The input to the tool.
            run_id (UUID, optional): The ID of the run. Defaults to None.
            parent_run_id (UUID, optional): The ID of the parent run.
                Defaults to None.

        Returns:
            CallbackManagerForToolRun: The callback manager for the tool run.
        """
        if run_id is None:
            run_id = uuid.uuid4()
        handle_event(
            self.handlers,
            "on_tool_start",
            "ignore_agent",
            serialized,
            input_str,
            run_id=run_id,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            metadata=self.metadata,
            **kwargs,
        )
        return CallbackManagerForToolRun(
            run_id=run_id,
            handlers=self.handlers,
            inheritable_handlers=self.inheritable_handlers,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            inheritable_tags=self.inheritable_tags,
            metadata=self.metadata,
            inheritable_metadata=self.inheritable_metadata,
        )
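    # Illustrative sketch (not part of this module): a manual tool-run
    # lifecycle. Note the gating flag above is "ignore_agent": handlers that
    # ignore agent events will also skip tool events.
    #
    #     cm = CallbackManager.configure()
    #     run = cm.on_tool_start({"name": "search"}, "weather in SF")
    #     run.on_tool_end("sunny")  # or run.on_tool_error(exc) on failure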
[docs]    def on_retriever_start(
        self,
        serialized: Dict[str, Any],
        query: str,
        run_id: Optional[UUID] = None,
        parent_run_id: Optional[UUID] = None,
        **kwargs: Any,
    ) -> CallbackManagerForRetrieverRun:
        """Run when retriever starts running."""
        if run_id is None:
            run_id = uuid.uuid4()
        handle_event(
            self.handlers,
            "on_retriever_start",
            "ignore_retriever",
            serialized,
            query,
            run_id=run_id,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            metadata=self.metadata,
            **kwargs,
        )
        return CallbackManagerForRetrieverRun(
            run_id=run_id,
            handlers=self.handlers,
            inheritable_handlers=self.inheritable_handlers,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            inheritable_tags=self.inheritable_tags,
            metadata=self.metadata,
            inheritable_metadata=self.inheritable_metadata,
        )
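    # Illustrative sketch (not part of this module): a manual retriever-run
    # lifecycle, pairing this method with the run manager's on_retriever_end
    # defined earlier. Document is already imported by this module.
    #
    #     cm = CallbackManager.configure()
    #     run = cm.on_retriever_start({"name": "fake-retriever"}, "what is X?")
    #     run.on_retriever_end([Document(page_content="X is ...")])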
[docs]    @classmethod
    def configure(
        cls,
        inheritable_callbacks: Callbacks = None,
        local_callbacks: Callbacks = None,
        verbose: bool = False,
        inheritable_tags: Optional[List[str]] = None,
        local_tags: Optional[List[str]] = None,
        inheritable_metadata: Optional[Dict[str, Any]] = None,
        local_metadata: Optional[Dict[str, Any]] = None,
    ) -> CallbackManager:
        """Configure the callback manager.

        Args:
            inheritable_callbacks (Optional[Callbacks], optional): The inheritable
                callbacks. Defaults to None.
            local_callbacks (Optional[Callbacks], optional): The local callbacks.
                Defaults to None.
            verbose (bool, optional): Whether to enable verbose mode. Defaults to
                False.
            inheritable_tags (Optional[List[str]], optional): The inheritable tags.
                Defaults to None.
            local_tags (Optional[List[str]], optional): The local tags. Defaults
                to None.
            inheritable_metadata (Optional[Dict[str, Any]], optional): The
                inheritable metadata. Defaults to None.
            local_metadata (Optional[Dict[str, Any]], optional): The local
                metadata. Defaults to None.

        Returns:
            CallbackManager: The configured callback manager.
        """
        return _configure(
            cls,
            inheritable_callbacks,
            local_callbacks,
            verbose,
            inheritable_tags,
            local_tags,
            inheritable_metadata,
            local_metadata,
        )
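# Illustrative sketch, not part of this module: inheritable arguments propagate
# to child run managers via get_child(), while local ones apply only at this
# level. `_demo_configure` and the payloads are hypothetical;
# StdOutCallbackHandler is from langchain.callbacks.
def _demo_configure() -> None:
    from langchain.callbacks import StdOutCallbackHandler

    cm = CallbackManager.configure(
        inheritable_callbacks=[StdOutCallbackHandler()],  # follows child runs
        local_tags=["top-level-only"],  # stays on this manager
        inheritable_metadata={"session": "42"},  # follows child runs
    )
    run = cm.on_chain_start({"name": "outer"}, {})
    child = run.get_child()  # carries the handler and metadata, not local tags
    run.on_chain_end({})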
[docs]class CallbackManagerForChainGroup(CallbackManager):
    """Callback manager for the chain group."""

[docs]    def __init__(
        self,
        handlers: List[BaseCallbackHandler],
        inheritable_handlers: Optional[List[BaseCallbackHandler]] = None,
        parent_run_id: Optional[UUID] = None,
        *,
        parent_run_manager: CallbackManagerForChainRun,
        **kwargs: Any,
    ) -> None:
        super().__init__(
            handlers,
            inheritable_handlers,
            parent_run_id,
            **kwargs,
        )
        self.parent_run_manager = parent_run_manager
        self.ended = False

[docs]    def copy(self) -> CallbackManagerForChainGroup:
        return self.__class__(
            handlers=self.handlers,
            inheritable_handlers=self.inheritable_handlers,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            inheritable_tags=self.inheritable_tags,
            metadata=self.metadata,
            inheritable_metadata=self.inheritable_metadata,
            parent_run_manager=self.parent_run_manager,
        )

[docs]    def on_chain_end(
        self, outputs: Union[Dict[str, Any], Any], **kwargs: Any
    ) -> None:
        """Run when traced chain group ends.

        Args:
            outputs (Union[Dict[str, Any], Any]): The outputs of the chain.
        """
        self.ended = True
        return self.parent_run_manager.on_chain_end(outputs, **kwargs)

[docs]    def on_chain_error(
        self,
        error: BaseException,
        **kwargs: Any,
    ) -> None:
        """Run when chain errors.

        Args:
            error (Exception or KeyboardInterrupt): The error.
        """
        self.ended = True
        return self.parent_run_manager.on_chain_error(error, **kwargs)
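# Illustrative sketch, not part of this module: a chain group forwards its end
# and error events to the parent chain's run manager. It is normally built for
# you (e.g. by the trace_as_chain_group helper); constructing one by hand is
# shown only to make the wiring visible. `_demo_chain_group` is hypothetical.
def _demo_chain_group() -> None:
    cm = CallbackManager.configure()
    parent_run = cm.on_chain_start({"name": "group"}, {})
    group = CallbackManagerForChainGroup(
        handlers=parent_run.handlers,
        inheritable_handlers=parent_run.inheritable_handlers,
        parent_run_id=parent_run.run_id,
        parent_run_manager=parent_run,
    )
    group.on_chain_end({"result": "done"})  # ends the parent run, sets ended
    assert group.ended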
[docs]class AsyncCallbackManager(BaseCallbackManager):
    """Async callback manager that handles callbacks from LangChain."""

    @property
    def is_async(self) -> bool:
        """Return whether the handler is async."""
        return True

[docs]    async def on_llm_start(
        self,
        serialized: Dict[str, Any],
        prompts: List[str],
        **kwargs: Any,
    ) -> List[AsyncCallbackManagerForLLMRun]:
        """Run when LLM starts running.

        Args:
            serialized (Dict[str, Any]): The serialized LLM.
            prompts (List[str]): The list of prompts.
            run_id (UUID, optional): The ID of the run. Defaults to None.

        Returns:
            List[AsyncCallbackManagerForLLMRun]: The list of async
                callback managers, one for each LLM Run corresponding
                to each prompt.
        """
        tasks = []
        managers = []
        for prompt in prompts:
            run_id_ = uuid.uuid4()
            tasks.append(
                ahandle_event(
                    self.handlers,
                    "on_llm_start",
                    "ignore_llm",
                    serialized,
                    [prompt],
                    run_id=run_id_,
                    parent_run_id=self.parent_run_id,
                    tags=self.tags,
                    metadata=self.metadata,
                    **kwargs,
                )
            )
            managers.append(
                AsyncCallbackManagerForLLMRun(
                    run_id=run_id_,
                    handlers=self.handlers,
                    inheritable_handlers=self.inheritable_handlers,
                    parent_run_id=self.parent_run_id,
                    tags=self.tags,
                    inheritable_tags=self.inheritable_tags,
                    metadata=self.metadata,
                    inheritable_metadata=self.inheritable_metadata,
                )
            )
        await asyncio.gather(*tasks)
        return managers
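    # Illustrative sketch (not part of this module): handler dispatch for all
    # prompts is awaited concurrently via asyncio.gather above, so slow
    # handlers do not serialize. A hypothetical driver:
    #
    #     acm = AsyncCallbackManager.configure()
    #     run_managers = await acm.on_llm_start({"name": "fake-llm"}, ["a", "b"])
    #     for rm in run_managers:
    #         await rm.on_llm_new_token("ok")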
[docs]    async def on_chat_model_start(
        self,
        serialized: Dict[str, Any],
        messages: List[List[BaseMessage]],
        **kwargs: Any,
    ) -> List[AsyncCallbackManagerForLLMRun]:
        """Run when chat model starts running.

        Args:
            serialized (Dict[str, Any]): The serialized chat model.
            messages (List[List[BaseMessage]]): The list of messages.
            run_id (UUID, optional): The ID of the run. Defaults to None.

        Returns:
            List[AsyncCallbackManagerForLLMRun]: The list of async
                callback managers, one for each LLM Run corresponding
                to each inner message list.
        """
        tasks = []
        managers = []
        for message_list in messages:
            run_id_ = uuid.uuid4()
            tasks.append(
                ahandle_event(
                    self.handlers,
                    "on_chat_model_start",
                    "ignore_chat_model",
                    serialized,
                    [message_list],
                    run_id=run_id_,
                    parent_run_id=self.parent_run_id,
                    tags=self.tags,
                    metadata=self.metadata,
                    **kwargs,
                )
            )
            managers.append(
                AsyncCallbackManagerForLLMRun(
                    run_id=run_id_,
                    handlers=self.handlers,
                    inheritable_handlers=self.inheritable_handlers,
                    parent_run_id=self.parent_run_id,
                    tags=self.tags,
                    inheritable_tags=self.inheritable_tags,
                    metadata=self.metadata,
                    inheritable_metadata=self.inheritable_metadata,
                )
            )
        await asyncio.gather(*tasks)
        return managers
[docs]    async def on_chain_start(
        self,
        serialized: Dict[str, Any],
        inputs: Union[Dict[str, Any], Any],
        run_id: Optional[UUID] = None,
        **kwargs: Any,
    ) -> AsyncCallbackManagerForChainRun:
        """Run when chain starts running.

        Args:
            serialized (Dict[str, Any]): The serialized chain.
            inputs (Union[Dict[str, Any], Any]): The inputs to the chain.
            run_id (UUID, optional): The ID of the run. Defaults to None.

        Returns:
            AsyncCallbackManagerForChainRun: The async callback manager
                for the chain run.
        """
        if run_id is None:
            run_id = uuid.uuid4()
        await ahandle_event(
            self.handlers,
            "on_chain_start",
            "ignore_chain",
            serialized,
            inputs,
            run_id=run_id,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            metadata=self.metadata,
            **kwargs,
        )
        return AsyncCallbackManagerForChainRun(
            run_id=run_id,
            handlers=self.handlers,
            inheritable_handlers=self.inheritable_handlers,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            inheritable_tags=self.inheritable_tags,
            metadata=self.metadata,
            inheritable_metadata=self.inheritable_metadata,
        )
[docs]    async def on_tool_start(
        self,
        serialized: Dict[str, Any],
        input_str: str,
        run_id: Optional[UUID] = None,
        parent_run_id: Optional[UUID] = None,
        **kwargs: Any,
    ) -> AsyncCallbackManagerForToolRun:
        """Run when tool starts running.

        Args:
            serialized (Dict[str, Any]): The serialized tool.
            input_str (str): The input to the tool.
            run_id (UUID, optional): The ID of the run. Defaults to None.
            parent_run_id (UUID, optional): The ID of the parent run.
                Defaults to None.

        Returns:
            AsyncCallbackManagerForToolRun: The async callback manager
                for the tool run.
        """
        if run_id is None:
            run_id = uuid.uuid4()
        await ahandle_event(
            self.handlers,
            "on_tool_start",
            "ignore_agent",
            serialized,
            input_str,
            run_id=run_id,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            metadata=self.metadata,
            **kwargs,
        )
        return AsyncCallbackManagerForToolRun(
            run_id=run_id,
            handlers=self.handlers,
            inheritable_handlers=self.inheritable_handlers,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            inheritable_tags=self.inheritable_tags,
            metadata=self.metadata,
            inheritable_metadata=self.inheritable_metadata,
        )
[docs]    async def on_retriever_start(
        self,
        serialized: Dict[str, Any],
        query: str,
        run_id: Optional[UUID] = None,
        parent_run_id: Optional[UUID] = None,
        **kwargs: Any,
    ) -> AsyncCallbackManagerForRetrieverRun:
        """Run when retriever starts running."""
        if run_id is None:
            run_id = uuid.uuid4()
        await ahandle_event(
            self.handlers,
            "on_retriever_start",
            "ignore_retriever",
            serialized,
            query,
            run_id=run_id,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            metadata=self.metadata,
            **kwargs,
        )
        return AsyncCallbackManagerForRetrieverRun(
            run_id=run_id,
            handlers=self.handlers,
            inheritable_handlers=self.inheritable_handlers,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            inheritable_tags=self.inheritable_tags,
            metadata=self.metadata,
            inheritable_metadata=self.inheritable_metadata,
        )

[docs]    @classmethod
    def configure(
        cls,
        inheritable_callbacks: Callbacks = None,
        local_callbacks: Callbacks = None,
        verbose: bool = False,
        inheritable_tags: Optional[List[str]] = None,
        local_tags: Optional[List[str]] = None,
        inheritable_metadata: Optional[Dict[str, Any]] = None,
        local_metadata: Optional[Dict[str, Any]] = None,
    ) -> AsyncCallbackManager:
        """Configure the async callback manager.

        Args:
            inheritable_callbacks (Optional[Callbacks], optional): The inheritable
                callbacks. Defaults to None.
            local_callbacks (Optional[Callbacks], optional): The local callbacks.
                Defaults to None.
            verbose (bool, optional): Whether to enable verbose mode. Defaults to
                False.
            inheritable_tags (Optional[List[str]], optional): The inheritable tags.
                Defaults to None.
            local_tags (Optional[List[str]], optional): The local tags. Defaults
                to None.
            inheritable_metadata (Optional[Dict[str, Any]], optional): The
                inheritable metadata. Defaults to None.
            local_metadata (Optional[Dict[str, Any]], optional): The local
                metadata. Defaults to None.

        Returns:
            AsyncCallbackManager: The configured async callback manager.
        """
        return _configure(
            cls,
            inheritable_callbacks,
            local_callbacks,
            verbose,
            inheritable_tags,
            local_tags,
            inheritable_metadata,
            local_metadata,
        )
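# Illustrative sketch, not part of this module: an end-to-end async retriever
# trace. Each async on_*_start awaits ahandle_event before handing back the
# run manager. `_demo_async_retriever` is hypothetical; try it with
# asyncio.run(_demo_async_retriever()).
async def _demo_async_retriever() -> None:
    acm = AsyncCallbackManager.configure(inheritable_tags=["async-demo"])
    run = await acm.on_retriever_start({"name": "fake-retriever"}, "query")
    await run.on_retriever_end([Document(page_content="hit")])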
[docs]class AsyncCallbackManagerForChainGroup(AsyncCallbackManager):
    """Async callback manager for the chain group."""

[docs]    def __init__(
        self,
        handlers: List[BaseCallbackHandler],
        inheritable_handlers: Optional[List[BaseCallbackHandler]] = None,
        parent_run_id: Optional[UUID] = None,
        *,
        parent_run_manager: AsyncCallbackManagerForChainRun,
        **kwargs: Any,
    ) -> None:
        super().__init__(
            handlers,
            inheritable_handlers,
            parent_run_id,
            **kwargs,
        )
        self.parent_run_manager = parent_run_manager
        self.ended = False

[docs]    def copy(self) -> AsyncCallbackManagerForChainGroup:
        return self.__class__(
            handlers=self.handlers,
            inheritable_handlers=self.inheritable_handlers,
            parent_run_id=self.parent_run_id,
            tags=self.tags,
            inheritable_tags=self.inheritable_tags,
            metadata=self.metadata,
            inheritable_metadata=self.inheritable_metadata,
            parent_run_manager=self.parent_run_manager,
        )

[docs]    async def on_chain_end(
        self, outputs: Union[Dict[str, Any], Any], **kwargs: Any
    ) -> None:
        """Run when traced chain group ends.

        Args:
            outputs (Union[Dict[str, Any], Any]): The outputs of the chain.
        """
        self.ended = True
        await self.parent_run_manager.on_chain_end(outputs, **kwargs)

[docs]    async def on_chain_error(
        self,
        error: BaseException,
        **kwargs: Any,
    ) -> None:
        """Run when chain errors.

        Args:
            error (Exception or KeyboardInterrupt): The error.
        """
        self.ended = True
        await self.parent_run_manager.on_chain_error(error, **kwargs)
T = TypeVar("T", CallbackManager, AsyncCallbackManager)

[docs]def env_var_is_set(env_var: str) -> bool:
    """Check if an environment variable is set.

    Args:
        env_var (str): The name of the environment variable.

    Returns:
        bool: True if the environment variable is set to a value other than
            "", "0", "false", or "False"; False otherwise.
    """
    return env_var in os.environ and os.environ[env_var] not in (
        "",
        "0",
        "false",
        "False",
    )
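# Illustrative sketch, not part of this module: the falsy string literals
# above let an env var explicitly disable a flag. `_demo_env_var_is_set` and
# MY_FLAG are hypothetical.
def _demo_env_var_is_set() -> None:
    os.environ["MY_FLAG"] = "false"
    assert env_var_is_set("MY_FLAG") is False  # "false" counts as unset
    os.environ["MY_FLAG"] = "1"
    assert env_var_is_set("MY_FLAG") is True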
def _tracing_v2_is_enabled() -> bool:
    return (
        env_var_is_set("LANGCHAIN_TRACING_V2")
        or tracing_v2_callback_var.get() is not None
        or get_run_tree_context() is not None
    )

def _get_tracer_project() -> str:
    run_tree = get_run_tree_context()
    return getattr(
        run_tree,
        "session_name",
        getattr(
            # Note, if people are trying to nest @traceable functions and the
            # tracing_v2_enabled context manager, this will likely mess up the
            # tree structure.
            tracing_v2_callback_var.get(),
            "project",
            # Have to set this to a string even though it always will return
            # a string because `get_tracer_project` technically can return
            # None, but only when a specific argument is supplied.
            # Therefore, this just tricks the mypy type checker
            str(ls_utils.get_tracer_project()),
        ),
    )

_configure_hooks: List[
    Tuple[
        ContextVar[Optional[BaseCallbackHandler]],
        bool,
        Optional[Type[BaseCallbackHandler]],
        Optional[str],
    ]
] = []

H = TypeVar("H", bound=BaseCallbackHandler, covariant=True)

[docs]def register_configure_hook(
    context_var: ContextVar[Optional[Any]],
    inheritable: bool,
    handle_class: Optional[Type[BaseCallbackHandler]] = None,
    env_var: Optional[str] = None,
) -> None:
    if env_var is not None and handle_class is None:
        raise ValueError(
            "If env_var is set, handle_class must also be set to a non-None value."
        )
    _configure_hooks.append(
        (
            # the typings of ContextVar do not have the generic arg set as covariant
            # so we have to cast it
            cast(ContextVar[Optional[BaseCallbackHandler]], context_var),
            inheritable,
            handle_class,
            env_var,
        )
    )

register_configure_hook(run_collector_var, False)
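# Illustrative sketch, not part of this module: registering a context variable
# so that _configure (below) attaches its handler to every manager it builds
# while the variable holds one. `_my_handler_var` and `_register_my_hook` are
# hypothetical names.
_my_handler_var: ContextVar[Optional[BaseCallbackHandler]] = ContextVar(
    "_my_handler_var", default=None
)

def _register_my_hook() -> None:
    # inheritable=True also propagates the handler to child run managers.
    register_configure_hook(_my_handler_var, inheritable=True)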
) -> T: """Configure the callback manager. Args: callback_manager_cls (Type[T]): The callback manager class. inheritable_callbacks (Optional[Callbacks], optional): The inheritable callbacks. Defaults to None. local_callbacks (Optional[Callbacks], optional): The local callbacks. Defaults to None. verbose (bool, optional): Whether to enable verbose mode. Defaults to False. inheritable_tags (Optional[List[str]], optional): The inheritable tags. Defaults to None. local_tags (Optional[List[str]], optional): The local tags. Defaults to None. inheritable_metadata (Optional[Dict[str, Any]], optional): The inheritable metadata. Defaults to None. local_metadata (Optional[Dict[str, Any]], optional): The local metadata. Defaults to None. Returns: T: The configured callback manager. """ run_tree = get_run_tree_context() parent_run_id = None if run_tree is None else getattr(run_tree, "id") callback_manager = callback_manager_cls(handlers=[], parent_run_id=parent_run_id) if inheritable_callbacks or local_callbacks: if isinstance(inheritable_callbacks, list) or inheritable_callbacks is None: inheritable_callbacks_ = inheritable_callbacks or [] callback_manager = callback_manager_cls( handlers=inheritable_callbacks_.copy(), inheritable_handlers=inheritable_callbacks_.copy(), parent_run_id=parent_run_id, ) else: callback_manager = callback_manager_cls( handlers=inheritable_callbacks.handlers.copy(), inheritable_handlers=inheritable_callbacks.inheritable_handlers.copy(), parent_run_id=inheritable_callbacks.parent_run_id, tags=inheritable_callbacks.tags.copy(),