id
text
source
7b4a896e6191-5
if batch_size == 0: return [] batched = [ elements[i : i + batch_size] for i in range(0, len(elements), batch_size) ] ingest().eval( batched, self.ds, num_workers=min(self.num_workers, len(batched) // max(self.num_workers, 1)), **kwargs, ) self.ds.commit(allow_empty=True) if self.verbose: self.ds.summary() return ids def _search_helper( self, query: Any[str, None] = None, embedding: Any[float, None] = None, k: int = 4, distance_metric: str = "L2", use_maximal_marginal_relevance: Optional[bool] = False, fetch_k: Optional[int] = 20, filter: Optional[Any[Dict[str, str], Callable, str]] = None, return_score: Optional[bool] = False, **kwargs: Any, ) -> Any[List[Document], List[Tuple[Document, float]]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. embedding: Embedding function to use. Defaults to None. k: Number of Documents to return. Defaults to 4. distance_metric: `L2` for Euclidean, `L1` for Nuclear, `max` L-infinity distance, `cos` for cosine similarity, 'dot' for dot product. Defaults to `L2`. filter: Attribute filter by metadata example {'key': 'value'}. It can also take [Deep Lake filter]
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/deeplake.html
7b4a896e6191-6
take [Deep Lake filter] (https://docs.deeplake.ai/en/latest/deeplake.core.dataset.html#deeplake.core.dataset.Dataset.filter) Defaults to None. maximal_marginal_relevance: Whether to use maximal marginal relevance. Defaults to False. fetch_k: Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. return_score: Whether to return the score. Defaults to False. Returns: List of Documents selected by the specified distance metric, if return_score True, return a tuple of (Document, score) """ view = self.ds # attribute based filtering if filter is not None: if isinstance(filter, dict): filter = partial(dp_filter, filter=filter) view = view.filter(filter) if len(view) == 0: return [] if self._embedding_function is None: view = view.filter(lambda x: query in x["text"].data()["value"]) scores = [1.0] * len(view) if use_maximal_marginal_relevance: raise ValueError( "For MMR search, you must specify an embedding function on" "creation." ) else: emb = embedding or self._embedding_function.embed_query( query ) # type: ignore query_emb = np.array(emb, dtype=np.float32) embeddings = view.embedding.numpy(fetch_chunks=True) k_search = fetch_k if use_maximal_marginal_relevance else k indices, scores = vector_search( query_emb, embeddings, k=k_search, distance_metric=distance_metric.lower(), ) view = view[indices]
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/deeplake.html
7b4a896e6191-7
distance_metric=distance_metric.lower(), ) view = view[indices] if use_maximal_marginal_relevance: lambda_mult = kwargs.get("lambda_mult", 0.5) indices = maximal_marginal_relevance( query_emb, embeddings[indices], k=min(k, len(indices)), lambda_mult=lambda_mult, ) view = view[indices] scores = [scores[i] for i in indices] docs = [ Document( page_content=el["text"].data()["value"], metadata=el["metadata"].data()["value"], ) for el in view ] if return_score: return [(doc, score) for doc, score in zip(docs, scores)] return docs [docs] def similarity_search( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: """Return docs most similar to query. Args: query: text to embed and run the query on. k: Number of Documents to return. Defaults to 4. query: Text to look up documents similar to. embedding: Embedding function to use. Defaults to None. k: Number of Documents to return. Defaults to 4. distance_metric: `L2` for Euclidean, `L1` for Nuclear, `max` L-infinity distance, `cos` for cosine similarity, 'dot' for dot product Defaults to `L2`. filter: Attribute filter by metadata example {'key': 'value'}. Defaults to None. maximal_marginal_relevance: Whether to use maximal marginal relevance.
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/deeplake.html
7b4a896e6191-8
maximal_marginal_relevance: Whether to use maximal marginal relevance. Defaults to False. fetch_k: Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. return_score: Whether to return the score. Defaults to False. Returns: List of Documents most similar to the query vector. """ return self._search_helper(query=query, k=k, **kwargs) [docs] def similarity_search_by_vector( self, embedding: List[float], k: int = 4, **kwargs: Any ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query vector. """ return self._search_helper(embedding=embedding, k=k, **kwargs) [docs] def similarity_search_with_score( self, query: str, distance_metric: str = "L2", k: int = 4, filter: Optional[Dict[str, str]] = None, ) -> List[Tuple[Document, float]]: """Run similarity search with Deep Lake with distance returned. Args: query (str): Query text to search for. distance_metric: `L2` for Euclidean, `L1` for Nuclear, `max` L-infinity distance, `cos` for cosine similarity, 'dot' for dot product. Defaults to `L2`. k (int): Number of results to return. Defaults to 4.
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/deeplake.html
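To make the DeepLake search entry points above concrete, here is a minimal usage sketch. It is not part of the scraped module: the dataset path, the sample texts, and the ToyEmbeddings helper are illustrative placeholders, and it assumes the deeplake package is installed.

from langchain.embeddings.base import Embeddings
from langchain.vectorstores import DeepLake


class ToyEmbeddings(Embeddings):
    """Illustrative stand-in embedding: hashes characters into an 8-dim vector."""

    def embed_documents(self, texts):
        return [self.embed_query(t) for t in texts]

    def embed_query(self, text):
        vec = [0.0] * 8
        for i, ch in enumerate(text):
            vec[i % 8] += ord(ch) / 1000.0
        return vec


db = DeepLake(dataset_path="./deeplake_demo", embedding_function=ToyEmbeddings())
db.add_texts(["cats purr", "dogs bark", "fish swim"])

# Plain similarity search (k and distance_metric as documented above).
docs = db.similarity_search("which pet barks?", k=2)

# Same search, returning the distance alongside each document.
docs_and_scores = db.similarity_search_with_score(
    "which pet barks?", k=2, distance_metric="cos"
)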
7b4a896e6191-9
k (int): Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Tuple[Document, float]]: List of documents most similar to the query text with distance in float. """ return self._search_helper( query=query, k=k, filter=filter, return_score=True, distance_metric=distance_metric, ) [docs] def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ return self._search_helper( embedding=embedding, k=k, fetch_k=fetch_k, use_maximal_marginal_relevance=True, lambda_mult=lambda_mult, **kwargs, ) [docs] def max_marginal_relevance_search(
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/deeplake.html
7b4a896e6191-10
) [docs] def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ if self._embedding_function is None: raise ValueError( "For MMR search, you must specify an embedding function on" "creation." ) return self._search_helper( query=query, k=k, fetch_k=fetch_k, use_maximal_marginal_relevance=True, lambda_mult=lambda_mult, **kwargs, ) [docs] @classmethod def from_texts( cls, texts: List[str], embedding: Optional[Embeddings] = None, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, dataset_path: str = _LANGCHAIN_DEFAULT_DEEPLAKE_PATH, **kwargs: Any,
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/deeplake.html
7b4a896e6191-11
**kwargs: Any, ) -> DeepLake: """Create a Deep Lake dataset from a raw documents. If a dataset_path is specified, the dataset will be persisted in that location, otherwise by default at `./deeplake` Args: path (str, pathlib.Path): - The full path to the dataset. Can be: - Deep Lake cloud path of the form ``hub://username/dataset_name``. To write to Deep Lake cloud datasets, ensure that you are logged in to Deep Lake (use 'activeloop login' from command line) - AWS S3 path of the form ``s3://bucketname/path/to/dataset``. Credentials are required in either the environment - Google Cloud Storage path of the form ``gcs://bucketname/path/to/dataset`` Credentials are required in either the environment - Local file system path of the form ``./path/to/dataset`` or ``~/path/to/dataset`` or ``path/to/dataset``. - In-memory path of the form ``mem://path/to/dataset`` which doesn't save the dataset, but keeps it in memory instead. Should be used only for testing as it does not persist. documents (List[Document]): List of documents to add. embedding (Optional[Embeddings]): Embedding function. Defaults to None. metadatas (Optional[List[dict]]): List of metadatas. Defaults to None. ids (Optional[List[str]]): List of document IDs. Defaults to None. Returns: DeepLake: Deep Lake dataset. """ deeplake_dataset = cls( dataset_path=dataset_path, embedding_function=embedding, **kwargs )
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/deeplake.html
7b4a896e6191-12
dataset_path=dataset_path, embedding_function=embedding, **kwargs ) deeplake_dataset.add_texts(texts=texts, metadatas=metadatas, ids=ids) return deeplake_dataset [docs] def delete( self, ids: Any[List[str], None] = None, filter: Any[Dict[str, str], None] = None, delete_all: Any[bool, None] = None, ) -> bool: """Delete the entities in the dataset Args: ids (Optional[List[str]], optional): The document_ids to delete. Defaults to None. filter (Optional[Dict[str, str]], optional): The filter to delete by. Defaults to None. delete_all (Optional[bool], optional): Whether to drop the dataset. Defaults to None. """ if delete_all: self.ds.delete(large_ok=True) return True view = None if ids: view = self.ds.filter(lambda x: x["ids"].data()["value"] in ids) ids = list(view.sample_indices) if filter: if view is None: view = self.ds view = view.filter(partial(dp_filter, filter=filter)) ids = list(view.sample_indices) with self.ds: for id in sorted(ids)[::-1]: self.ds.pop(id) self.ds.commit(f"deleted {len(ids)} samples", allow_empty=True) return True [docs] @classmethod def force_delete_by_path(cls, path: str) -> None: """Force delete dataset by path""" try: import deeplake except ImportError: raise ValueError(
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/deeplake.html
7b4a896e6191-13
        try:
            import deeplake
        except ImportError:
            raise ValueError(
                "Could not import deeplake python package. "
                "Please install it with `pip install deeplake`."
            )
        deeplake.delete(path, large_ok=True, force=True)

    [docs] def delete_dataset(self) -> None:
        """Delete the collection."""
        self.delete(delete_all=True)

    [docs] def persist(self) -> None:
        """Persist the collection."""
        self.ds.flush()
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/deeplake.html
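The class-level helpers above (from_texts, delete_dataset, persist, force_delete_by_path) compose into a simple lifecycle. The sketch below is illustrative only; it reuses the ToyEmbeddings placeholder from the earlier DeepLake sketch and a made-up local path.

from langchain.vectorstores import DeepLake

db = DeepLake.from_texts(
    texts=["alpha", "beta", "gamma"],
    embedding=ToyEmbeddings(),      # placeholder embedding from the earlier sketch
    dataset_path="./deeplake_demo2",
)

db.persist()          # flush the underlying dataset to storage
db.delete_dataset()   # drop the whole collection (delete_all=True)

# Alternatively, remove a dataset by path without instantiating the store:
DeepLake.force_delete_by_path("./deeplake_demo2")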
582eead4a32f-0
Source code for langchain.vectorstores.matching_engine """Vertex Matching Engine implementation of the vector store.""" from __future__ import annotations import json import logging import time import uuid from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Type from langchain.docstore.document import Document from langchain.embeddings import TensorflowHubEmbeddings from langchain.embeddings.base import Embeddings from langchain.vectorstores.base import VectorStore if TYPE_CHECKING: from google.cloud import storage from google.cloud.aiplatform import MatchingEngineIndex, MatchingEngineIndexEndpoint from google.oauth2.service_account import Credentials logger = logging.getLogger() [docs]class MatchingEngine(VectorStore): """Vertex Matching Engine implementation of the vector store. While the embeddings are stored in the Matching Engine, the embedded documents will be stored in GCS. An existing Index and corresponding Endpoint are preconditions for using this module. See usage in docs/modules/indexes/vectorstores/examples/matchingengine.ipynb Note that this implementation is mostly meant for reading if you are planning to do a real time implementation. While reading is a real time operation, updating the index takes close to one hour.""" def __init__( self, project_id: str, index: MatchingEngineIndex, endpoint: MatchingEngineIndexEndpoint, embedding: Embeddings, gcs_client: storage.Client, gcs_bucket_name: str, credentials: Optional[Credentials] = None, ): """Vertex Matching Engine implementation of the vector store. While the embeddings are stored in the Matching Engine, the embedded documents will be stored in GCS. An existing Index and corresponding Endpoint are preconditions for using this module. See usage in
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/matching_engine.html
582eead4a32f-1
using this module. See usage in docs/modules/indexes/vectorstores/examples/matchingengine.ipynb. Note that this implementation is mostly meant for reading if you are planning to do a real time implementation. While reading is a real time operation, updating the index takes close to one hour. Attributes: project_id: The GCS project id. index: The created index class. See ~:func:`MatchingEngine.from_components`. endpoint: The created endpoint class. See ~:func:`MatchingEngine.from_components`. embedding: A :class:`Embeddings` that will be used for embedding the text sent. If none is sent, then the multilingual Tensorflow Universal Sentence Encoder will be used. gcs_client: The GCS client. gcs_bucket_name: The GCS bucket name. credentials (Optional): Created GCP credentials. """ super().__init__() self._validate_google_libraries_installation() self.project_id = project_id self.index = index self.endpoint = endpoint self.embedding = embedding self.gcs_client = gcs_client self.credentials = credentials self.gcs_bucket_name = gcs_bucket_name def _validate_google_libraries_installation(self) -> None: """Validates that Google libraries that are needed are installed.""" try: from google.cloud import aiplatform, storage # noqa: F401 from google.oauth2 import service_account # noqa: F401 except ImportError: raise ImportError( "You must run `pip install --upgrade " "google-cloud-aiplatform google-cloud-storage`" "to use the MatchingEngine Vectorstore." )
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/matching_engine.html
582eead4a32f-2
"to use the MatchingEngine Vectorstore." ) [docs] def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters. Returns: List of ids from adding the texts into the vectorstore. """ logger.debug("Embedding documents.") embeddings = self.embedding.embed_documents(list(texts)) jsons = [] ids = [] # Could be improved with async. for embedding, text in zip(embeddings, texts): id = str(uuid.uuid4()) ids.append(id) jsons.append({"id": id, "embedding": embedding}) self._upload_to_gcs(text, f"documents/{id}") logger.debug(f"Uploaded {len(ids)} documents to GCS.") # Creating json lines from the embedded documents. result_str = "\n".join([json.dumps(x) for x in jsons]) filename_prefix = f"indexes/{uuid.uuid4()}" filename = f"{filename_prefix}/{time.time()}.json" self._upload_to_gcs(result_str, filename) logger.debug( f"Uploaded updated json with embeddings to " f"{self.gcs_bucket_name}/{filename}." ) self.index = self.index.update_embeddings( contents_delta_uri=f"gs://{self.gcs_bucket_name}/{filename_prefix}/" )
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/matching_engine.html
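The add_texts method above stages two kinds of objects in GCS: one blob per document under documents/<id>, and a JSON-lines file of {id, embedding} records that the index update consumes. The snippet below isolates just that JSON-lines construction with fabricated vectors so the format is easy to see; it is not taken from the module itself.

import json
import uuid

embeddings = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]  # one vector per document (fabricated)
records = [{"id": str(uuid.uuid4()), "embedding": emb} for emb in embeddings]

# One JSON object per line, matching what gets uploaded under indexes/<uuid>/<timestamp>.json
jsonl_payload = "\n".join(json.dumps(r) for r in records)
print(jsonl_payload)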
582eead4a32f-3
) logger.debug("Updated index with new configuration.") return ids def _upload_to_gcs(self, data: str, gcs_location: str) -> None: """Uploads data to gcs_location. Args: data: The data that will be stored. gcs_location: The location where the data will be stored. """ bucket = self.gcs_client.get_bucket(self.gcs_bucket_name) blob = bucket.blob(gcs_location) blob.upload_from_string(data) [docs] def similarity_search( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: """Return docs most similar to query. Args: query: The string that will be used to search for similar documents. k: The amount of neighbors that will be retrieved. Returns: A list of k matching documents. """ logger.debug(f"Embedding query {query}.") embedding_query = self.embedding.embed_documents([query]) response = self.endpoint.match( deployed_index_id=self._get_index_id(), queries=embedding_query, num_neighbors=k, ) if len(response) == 0: return [] logger.debug(f"Found {len(response)} matches for the query {query}.") results = [] # I'm only getting the first one because queries receives an array # and the similarity_search method only recevies one query. This # means that the match method will always return an array with only # one element. for doc in response[0]: page_content = self._download_from_gcs(f"documents/{doc.id}")
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/matching_engine.html
582eead4a32f-4
page_content = self._download_from_gcs(f"documents/{doc.id}") results.append(Document(page_content=page_content)) logger.debug("Downloaded documents for query.") return results def _get_index_id(self) -> str: """Gets the correct index id for the endpoint. Returns: The index id if found (which should be found) or throws ValueError otherwise. """ for index in self.endpoint.deployed_indexes: if index.index == self.index.resource_name: return index.id raise ValueError( f"No index with id {self.index.resource_name} " f"deployed on endpoint " f"{self.endpoint.display_name}." ) def _download_from_gcs(self, gcs_location: str) -> str: """Downloads from GCS in text format. Args: gcs_location: The location where the file is located. Returns: The string contents of the file. """ bucket = self.gcs_client.get_bucket(self.gcs_bucket_name) blob = bucket.blob(gcs_location) return blob.download_as_string() [docs] @classmethod def from_texts( cls: Type["MatchingEngine"], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> "MatchingEngine": """Use from components instead.""" raise NotImplementedError( "This method is not implemented. Instead, you should initialize the class" " with `MatchingEngine.from_components(...)` and then call " "`add_texts`" ) [docs] @classmethod def from_components(
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/matching_engine.html
582eead4a32f-5
) [docs] @classmethod def from_components( cls: Type["MatchingEngine"], project_id: str, region: str, gcs_bucket_name: str, index_id: str, endpoint_id: str, credentials_path: Optional[str] = None, embedding: Optional[Embeddings] = None, ) -> "MatchingEngine": """Takes the object creation out of the constructor. Args: project_id: The GCP project id. region: The default location making the API calls. It must have the same location as the GCS bucket and must be regional. gcs_bucket_name: The location where the vectors will be stored in order for the index to be created. index_id: The id of the created index. endpoint_id: The id of the created endpoint. credentials_path: (Optional) The path of the Google credentials on the local file system. embedding: The :class:`Embeddings` that will be used for embedding the texts. Returns: A configured MatchingEngine with the texts added to the index. """ gcs_bucket_name = cls._validate_gcs_bucket(gcs_bucket_name) credentials = cls._create_credentials_from_file(credentials_path) index = cls._create_index_by_id(index_id, project_id, region, credentials) endpoint = cls._create_endpoint_by_id( endpoint_id, project_id, region, credentials ) gcs_client = cls._get_gcs_client(credentials, project_id) cls._init_aiplatform(project_id, region, gcs_bucket_name, credentials) return cls( project_id=project_id, index=index,
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/matching_engine.html
582eead4a32f-6
return cls( project_id=project_id, index=index, endpoint=endpoint, embedding=embedding or cls._get_default_embeddings(), gcs_client=gcs_client, credentials=credentials, gcs_bucket_name=gcs_bucket_name, ) @classmethod def _validate_gcs_bucket(cls, gcs_bucket_name: str) -> str: """Validates the gcs_bucket_name as a bucket name. Args: gcs_bucket_name: The received bucket uri. Returns: A valid gcs_bucket_name or throws ValueError if full path is provided. """ gcs_bucket_name = gcs_bucket_name.replace("gs://", "") if "/" in gcs_bucket_name: raise ValueError( f"The argument gcs_bucket_name should only be " f"the bucket name. Received {gcs_bucket_name}" ) return gcs_bucket_name @classmethod def _create_credentials_from_file( cls, json_credentials_path: Optional[str] ) -> Optional[Credentials]: """Creates credentials for GCP. Args: json_credentials_path: The path on the file system where the credentials are stored. Returns: An optional of Credentials or None, in which case the default will be used. """ from google.oauth2 import service_account credentials = None if json_credentials_path is not None: credentials = service_account.Credentials.from_service_account_file( json_credentials_path ) return credentials @classmethod def _create_index_by_id( cls, index_id: str, project_id: str, region: str, credentials: "Credentials"
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/matching_engine.html
582eead4a32f-7
) -> MatchingEngineIndex: """Creates a MatchingEngineIndex object by id. Args: index_id: The created index id. project_id: The project to retrieve index from. region: Location to retrieve index from. credentials: GCS credentials. Returns: A configured MatchingEngineIndex. """ from google.cloud import aiplatform logger.debug(f"Creating matching engine index with id {index_id}.") return aiplatform.MatchingEngineIndex( index_name=index_id, project=project_id, location=region, credentials=credentials, ) @classmethod def _create_endpoint_by_id( cls, endpoint_id: str, project_id: str, region: str, credentials: "Credentials" ) -> MatchingEngineIndexEndpoint: """Creates a MatchingEngineIndexEndpoint object by id. Args: endpoint_id: The created endpoint id. project_id: The project to retrieve index from. region: Location to retrieve index from. credentials: GCS credentials. Returns: A configured MatchingEngineIndexEndpoint. """ from google.cloud import aiplatform logger.debug(f"Creating endpoint with id {endpoint_id}.") return aiplatform.MatchingEngineIndexEndpoint( index_endpoint_name=endpoint_id, project=project_id, location=region, credentials=credentials, ) @classmethod def _get_gcs_client( cls, credentials: "Credentials", project_id: str ) -> "storage.Client": """Lazily creates a GCS client. Returns: A configured GCS client. """ from google.cloud import storage
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/matching_engine.html
582eead4a32f-8
            A configured GCS client.
        """
        from google.cloud import storage

        return storage.Client(credentials=credentials, project=project_id)

    @classmethod
    def _init_aiplatform(
        cls,
        project_id: str,
        region: str,
        gcs_bucket_name: str,
        credentials: "Credentials",
    ) -> None:
        """Configures the aiplatform library.

        Args:
            project_id: The GCP project id.
            region: The default location making the API calls. It must have
                the same location as the GCS bucket and must be regional.
            gcs_bucket_name: GCS staging location.
            credentials: The GCS Credentials object.
        """
        from google.cloud import aiplatform

        logger.debug(
            f"Initializing AI Platform for project {project_id} on "
            f"{region} and for {gcs_bucket_name}."
        )
        aiplatform.init(
            project=project_id,
            location=region,
            staging_bucket=gcs_bucket_name,
            credentials=credentials,
        )

    @classmethod
    def _get_default_embeddings(cls) -> TensorflowHubEmbeddings:
        """This function returns the default embedding.

        Returns:
            Default TensorflowHubEmbeddings to use.
        """
        return TensorflowHubEmbeddings()
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/matching_engine.html
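Putting the pieces above together, the typical entry point is from_components rather than the constructor. The sketch below is a hedged illustration: every id, bucket name, and query string is a placeholder, the index and endpoint must already be created and deployed in Vertex AI, and google-cloud-aiplatform / google-cloud-storage must be installed.

from langchain.vectorstores.matching_engine import MatchingEngine

engine = MatchingEngine.from_components(
    project_id="my-gcp-project",
    region="us-central1",
    gcs_bucket_name="my-staging-bucket",
    index_id="1234567890",
    endpoint_id="0987654321",
    credentials_path="service-account.json",  # optional service-account JSON file
    # embedding=...                            # optional; defaults to TensorflowHubEmbeddings
)

# Writes re-embed the texts, stage them in GCS, and trigger an index update,
# which the module notes can take close to an hour.
ids = engine.add_texts(["doc one", "doc two"])

# Reads are real time once the index is deployed on the endpoint.
docs = engine.similarity_search("doc", k=2)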
b6f86bbcbcab-0
Source code for langchain.vectorstores.awadb """Wrapper around AwaDB for embedding vectors""" from __future__ import annotations import logging from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple, Type from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain.vectorstores.base import VectorStore # from pydantic import BaseModel, Field, root_validator if TYPE_CHECKING: import awadb logger = logging.getLogger() DEFAULT_TOPN = 4 [docs]class AwaDB(VectorStore): """Interface implemented by AwaDB vector stores.""" _DEFAULT_TABLE_NAME = "langchain_awadb" def __init__( self, table_name: str = _DEFAULT_TABLE_NAME, embedding_model: Optional[Embeddings] = None, log_and_data_dir: Optional[str] = None, client: Optional[awadb.Client] = None, ) -> None: """Initialize with AwaDB client.""" try: import awadb except ImportError: raise ValueError( "Could not import awadb python package. " "Please install it with `pip install awadb`." ) if client is not None: self.awadb_client = client else: if log_and_data_dir is not None: self.awadb_client = awadb.Client(log_and_data_dir) else: self.awadb_client = awadb.Client() self.awadb_client.Create(table_name) if embedding_model is not None: self.embedding_model = embedding_model self.added_doc_count = 0 [docs] def add_texts( self, texts: Iterable[str],
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/awadb.html
b6f86bbcbcab-1
[docs] def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore. """ if self.awadb_client is None: raise ValueError("AwaDB client is None!!!") embeddings = None if self.embedding_model is not None: embeddings = self.embedding_model.embed_documents(list(texts)) added_results: List[str] = [] doc_no = 0 for text in texts: doc: List[Any] = [] if embeddings is not None: doc.append(text) doc.append(embeddings[doc_no]) else: dict_tmp = {} dict_tmp["embedding_text"] = text doc.append(dict_tmp) if metadatas is not None: if doc_no < metadatas.__len__(): doc.append(metadatas[doc_no]) self.awadb_client.Add(doc) added_results.append(str(self.added_doc_count)) doc_no = doc_no + 1 self.added_doc_count = self.added_doc_count + 1 return added_results [docs] def load_local( self, table_name: str = _DEFAULT_TABLE_NAME, **kwargs: Any, ) -> bool: if self.awadb_client is None:
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/awadb.html
b6f86bbcbcab-2
) -> bool: if self.awadb_client is None: raise ValueError("AwaDB client is None!!!") return self.awadb_client.Load(table_name) [docs] def similarity_search( self, query: str, k: int = DEFAULT_TOPN, **kwargs: Any, ) -> List[Document]: """Return docs most similar to query.""" if self.awadb_client is None: raise ValueError("AwaDB client is None!!!") embedding = None if self.embedding_model is not None: embedding = self.embedding_model.embed_query(query) return self.similarity_search_by_vector(embedding, k) [docs] def similarity_search_with_score( self, query: str, k: int = DEFAULT_TOPN, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs and relevance scores, normalized on a scale from 0 to 1. 0 is dissimilar, 1 is most similar. """ if self.awadb_client is None: raise ValueError("AwaDB client is None!!!") embedding = None if self.embedding_model is not None: embedding = self.embedding_model.embed_query(query) show_results = self.awadb_client.Search(embedding, k) results: List[Tuple[Document, float]] = [] if show_results.__len__() == 0: return results scores: List[float] = [] retrieval_docs = self.similarity_search_by_vector(embedding, k, scores) L2_Norm = 0.0 for score in scores: L2_Norm = L2_Norm + score * score
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/awadb.html
b6f86bbcbcab-3
L2_Norm = L2_Norm + score * score L2_Norm = pow(L2_Norm, 0.5) doc_no = 0 for doc in retrieval_docs: doc_tuple = (doc, 1 - scores[doc_no] / L2_Norm) results.append(doc_tuple) doc_no = doc_no + 1 return results [docs] def similarity_search_with_relevance_scores( self, query: str, k: int = DEFAULT_TOPN, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs and relevance scores, normalized on a scale from 0 to 1. 0 is dissimilar, 1 is most similar. """ if self.awadb_client is None: raise ValueError("AwaDB client is None!!!") embedding = None if self.embedding_model is not None: embedding = self.embedding_model.embed_query(query) show_results = self.awadb_client.Search(embedding, k) results: List[Tuple[Document, float]] = [] if show_results.__len__() == 0: return results scores: List[float] = [] retrieval_docs = self.similarity_search_by_vector(embedding, k, scores) L2_Norm = 0.0 for score in scores: L2_Norm = L2_Norm + score * score L2_Norm = pow(L2_Norm, 0.5) doc_no = 0 for doc in retrieval_docs: doc_tuple = (doc, 1 - scores[doc_no] / L2_Norm) results.append(doc_tuple)
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/awadb.html
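The relevance scores above come from a simple normalization: each returned L2 distance is divided by the Euclidean norm of all returned distances and subtracted from 1, so larger values mean more similar. A standalone arithmetic sketch with made-up distances:

# Made-up L2 distances returned for one query.
scores = [0.2, 0.5, 1.0]

l2_norm = sum(s * s for s in scores) ** 0.5    # sqrt(0.04 + 0.25 + 1.0) ≈ 1.136
relevance = [1 - s / l2_norm for s in scores]  # ≈ [0.824, 0.560, 0.120]
print(relevance)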
b6f86bbcbcab-4
results.append(doc_tuple) doc_no = doc_no + 1 return results [docs] def similarity_search_by_vector( self, embedding: List[float], k: int = DEFAULT_TOPN, scores: Optional[list] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query vector. """ if self.awadb_client is None: raise ValueError("AwaDB client is None!!!") show_results = self.awadb_client.Search(embedding, k) results: List[Document] = [] if show_results.__len__() == 0: return results for item_detail in show_results[0]["ResultItems"]: content = "" meta_data = {} for item_key in item_detail: if item_key == "Field@0": # text for the document content = item_detail[item_key] elif item_key == "Field@1": # embedding field for the document continue elif item_key == "score": # L2 distance if scores is not None: score = item_detail[item_key] scores.append(score) else: meta_data[item_key] = item_detail[item_key] results.append(Document(page_content=content, metadata=meta_data)) return results [docs] @classmethod def from_texts( cls: Type[AwaDB], texts: List[str], embedding: Optional[Embeddings] = None,
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/awadb.html
b6f86bbcbcab-5
        texts: List[str],
        embedding: Optional[Embeddings] = None,
        metadatas: Optional[List[dict]] = None,
        table_name: str = _DEFAULT_TABLE_NAME,
        logging_and_data_dir: Optional[str] = None,
        client: Optional[awadb.Client] = None,
        **kwargs: Any,
    ) -> AwaDB:
        """Create an AwaDB vectorstore from raw documents.

        Args:
            texts (List[str]): List of texts to add to the table.
            embedding (Optional[Embeddings]): Embedding function. Defaults to None.
            metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
            table_name (str): Name of the table to create.
            logging_and_data_dir (Optional[str]): Directory for logging and persistence.
            client (Optional[awadb.Client]): AwaDB client.

        Returns:
            AwaDB: AwaDB vectorstore.
        """
        awadb_client = cls(
            table_name=table_name,
            embedding_model=embedding,
            log_and_data_dir=logging_and_data_dir,
            client=client,
        )
        awadb_client.add_texts(texts=texts, metadatas=metadatas)
        return awadb_client
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/awadb.html
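A minimal usage sketch for the from_texts constructor above. The table name, directory, and texts are placeholders; it assumes the awadb package is installed and reuses the illustrative ToyEmbeddings helper defined in the earlier DeepLake sketch.

from langchain.vectorstores.awadb import AwaDB

store = AwaDB.from_texts(
    texts=["red apples", "green pears", "yellow bananas"],
    embedding=ToyEmbeddings(),            # placeholder embedding from the earlier sketch
    table_name="fruit_demo",
    logging_and_data_dir="./awadb_demo",
)

docs = store.similarity_search("apples", k=2)
docs_and_scores = store.similarity_search_with_score("apples", k=2)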
3ba445096b48-0
Source code for langchain.vectorstores.annoy """Wrapper around Annoy vector database.""" from __future__ import annotations import os import pickle import uuid from configparser import ConfigParser from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple import numpy as np from langchain.docstore.base import Docstore from langchain.docstore.document import Document from langchain.docstore.in_memory import InMemoryDocstore from langchain.embeddings.base import Embeddings from langchain.vectorstores.base import VectorStore from langchain.vectorstores.utils import maximal_marginal_relevance INDEX_METRICS = frozenset(["angular", "euclidean", "manhattan", "hamming", "dot"]) DEFAULT_METRIC = "angular" def dependable_annoy_import() -> Any: """Import annoy if available, otherwise raise error.""" try: import annoy except ImportError: raise ValueError( "Could not import annoy python package. " "Please install it with `pip install --user annoy` " ) return annoy [docs]class Annoy(VectorStore): """Wrapper around Annoy vector database. To use, you should have the ``annoy`` python package installed. Example: .. code-block:: python from langchain import Annoy db = Annoy(embedding_function, index, docstore, index_to_docstore_id) """ def __init__( self, embedding_function: Callable, index: Any, metric: str, docstore: Docstore, index_to_docstore_id: Dict[int, str], ): """Initialize with necessary components.""" self.embedding_function = embedding_function
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/annoy.html
3ba445096b48-1
): """Initialize with necessary components.""" self.embedding_function = embedding_function self.index = index self.metric = metric self.docstore = docstore self.index_to_docstore_id = index_to_docstore_id [docs] def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: raise NotImplementedError( "Annoy does not allow to add new data once the index is build." ) [docs] def process_index_results( self, idxs: List[int], dists: List[float] ) -> List[Tuple[Document, float]]: """Turns annoy results into a list of documents and scores. Args: idxs: List of indices of the documents in the index. dists: List of distances of the documents in the index. Returns: List of Documents and scores. """ docs = [] for idx, dist in zip(idxs, dists): _id = self.index_to_docstore_id[idx] doc = self.docstore.search(_id) if not isinstance(doc, Document): raise ValueError(f"Could not find document for id {_id}, got {doc}") docs.append((doc, dist)) return docs [docs] def similarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, search_k: int = -1 ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to.
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/annoy.html
3ba445096b48-2
Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. search_k: inspect up to search_k nodes which defaults to n_trees * n if not provided Returns: List of Documents most similar to the query and score for each """ idxs, dists = self.index.get_nns_by_vector( embedding, k, search_k=search_k, include_distances=True ) return self.process_index_results(idxs, dists) [docs] def similarity_search_with_score_by_index( self, docstore_index: int, k: int = 4, search_k: int = -1 ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. search_k: inspect up to search_k nodes which defaults to n_trees * n if not provided Returns: List of Documents most similar to the query and score for each """ idxs, dists = self.index.get_nns_by_item( docstore_index, k, search_k=search_k, include_distances=True ) return self.process_index_results(idxs, dists) [docs] def similarity_search_with_score( self, query: str, k: int = 4, search_k: int = -1 ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4.
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/annoy.html
3ba445096b48-3
k: Number of Documents to return. Defaults to 4. search_k: inspect up to search_k nodes which defaults to n_trees * n if not provided Returns: List of Documents most similar to the query and score for each """ embedding = self.embedding_function(query) docs = self.similarity_search_with_score_by_vector(embedding, k, search_k) return docs [docs] def similarity_search_by_vector( self, embedding: List[float], k: int = 4, search_k: int = -1, **kwargs: Any ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. search_k: inspect up to search_k nodes which defaults to n_trees * n if not provided Returns: List of Documents most similar to the embedding. """ docs_and_scores = self.similarity_search_with_score_by_vector( embedding, k, search_k ) return [doc for doc, _ in docs_and_scores] [docs] def similarity_search_by_index( self, docstore_index: int, k: int = 4, search_k: int = -1, **kwargs: Any ) -> List[Document]: """Return docs most similar to docstore_index. Args: docstore_index: Index of document in docstore k: Number of Documents to return. Defaults to 4. search_k: inspect up to search_k nodes which defaults to n_trees * n if not provided Returns: List of Documents most similar to the embedding. """
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/annoy.html
3ba445096b48-4
Returns: List of Documents most similar to the embedding. """ docs_and_scores = self.similarity_search_with_score_by_index( docstore_index, k, search_k ) return [doc for doc, _ in docs_and_scores] [docs] def similarity_search( self, query: str, k: int = 4, search_k: int = -1, **kwargs: Any ) -> List[Document]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. search_k: inspect up to search_k nodes which defaults to n_trees * n if not provided Returns: List of Documents most similar to the query. """ docs_and_scores = self.similarity_search_with_score(query, k, search_k) return [doc for doc, _ in docs_and_scores] [docs] def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. fetch_k: Number of Documents to fetch to pass to MMR algorithm. k: Number of Documents to return. Defaults to 4. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/annoy.html
3ba445096b48-5
of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ idxs = self.index.get_nns_by_vector( embedding, fetch_k, search_k=-1, include_distances=False ) embeddings = [self.index.get_item_vector(i) for i in idxs] mmr_selected = maximal_marginal_relevance( np.array([embedding], dtype=np.float32), embeddings, k=k, lambda_mult=lambda_mult, ) # ignore the -1's if not enough docs are returned/indexed selected_indices = [idxs[i] for i in mmr_selected if i != -1] docs = [] for i in selected_indices: _id = self.index_to_docstore_id[i] doc = self.docstore.search(_id) if not isinstance(doc, Document): raise ValueError(f"Could not find document for id {_id}, got {doc}") docs.append(doc) return docs [docs] def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4.
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/annoy.html
3ba445096b48-6
k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ embedding = self.embedding_function(query) docs = self.max_marginal_relevance_search_by_vector( embedding, k, fetch_k, lambda_mult=lambda_mult ) return docs @classmethod def __from( cls, texts: List[str], embeddings: List[List[float]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, metric: str = DEFAULT_METRIC, trees: int = 100, n_jobs: int = -1, **kwargs: Any, ) -> Annoy: if metric not in INDEX_METRICS: raise ValueError( ( f"Unsupported distance metric: {metric}. " f"Expected one of {list(INDEX_METRICS)}" ) ) annoy = dependable_annoy_import() if not embeddings: raise ValueError("embeddings must be provided to build AnnoyIndex") f = len(embeddings[0]) index = annoy.AnnoyIndex(f, metric=metric) for i, emb in enumerate(embeddings): index.add_item(i, emb) index.build(trees, n_jobs=n_jobs) documents = [] for i, text in enumerate(texts):
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/annoy.html
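Stripped of the docstore bookkeeping, the private __from helper above drives the annoy package directly: create an AnnoyIndex with the embedding dimension, add each vector, and build the trees. The isolated sketch below uses fabricated vectors and assumes `pip install annoy`.

from annoy import AnnoyIndex

vectors = [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.7, 0.7, 0.0]]  # fabricated embeddings

index = AnnoyIndex(len(vectors[0]), metric="angular")  # f = embedding dimension
for i, vec in enumerate(vectors):
    index.add_item(i, vec)
index.build(100, n_jobs=-1)  # trees=100, use all available cores

# Nearest neighbours of item 2 with distances, as in get_nns_by_item above.
ids, dists = index.get_nns_by_item(2, 2, include_distances=True)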
3ba445096b48-7
documents = [] for i, text in enumerate(texts): metadata = metadatas[i] if metadatas else {} documents.append(Document(page_content=text, metadata=metadata)) index_to_id = {i: str(uuid.uuid4()) for i in range(len(documents))} docstore = InMemoryDocstore( {index_to_id[i]: doc for i, doc in enumerate(documents)} ) return cls(embedding.embed_query, index, metric, docstore, index_to_id) [docs] @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, metric: str = DEFAULT_METRIC, trees: int = 100, n_jobs: int = -1, **kwargs: Any, ) -> Annoy: """Construct Annoy wrapper from raw documents. Args: texts: List of documents to index. embedding: Embedding function to use. metadatas: List of metadata dictionaries to associate with documents. metric: Metric to use for indexing. Defaults to "angular". trees: Number of trees to use for indexing. Defaults to 100. n_jobs: Number of jobs to use for indexing. Defaults to -1. This is a user friendly interface that: 1. Embeds documents. 2. Creates an in memory docstore 3. Initializes the Annoy database This is intended to be a quick way to get started. Example: .. code-block:: python from langchain import Annoy from langchain.embeddings import OpenAIEmbeddings
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/annoy.html
3ba445096b48-8
from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() index = Annoy.from_texts(texts, embeddings) """ embeddings = embedding.embed_documents(texts) return cls.__from( texts, embeddings, embedding, metadatas, metric, trees, n_jobs, **kwargs ) [docs] @classmethod def from_embeddings( cls, text_embeddings: List[Tuple[str, List[float]]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, metric: str = DEFAULT_METRIC, trees: int = 100, n_jobs: int = -1, **kwargs: Any, ) -> Annoy: """Construct Annoy wrapper from embeddings. Args: text_embeddings: List of tuples of (text, embedding) embedding: Embedding function to use. metadatas: List of metadata dictionaries to associate with documents. metric: Metric to use for indexing. Defaults to "angular". trees: Number of trees to use for indexing. Defaults to 100. n_jobs: Number of jobs to use for indexing. Defaults to -1 This is a user friendly interface that: 1. Creates an in memory docstore with provided embeddings 2. Initializes the Annoy database This is intended to be a quick way to get started. Example: .. code-block:: python from langchain import Annoy from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings))
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/annoy.html
3ba445096b48-9
text_embedding_pairs = list(zip(texts, text_embeddings)) db = Annoy.from_embeddings(text_embedding_pairs, embeddings) """ texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return cls.__from( texts, embeddings, embedding, metadatas, metric, trees, n_jobs, **kwargs ) [docs] def save_local(self, folder_path: str, prefault: bool = False) -> None: """Save Annoy index, docstore, and index_to_docstore_id to disk. Args: folder_path: folder path to save index, docstore, and index_to_docstore_id to. prefault: Whether to pre-load the index into memory. """ path = Path(folder_path) os.makedirs(path, exist_ok=True) # save index, index config, docstore and index_to_docstore_id config_object = ConfigParser() config_object["ANNOY"] = { "f": self.index.f, "metric": self.metric, } self.index.save(str(path / "index.annoy"), prefault=prefault) with open(path / "index.pkl", "wb") as file: pickle.dump((self.docstore, self.index_to_docstore_id, config_object), file) [docs] @classmethod def load_local( cls, folder_path: str, embeddings: Embeddings, ) -> Annoy: """Load Annoy index, docstore, and index_to_docstore_id to disk. Args: folder_path: folder path to load index, docstore,
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/annoy.html
3ba445096b48-10
        Args:
            folder_path: folder path to load index, docstore,
                and index_to_docstore_id from.
            embeddings: Embeddings to use when generating queries.
        """
        path = Path(folder_path)
        # load index separately since it is not picklable
        annoy = dependable_annoy_import()
        # load docstore and index_to_docstore_id
        with open(path / "index.pkl", "rb") as file:
            docstore, index_to_docstore_id, config_object = pickle.load(file)
        f = int(config_object["ANNOY"]["f"])
        metric = config_object["ANNOY"]["metric"]
        index = annoy.AnnoyIndex(f, metric=metric)
        index.load(str(path / "index.annoy"))
        return cls(
            embeddings.embed_query, index, metric, docstore, index_to_docstore_id
        )
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/annoy.html
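A round-trip sketch for save_local and load_local as implemented above. The folder name and texts are placeholders; OpenAIEmbeddings appears only because the class docstring uses it (it needs an API key), and the same embeddings object must be supplied again on load, since only the index, docstore, and config are persisted.

from langchain import Annoy
from langchain.embeddings import OpenAIEmbeddings

embeddings = OpenAIEmbeddings()
db = Annoy.from_texts(["hello world", "goodbye world"], embeddings)
db.save_local("annoy_demo_index")  # writes index.annoy and index.pkl

# Later, or in another process: reload and query.
db2 = Annoy.load_local("annoy_demo_index", embeddings=embeddings)
docs = db2.similarity_search("hello", k=1)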
8078984a02a5-0
Source code for langchain.vectorstores.docarray.in_memory """Wrapper around in-memory storage.""" from __future__ import annotations from typing import Any, Dict, List, Literal, Optional from langchain.embeddings.base import Embeddings from langchain.vectorstores.docarray.base import ( DocArrayIndex, _check_docarray_import, ) [docs]class DocArrayInMemorySearch(DocArrayIndex): """Wrapper around in-memory storage for exact search. To use it, you should have the ``docarray`` package with version >=0.32.0 installed. You can install it with `pip install "langchain[docarray]"`. """ [docs] @classmethod def from_params( cls, embedding: Embeddings, metric: Literal[ "cosine_sim", "euclidian_dist", "sgeuclidean_dist" ] = "cosine_sim", **kwargs: Any, ) -> DocArrayInMemorySearch: """Initialize DocArrayInMemorySearch store. Args: embedding (Embeddings): Embedding function. metric (str): metric for exact nearest-neighbor search. Can be one of: "cosine_sim", "euclidean_dist" and "sqeuclidean_dist". Defaults to "cosine_sim". **kwargs: Other keyword arguments to be passed to the get_doc_cls method. """ _check_docarray_import() from docarray.index import InMemoryExactNNIndex doc_cls = cls._get_doc_cls(space=metric, **kwargs) doc_index = InMemoryExactNNIndex[doc_cls]() # type: ignore return cls(doc_index, embedding) [docs] @classmethod def from_texts(
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/docarray/in_memory.html
8078984a02a5-1
    [docs] @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[Dict[Any, Any]]] = None,
        **kwargs: Any,
    ) -> DocArrayInMemorySearch:
        """Create a DocArrayInMemorySearch store and insert data.

        Args:
            texts (List[str]): Text data.
            embedding (Embeddings): Embedding function.
            metadatas (Optional[List[Dict[Any, Any]]]): Metadata for each text
                if it exists. Defaults to None.
            metric (str): metric for exact nearest-neighbor search.
                Can be one of: "cosine_sim", "euclidean_dist" and "sqeuclidean_dist".
                Defaults to "cosine_sim".

        Returns:
            DocArrayInMemorySearch Vector Store
        """
        store = cls.from_params(embedding, **kwargs)
        store.add_texts(texts=texts, metadatas=metadatas)
        return store
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/docarray/in_memory.html
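A minimal sketch of the flow described above for DocArrayInMemorySearch. It assumes docarray>=0.32.0 is installed (pip install "langchain[docarray]"); the texts and metadata are placeholders and ToyEmbeddings is the illustrative helper from the earlier DeepLake sketch.

from langchain.vectorstores.docarray.in_memory import DocArrayInMemorySearch

store = DocArrayInMemorySearch.from_texts(
    texts=["blue whale", "grey wolf", "white shark"],
    embedding=ToyEmbeddings(),  # placeholder embedding from the earlier sketch
    metadatas=[{"habitat": "ocean"}, {"habitat": "forest"}, {"habitat": "ocean"}],
    metric="cosine_sim",        # forwarded to from_params via **kwargs
)

docs = store.similarity_search("ocean animals", k=2)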
0e3314b830fd-0
Source code for langchain.vectorstores.docarray.hnsw """Wrapper around Hnswlib store.""" from __future__ import annotations from typing import Any, List, Literal, Optional from langchain.embeddings.base import Embeddings from langchain.vectorstores.docarray.base import ( DocArrayIndex, _check_docarray_import, ) [docs]class DocArrayHnswSearch(DocArrayIndex): """Wrapper around HnswLib storage. To use it, you should have the ``docarray`` package with version >=0.32.0 installed. You can install it with `pip install "langchain[docarray]"`. """ [docs] @classmethod def from_params( cls, embedding: Embeddings, work_dir: str, n_dim: int, dist_metric: Literal["cosine", "ip", "l2"] = "cosine", max_elements: int = 1024, index: bool = True, ef_construction: int = 200, ef: int = 10, M: int = 16, allow_replace_deleted: bool = True, num_threads: int = 1, **kwargs: Any, ) -> DocArrayHnswSearch: """Initialize DocArrayHnswSearch store. Args: embedding (Embeddings): Embedding function. work_dir (str): path to the location where all the data will be stored. n_dim (int): dimension of an embedding. dist_metric (str): Distance metric for DocArrayHnswSearch can be one of: "cosine", "ip", and "l2". Defaults to "cosine".
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/docarray/hnsw.html
0e3314b830fd-1
"cosine", "ip", and "l2". Defaults to "cosine". max_elements (int): Maximum number of vectors that can be stored. Defaults to 1024. index (bool): Whether an index should be built for this field. Defaults to True. ef_construction (int): defines a construction time/accuracy trade-off. Defaults to 200. ef (int): parameter controlling query time/accuracy trade-off. Defaults to 10. M (int): parameter that defines the maximum number of outgoing connections in the graph. Defaults to 16. allow_replace_deleted (bool): Enables replacing of deleted elements with new added ones. Defaults to True. num_threads (int): Sets the number of cpu threads to use. Defaults to 1. **kwargs: Other keyword arguments to be passed to the get_doc_cls method. """ _check_docarray_import() from docarray.index import HnswDocumentIndex doc_cls = cls._get_doc_cls( dim=n_dim, space=dist_metric, max_elements=max_elements, index=index, ef_construction=ef_construction, ef=ef, M=M, allow_replace_deleted=allow_replace_deleted, num_threads=num_threads, **kwargs, ) doc_index = HnswDocumentIndex[doc_cls](work_dir=work_dir) # type: ignore return cls(doc_index, embedding) [docs] @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, work_dir: Optional[str] = None,
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/docarray/hnsw.html
0e3314b830fd-2
        work_dir: Optional[str] = None,
        n_dim: Optional[int] = None,
        **kwargs: Any,
    ) -> DocArrayHnswSearch:
        """Create a DocArrayHnswSearch store and insert data.

        Args:
            texts (List[str]): Text data.
            embedding (Embeddings): Embedding function.
            metadatas (Optional[List[dict]]): Metadata for each text if it exists.
                Defaults to None.
            work_dir (str): path to the location where all the data will be stored.
            n_dim (int): dimension of an embedding.
            **kwargs: Other keyword arguments to be passed to the __init__ method.

        Returns:
            DocArrayHnswSearch Vector Store
        """
        if work_dir is None:
            raise ValueError("`work_dir` parameter has not been set.")
        if n_dim is None:
            raise ValueError("`n_dim` parameter has not been set.")
        store = cls.from_params(embedding, work_dir, n_dim, **kwargs)
        store.add_texts(texts=texts, metadatas=metadatas)
        return store
https://python.langchain.com/en/latest/_modules/langchain/vectorstores/docarray/hnsw.html
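The same pattern for the on-disk variant: work_dir and n_dim are mandatory in from_texts above. The directory, dimension, and texts below are placeholders, and the embedding must produce vectors of exactly n_dim dimensions (the ToyEmbeddings helper from the earlier sketch yields 8).

from langchain.vectorstores.docarray.hnsw import DocArrayHnswSearch

store = DocArrayHnswSearch.from_texts(
    texts=["first note", "second note"],
    embedding=ToyEmbeddings(),  # placeholder; returns 8-dimensional vectors
    work_dir="./hnsw_demo",     # where the HNSW data files are written
    n_dim=8,                    # must match the embedding dimension
    dist_metric="cosine",       # forwarded to from_params via **kwargs
)

docs = store.similarity_search("note", k=1)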
31fd9adfb983-0
Source code for langchain.llms.llamacpp """Wrapper around llama.cpp.""" import logging from typing import Any, Dict, Generator, List, Optional from pydantic import Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM logger = logging.getLogger(__name__) [docs]class LlamaCpp(LLM): """Wrapper around the llama.cpp model. To use, you should have the llama-cpp-python library installed, and provide the path to the Llama model as a named parameter to the constructor. Check out: https://github.com/abetlen/llama-cpp-python Example: .. code-block:: python from langchain.llms import LlamaCppEmbeddings llm = LlamaCppEmbeddings(model_path="/path/to/llama/model") """ client: Any #: :meta private: model_path: str """The path to the Llama model file.""" lora_base: Optional[str] = None """The path to the Llama LoRA base model.""" lora_path: Optional[str] = None """The path to the Llama LoRA. If None, no LoRa is loaded.""" n_ctx: int = Field(512, alias="n_ctx") """Token context window.""" n_parts: int = Field(-1, alias="n_parts") """Number of parts to split the model into. If -1, the number of parts is automatically determined.""" seed: int = Field(-1, alias="seed") """Seed. If -1, a random seed is used.""" f16_kv: bool = Field(True, alias="f16_kv")
https://python.langchain.com/en/latest/_modules/langchain/llms/llamacpp.html
31fd9adfb983-1
f16_kv: bool = Field(True, alias="f16_kv") """Use half-precision for key/value cache.""" logits_all: bool = Field(False, alias="logits_all") """Return logits for all tokens, not just the last token.""" vocab_only: bool = Field(False, alias="vocab_only") """Only load the vocabulary, no weights.""" use_mlock: bool = Field(False, alias="use_mlock") """Force system to keep model in RAM.""" n_threads: Optional[int] = Field(None, alias="n_threads") """Number of threads to use. If None, the number of threads is automatically determined.""" n_batch: Optional[int] = Field(8, alias="n_batch") """Number of tokens to process in parallel. Should be a number between 1 and n_ctx.""" n_gpu_layers: Optional[int] = Field(None, alias="n_gpu_layers") """Number of layers to be loaded into gpu memory. Default None.""" suffix: Optional[str] = Field(None) """A suffix to append to the generated text. If None, no suffix is appended.""" max_tokens: Optional[int] = 256 """The maximum number of tokens to generate.""" temperature: Optional[float] = 0.8 """The temperature to use for sampling.""" top_p: Optional[float] = 0.95 """The top-p value to use for sampling.""" logprobs: Optional[int] = Field(None) """The number of logprobs to return. If None, no logprobs are returned.""" echo: Optional[bool] = False """Whether to echo the prompt.""" stop: Optional[List[str]] = []
https://python.langchain.com/en/latest/_modules/langchain/llms/llamacpp.html
31fd9adfb983-2
"""Whether to echo the prompt.""" stop: Optional[List[str]] = [] """A list of strings to stop generation when encountered.""" repeat_penalty: Optional[float] = 1.1 """The penalty to apply to repeated tokens.""" top_k: Optional[int] = 40 """The top-k value to use for sampling.""" last_n_tokens_size: Optional[int] = 64 """The number of tokens to look back when applying the repeat_penalty.""" use_mmap: Optional[bool] = True """Whether to keep the model loaded in RAM""" streaming: bool = True """Whether to stream the results, token by token.""" @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that llama-cpp-python library is installed.""" model_path = values["model_path"] model_param_names = [ "lora_path", "lora_base", "n_ctx", "n_parts", "seed", "f16_kv", "logits_all", "vocab_only", "use_mlock", "n_threads", "n_batch", "use_mmap", "last_n_tokens_size", ] model_params = {k: values[k] for k in model_param_names} # For backwards compatibility, only include if non-null. if values["n_gpu_layers"] is not None: model_params["n_gpu_layers"] = values["n_gpu_layers"] try: from llama_cpp import Llama values["client"] = Llama(model_path, **model_params) except ImportError: raise ModuleNotFoundError(
https://python.langchain.com/en/latest/_modules/langchain/llms/llamacpp.html
31fd9adfb983-3
except ImportError: raise ModuleNotFoundError( "Could not import llama-cpp-python library. " "Please install the llama-cpp-python library to " "use this embedding model: pip install llama-cpp-python" ) except Exception as e: raise ValueError( f"Could not load Llama model from path: {model_path}. " f"Received error {e}" ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling llama_cpp.""" return { "suffix": self.suffix, "max_tokens": self.max_tokens, "temperature": self.temperature, "top_p": self.top_p, "logprobs": self.logprobs, "echo": self.echo, "stop_sequences": self.stop, # key here is convention among LLM classes "repeat_penalty": self.repeat_penalty, "top_k": self.top_k, } @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model_path": self.model_path}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "llama.cpp" def _get_parameters(self, stop: Optional[List[str]] = None) -> Dict[str, Any]: """ Performs sanity check, preparing parameters in format needed by llama_cpp. Args: stop (Optional[List[str]]): List of stop sequences for llama_cpp. Returns: Dictionary containing the combined parameters. """
https://python.langchain.com/en/latest/_modules/langchain/llms/llamacpp.html
31fd9adfb983-4
Returns: Dictionary containing the combined parameters. """ # Raise error if stop sequences are in both input and default params if self.stop and stop is not None: raise ValueError("`stop` found in both the input and default params.") params = self._default_params # llama_cpp expects the "stop" key not this, so we remove it: params.pop("stop_sequences") # then sets it as configured, or default to an empty list: params["stop"] = self.stop or stop or [] return params def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call the Llama model and return the output. Args: prompt: The prompt to use for generation. stop: A list of strings to stop generation when encountered. Returns: The generated text. Example: .. code-block:: python from langchain.llms import LlamaCpp llm = LlamaCpp(model_path="/path/to/local/llama/model.bin") llm("This is a prompt.") """ if self.streaming: # If streaming is enabled, we use the stream # method that yields as they are generated # and return the combined strings from the first choices's text: combined_text_output = "" for token in self.stream(prompt=prompt, stop=stop, run_manager=run_manager): combined_text_output += token["choices"][0]["text"] return combined_text_output else: params = self._get_parameters(stop) result = self.client(prompt=prompt, **params)
https://python.langchain.com/en/latest/_modules/langchain/llms/llamacpp.html
31fd9adfb983-5
result = self.client(prompt=prompt, **params) return result["choices"][0]["text"] [docs] def stream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> Generator[Dict, None, None]: """Yields results objects as they are generated in real time. BETA: this is a beta feature while we figure out the right abstraction. Once that happens, this interface could change. It also calls the callback manager's on_llm_new_token event with similar parameters to the OpenAI LLM class method of the same name. Args: prompt: The prompts to pass into the model. stop: Optional list of stop words to use when generating. Returns: A generator representing the stream of tokens being generated. Yields: A dictionary like objects containing a string token and metadata. See llama-cpp-python docs and below for more. Example: .. code-block:: python from langchain.llms import LlamaCpp llm = LlamaCpp( model_path="/path/to/local/model.bin", temperature = 0.5 ) for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'", stop=["'","\n"]): result = chunk["choices"][0] print(result["text"], end='', flush=True) """ params = self._get_parameters(stop) result = self.client(prompt=prompt, stream=True, **params) for chunk in result: token = chunk["choices"][0]["text"]
https://python.langchain.com/en/latest/_modules/langchain/llms/llamacpp.html
31fd9adfb983-6
for chunk in result: token = chunk["choices"][0]["text"] log_probs = chunk["choices"][0].get("logprobs", None) if run_manager: run_manager.on_llm_new_token( token=token, verbose=self.verbose, log_probs=log_probs ) yield chunk [docs] def get_num_tokens(self, text: str) -> int: tokenized_text = self.client.tokenize(text.encode("utf-8")) return len(tokenized_text)
https://python.langchain.com/en/latest/_modules/langchain/llms/llamacpp.html
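A minimal usage sketch for the LlamaCpp wrapper above, assuming the llama-cpp-python package is installed and that the model path points at a local Llama-compatible weights file; the path and sampling values shown are placeholders, not values taken from this module.

.. code-block:: python

    from langchain.llms import LlamaCpp

    llm = LlamaCpp(
        model_path="/path/to/ggml-model.bin",  # placeholder path to local weights
        n_ctx=512,            # token context window
        max_tokens=256,       # maximum number of tokens to generate
        temperature=0.8,
        streaming=False,      # set True to yield tokens via llm.stream(...)
    )
    print(llm("Q: Name the planets in the solar system. A:"))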
b44701dcae4f-0
Source code for langchain.llms.writer """Wrapper around Writer APIs.""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env [docs]class Writer(LLM): """Wrapper around Writer large language models. To use, you should have the environment variable ``WRITER_API_KEY`` and ``WRITER_ORG_ID`` set with your API key and organization ID respectively. Example: .. code-block:: python from langchain import Writer writer = Writer(model_id="palmyra-base") """ writer_org_id: Optional[str] = None """Writer organization ID.""" model_id: str = "palmyra-instruct" """Model name to use.""" min_tokens: Optional[int] = None """Minimum number of tokens to generate.""" max_tokens: Optional[int] = None """Maximum number of tokens to generate.""" temperature: Optional[float] = None """What sampling temperature to use.""" top_p: Optional[float] = None """Total probability mass of tokens to consider at each step.""" stop: Optional[List[str]] = None """Sequences when completion generation will stop.""" presence_penalty: Optional[float] = None """Penalizes repeated tokens regardless of frequency.""" repetition_penalty: Optional[float] = None """Penalizes repeated tokens according to frequency.""" best_of: Optional[int] = None """Generates this many completions server-side and returns the "best".""" logprobs: bool = False
https://python.langchain.com/en/latest/_modules/langchain/llms/writer.html
b44701dcae4f-1
logprobs: bool = False """Whether to return log probabilities.""" n: Optional[int] = None """How many completions to generate.""" writer_api_key: Optional[str] = None """Writer API key.""" base_url: Optional[str] = None """Base url to use, if None decides based on model name.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and organization id exist in environment.""" writer_api_key = get_from_dict_or_env( values, "writer_api_key", "WRITER_API_KEY" ) values["writer_api_key"] = writer_api_key writer_org_id = get_from_dict_or_env(values, "writer_org_id", "WRITER_ORG_ID") values["writer_org_id"] = writer_org_id return values @property def _default_params(self) -> Mapping[str, Any]: """Get the default parameters for calling Writer API.""" return { "minTokens": self.min_tokens, "maxTokens": self.max_tokens, "temperature": self.temperature, "topP": self.top_p, "stop": self.stop, "presencePenalty": self.presence_penalty, "repetitionPenalty": self.repetition_penalty, "bestOf": self.best_of, "logprobs": self.logprobs, "n": self.n, } @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {
https://python.langchain.com/en/latest/_modules/langchain/llms/writer.html
b44701dcae4f-2
"""Get the identifying parameters.""" return { **{"model_id": self.model_id, "writer_org_id": self.writer_org_id}, **self._default_params, } @property def _llm_type(self) -> str: """Return type of llm.""" return "writer" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to Writer's completions endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = Writer("Tell me a joke.") """ if self.base_url is not None: base_url = self.base_url else: base_url = ( "https://enterprise-api.writer.com/llm" f"/organization/{self.writer_org_id}" f"/model/{self.model_id}/completions" ) response = requests.post( url=base_url, headers={ "Authorization": f"{self.writer_api_key}", "Content-Type": "application/json", "Accept": "application/json", }, json={"prompt": prompt, **self._default_params}, ) text = response.text if stop is not None: # I believe this is required since the stop tokens # are not enforced by the model parameters text = enforce_stop_tokens(text, stop) return text By Harrison Chase
https://python.langchain.com/en/latest/_modules/langchain/llms/writer.html
b44701dcae4f-3
return text
https://python.langchain.com/en/latest/_modules/langchain/llms/writer.html
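A short, hedged sketch of calling the Writer wrapper above. The API key and organization ID are placeholders read from the environment by the validator; the model_id and sampling values are illustrative only.

.. code-block:: python

    import os

    from langchain import Writer

    os.environ["WRITER_API_KEY"] = "<your-writer-api-key>"   # placeholder
    os.environ["WRITER_ORG_ID"] = "<your-writer-org-id>"     # placeholder

    writer = Writer(model_id="palmyra-instruct", max_tokens=64, temperature=0.7)
    print(writer("Write a one-line tagline for a note-taking app."))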
c5671f6bebf9-0
Source code for langchain.llms.bedrock import json from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens class LLMInputOutputAdapter: """Adapter class to prepare the inputs from Langchain to a format that LLM model expects. Also, provides helper function to extract the generated text from the model response.""" @classmethod def prepare_input( cls, provider: str, prompt: str, model_kwargs: Dict[str, Any] ) -> Dict[str, Any]: input_body = {**model_kwargs} if provider == "anthropic" or provider == "ai21": input_body["prompt"] = prompt elif provider == "amazon": input_body = dict() input_body["inputText"] = prompt input_body["textGenerationConfig"] = {**model_kwargs} else: input_body["inputText"] = prompt if provider == "anthropic" and "max_tokens_to_sample" not in input_body: input_body["max_tokens_to_sample"] = 50 return input_body @classmethod def prepare_output(cls, provider: str, response: Any) -> str: if provider == "anthropic": response_body = json.loads(response.get("body").read().decode()) return response_body.get("completion") else: response_body = json.loads(response.get("body").read()) if provider == "ai21": return response_body.get("completions")[0].get("data").get("text") else:
https://python.langchain.com/en/latest/_modules/langchain/llms/bedrock.html
c5671f6bebf9-1
else: return response_body.get("results")[0].get("outputText") [docs]class Bedrock(LLM): """LLM provider to invoke Bedrock models. To authenticate, the AWS client uses the following methods to automatically load credentials: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html If a specific credential profile should be used, you must pass the name of the profile from the ~/.aws/credentials file that is to be used. Make sure the credentials / roles used have the required policies to access the Bedrock service. """ """ Example: .. code-block:: python from langchain.llms.bedrock import Bedrock llm = Bedrock( credentials_profile_name="default", model_id="amazon.titan-tg1-large" ) """ client: Any #: :meta private: region_name: Optional[str] = None """The aws region e.g., `us-west-2`. Falls back to AWS_DEFAULT_REGION env variable or region specified in ~/.aws/config in case it is not provided here. """ credentials_profile_name: Optional[str] = None """The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which has either access keys or role information specified. If not specified, the default credential profile or, if on an EC2 instance, credentials from IMDS will be used. See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html """ model_id: str """Id of the model to call, e.g., amazon.titan-tg1-large, this is
https://python.langchain.com/en/latest/_modules/langchain/llms/bedrock.html
c5671f6bebf9-2
equivalent to the modelId property in the list-foundation-models api""" model_kwargs: Optional[Dict] = None """Key word arguments to pass to the model.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that AWS credentials to and python package exists in environment.""" # Skip creating new client if passed in constructor if values["client"] is not None: return values try: import boto3 if values["credentials_profile_name"] is not None: session = boto3.Session(profile_name=values["credentials_profile_name"]) else: # use default credentials session = boto3.Session() client_params = {} if values["region_name"]: client_params["region_name"] = values["region_name"] values["client"] = session.client("bedrock", **client_params) except ImportError: raise ModuleNotFoundError( "Could not import boto3 python package. " "Please install it with `pip install boto3`." ) except Exception as e: raise ValueError( "Could not load credentials to authenticate with AWS client. " "Please check that credentials in the specified " "profile name are valid." ) from e return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { **{"model_kwargs": _model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm."""
https://python.langchain.com/en/latest/_modules/langchain/llms/bedrock.html
c5671f6bebf9-3
"""Return type of llm.""" return "amazon_bedrock" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to Bedrock service model. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = se("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} provider = self.model_id.split(".")[0] input_body = LLMInputOutputAdapter.prepare_input( provider, prompt, _model_kwargs ) body = json.dumps(input_body) accept = "application/json" contentType = "application/json" try: response = self.client.invoke_model( body=body, modelId=self.model_id, accept=accept, contentType=contentType ) text = LLMInputOutputAdapter.prepare_output(provider, response) except Exception as e: raise ValueError(f"Error raised by bedrock service: {e}") if stop is not None: text = enforce_stop_tokens(text, stop) return text By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 11, 2023.
https://python.langchain.com/en/latest/_modules/langchain/llms/bedrock.html
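A minimal sketch of invoking the Bedrock wrapper above, assuming boto3 is installed and the named AWS credential profile has access to the Bedrock service; the profile name, region, and model_id below are placeholders.

.. code-block:: python

    from langchain.llms.bedrock import Bedrock

    llm = Bedrock(
        credentials_profile_name="default",     # placeholder AWS profile
        region_name="us-west-2",                # placeholder region
        model_id="amazon.titan-tg1-large",      # see list-foundation-models for ids
    )
    print(llm("Tell me a joke."))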
8605d99651e1-0
Source code for langchain.llms.gpt4all """Wrapper for the GPT4All model.""" from functools import partial from typing import Any, Dict, List, Mapping, Optional, Set from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens [docs]class GPT4All(LLM): r"""Wrapper around GPT4All language models. To use, you should have the ``gpt4all`` python package installed, the pre-trained model file, and the model's config information. Example: .. code-block:: python from langchain.llms import GPT4All model = GPT4All(model="./models/gpt4all-model.bin", n_ctx=512, n_threads=8) # Simplest invocation response = model("Once upon a time, ") """ model: str """Path to the pre-trained GPT4All model file.""" backend: Optional[str] = Field(None, alias="backend") n_ctx: int = Field(512, alias="n_ctx") """Token context window.""" n_parts: int = Field(-1, alias="n_parts") """Number of parts to split the model into. If -1, the number of parts is automatically determined.""" seed: int = Field(0, alias="seed") """Seed. If -1, a random seed is used.""" f16_kv: bool = Field(False, alias="f16_kv") """Use half-precision for key/value cache.""" logits_all: bool = Field(False, alias="logits_all")
https://python.langchain.com/en/latest/_modules/langchain/llms/gpt4all.html
8605d99651e1-1
logits_all: bool = Field(False, alias="logits_all") """Return logits for all tokens, not just the last token.""" vocab_only: bool = Field(False, alias="vocab_only") """Only load the vocabulary, no weights.""" use_mlock: bool = Field(False, alias="use_mlock") """Force system to keep model in RAM.""" embedding: bool = Field(False, alias="embedding") """Use embedding mode only.""" n_threads: Optional[int] = Field(4, alias="n_threads") """Number of threads to use.""" n_predict: Optional[int] = 256 """The maximum number of tokens to generate.""" temp: Optional[float] = 0.8 """The temperature to use for sampling.""" top_p: Optional[float] = 0.95 """The top-p value to use for sampling.""" top_k: Optional[int] = 40 """The top-k value to use for sampling.""" echo: Optional[bool] = False """Whether to echo the prompt.""" stop: Optional[List[str]] = [] """A list of strings to stop generation when encountered.""" repeat_last_n: Optional[int] = 64 "Last n tokens to penalize" repeat_penalty: Optional[float] = 1.3 """The penalty to apply to repeated tokens.""" n_batch: int = Field(1, alias="n_batch") """Batch size for prompt processing.""" streaming: bool = False """Whether to stream the results or not.""" context_erase: float = 0.5 """Leave (n_ctx * context_erase) tokens starting from beginning if the context has run out.""" allow_download: bool = False
https://python.langchain.com/en/latest/_modules/langchain/llms/gpt4all.html
8605d99651e1-2
starting from beginning if the context has run out.""" allow_download: bool = False """If model does not exist in ~/.cache/gpt4all/, download it.""" client: Any = None #: :meta private: class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @staticmethod def _model_param_names() -> Set[str]: return { "n_ctx", "n_predict", "top_k", "top_p", "temp", "n_batch", "repeat_penalty", "repeat_last_n", "context_erase", } def _default_params(self) -> Dict[str, Any]: return { "n_ctx": self.n_ctx, "n_predict": self.n_predict, "top_k": self.top_k, "top_p": self.top_p, "temp": self.temp, "n_batch": self.n_batch, "repeat_penalty": self.repeat_penalty, "repeat_last_n": self.repeat_last_n, "context_erase": self.context_erase, } @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that the python package exists in the environment.""" try: from gpt4all import GPT4All as GPT4AllModel except ImportError: raise ImportError( "Could not import gpt4all python package. " "Please install it with `pip install gpt4all`." ) full_path = values["model"] model_path, delimiter, model_name = full_path.rpartition("/") model_path += delimiter
https://python.langchain.com/en/latest/_modules/langchain/llms/gpt4all.html
8605d99651e1-3
model_path += delimiter values["client"] = GPT4AllModel( model_name, model_path=model_path or None, model_type=values["backend"], allow_download=values["allow_download"], ) if values["n_threads"] is not None: # set n_threads values["client"].model.set_thread_count(values["n_threads"]) try: values["backend"] = values["client"].model_type except AttributeError: # The below is for compatibility with GPT4All Python bindings <= 0.2.3. values["backend"] = values["client"].model.model_type return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { "model": self.model, **self._default_params(), **{ k: v for k, v in self.__dict__.items() if k in self._model_param_names() }, } @property def _llm_type(self) -> str: """Return the type of llm.""" return "gpt4all" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: r"""Call out to GPT4All's generate method. Args: prompt: The prompt to pass into the model. stop: A list of strings to stop generation when encountered. Returns: The string generated by the model. Example: .. code-block:: python prompt = "Once upon a time, "
https://python.langchain.com/en/latest/_modules/langchain/llms/gpt4all.html
8605d99651e1-4
.. code-block:: python prompt = "Once upon a time, " response = model(prompt, n_predict=55) """ text_callback = None if run_manager: text_callback = partial(run_manager.on_llm_new_token, verbose=self.verbose) text = "" for token in self.client.generate(prompt, **self._default_params()): if text_callback: text_callback(token) text += token if stop is not None: text = enforce_stop_tokens(text, stop) return text
https://python.langchain.com/en/latest/_modules/langchain/llms/gpt4all.html
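A minimal usage sketch for the GPT4All wrapper above, assuming the gpt4all package is installed; the model path is a placeholder, and with the default allow_download=False the file must already exist locally.

.. code-block:: python

    from langchain.llms import GPT4All

    llm = GPT4All(
        model="./models/gpt4all-model.bin",  # placeholder path to a local model file
        n_threads=8,
        n_predict=128,    # maximum number of tokens to generate
        temp=0.7,
    )
    print(llm("Once upon a time, "))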
c5b0cfb8e98a-0
Source code for langchain.llms.self_hosted_hugging_face """Wrapper around HuggingFace Pipeline API to run on self-hosted remote hardware.""" import importlib.util import logging from typing import Any, Callable, List, Mapping, Optional from pydantic import Extra from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.self_hosted import SelfHostedPipeline from langchain.llms.utils import enforce_stop_tokens DEFAULT_MODEL_ID = "gpt2" DEFAULT_TASK = "text-generation" VALID_TASKS = ("text2text-generation", "text-generation", "summarization") logger = logging.getLogger(__name__) def _generate_text( pipeline: Any, prompt: str, *args: Any, stop: Optional[List[str]] = None, **kwargs: Any, ) -> str: """Inference function to send to the remote hardware. Accepts a Hugging Face pipeline (or more likely, a key pointing to such a pipeline on the cluster's object store) and returns generated text. """ response = pipeline(prompt, *args, **kwargs) if pipeline.task == "text-generation": # Text generation return includes the starter text. text = response[0]["generated_text"][len(prompt) :] elif pipeline.task == "text2text-generation": text = response[0]["generated_text"] elif pipeline.task == "summarization": text = response[0]["summary_text"] else: raise ValueError( f"Got invalid task {pipeline.task}, " f"currently only {VALID_TASKS} are supported" ) if stop is not None: text = enforce_stop_tokens(text, stop) return text
https://python.langchain.com/en/latest/_modules/langchain/llms/self_hosted_hugging_face.html
c5b0cfb8e98a-1
text = enforce_stop_tokens(text, stop) return text def _load_transformer( model_id: str = DEFAULT_MODEL_ID, task: str = DEFAULT_TASK, device: int = 0, model_kwargs: Optional[dict] = None, ) -> Any: """Inference function to send to the remote hardware. Accepts a huggingface model_id and returns a pipeline for the task. """ from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer from transformers import pipeline as hf_pipeline _model_kwargs = model_kwargs or {} tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs) try: if task == "text-generation": model = AutoModelForCausalLM.from_pretrained(model_id, **_model_kwargs) elif task in ("text2text-generation", "summarization"): model = AutoModelForSeq2SeqLM.from_pretrained(model_id, **_model_kwargs) else: raise ValueError( f"Got invalid task {task}, " f"currently only {VALID_TASKS} are supported" ) except ImportError as e: raise ValueError( f"Could not load the {task} model due to missing dependencies." ) from e if importlib.util.find_spec("torch") is not None: import torch cuda_device_count = torch.cuda.device_count() if device < -1 or (device >= cuda_device_count): raise ValueError( f"Got device=={device}, " f"device is required to be within [-1, {cuda_device_count})" )
https://python.langchain.com/en/latest/_modules/langchain/llms/self_hosted_hugging_face.html
c5b0cfb8e98a-2
) if device < 0 and cuda_device_count > 0: logger.warning( "Device has %d GPUs available. " "Provide device={deviceId} to `from_model_id` to use available" "GPUs for execution. deviceId is -1 for CPU and " "can be a positive integer associated with CUDA device id.", cuda_device_count, ) pipeline = hf_pipeline( task=task, model=model, tokenizer=tokenizer, device=device, model_kwargs=_model_kwargs, ) if pipeline.task not in VALID_TASKS: raise ValueError( f"Got invalid task {pipeline.task}, " f"currently only {VALID_TASKS} are supported" ) return pipeline [docs]class SelfHostedHuggingFaceLLM(SelfHostedPipeline): """Wrapper around HuggingFace Pipeline API to run on self-hosted remote hardware. Supported hardware includes auto-launched instances on AWS, GCP, Azure, and Lambda, as well as servers specified by IP address and SSH credentials (such as on-prem, or another cloud like Paperspace, Coreweave, etc.). To use, you should have the ``runhouse`` python package installed. Only supports `text-generation`, `text2text-generation` and `summarization` for now. Example using from_model_id: .. code-block:: python from langchain.llms import SelfHostedHuggingFaceLLM import runhouse as rh gpu = rh.cluster(name="rh-a10x", instance_type="A100:1") hf = SelfHostedHuggingFaceLLM(
https://python.langchain.com/en/latest/_modules/langchain/llms/self_hosted_hugging_face.html
c5b0cfb8e98a-3
hf = SelfHostedHuggingFaceLLM( model_id="google/flan-t5-large", task="text2text-generation", hardware=gpu ) Example passing fn that generates a pipeline (bc the pipeline is not serializable): .. code-block:: python from langchain.llms import SelfHostedHuggingFaceLLM from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline import runhouse as rh def get_pipeline(): model_id = "gpt2" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) pipe = pipeline( "text-generation", model=model, tokenizer=tokenizer ) return pipe hf = SelfHostedHuggingFaceLLM( model_load_fn=get_pipeline, model_id="gpt2", hardware=gpu) """ model_id: str = DEFAULT_MODEL_ID """Hugging Face model_id to load the model.""" task: str = DEFAULT_TASK """Hugging Face task ("text-generation", "text2text-generation" or "summarization").""" device: int = 0 """Device to use for inference. -1 for CPU, 0 for GPU, 1 for second GPU, etc.""" model_kwargs: Optional[dict] = None """Key word arguments to pass to the model.""" hardware: Any """Remote hardware to send the inference function to.""" model_reqs: List[str] = ["./", "transformers", "torch"] """Requirements to install on hardware to inference the model.""" model_load_fn: Callable = _load_transformer """Function to load the model remotely on the server."""
https://python.langchain.com/en/latest/_modules/langchain/llms/self_hosted_hugging_face.html
c5b0cfb8e98a-4
"""Function to load the model remotely on the server.""" inference_fn: Callable = _generate_text #: :meta private: """Inference function to send to the remote hardware.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid def __init__(self, **kwargs: Any): """Construct the pipeline remotely using an auxiliary function. The load function needs to be importable to be imported and run on the server, i.e. in a module and not a REPL or closure. Then, initialize the remote inference function. """ load_fn_kwargs = { "model_id": kwargs.get("model_id", DEFAULT_MODEL_ID), "task": kwargs.get("task", DEFAULT_TASK), "device": kwargs.get("device", 0), "model_kwargs": kwargs.get("model_kwargs", None), } super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"model_id": self.model_id}, **{"model_kwargs": self.model_kwargs}, } @property def _llm_type(self) -> str: return "selfhosted_huggingface_pipeline" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: return self.client(pipeline=self.pipeline_ref, prompt=prompt, stop=stop) By Harrison Chase © Copyright 2023, Harrison Chase.
https://python.langchain.com/en/latest/_modules/langchain/llms/self_hosted_hugging_face.html
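A hedged sketch of the SelfHostedHuggingFaceLLM wrapper above, assuming the runhouse package is installed and your cloud credentials can launch the requested instance; the cluster name, instance type, and model_id mirror the docstring example and are placeholders.

.. code-block:: python

    import runhouse as rh

    from langchain.llms import SelfHostedHuggingFaceLLM

    gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")  # placeholder cluster

    llm = SelfHostedHuggingFaceLLM(
        model_id="google/flan-t5-large",
        task="text2text-generation",
        hardware=gpu,
    )
    print(llm("Translate English to German: How old are you?"))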
d81ab44052e2-0
Source code for langchain.llms.gooseai """Wrapper around GooseAI API.""" import logging from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) [docs]class GooseAI(LLM): """Wrapper around GooseAI large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``GOOSEAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import GooseAI gooseai = GooseAI(model_name="gpt-neo-20b") """ client: Any model_name: str = "gpt-neo-20b" """Model name to use""" temperature: float = 0.7 """What sampling temperature to use""" max_tokens: int = 256 """The maximum number of tokens to generate in the completion. -1 returns as many tokens as possible given the prompt and the models maximal context size.""" top_p: float = 1 """Total probability mass of tokens to consider at each step.""" min_tokens: int = 1 """The minimum number of tokens to generate in the completion.""" frequency_penalty: float = 0 """Penalizes repeated tokens according to frequency.""" presence_penalty: float = 0 """Penalizes repeated tokens."""
https://python.langchain.com/en/latest/_modules/langchain/llms/gooseai.html
d81ab44052e2-1
presence_penalty: float = 0 """Penalizes repeated tokens.""" n: int = 1 """How many completions to generate for each prompt.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict) """Adjust the probability of specific tokens being generated.""" gooseai_api_key: Optional[str] = None class Config: """Configuration for this pydantic config.""" extra = Extra.ignore @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""WARNING! {field_name} is not default parameter. {field_name} was transfered to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" gooseai_api_key = get_from_dict_or_env( values, "gooseai_api_key", "GOOSEAI_API_KEY" ) try:
https://python.langchain.com/en/latest/_modules/langchain/llms/gooseai.html
d81ab44052e2-2
) try: import openai openai.api_key = gooseai_api_key openai.api_base = "https://api.goose.ai/v1" values["client"] = openai.Completion except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling GooseAI API.""" normal_params = { "temperature": self.temperature, "max_tokens": self.max_tokens, "top_p": self.top_p, "min_tokens": self.min_tokens, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "n": self.n, "logit_bias": self.logit_bias, } return {**normal_params, **self.model_kwargs} @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "gooseai" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call the GooseAI API.""" params = self._default_params if stop is not None: if "stop" in params:
https://python.langchain.com/en/latest/_modules/langchain/llms/gooseai.html
d81ab44052e2-3
if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop response = self.client.create(engine=self.model_name, prompt=prompt, **params) text = response.choices[0].text return text
https://python.langchain.com/en/latest/_modules/langchain/llms/gooseai.html
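A minimal sketch for the GooseAI wrapper above, assuming the openai package is installed; the API key is a placeholder read from the environment, and the model name and sampling values are illustrative.

.. code-block:: python

    import os

    from langchain.llms import GooseAI

    os.environ["GOOSEAI_API_KEY"] = "<your-gooseai-api-key>"  # placeholder

    llm = GooseAI(model_name="gpt-neo-20b", temperature=0.7, max_tokens=128)
    print(llm("Explain the difference between a list and a tuple in Python."))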
3663bc7e25af-0
Source code for langchain.llms.bananadev """Wrapper around Banana API.""" import logging from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) [docs]class Banana(LLM): """Wrapper around Banana large language models. To use, you should have the ``banana-dev`` python package installed, and the environment variable ``BANANA_API_KEY`` set with your API key. Any parameters that are valid to be passed to the call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import Banana banana = Banana(model_key="") """ model_key: str = "" """model endpoint to use""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" banana_api_key: Optional[str] = None class Config: """Configuration for this pydantic config.""" extra = Extra.forbid @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names:
https://python.langchain.com/en/latest/_modules/langchain/llms/bananadev.html
3663bc7e25af-1
if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""{field_name} was transfered to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" banana_api_key = get_from_dict_or_env( values, "banana_api_key", "BANANA_API_KEY" ) values["banana_api_key"] = banana_api_key return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"model_key": self.model_key}, **{"model_kwargs": self.model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "banana" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call to Banana endpoint.""" try: import banana_dev as banana except ImportError: raise ImportError( "Could not import banana-dev python package. " "Please install it with `pip install banana-dev`." ) params = self.model_kwargs or {} api_key = self.banana_api_key model_key = self.model_key
https://python.langchain.com/en/latest/_modules/langchain/llms/bananadev.html
3663bc7e25af-2
api_key = self.banana_api_key model_key = self.model_key model_inputs = { # a json specific to your model. "prompt": prompt, **params, } response = banana.run(api_key, model_key, model_inputs) try: text = response["modelOutputs"][0]["output"] except (KeyError, TypeError): returned = response["modelOutputs"][0] raise ValueError( "Response should be of schema: {'output': 'text'}." f"\nResponse was: {returned}" "\nTo fix this:" "\n- fork the source repo of the Banana model" "\n- modify app.py to return the above schema" "\n- deploy that as a custom repo" ) if stop is not None: # I believe this is required since the stop tokens # are not enforced by the model parameters text = enforce_stop_tokens(text, stop) return text
https://python.langchain.com/en/latest/_modules/langchain/llms/bananadev.html
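A hedged sketch for the Banana wrapper above, assuming the banana-dev package is installed and that the referenced deployment returns the {'output': 'text'} schema described in _call; the API key, model key, and extra kwargs are placeholders.

.. code-block:: python

    import os

    from langchain.llms import Banana

    os.environ["BANANA_API_KEY"] = "<your-banana-api-key>"  # placeholder

    llm = Banana(
        model_key="<your-model-key>",        # placeholder deployment key
        model_kwargs={"max_length": 128},    # assumed, model-specific inputs
    )
    print(llm("Tell me a joke."))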
dd11d18c74c1-0
Source code for langchain.llms.cerebriumai """Wrapper around CerebriumAI API.""" import logging from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) [docs]class CerebriumAI(LLM): """Wrapper around CerebriumAI large language models. To use, you should have the ``cerebrium`` python package installed, and the environment variable ``CEREBRIUMAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import CerebriumAI cerebrium = CerebriumAI(endpoint_url="") """ endpoint_url: str = "" """model endpoint to use""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" cerebriumai_api_key: Optional[str] = None class Config: """Configuration for this pydantic config.""" extra = Extra.forbid @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()}
https://python.langchain.com/en/latest/_modules/langchain/llms/cerebriumai.html
dd11d18c74c1-1
all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""{field_name} was transfered to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" cerebriumai_api_key = get_from_dict_or_env( values, "cerebriumai_api_key", "CEREBRIUMAI_API_KEY" ) values["cerebriumai_api_key"] = cerebriumai_api_key return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"endpoint_url": self.endpoint_url}, **{"model_kwargs": self.model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "cerebriumai" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call to CerebriumAI endpoint.""" try: from cerebrium import model_api_request
https://python.langchain.com/en/latest/_modules/langchain/llms/cerebriumai.html
dd11d18c74c1-2
try: from cerebrium import model_api_request except ImportError: raise ValueError( "Could not import cerebrium python package. " "Please install it with `pip install cerebrium`." ) params = self.model_kwargs or {} response = model_api_request( self.endpoint_url, {"prompt": prompt, **params}, self.cerebriumai_api_key ) text = response["data"]["result"] if stop is not None: # I believe this is required since the stop tokens # are not enforced by the model parameters text = enforce_stop_tokens(text, stop) return text
https://python.langchain.com/en/latest/_modules/langchain/llms/cerebriumai.html
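A minimal, hedged sketch for the CerebriumAI wrapper above, assuming the cerebrium package is installed; the endpoint URL, API key, and model kwargs are placeholders for your own deployment.

.. code-block:: python

    import os

    from langchain.llms import CerebriumAI

    os.environ["CEREBRIUMAI_API_KEY"] = "<your-cerebrium-api-key>"  # placeholder

    llm = CerebriumAI(
        endpoint_url="<your-cerebrium-endpoint-url>",  # placeholder endpoint
        model_kwargs={"max_length": 100},              # assumed, forwarded to the endpoint
    )
    print(llm("Tell me a joke."))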
84561d26e7df-0
Source code for langchain.llms.huggingface_hub """Wrapper around HuggingFace APIs.""" from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env DEFAULT_REPO_ID = "gpt2" VALID_TASKS = ("text2text-generation", "text-generation", "summarization") [docs]class HuggingFaceHub(LLM): """Wrapper around HuggingFaceHub models. To use, you should have the ``huggingface_hub`` python package installed, and the environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass it as a named parameter to the constructor. Only supports `text-generation`, `text2text-generation` and `summarization` for now. Example: .. code-block:: python from langchain.llms import HuggingFaceHub hf = HuggingFaceHub(repo_id="gpt2", huggingfacehub_api_token="my-api-key") """ client: Any #: :meta private: repo_id: str = DEFAULT_REPO_ID """Model name to use.""" task: Optional[str] = None """Task to call the model with. Should be a task that returns `generated_text` or `summary_text`.""" model_kwargs: Optional[dict] = None """Key word arguments to pass to the model.""" huggingfacehub_api_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid
https://python.langchain.com/en/latest/_modules/langchain/llms/huggingface_hub.html
84561d26e7df-1
"""Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" huggingfacehub_api_token = get_from_dict_or_env( values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN" ) try: from huggingface_hub.inference_api import InferenceApi repo_id = values["repo_id"] client = InferenceApi( repo_id=repo_id, token=huggingfacehub_api_token, task=values.get("task"), ) if client.task not in VALID_TASKS: raise ValueError( f"Got invalid task {client.task}, " f"currently only {VALID_TASKS} are supported" ) values["client"] = client except ImportError: raise ValueError( "Could not import huggingface_hub python package. " "Please install it with `pip install huggingface_hub`." ) return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { **{"repo_id": self.repo_id, "task": self.task}, **{"model_kwargs": _model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "huggingface_hub" def _call( self, prompt: str, stop: Optional[List[str]] = None,
https://python.langchain.com/en/latest/_modules/langchain/llms/huggingface_hub.html
84561d26e7df-2
prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to HuggingFace Hub's inference endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = hf("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} response = self.client(inputs=prompt, params=_model_kwargs) if "error" in response: raise ValueError(f"Error raised by inference API: {response['error']}") if self.client.task == "text-generation": # Text generation return includes the starter text. text = response[0]["generated_text"][len(prompt) :] elif self.client.task == "text2text-generation": text = response[0]["generated_text"] elif self.client.task == "summarization": text = response[0]["summary_text"] else: raise ValueError( f"Got invalid task {self.client.task}, " f"currently only {VALID_TASKS} are supported" ) if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to huggingface_hub. text = enforce_stop_tokens(text, stop) return text By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 11, 2023.
https://python.langchain.com/en/latest/_modules/langchain/llms/huggingface_hub.html
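A minimal sketch for the HuggingFaceHub wrapper above, assuming the huggingface_hub package is installed; the token is a placeholder, and the model_kwargs keys are assumptions about what the hosted flan-t5 endpoint accepts.

.. code-block:: python

    from langchain.llms import HuggingFaceHub

    hf = HuggingFaceHub(
        repo_id="google/flan-t5-large",
        task="text2text-generation",
        model_kwargs={"temperature": 0.5, "max_length": 64},  # assumed endpoint params
        huggingfacehub_api_token="<your-hf-token>",           # placeholder
    )
    print(hf("Translate English to French: I love programming."))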
fe9e6c109be7-0
Source code for langchain.llms.promptlayer_openai """PromptLayer wrapper.""" import datetime from typing import List, Optional from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.llms import OpenAI, OpenAIChat from langchain.schema import LLMResult [docs]class PromptLayerOpenAI(OpenAI): """Wrapper around OpenAI large language models. To use, you should have the ``openai`` and ``promptlayer`` python package installed, and the environment variable ``OPENAI_API_KEY`` and ``PROMPTLAYER_API_KEY`` set with your openAI API key and promptlayer key respectively. All parameters that can be passed to the OpenAI LLM can also be passed here. The PromptLayerOpenAI LLM adds two optional parameters: ``pl_tags``: List of strings to tag the request with. ``return_pl_id``: If True, the PromptLayer request ID will be returned in the ``generation_info`` field of the ``Generation`` object. Example: .. code-block:: python from langchain.llms import PromptLayerOpenAI openai = PromptLayerOpenAI(model_name="text-davinci-003") """ pl_tags: Optional[List[str]] return_pl_id: Optional[bool] = False def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> LLMResult: """Call OpenAI generate and then call PromptLayer API to log the request."""
https://python.langchain.com/en/latest/_modules/langchain/llms/promptlayer_openai.html
fe9e6c109be7-1
"""Call OpenAI generate and then call PromptLayer API to log the request.""" from promptlayer.utils import get_api_key, promptlayer_api_request request_start_time = datetime.datetime.now().timestamp() generated_responses = super()._generate(prompts, stop, run_manager) request_end_time = datetime.datetime.now().timestamp() for i in range(len(prompts)): prompt = prompts[i] generation = generated_responses.generations[i][0] resp = { "text": generation.text, "llm_output": generated_responses.llm_output, } pl_request_id = promptlayer_api_request( "langchain.PromptLayerOpenAI", "langchain", [prompt], self._identifying_params, self.pl_tags, resp, request_start_time, request_end_time, get_api_key(), return_pl_id=self.return_pl_id, ) if self.return_pl_id: if generation.generation_info is None or not isinstance( generation.generation_info, dict ): generation.generation_info = {} generation.generation_info["pl_request_id"] = pl_request_id return generated_responses async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, ) -> LLMResult: from promptlayer.utils import get_api_key, promptlayer_api_request_async request_start_time = datetime.datetime.now().timestamp() generated_responses = await super()._agenerate(prompts, stop, run_manager) request_end_time = datetime.datetime.now().timestamp() for i in range(len(prompts)):
https://python.langchain.com/en/latest/_modules/langchain/llms/promptlayer_openai.html
fe9e6c109be7-2
for i in range(len(prompts)): prompt = prompts[i] generation = generated_responses.generations[i][0] resp = { "text": generation.text, "llm_output": generated_responses.llm_output, } pl_request_id = await promptlayer_api_request_async( "langchain.PromptLayerOpenAI.async", "langchain", [prompt], self._identifying_params, self.pl_tags, resp, request_start_time, request_end_time, get_api_key(), return_pl_id=self.return_pl_id, ) if self.return_pl_id: if generation.generation_info is None or not isinstance( generation.generation_info, dict ): generation.generation_info = {} generation.generation_info["pl_request_id"] = pl_request_id return generated_responses [docs]class PromptLayerOpenAIChat(OpenAIChat): """Wrapper around OpenAI large language models. To use, you should have the ``openai`` and ``promptlayer`` python package installed, and the environment variable ``OPENAI_API_KEY`` and ``PROMPTLAYER_API_KEY`` set with your openAI API key and promptlayer key respectively. All parameters that can be passed to the OpenAIChat LLM can also be passed here. The PromptLayerOpenAIChat adds two optional parameters: ``pl_tags``: List of strings to tag the request with. ``return_pl_id``: If True, the PromptLayer request ID will be returned in the ``generation_info`` field of the ``Generation`` object. Example: .. code-block:: python
https://python.langchain.com/en/latest/_modules/langchain/llms/promptlayer_openai.html
fe9e6c109be7-3
``Generation`` object. Example: .. code-block:: python from langchain.llms import PromptLayerOpenAIChat openaichat = PromptLayerOpenAIChat(model_name="gpt-3.5-turbo") """ pl_tags: Optional[List[str]] return_pl_id: Optional[bool] = False def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> LLMResult: """Call OpenAI generate and then call PromptLayer API to log the request.""" from promptlayer.utils import get_api_key, promptlayer_api_request request_start_time = datetime.datetime.now().timestamp() generated_responses = super()._generate(prompts, stop, run_manager) request_end_time = datetime.datetime.now().timestamp() for i in range(len(prompts)): prompt = prompts[i] generation = generated_responses.generations[i][0] resp = { "text": generation.text, "llm_output": generated_responses.llm_output, } pl_request_id = promptlayer_api_request( "langchain.PromptLayerOpenAIChat", "langchain", [prompt], self._identifying_params, self.pl_tags, resp, request_start_time, request_end_time, get_api_key(), return_pl_id=self.return_pl_id, ) if self.return_pl_id: if generation.generation_info is None or not isinstance( generation.generation_info, dict ): generation.generation_info = {}
https://python.langchain.com/en/latest/_modules/langchain/llms/promptlayer_openai.html
fe9e6c109be7-4
generation.generation_info, dict ): generation.generation_info = {} generation.generation_info["pl_request_id"] = pl_request_id return generated_responses async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, ) -> LLMResult: from promptlayer.utils import get_api_key, promptlayer_api_request_async request_start_time = datetime.datetime.now().timestamp() generated_responses = await super()._agenerate(prompts, stop, run_manager) request_end_time = datetime.datetime.now().timestamp() for i in range(len(prompts)): prompt = prompts[i] generation = generated_responses.generations[i][0] resp = { "text": generation.text, "llm_output": generated_responses.llm_output, } pl_request_id = await promptlayer_api_request_async( "langchain.PromptLayerOpenAIChat.async", "langchain", [prompt], self._identifying_params, self.pl_tags, resp, request_start_time, request_end_time, get_api_key(), return_pl_id=self.return_pl_id, ) if self.return_pl_id: if generation.generation_info is None or not isinstance( generation.generation_info, dict ): generation.generation_info = {} generation.generation_info["pl_request_id"] = pl_request_id return generated_responses By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 11, 2023.
https://python.langchain.com/en/latest/_modules/langchain/llms/promptlayer_openai.html
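A hedged sketch for the PromptLayerOpenAI wrapper above, assuming the openai and promptlayer packages are installed and OPENAI_API_KEY / PROMPTLAYER_API_KEY are set; the tag is a placeholder, and the pl_request_id lookup relies on return_pl_id=True as described in the class docstring.

.. code-block:: python

    from langchain.llms import PromptLayerOpenAI

    llm = PromptLayerOpenAI(
        model_name="text-davinci-003",
        pl_tags=["docs-example"],   # placeholder tag
        return_pl_id=True,
    )
    result = llm.generate(["Tell me a joke."])
    generation = result.generations[0][0]
    print(generation.text)
    print(generation.generation_info["pl_request_id"])  # present when return_pl_id=True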
f7c678a95cb6-0
Source code for langchain.llms.aleph_alpha """Wrapper around Aleph Alpha APIs.""" from typing import Any, Dict, List, Optional, Sequence from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env [docs]class AlephAlpha(LLM): """Wrapper around Aleph Alpha large language models. To use, you should have the ``aleph_alpha_client`` python package installed, and the environment variable ``ALEPH_ALPHA_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Parameters are explained more in depth here: https://github.com/Aleph-Alpha/aleph-alpha-client/blob/c14b7dd2b4325c7da0d6a119f6e76385800e097b/aleph_alpha_client/completion.py#L10 Example: .. code-block:: python from langchain.llms import AlephAlpha alpeh_alpha = AlephAlpha(aleph_alpha_api_key="my-api-key") """ client: Any #: :meta private: model: Optional[str] = "luminous-base" """Model name to use.""" maximum_tokens: int = 64 """The maximum number of tokens to be generated.""" temperature: float = 0.0 """A non-negative float that tunes the degree of randomness in generation.""" top_k: int = 0 """Number of most likely tokens to consider at each step.""" top_p: float = 0.0 """Total probability mass of tokens to consider at each step."""
https://python.langchain.com/en/latest/_modules/langchain/llms/aleph_alpha.html
f7c678a95cb6-1
"""Total probability mass of tokens to consider at each step.""" presence_penalty: float = 0.0 """Penalizes repeated tokens.""" frequency_penalty: float = 0.0 """Penalizes repeated tokens according to frequency.""" repetition_penalties_include_prompt: Optional[bool] = False """Flag deciding whether presence penalty or frequency penalty are updated from the prompt.""" use_multiplicative_presence_penalty: Optional[bool] = False """Flag deciding whether presence penalty is applied multiplicatively (True) or additively (False).""" penalty_bias: Optional[str] = None """Penalty bias for the completion.""" penalty_exceptions: Optional[List[str]] = None """List of strings that may be generated without penalty, regardless of other penalty settings""" penalty_exceptions_include_stop_sequences: Optional[bool] = None """Should stop_sequences be included in penalty_exceptions.""" best_of: Optional[int] = None """returns the one with the "best of" results (highest log probability per token) """ n: int = 1 """How many completions to generate for each prompt.""" logit_bias: Optional[Dict[int, float]] = None """The logit bias allows to influence the likelihood of generating tokens.""" log_probs: Optional[int] = None """Number of top log probabilities to be returned for each generated token.""" tokens: Optional[bool] = False """return tokens of completion.""" disable_optimizations: Optional[bool] = False minimum_tokens: Optional[int] = 0 """Generate at least this number of tokens.""" echo: bool = False """Echo the prompt in the completion."""
https://python.langchain.com/en/latest/_modules/langchain/llms/aleph_alpha.html
f7c678a95cb6-2
echo: bool = False """Echo the prompt in the completion.""" use_multiplicative_frequency_penalty: bool = False sequence_penalty: float = 0.0 sequence_penalty_min_length: int = 2 use_multiplicative_sequence_penalty: bool = False completion_bias_inclusion: Optional[Sequence[str]] = None completion_bias_inclusion_first_token_only: bool = False completion_bias_exclusion: Optional[Sequence[str]] = None completion_bias_exclusion_first_token_only: bool = False """Only consider the first token for the completion_bias_exclusion.""" contextual_control_threshold: Optional[float] = None """If set to None, attention control parameters only apply to those tokens that have explicitly been set in the request. If set to a non-None value, control parameters are also applied to similar tokens. """ control_log_additive: Optional[bool] = True """True: apply control by adding the log(control_factor) to attention scores. False: (attention_scores - - attention_scores.min(-1)) * control_factor """ repetition_penalties_include_completion: bool = True """Flag deciding whether presence penalty or frequency penalty are updated from the completion.""" raw_completion: bool = False """Force the raw completion of the model to be returned.""" aleph_alpha_api_key: Optional[str] = None """API key for Aleph Alpha API.""" stop_sequences: Optional[List[str]] = None """Stop sequences to use.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment."""
"""Validate that api key and python package exists in environment.""" aleph_alpha_api_key = get_from_dict_or_env( values, "aleph_alpha_api_key", "ALEPH_ALPHA_API_KEY" ) try: import aleph_alpha_client values["client"] = aleph_alpha_client.Client(token=aleph_alpha_api_key) except ImportError: raise ImportError( "Could not import aleph_alpha_client python package. " "Please install it with `pip install aleph_alpha_client`." ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling the Aleph Alpha API.""" return { "maximum_tokens": self.maximum_tokens, "temperature": self.temperature, "top_k": self.top_k, "top_p": self.top_p, "presence_penalty": self.presence_penalty, "frequency_penalty": self.frequency_penalty, "n": self.n, "repetition_penalties_include_prompt": self.repetition_penalties_include_prompt, # noqa: E501 "use_multiplicative_presence_penalty": self.use_multiplicative_presence_penalty, # noqa: E501 "penalty_bias": self.penalty_bias, "penalty_exceptions": self.penalty_exceptions, "penalty_exceptions_include_stop_sequences": self.penalty_exceptions_include_stop_sequences, # noqa: E501 "best_of": self.best_of, "logit_bias": self.logit_bias, "log_probs": self.log_probs, "tokens": self.tokens, "disable_optimizations": self.disable_optimizations, "minimum_tokens": self.minimum_tokens, "echo": self.echo,
"minimum_tokens": self.minimum_tokens, "echo": self.echo, "use_multiplicative_frequency_penalty": self.use_multiplicative_frequency_penalty, # noqa: E501 "sequence_penalty": self.sequence_penalty, "sequence_penalty_min_length": self.sequence_penalty_min_length, "use_multiplicative_sequence_penalty": self.use_multiplicative_sequence_penalty, # noqa: E501 "completion_bias_inclusion": self.completion_bias_inclusion, "completion_bias_inclusion_first_token_only": self.completion_bias_inclusion_first_token_only, # noqa: E501 "completion_bias_exclusion": self.completion_bias_exclusion, "completion_bias_exclusion_first_token_only": self.completion_bias_exclusion_first_token_only, # noqa: E501 "contextual_control_threshold": self.contextual_control_threshold, "control_log_additive": self.control_log_additive, "repetition_penalties_include_completion": self.repetition_penalties_include_completion, # noqa: E501 "raw_completion": self.raw_completion, } @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model": self.model}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "alpeh_alpha" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to Aleph Alpha's completion endpoint. Args: prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                response = aleph_alpha("Tell me a joke.")
        """
        from aleph_alpha_client import CompletionRequest, Prompt

        params = self._default_params
        if self.stop_sequences is not None and stop is not None:
            raise ValueError(
                "stop sequences found in both the input and default params."
            )
        elif self.stop_sequences is not None:
            params["stop_sequences"] = self.stop_sequences
        else:
            params["stop_sequences"] = stop
        request = CompletionRequest(prompt=Prompt.from_text(prompt), **params)
        response = self.client.complete(model=self.model, request=request)
        text = response.completions[0].completion
        # If stop tokens are provided, Aleph Alpha's endpoint returns them.
        # In order to make this consistent with other endpoints, we strip them.
        if stop is not None or self.stop_sequences is not None:
            text = enforce_stop_tokens(text, params["stop_sequences"])
        return text
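Example usage (a minimal sketch, not part of the module above; the API key is a placeholder, and the model name, stop sequence, and parameter values are illustrative assumptions):

.. code-block:: python

    from langchain.llms import AlephAlpha

    # Placeholder key; alternatively set the ALEPH_ALPHA_API_KEY environment variable.
    llm = AlephAlpha(
        model="luminous-base",              # assumed model name (the class default)
        maximum_tokens=64,
        temperature=0.0,
        stop_sequences=["Q:"],              # illustrative stop sequence
        aleph_alpha_api_key="my-api-key",
    )
    # LLM subclasses are callable; this routes through _call() above.
    print(llm("Q: What is the capital of France?\nA:"))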
Source code for langchain.llms.stochasticai

"""Wrapper around StochasticAI APIs."""
import logging
import time
from typing import Any, Dict, List, Mapping, Optional

import requests
from pydantic import Extra, Field, root_validator

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


[docs]class StochasticAI(LLM):
    """Wrapper around StochasticAI large language models.

    To use, you should have the environment variable ``STOCHASTICAI_API_KEY``
    set with your API key.

    Example:
        .. code-block:: python

            from langchain.llms import StochasticAI
            stochasticai = StochasticAI(api_url="")
    """

    api_url: str = ""
    """URL of the deployed StochasticAI model to call."""

    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Holds any model parameters valid for `create` call not
    explicitly specified."""

    stochasticai_api_key: Optional[str] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator(pre=True)
    def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Build extra kwargs from additional params that were passed in."""
        all_required_field_names = {field.alias for field in cls.__fields__.values()}

        extra = values.get("model_kwargs", {})
        for field_name in list(values):
            if field_name not in all_required_field_names:
                if field_name in extra:
                    raise ValueError(f"Found {field_name} supplied twice.")
raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""{field_name} was transfered to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key exists in environment.""" stochasticai_api_key = get_from_dict_or_env( values, "stochasticai_api_key", "STOCHASTICAI_API_KEY" ) values["stochasticai_api_key"] = stochasticai_api_key return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"endpoint_url": self.api_url}, **{"model_kwargs": self.model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "stochasticai" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to StochasticAI's complete endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = StochasticAI("Tell me a joke.") """ params = self.model_kwargs or {}
""" params = self.model_kwargs or {} response_post = requests.post( url=self.api_url, json={"prompt": prompt, "params": params}, headers={ "apiKey": f"{self.stochasticai_api_key}", "Accept": "application/json", "Content-Type": "application/json", }, ) response_post.raise_for_status() response_post_json = response_post.json() completed = False while not completed: response_get = requests.get( url=response_post_json["data"]["responseUrl"], headers={ "apiKey": f"{self.stochasticai_api_key}", "Accept": "application/json", "Content-Type": "application/json", }, ) response_get.raise_for_status() response_get_json = response_get.json()["data"] text = response_get_json.get("completion") completed = text is not None time.sleep(0.5) text = text[0] if stop is not None: # I believe this is required since the stop tokens # are not enforced by the model parameters text = enforce_stop_tokens(text, stop) return text By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 11, 2023.
Source code for langchain.llms.databricks

import os
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, List, Optional

import requests
from pydantic import BaseModel, Extra, Field, PrivateAttr, root_validator, validator

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM

__all__ = ["Databricks"]


class _DatabricksClientBase(BaseModel, ABC):
    """A base JSON API client that talks to Databricks."""

    api_url: str
    api_token: str

    def post_raw(self, request: Any) -> Any:
        headers = {"Authorization": f"Bearer {self.api_token}"}
        response = requests.post(self.api_url, headers=headers, json=request)
        # TODO: error handling and automatic retries
        if not response.ok:
            raise ValueError(f"HTTP {response.status_code} error: {response.text}")
        return response.json()

    @abstractmethod
    def post(self, request: Any) -> Any:
        ...


class _DatabricksServingEndpointClient(_DatabricksClientBase):
    """An API client that talks to a Databricks serving endpoint."""

    host: str
    endpoint_name: str

    @root_validator(pre=True)
    def set_api_url(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        if "api_url" not in values:
            host = values["host"]
            endpoint_name = values["endpoint_name"]
            api_url = f"https://{host}/serving-endpoints/{endpoint_name}/invocations"
            values["api_url"] = api_url
        return values

    def post(self, request: Any) -> Any:
        # See https://docs.databricks.com/machine-learning/model-serving/score-model-serving-endpoints.html
        wrapped_request = {"dataframe_records": [request]}
        response = self.post_raw(wrapped_request)["predictions"]
        # For a single-record query, the result is not a list.
        if isinstance(response, list):
            response = response[0]
        return response


class _DatabricksClusterDriverProxyClient(_DatabricksClientBase):
    """An API client that talks to a Databricks cluster driver proxy app."""

    host: str
    cluster_id: str
    cluster_driver_port: str

    @root_validator(pre=True)
    def set_api_url(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        if "api_url" not in values:
            host = values["host"]
            cluster_id = values["cluster_id"]
            port = values["cluster_driver_port"]
            api_url = f"https://{host}/driver-proxy-api/o/0/{cluster_id}/{port}"
            values["api_url"] = api_url
        return values

    def post(self, request: Any) -> Any:
        return self.post_raw(request)


def get_repl_context() -> Any:
    """Gets the notebook REPL context if running inside a Databricks notebook.
    Returns None otherwise.
    """
    try:
        from dbruntime.databricks_repl_context import get_context

        return get_context()
    except ImportError:
        raise ValueError(
            "Cannot access dbruntime, not running inside a Databricks notebook."
        )


def get_default_host() -> str:
    """Gets the default Databricks workspace hostname.
"""Gets the default Databricks workspace hostname. Raises an error if the hostname cannot be automatically determined. """ host = os.getenv("DATABRICKS_HOST") if not host: try: host = get_repl_context().browserHostName if not host: raise ValueError("context doesn't contain browserHostName.") except Exception as e: raise ValueError( "host was not set and cannot be automatically inferred. Set " f"environment variable 'DATABRICKS_HOST'. Received error: {e}" ) # TODO: support Databricks CLI profile host = host.lstrip("https://").lstrip("http://").rstrip("/") return host def get_default_api_token() -> str: """Gets the default Databricks personal access token. Raises an error if the token cannot be automatically determined. """ if api_token := os.getenv("DATABRICKS_TOKEN"): return api_token try: api_token = get_repl_context().apiToken if not api_token: raise ValueError("context doesn't contain apiToken.") except Exception as e: raise ValueError( "api_token was not set and cannot be automatically inferred. Set " f"environment variable 'DATABRICKS_TOKEN'. Received error: {e}" ) # TODO: support Databricks CLI profile return api_token [docs]class Databricks(LLM): """LLM wrapper around a Databricks serving endpoint or a cluster driver proxy app. It supports two endpoint types: * **Serving endpoint** (recommended for both production and development). We assume that an LLM was registered and deployed to a serving endpoint.
      To wrap it as an LLM you must have "Can Query" permission to the endpoint.
      Set ``endpoint_name`` accordingly and do not set ``cluster_id`` and
      ``cluster_driver_port``.
      The expected model signature is:

      * inputs::

          [{"name": "prompt", "type": "string"},
           {"name": "stop", "type": "list[string]"}]

      * outputs: ``[{"type": "string"}]``

    * **Cluster driver proxy app** (recommended for interactive development).
      One can load an LLM on a Databricks interactive cluster and start a local HTTP
      server on the driver node to serve the model at ``/`` using HTTP POST method
      with JSON input/output.
      Please use a port number between ``[3000, 8000]`` and let the server listen to
      the driver IP address or simply ``0.0.0.0`` instead of localhost only.
      To wrap it as an LLM you must have "Can Attach To" permission to the cluster.
      Set ``cluster_id`` and ``cluster_driver_port`` and do not set ``endpoint_name``.
      The expected server schema (using JSON schema) is:

      * inputs::

          {"type": "object",
           "properties": {
              "prompt": {"type": "string"},
              "stop": {"type": "array", "items": {"type": "string"}}},
           "required": ["prompt"]}

      * outputs: ``{"type": "string"}``

    If the endpoint model signature is different or you want to set extra params,
    you can use ``transform_input_fn`` and ``transform_output_fn`` to apply necessary
    transformations before and after the query.
    """

    host: str = Field(default_factory=get_default_host)
    """Databricks workspace hostname.
    If not provided, the default value is determined by

    * the ``DATABRICKS_HOST`` environment variable if present, or
    * the hostname of the current Databricks workspace if running inside a Databricks
      notebook attached to an interactive cluster in "single user" or
      "no isolation shared" mode.
    """

    api_token: str = Field(default_factory=get_default_api_token)
    """Databricks personal access token.
    If not provided, the default value is determined by

    * the ``DATABRICKS_TOKEN`` environment variable if present, or
    * an automatically generated temporary token if running inside a Databricks
      notebook attached to an interactive cluster in "single user" or
      "no isolation shared" mode.
    """

    endpoint_name: Optional[str] = None
    """Name of the model serving endpoint.
    You must specify the endpoint name to connect to a model serving endpoint.
    You must not set both ``endpoint_name`` and ``cluster_id``.
    """

    cluster_id: Optional[str] = None
    """ID of the cluster if connecting to a cluster driver proxy app.
    If neither ``endpoint_name`` nor ``cluster_id`` is provided and the code runs
    inside a Databricks notebook attached to an interactive cluster in "single user"
    or "no isolation shared" mode, the current cluster ID is used as default.
    You must not set both ``endpoint_name`` and ``cluster_id``.
    """

    cluster_driver_port: Optional[str] = None
""" cluster_driver_port: Optional[str] = None """The port number used by the HTTP server running on the cluster driver node. The server should listen on the driver IP address or simply ``0.0.0.0`` to connect. We recommend the server using a port number between ``[3000, 8000]``. """ model_kwargs: Optional[Dict[str, Any]] = None """Extra parameters to pass to the endpoint.""" transform_input_fn: Optional[Callable] = None """A function that transforms ``{prompt, stop, **kwargs}`` into a JSON-compatible request object that the endpoint accepts. For example, you can apply a prompt template to the input prompt. """ transform_output_fn: Optional[Callable[..., str]] = None """A function that transforms the output from the endpoint to the generated text. """ _client: _DatabricksClientBase = PrivateAttr() class Config: extra = Extra.forbid underscore_attrs_are_private = True @validator("cluster_id", always=True) def set_cluster_id(cls, v: Any, values: Dict[str, Any]) -> Optional[str]: if v and values["endpoint_name"]: raise ValueError("Cannot set both endpoint_name and cluster_id.") elif values["endpoint_name"]: return None elif v: return v else: try: if v := get_repl_context().clusterId: return v raise ValueError("Context doesn't contain clusterId.") except Exception as e: raise ValueError( "Neither endpoint_name nor cluster_id was set. " "And the cluster_id cannot be automatically determined. Received"
"And the cluster_id cannot be automatically determined. Received" f" error: {e}" ) @validator("cluster_driver_port", always=True) def set_cluster_driver_port(cls, v: Any, values: Dict[str, Any]) -> Optional[str]: if v and values["endpoint_name"]: raise ValueError("Cannot set both endpoint_name and cluster_driver_port.") elif values["endpoint_name"]: return None elif v is None: raise ValueError( "Must set cluster_driver_port to connect to a cluster driver." ) elif int(v) <= 0: raise ValueError(f"Invalid cluster_driver_port: {v}") else: return v @validator("model_kwargs", always=True) def set_model_kwargs(cls, v: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]: if v: assert "prompt" not in v, "model_kwargs must not contain key 'prompt'" assert "stop" not in v, "model_kwargs must not contain key 'stop'" return v def __init__(self, **data: Any): super().__init__(**data) if self.endpoint_name: self._client = _DatabricksServingEndpointClient( host=self.host, api_token=self.api_token, endpoint_name=self.endpoint_name, ) elif self.cluster_id and self.cluster_driver_port: self._client = _DatabricksClusterDriverProxyClient( host=self.host, api_token=self.api_token, cluster_id=self.cluster_id, cluster_driver_port=self.cluster_driver_port, ) else: raise ValueError(