Source code for langchain.retrievers.zep
from __future__ import annotations
from typing import TYPE_CHECKING, List, Optional
from langchain.schema import BaseRetriever, Document
if TYPE_CHECKING:
from zep_python import SearchResult
[docs]class ZepRetriever(BaseRetriever):
"""A Retriever implementation for the Zep long-term memory store. Search your
user's long-term chat history with Zep.
Note: You will need to provide the user's `session_id` to use this retriever.
More on Zep:
Zep provides long-term conversation storage for LLM apps. The server stores,
summarizes, embeds, indexes, and enriches conversational AI chat
histories, and exposes them via simple, low-latency APIs.
For server installation instructions, see:
https://getzep.github.io/deployment/quickstart/
"""
def __init__(
self,
session_id: str,
url: str,
top_k: Optional[int] = None,
):
try:
from zep_python import ZepClient
except ImportError:
raise ValueError(
"Could not import zep-python package. "
"Please install it with `pip install zep-python`."
)
self.zep_client = ZepClient(base_url=url)
self.session_id = session_id
self.top_k = top_k
def _search_result_to_doc(self, results: List[SearchResult]) -> List[Document]:
return [
Document(
page_content=r.message.pop("content"),
metadata={"score": r.dist, **r.message},
)
for r in results
if r.message
]
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
from zep_python import SearchPayload
payload: SearchPayload = SearchPayload(text=query)
results: List[SearchResult] = self.zep_client.search_memory(
self.session_id, payload, limit=self.top_k
)
return self._search_result_to_doc(results)
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
from zep_python import SearchPayload
payload: SearchPayload = SearchPayload(text=query)
results: List[SearchResult] = await self.zep_client.asearch_memory(
self.session_id, payload, limit=self.top_k
)
return self._search_result_to_doc(results)
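A minimal usage sketch of the retriever above, assuming a running Zep server and the zep-python package; the URL, session id, and query are placeholder values:

from langchain.retrievers.zep import ZepRetriever

# Placeholder connection details; any existing Zep session id will work.
zep_retriever = ZepRetriever(
    session_id="user-123-session",
    url="http://localhost:8000",
    top_k=5,
)
docs = zep_retriever.get_relevant_documents("What did the user say about refunds?")
for doc in docs:
    print(doc.metadata.get("score"), doc.page_content)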
Source code for langchain.retrievers.azure_cognitive_search
"""Retriever wrapper for Azure Cognitive Search."""
from __future__ import annotations
import json
from typing import Dict, List, Optional
import aiohttp
import requests
from pydantic import BaseModel, Extra, root_validator
from langchain.schema import BaseRetriever, Document
from langchain.utils import get_from_dict_or_env
[docs]class AzureCognitiveSearchRetriever(BaseRetriever, BaseModel):
"""Wrapper around Azure Cognitive Search."""
service_name: str = ""
"""Name of Azure Cognitive Search service"""
index_name: str = ""
"""Name of Index inside Azure Cognitive Search service"""
api_key: str = ""
"""API Key. Both Admin and Query keys work, but for reading data it's
recommended to use a Query key."""
api_version: str = "2020-06-30"
"""API version"""
aiosession: Optional[aiohttp.ClientSession] = None
"""ClientSession, in case we want to reuse connection for better performance."""
content_key: str = "content"
"""Key in a retrieved result to set as the Document page_content."""
class Config:
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that service name, index name and api key exists in environment."""
values["service_name"] = get_from_dict_or_env(
values, "service_name", "AZURE_COGNITIVE_SEARCH_SERVICE_NAME"
)
values["index_name"] = get_from_dict_or_env(
values, "index_name", "AZURE_COGNITIVE_SEARCH_INDEX_NAME"
)
values["api_key"] = get_from_dict_or_env(
values, "api_key", "AZURE_COGNITIVE_SEARCH_API_KEY"
)
return values
def _build_search_url(self, query: str) -> str:
base_url = f"https://{self.service_name}.search.windows.net/"
endpoint_path = f"indexes/{self.index_name}/docs?api-version={self.api_version}"
return base_url + endpoint_path + f"&search={query}"
@property
def _headers(self) -> Dict[str, str]:
return {
"Content-Type": "application/json",
"api-key": self.api_key,
}
def _search(self, query: str) -> List[dict]:
search_url = self._build_search_url(query)
response = requests.get(search_url, headers=self._headers)
if response.status_code != 200:
raise Exception(f"Error in search request: {response}")
return json.loads(response.text)["value"]
async def _asearch(self, query: str) -> List[dict]:
search_url = self._build_search_url(query)
if not self.aiosession:
async with aiohttp.ClientSession() as session:
async with session.get(search_url, headers=self._headers) as response:
response_json = await response.json()
else:
async with self.aiosession.get(
search_url, headers=self._headers
) as response:
response_json = await response.json()
return response_json["value"]
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
search_results = self._search(query)
return [
Document(page_content=result.pop(self.content_key), metadata=result)
for result in search_results
]
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
search_results = await self._asearch(query)
return [
Document(page_content=result.pop(self.content_key), metadata=result)
for result in search_results
]
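As a rough illustration of the wrapper above, the following sketch constructs the retriever with placeholder service details (the validator will also pick these up from the AZURE_COGNITIVE_SEARCH_* environment variables):

from langchain.retrievers.azure_cognitive_search import AzureCognitiveSearchRetriever

retriever = AzureCognitiveSearchRetriever(
    service_name="my-search-service",  # placeholder
    index_name="my-index",             # placeholder
    api_key="<query-key>",             # placeholder; a Query key suffices for reads
)
docs = retriever.get_relevant_documents("langchain retrievers")
for doc in docs:
    print(doc.page_content)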
Source code for langchain.retrievers.contextual_compression
"""Retriever that wraps a base retriever and filters the results."""
from typing import List
from pydantic import BaseModel, Extra
from langchain.retrievers.document_compressors.base import (
BaseDocumentCompressor,
)
from langchain.schema import BaseRetriever, Document
[docs]class ContextualCompressionRetriever(BaseRetriever, BaseModel):
"""Retriever that wraps a base retriever and compresses the results."""
base_compressor: BaseDocumentCompressor
"""Compressor for compressing retrieved documents."""
base_retriever: BaseRetriever
"""Base Retriever to use for getting relevant documents."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
"""Get documents relevant for a query.
Args:
query: string to find relevant documents for
Returns:
Sequence of relevant documents
"""
docs = self.base_retriever.get_relevant_documents(query)
compressed_docs = self.base_compressor.compress_documents(docs, query)
return list(compressed_docs)
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
"""Get documents relevant for a query.
Args:
query: string to find relevant documents for
Returns:
List of relevant documents
"""
docs = await self.base_retriever.aget_relevant_documents(query)
compressed_docs = await self.base_compressor.acompress_documents(docs, query)
return list(compressed_docs)
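A sketch of how the pieces fit together, assuming an OpenAI API key and the faiss package; the toy texts and query are placeholders. It pairs the retriever with the EmbeddingsFilter compressor whose source appears later in this listing:

from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import EmbeddingsFilter
from langchain.vectorstores import FAISS

embeddings = OpenAIEmbeddings()
vectorstore = FAISS.from_texts(["LangChain ships retrievers.", "Bananas are yellow."], embeddings)
retriever = ContextualCompressionRetriever(
    base_compressor=EmbeddingsFilter(embeddings=embeddings, k=1),
    base_retriever=vectorstore.as_retriever(),
)
docs = retriever.get_relevant_documents("What does LangChain ship?")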
Source code for langchain.retrievers.knn
"""KNN Retriever.
Largely based on
https://github.com/karpathy/randomfun/blob/master/knn_vs_svm.ipynb"""
from __future__ import annotations
import concurrent.futures
from typing import Any, List, Optional
import numpy as np
from pydantic import BaseModel
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever, Document
def create_index(contexts: List[str], embeddings: Embeddings) -> np.ndarray:
with concurrent.futures.ThreadPoolExecutor() as executor:
return np.array(list(executor.map(embeddings.embed_query, contexts)))
[docs]class KNNRetriever(BaseRetriever, BaseModel):
embeddings: Embeddings
index: Any
texts: List[str]
k: int = 4
relevancy_threshold: Optional[float] = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
[docs] @classmethod
def from_texts(
cls, texts: List[str], embeddings: Embeddings, **kwargs: Any
) -> KNNRetriever:
index = create_index(texts, embeddings)
return cls(embeddings=embeddings, index=index, texts=texts, **kwargs)
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
query_embeds = np.array(self.embeddings.embed_query(query))
# calc L2 norm
index_embeds = self.index / np.sqrt((self.index**2).sum(1, keepdims=True))
query_embeds = query_embeds / np.sqrt((query_embeds**2).sum())
similarities = index_embeds.dot(query_embeds)
sorted_ix = np.argsort(-similarities)
denominator = np.max(similarities) - np.min(similarities) + 1e-6
normalized_similarities = (similarities - np.min(similarities)) / denominator
top_k_results = []
for row in sorted_ix[0 : self.k]:
if (
self.relevancy_threshold is None
or normalized_similarities[row] >= self.relevancy_threshold
):
top_k_results.append(Document(page_content=self.texts[row]))
return top_k_results
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
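A short sketch of the from_texts constructor above; the corpus is a toy list and OpenAIEmbeddings (which needs an API key) stands in for any Embeddings implementation:

from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import KNNRetriever

retriever = KNNRetriever.from_texts(
    ["foo", "bar", "world hello foo"],
    OpenAIEmbeddings(),
    k=2,
)
docs = retriever.get_relevant_documents("foo")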
Source code for langchain.retrievers.wikipedia
from typing import List
from langchain.schema import BaseRetriever, Document
from langchain.utilities.wikipedia import WikipediaAPIWrapper
[docs]class WikipediaRetriever(BaseRetriever, WikipediaAPIWrapper):
"""
It is effectively a wrapper for WikipediaAPIWrapper.
It wraps load() to get_relevant_documents().
It uses all WikipediaAPIWrapper arguments without any change.
"""
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
return self.load(query=query)
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
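Because all WikipediaAPIWrapper arguments pass through unchanged, construction looks like the wrapper itself; a sketch assuming the wikipedia package is installed:

from langchain.retrievers import WikipediaRetriever

retriever = WikipediaRetriever(lang="en", top_k_results=2)
docs = retriever.get_relevant_documents("Alan Turing")
for doc in docs:
    print(doc.metadata.get("title"))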
Source code for langchain.retrievers.metal
from typing import Any, List, Optional
from langchain.schema import BaseRetriever, Document
[docs]class MetalRetriever(BaseRetriever):
def __init__(self, client: Any, params: Optional[dict] = None):
from metal_sdk.metal import Metal
if not isinstance(client, Metal):
raise ValueError(
"Got unexpected client, should be of type metal_sdk.metal.Metal. "
f"Instead, got {type(client)}"
)
self.client: Metal = client
self.params = params or {}
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
results = self.client.search({"text": query}, **self.params)
final_results = []
for r in results["data"]:
metadata = {k: v for k, v in r.items() if k != "text"}
final_results.append(Document(page_content=r["text"], metadata=metadata))
return final_results
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
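A usage sketch, assuming the metal_sdk package and placeholder credentials; the Metal constructor arguments here follow the SDK's documented (api key, client id, index id) order:

from metal_sdk.metal import Metal

from langchain.retrievers import MetalRetriever

metal = Metal("<api-key>", "<client-id>", "<index-id>")  # placeholder credentials
retriever = MetalRetriever(client=metal, params={"limit": 2})
docs = retriever.get_relevant_documents("memory retrieval")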
Source code for langchain.retrievers.pinecone_hybrid_search
"""Taken from: https://docs.pinecone.io/docs/hybrid-search"""
import hashlib
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever, Document
def hash_text(text: str) -> str:
return str(hashlib.sha256(text.encode("utf-8")).hexdigest())
def create_index(
contexts: List[str],
index: Any,
embeddings: Embeddings,
sparse_encoder: Any,
ids: Optional[List[str]] = None,
metadatas: Optional[List[dict]] = None,
) -> None:
batch_size = 32
_iterator = range(0, len(contexts), batch_size)
try:
from tqdm.auto import tqdm
_iterator = tqdm(_iterator)
except ImportError:
pass
if ids is None:
# create unique ids using hash of the text
ids = [hash_text(context) for context in contexts]
for i in _iterator:
# find end of batch
i_end = min(i + batch_size, len(contexts))
# extract batch
context_batch = contexts[i:i_end]
batch_ids = ids[i:i_end]
metadata_batch = (
metadatas[i:i_end] if metadatas else [{} for _ in context_batch]
)
# add context passages as metadata
meta = [
{"context": context, **metadata}
for context, metadata in zip(context_batch, metadata_batch)
]
# create dense vectors
dense_embeds = embeddings.embed_documents(context_batch)
# create sparse vectors
sparse_embeds = sparse_encoder.encode_documents(context_batch)
for s in sparse_embeds:
s["values"] = [float(s1) for s1 in s["values"]]
vectors = []
# loop through the data and create dictionaries for upserts
for doc_id, sparse, dense, metadata in zip(
batch_ids, sparse_embeds, dense_embeds, meta
):
vectors.append(
{
"id": doc_id,
"sparse_values": sparse,
"values": dense,
"metadata": metadata,
}
)
# upload the documents to the new hybrid index
index.upsert(vectors)
[docs]class PineconeHybridSearchRetriever(BaseRetriever, BaseModel):
embeddings: Embeddings
sparse_encoder: Any
index: Any
top_k: int = 4
alpha: float = 0.5
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
[docs] def add_texts(
self,
texts: List[str],
ids: Optional[List[str]] = None,
metadatas: Optional[List[dict]] = None,
) -> None:
create_index(
texts,
self.index,
self.embeddings,
self.sparse_encoder,
ids=ids,
metadatas=metadatas,
)
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment.""" | https://python.langchain.com/en/latest/_modules/langchain/retrievers/pinecone_hybrid_search.html |
681633d9f98e-2 | """Validate that api key and python package exists in environment."""
try:
from pinecone_text.hybrid import hybrid_convex_scale # noqa:F401
from pinecone_text.sparse.base_sparse_encoder import (
BaseSparseEncoder, # noqa:F401
)
except ImportError:
raise ValueError(
"Could not import pinecone_text python package. "
"Please install it with `pip install pinecone_text`."
)
return values
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
from pinecone_text.hybrid import hybrid_convex_scale
sparse_vec = self.sparse_encoder.encode_queries(query)
# convert the question into a dense vector
dense_vec = self.embeddings.embed_query(query)
# scale alpha with hybrid_scale
dense_vec, sparse_vec = hybrid_convex_scale(dense_vec, sparse_vec, self.alpha)
sparse_vec["values"] = [float(s1) for s1 in sparse_vec["values"]]
# query pinecone with the query parameters
result = self.index.query(
vector=dense_vec,
sparse_vector=sparse_vec,
top_k=self.top_k,
include_metadata=True,
)
final_result = []
for res in result["matches"]:
context = res["metadata"].pop("context")
final_result.append(
Document(page_content=context, metadata=res["metadata"])
)
# return search results as json
return final_result
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
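A sketch of end-to-end usage, assuming pinecone-client and pinecone_text are installed, an OpenAI key is set, and a dotproduct-metric Pinecone index already exists; the index name, environment, and texts are placeholders:

import pinecone
from pinecone_text.sparse import BM25Encoder

from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import PineconeHybridSearchRetriever

pinecone.init(api_key="<pinecone-key>", environment="<environment>")  # placeholders
index = pinecone.Index("hybrid-demo")                                 # placeholder index name

retriever = PineconeHybridSearchRetriever(
    embeddings=OpenAIEmbeddings(),
    sparse_encoder=BM25Encoder().default(),
    index=index,
    top_k=4,
    alpha=0.5,  # 1.0 leans fully dense, 0.0 fully sparse
)
retriever.add_texts(["hello world", "goodbye world"])
docs = retriever.get_relevant_documents("hello")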
Source code for langchain.retrievers.document_compressors.base
"""Interface for retrieved document compressors."""
from abc import ABC, abstractmethod
from typing import List, Sequence, Union
from pydantic import BaseModel
from langchain.schema import BaseDocumentTransformer, Document
class BaseDocumentCompressor(BaseModel, ABC):
"""Base abstraction interface for document compression."""
@abstractmethod
def compress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
"""Compress retrieved documents given the query context."""
@abstractmethod
async def acompress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
"""Compress retrieved documents given the query context."""
[docs]class DocumentCompressorPipeline(BaseDocumentCompressor):
"""Document compressor that uses a pipeline of transformers."""
transformers: List[Union[BaseDocumentTransformer, BaseDocumentCompressor]]
"""List of document filters that are chained together and run in sequence."""
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
[docs] def compress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
"""Transform a list of documents."""
for _transformer in self.transformers:
if isinstance(_transformer, BaseDocumentCompressor):
documents = _transformer.compress_documents(documents, query)
elif isinstance(_transformer, BaseDocumentTransformer):
documents = _transformer.transform_documents(documents)
else:
raise ValueError(f"Got unexpected transformer type: {_transformer}")
return documents
[docs] async def acompress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
"""Compress retrieved documents given the query context."""
for _transformer in self.transformers:
if isinstance(_transformer, BaseDocumentCompressor):
documents = await _transformer.acompress_documents(documents, query)
elif isinstance(_transformer, BaseDocumentTransformer):
documents = await _transformer.atransform_documents(documents)
else:
raise ValueError(f"Got unexpected transformer type: {_transformer}")
return documents
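A sketch of a pipeline mixing a plain transformer with compressors; the splitter settings and threshold are illustrative and OpenAIEmbeddings assumes an API key:

from langchain.document_transformers import EmbeddingsRedundantFilter
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers.document_compressors import (
    DocumentCompressorPipeline,
    EmbeddingsFilter,
)
from langchain.schema import Document
from langchain.text_splitter import CharacterTextSplitter

embeddings = OpenAIEmbeddings()
pipeline = DocumentCompressorPipeline(
    transformers=[
        CharacterTextSplitter(chunk_size=300, chunk_overlap=0, separator=". "),
        EmbeddingsRedundantFilter(embeddings=embeddings),
        EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.75),
    ]
)
docs = [Document(page_content="LangChain has retrievers. LangChain has retrievers. It also has compressors.")]
compressed = pipeline.compress_documents(docs, "What does LangChain have?")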
Source code for langchain.retrievers.document_compressors.embeddings_filter
"""Document compressor that uses embeddings to drop documents unrelated to the query."""
from typing import Callable, Dict, Optional, Sequence
import numpy as np
from pydantic import root_validator
from langchain.document_transformers import (
_get_embeddings_from_stateful_docs,
get_stateful_documents,
)
from langchain.embeddings.base import Embeddings
from langchain.math_utils import cosine_similarity
from langchain.retrievers.document_compressors.base import (
BaseDocumentCompressor,
)
from langchain.schema import Document
[docs]class EmbeddingsFilter(BaseDocumentCompressor):
embeddings: Embeddings
"""Embeddings to use for embedding document contents and queries."""
similarity_fn: Callable = cosine_similarity
"""Similarity function for comparing documents. Function expected to take as input
two matrices (List[List[float]]) and return a matrix of scores where higher values
indicate greater similarity."""
k: Optional[int] = 20
"""The number of relevant documents to return. Can be set to None, in which case
`similarity_threshold` must be specified. Defaults to 20."""
similarity_threshold: Optional[float]
"""Threshold for determining when two documents are similar enough
to be considered redundant. Defaults to None, must be specified if `k` is set
to None."""
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
@root_validator()
def validate_params(cls, values: Dict) -> Dict:
"""Validate similarity parameters."""
if values["k"] is None and values["similarity_threshold"] is None:
raise ValueError("Must specify one of `k` or `similarity_threshold`.")
return values
[docs] def compress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
"""Filter documents based on similarity of their embeddings to the query."""
stateful_documents = get_stateful_documents(documents)
embedded_documents = _get_embeddings_from_stateful_docs(
self.embeddings, stateful_documents
)
embedded_query = self.embeddings.embed_query(query)
similarity = self.similarity_fn([embedded_query], embedded_documents)[0]
included_idxs = np.arange(len(embedded_documents))
if self.k is not None:
included_idxs = np.argsort(similarity)[::-1][: self.k]
if self.similarity_threshold is not None:
similar_enough = np.where(
similarity[included_idxs] > self.similarity_threshold
)
included_idxs = included_idxs[similar_enough]
return [stateful_documents[i] for i in included_idxs]
[docs] async def acompress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
"""Filter down documents."""
raise NotImplementedError
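Standalone usage is a two-liner once an Embeddings object is available; a sketch with toy documents (OpenAIEmbeddings assumes an API key):

from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers.document_compressors import EmbeddingsFilter
from langchain.schema import Document

docs = [
    Document(page_content="The cat sat on the mat."),
    Document(page_content="Quarterly revenue grew 12 percent."),
]
embeddings_filter = EmbeddingsFilter(embeddings=OpenAIEmbeddings(), k=1)
kept = embeddings_filter.compress_documents(docs, "financial results")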
Source code for langchain.retrievers.document_compressors.chain_extract
"""DocumentFilter that uses an LLM chain to extract the relevant parts of documents."""
from __future__ import annotations
import asyncio
from typing import Any, Callable, Dict, Optional, Sequence
from langchain import LLMChain, PromptTemplate
from langchain.base_language import BaseLanguageModel
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
from langchain.retrievers.document_compressors.chain_extract_prompt import (
prompt_template,
)
from langchain.schema import BaseOutputParser, Document
def default_get_input(query: str, doc: Document) -> Dict[str, Any]:
"""Return the compression chain input."""
return {"question": query, "context": doc.page_content}
class NoOutputParser(BaseOutputParser[str]):
"""Parse outputs that could return a null string of some sort."""
no_output_str: str = "NO_OUTPUT"
def parse(self, text: str) -> str:
cleaned_text = text.strip()
if cleaned_text == self.no_output_str:
return ""
return cleaned_text
def _get_default_chain_prompt() -> PromptTemplate:
output_parser = NoOutputParser()
template = prompt_template.format(no_output_str=output_parser.no_output_str)
return PromptTemplate(
template=template,
input_variables=["question", "context"],
output_parser=output_parser,
)
[docs]class LLMChainExtractor(BaseDocumentCompressor):
llm_chain: LLMChain
"""LLM wrapper to use for compressing documents."""
get_input: Callable[[str, Document], dict] = default_get_input
"""Callable for constructing the chain input from the query and a Document."""
[docs] def compress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
"""Compress page content of raw documents."""
compressed_docs = []
for doc in documents:
_input = self.get_input(query, doc)
output = self.llm_chain.predict_and_parse(**_input)
if len(output) == 0:
continue
compressed_docs.append(Document(page_content=output, metadata=doc.metadata))
return compressed_docs
[docs] async def acompress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
"""Compress page content of raw documents asynchronously."""
outputs = await asyncio.gather(
*[
self.llm_chain.apredict_and_parse(**self.get_input(query, doc))
for doc in documents
]
)
compressed_docs = []
for i, doc in enumerate(documents):
if len(outputs[i]) == 0:
continue
compressed_docs.append(
Document(page_content=outputs[i], metadata=doc.metadata)
)
return compressed_docs
[docs] @classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[PromptTemplate] = None,
get_input: Optional[Callable[[str, Document], str]] = None,
llm_chain_kwargs: Optional[dict] = None,
) -> LLMChainExtractor:
"""Initialize from LLM."""
_prompt = prompt if prompt is not None else _get_default_chain_prompt()
_get_input = get_input if get_input is not None else default_get_input
llm_chain = LLMChain(llm=llm, prompt=_prompt, **(llm_chain_kwargs or {}))
return cls(llm_chain=llm_chain, get_input=_get_input)
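A sketch of from_llm with the default prompt; the document and query are toy values and OpenAI assumes an API key:

from langchain.llms import OpenAI
from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain.schema import Document

compressor = LLMChainExtractor.from_llm(OpenAI(temperature=0))
docs = [Document(page_content="LangChain provides retrievers. It also ships document compressors.")]
extracted = compressor.compress_documents(docs, "What compressors does LangChain ship?")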
Source code for langchain.retrievers.document_compressors.chain_filter
"""Filter that uses an LLM to drop documents that aren't relevant to the query."""
from typing import Any, Callable, Dict, Optional, Sequence
from langchain import BasePromptTemplate, LLMChain, PromptTemplate
from langchain.base_language import BaseLanguageModel
from langchain.output_parsers.boolean import BooleanOutputParser
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
from langchain.retrievers.document_compressors.chain_filter_prompt import (
prompt_template,
)
from langchain.schema import Document
def _get_default_chain_prompt() -> PromptTemplate:
return PromptTemplate(
template=prompt_template,
input_variables=["question", "context"],
output_parser=BooleanOutputParser(),
)
def default_get_input(query: str, doc: Document) -> Dict[str, Any]:
"""Return the compression chain input."""
return {"question": query, "context": doc.page_content}
[docs]class LLMChainFilter(BaseDocumentCompressor):
"""Filter that drops documents that aren't relevant to the query."""
llm_chain: LLMChain
"""LLM wrapper to use for filtering documents.
The chain prompt is expected to have a BooleanOutputParser."""
get_input: Callable[[str, Document], dict] = default_get_input
"""Callable for constructing the chain input from the query and a Document."""
[docs] def compress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
"""Filter down documents based on their relevance to the query."""
filtered_docs = []
for doc in documents:
_input = self.get_input(query, doc)
include_doc = self.llm_chain.predict_and_parse(**_input)
if include_doc:
filtered_docs.append(doc)
return filtered_docs
[docs] async def acompress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
"""Filter down documents."""
raise NotImplementedError
[docs] @classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any
) -> "LLMChainFilter":
_prompt = prompt if prompt is not None else _get_default_chain_prompt()
llm_chain = LLMChain(llm=llm, prompt=_prompt)
return cls(llm_chain=llm_chain, **kwargs)
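Usage mirrors LLMChainExtractor; a sketch with a toy document (OpenAI assumes an API key):

from langchain.llms import OpenAI
from langchain.retrievers.document_compressors import LLMChainFilter
from langchain.schema import Document

llm_filter = LLMChainFilter.from_llm(OpenAI(temperature=0))
docs = [Document(page_content="Photosynthesis converts light into chemical energy.")]
relevant = llm_filter.compress_documents(docs, "How do plants make energy?")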
Source code for langchain.retrievers.document_compressors.cohere_rerank
from __future__ import annotations
from typing import TYPE_CHECKING, Dict, Sequence
from pydantic import Extra, root_validator
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
from langchain.schema import Document
from langchain.utils import get_from_dict_or_env
if TYPE_CHECKING:
from cohere import Client
else:
# We do this to avoid pydantic annotation issues when actually instantiating
# while keeping this import optional
try:
from cohere import Client
except ImportError:
pass
[docs]class CohereRerank(BaseDocumentCompressor):
client: Client
top_n: int = 3
model: str = "rerank-english-v2.0"
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
cohere_api_key = get_from_dict_or_env(
values, "cohere_api_key", "COHERE_API_KEY"
)
try:
import cohere
values["client"] = cohere.Client(cohere_api_key)
except ImportError:
raise ImportError(
"Could not import cohere python package. "
"Please install it with `pip install cohere`."
)
return values
[docs] def compress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
doc_list = list(documents)
_docs = [d.page_content for d in doc_list]
results = self.client.rerank(
model=self.model, query=query, documents=_docs, top_n=self.top_n
)
final_results = []
for r in results:
doc = doc_list[r.index]
doc.metadata["relevance_score"] = r.relevance_score
final_results.append(doc)
return final_results
[docs] async def acompress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
raise NotImplementedError
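A sketch assuming the cohere package and a COHERE_API_KEY in the environment; the documents and query are placeholders:

import os

from langchain.retrievers.document_compressors import CohereRerank
from langchain.schema import Document

os.environ["COHERE_API_KEY"] = "<cohere-key>"  # placeholder
reranker = CohereRerank(top_n=2)
docs = [Document(page_content=t) for t in ("Rust for systems", "Python for web", "Rust for embedded")]
reranked = reranker.compress_documents(docs, "embedded programming in Rust")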
Source code for langchain.retrievers.self_query.base
"""Retriever that generates and executes structured queries over its own data source."""
from typing import Any, Dict, List, Optional, Type, cast
from pydantic import BaseModel, Field, root_validator
from langchain import LLMChain
from langchain.base_language import BaseLanguageModel
from langchain.chains.query_constructor.base import load_query_constructor_chain
from langchain.chains.query_constructor.ir import StructuredQuery, Visitor
from langchain.chains.query_constructor.schema import AttributeInfo
from langchain.retrievers.self_query.chroma import ChromaTranslator
from langchain.retrievers.self_query.pinecone import PineconeTranslator
from langchain.retrievers.self_query.weaviate import WeaviateTranslator
from langchain.schema import BaseRetriever, Document
from langchain.vectorstores import Chroma, Pinecone, VectorStore, Weaviate
def _get_builtin_translator(vectorstore_cls: Type[VectorStore]) -> Visitor:
"""Get the translator class corresponding to the vector store class."""
BUILTIN_TRANSLATORS: Dict[Type[VectorStore], Type[Visitor]] = {
Pinecone: PineconeTranslator,
Chroma: ChromaTranslator,
Weaviate: WeaviateTranslator,
}
if vectorstore_cls not in BUILTIN_TRANSLATORS:
raise ValueError(
f"Self query retriever with Vector Store type {vectorstore_cls}"
f" not supported."
)
return BUILTIN_TRANSLATORS[vectorstore_cls]()
[docs]class SelfQueryRetriever(BaseRetriever, BaseModel):
"""Retriever that wraps around a vector store and uses an LLM to generate
the vector store queries."""
vectorstore: VectorStore
"""The underlying vector store from which documents will be retrieved.""" | https://python.langchain.com/en/latest/_modules/langchain/retrievers/self_query/base.html |
d4e0557c686e-1 | vectorstore: VectorStore
"""The underlying vector store from which documents will be retrieved."""
llm_chain: LLMChain
"""The LLMChain for generating the vector store queries."""
search_type: str = "similarity"
"""The search type to perform on the vector store."""
search_kwargs: dict = Field(default_factory=dict)
"""Keyword arguments to pass in to the vector store search."""
structured_query_translator: Visitor
"""Translator for turning internal query language into vectorstore search params."""
verbose: bool = False
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
@root_validator(pre=True)
def validate_translator(cls, values: Dict) -> Dict:
"""Validate translator."""
if "structured_query_translator" not in values:
vectorstore_cls = values["vectorstore"].__class__
values["structured_query_translator"] = _get_builtin_translator(
vectorstore_cls
)
return values
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
"""Get documents relevant for a query.
Args:
query: string to find relevant documents for
Returns:
List of relevant documents
"""
inputs = self.llm_chain.prep_inputs({"query": query})
structured_query = cast(
StructuredQuery, self.llm_chain.predict_and_parse(callbacks=None, **inputs)
)
if self.verbose:
print(structured_query)
new_query, new_kwargs = self.structured_query_translator.visit_structured_query(
structured_query
)
if structured_query.limit is not None:
new_kwargs["k"] = structured_query.limit | https://python.langchain.com/en/latest/_modules/langchain/retrievers/self_query/base.html |
d4e0557c686e-2 | if structured_query.limit is not None:
new_kwargs["k"] = structured_query.limit
search_kwargs = {**self.search_kwargs, **new_kwargs}
docs = self.vectorstore.search(new_query, self.search_type, **search_kwargs)
return docs
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
[docs] @classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
vectorstore: VectorStore,
document_contents: str,
metadata_field_info: List[AttributeInfo],
structured_query_translator: Optional[Visitor] = None,
chain_kwargs: Optional[Dict] = None,
enable_limit: bool = False,
**kwargs: Any,
) -> "SelfQueryRetriever":
if structured_query_translator is None:
structured_query_translator = _get_builtin_translator(vectorstore.__class__)
chain_kwargs = chain_kwargs or {}
if "allowed_comparators" not in chain_kwargs:
chain_kwargs[
"allowed_comparators"
] = structured_query_translator.allowed_comparators
if "allowed_operators" not in chain_kwargs:
chain_kwargs[
"allowed_operators"
] = structured_query_translator.allowed_operators
llm_chain = load_query_constructor_chain(
llm,
document_contents,
metadata_field_info,
enable_limit=enable_limit,
**chain_kwargs,
)
return cls(
llm_chain=llm_chain,
vectorstore=vectorstore,
structured_query_translator=structured_query_translator,
**kwargs,
)
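A sketch of from_llm over a Chroma store; the documents, metadata fields, and query are illustrative, and OpenAI/Chroma assume their usual dependencies and API key:

from langchain.chains.query_constructor.schema import AttributeInfo
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.retrievers import SelfQueryRetriever
from langchain.schema import Document
from langchain.vectorstores import Chroma

docs = [
    Document(page_content="A sci-fi film about dreams", metadata={"year": 2010, "genre": "sci-fi"}),
    Document(page_content="A romantic comedy set in Paris", metadata={"year": 2004, "genre": "comedy"}),
]
vectorstore = Chroma.from_documents(docs, OpenAIEmbeddings())
metadata_field_info = [
    AttributeInfo(name="year", description="Release year", type="integer"),
    AttributeInfo(name="genre", description="Film genre", type="string"),
]
retriever = SelfQueryRetriever.from_llm(
    llm=OpenAI(temperature=0),
    vectorstore=vectorstore,
    document_contents="Brief summary of a movie",
    metadata_field_info=metadata_field_info,
)
results = retriever.get_relevant_documents("sci-fi movies released after 2005")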
Source code for langchain.embeddings.aleph_alpha
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, root_validator
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
[docs]class AlephAlphaAsymmetricSemanticEmbedding(BaseModel, Embeddings):
"""
Wrapper for Aleph Alpha's Asymmetric Embeddings
AA provides you with an endpoint to embed a document and a query.
The models were optimized to make the embeddings of documents and
the query for a document as similar as possible.
To learn more, check out: https://docs.aleph-alpha.com/docs/tasks/semantic_embed/
Example:
.. code-block:: python
from langchain.embeddings import AlephAlphaAsymmetricSemanticEmbedding
embeddings = AlephAlphaAsymmetricSemanticEmbedding()
document = "This is a content of the document"
query = "What is the content of the document?"
doc_result = embeddings.embed_documents([document])
query_result = embeddings.embed_query(query)
"""
client: Any #: :meta private:
model: Optional[str] = "luminous-base"
"""Model name to use."""
hosting: Optional[str] = "https://api.aleph-alpha.com"
"""Optional parameter that specifies which datacenters may process the request."""
normalize: Optional[bool] = True
"""Should returned embeddings be normalized"""
compress_to_size: Optional[int] = 128
"""Should the returned embeddings come back as an original 5120-dim vector,
or should it be compressed to 128-dim."""
contextual_control_threshold: Optional[int] = None
"""Attention control parameters only apply to those tokens that have | https://python.langchain.com/en/latest/_modules/langchain/embeddings/aleph_alpha.html |
2d7b535fb1ed-1 | """Attention control parameters only apply to those tokens that have
explicitly been set in the request."""
control_log_additive: Optional[bool] = True
"""Apply controls on prompt items by adding the log(control_factor)
to attention scores."""
aleph_alpha_api_key: Optional[str] = None
"""API key for Aleph Alpha API."""
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
aleph_alpha_api_key = get_from_dict_or_env(
values, "aleph_alpha_api_key", "ALEPH_ALPHA_API_KEY"
)
try:
from aleph_alpha_client import Client
except ImportError:
raise ValueError(
"Could not import aleph_alpha_client python package. "
"Please install it with `pip install aleph_alpha_client`."
)
values["client"] = Client(token=aleph_alpha_api_key)
return values
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to Aleph Alpha's asymmetric Document endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
try:
from aleph_alpha_client import (
Prompt,
SemanticEmbeddingRequest,
SemanticRepresentation,
)
except ImportError:
raise ValueError(
"Could not import aleph_alpha_client python package. "
"Please install it with `pip install aleph_alpha_client`."
)
document_embeddings = []
for text in texts:
document_params = {
"prompt": Prompt.from_text(text), | https://python.langchain.com/en/latest/_modules/langchain/embeddings/aleph_alpha.html |
2d7b535fb1ed-2 | document_params = {
"prompt": Prompt.from_text(text),
"representation": SemanticRepresentation.Document,
"compress_to_size": self.compress_to_size,
"normalize": self.normalize,
"contextual_control_threshold": self.contextual_control_threshold,
"control_log_additive": self.control_log_additive,
}
document_request = SemanticEmbeddingRequest(**document_params)
document_response = self.client.semantic_embed(
request=document_request, model=self.model
)
document_embeddings.append(document_response.embedding)
return document_embeddings
[docs] def embed_query(self, text: str) -> List[float]:
"""Call out to Aleph Alpha's asymmetric, query embedding endpoint
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
try:
from aleph_alpha_client import (
Prompt,
SemanticEmbeddingRequest,
SemanticRepresentation,
)
except ImportError:
raise ValueError(
"Could not import aleph_alpha_client python package. "
"Please install it with `pip install aleph_alpha_client`."
)
symmetric_params = {
"prompt": Prompt.from_text(text),
"representation": SemanticRepresentation.Query,
"compress_to_size": self.compress_to_size,
"normalize": self.normalize,
"contextual_control_threshold": self.contextual_control_threshold,
"control_log_additive": self.control_log_additive,
}
symmetric_request = SemanticEmbeddingRequest(**symmetric_params)
symmetric_response = self.client.semantic_embed(
request=symmetric_request, model=self.model
)
return symmetric_response.embedding
[docs]class AlephAlphaSymmetricSemanticEmbedding(AlephAlphaAsymmetricSemanticEmbedding):
"""The symmetric version of the Aleph Alpha's semantic embeddings.
The main difference is that here, both the documents and
queries are embedded with a SemanticRepresentation.Symmetric
Example:
.. code-block:: python
from langchain.embeddings import AlephAlphaSymmetricSemanticEmbedding
embeddings = AlephAlphaSymmetricSemanticEmbedding()
text = "This is a test text"
doc_result = embeddings.embed_documents([text])
query_result = embeddings.embed_query(text)
"""
def _embed(self, text: str) -> List[float]:
try:
from aleph_alpha_client import (
Prompt,
SemanticEmbeddingRequest,
SemanticRepresentation,
)
except ImportError:
raise ValueError(
"Could not import aleph_alpha_client python package. "
"Please install it with `pip install aleph_alpha_client`."
)
query_params = {
"prompt": Prompt.from_text(text),
"representation": SemanticRepresentation.Symmetric,
"compress_to_size": self.compress_to_size,
"normalize": self.normalize,
"contextual_control_threshold": self.contextual_control_threshold,
"control_log_additive": self.control_log_additive,
}
query_request = SemanticEmbeddingRequest(**query_params)
query_response = self.client.semantic_embed(
request=query_request, model=self.model
)
return query_response.embedding
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to Aleph Alpha's Document endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
document_embeddings = []
for text in texts:
document_embeddings.append(self._embed(text))
return document_embeddings
[docs] def embed_query(self, text: str) -> List[float]:
"""Call out to Aleph Alpha's asymmetric, query embedding endpoint
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self._embed(text)
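A sketch contrasting the two classes; the API key is a placeholder and the aleph_alpha_client package is assumed:

from langchain.embeddings import (
    AlephAlphaAsymmetricSemanticEmbedding,
    AlephAlphaSymmetricSemanticEmbedding,
)

asymmetric = AlephAlphaAsymmetricSemanticEmbedding(aleph_alpha_api_key="<aa-key>")  # placeholder key
doc_vectors = asymmetric.embed_documents(["The quarterly report shows 12 percent growth."])
query_vector = asymmetric.embed_query("How much did revenue grow?")

# Symmetric variant: documents and queries share one representation.
symmetric = AlephAlphaSymmetricSemanticEmbedding(aleph_alpha_api_key="<aa-key>")
similar_space_vector = symmetric.embed_query("revenue growth")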
Source code for langchain.embeddings.sagemaker_endpoint
"""Wrapper around Sagemaker InvokeEndpoint API."""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
from langchain.llms.sagemaker_endpoint import ContentHandlerBase
class EmbeddingsContentHandler(ContentHandlerBase[List[str], List[List[float]]]):
"""Content handler for LLM class."""
[docs]class SagemakerEndpointEmbeddings(BaseModel, Embeddings):
"""Wrapper around custom Sagemaker Inference Endpoints.
To use, you must supply the endpoint name from your deployed
Sagemaker model & the region where it is deployed.
To authenticate, the AWS client uses the following methods to
automatically load credentials:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific credential profile should be used, you must pass
the name of the profile from the ~/.aws/credentials file that is to be used.
Make sure the credentials / roles used have the required policies to
access the Sagemaker endpoint.
See: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
"""
"""
Example:
.. code-block:: python
from langchain.embeddings import SagemakerEndpointEmbeddings
endpoint_name = (
"my-endpoint-name"
)
region_name = (
"us-west-2"
)
credentials_profile_name = (
"default"
)
se = SagemakerEndpointEmbeddings(
endpoint_name=endpoint_name,
region_name=region_name,
credentials_profile_name=credentials_profile_name
)
""" | https://python.langchain.com/en/latest/_modules/langchain/embeddings/sagemaker_endpoint.html |
a2fa231208fd-1 | credentials_profile_name=credentials_profile_name
)
"""
client: Any #: :meta private:
endpoint_name: str = ""
"""The name of the endpoint from the deployed Sagemaker model.
Must be unique within an AWS Region."""
region_name: str = ""
"""The aws region where the Sagemaker model is deployed, eg. `us-west-2`."""
credentials_profile_name: Optional[str] = None
"""The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
has either access keys or role information specified.
If not specified, the default credential profile or, if on an EC2 instance,
credentials from IMDS will be used.
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
"""
content_handler: EmbeddingsContentHandler
"""The content handler class that provides an input and
output transform functions to handle formats between LLM
and the endpoint.
"""
"""
Example:
.. code-block:: python
from langchain.embeddings.sagemaker_endpoint import EmbeddingsContentHandler
class ContentHandler(EmbeddingsContentHandler):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompts: List[str], model_kwargs: Dict) -> bytes:
input_str = json.dumps({"prompts": prompts, **model_kwargs})
return input_str.encode('utf-8')
def transform_output(self, output: bytes) -> List[List[float]]:
response_json = json.loads(output.read().decode("utf-8"))
return response_json["vectors"]
""" # noqa: E501
model_kwargs: Optional[Dict] = None
"""Key word arguments to pass to the model."""
endpoint_kwargs: Optional[Dict] = None
"""Optional attributes passed to the invoke_endpoint
function. See `boto3`_. docs for more info.
.. _boto3: <https://boto3.amazonaws.com/v1/documentation/api/latest/index.html>
"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that AWS credentials to and python package exists in environment."""
try:
import boto3
try:
if values["credentials_profile_name"] is not None:
session = boto3.Session(
profile_name=values["credentials_profile_name"]
)
else:
# use default credentials
session = boto3.Session()
values["client"] = session.client(
"sagemaker-runtime", region_name=values["region_name"]
)
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
"profile name are valid."
) from e
except ImportError:
raise ValueError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
return values
def _embedding_func(self, texts: List[str]) -> List[List[float]]:
"""Call out to SageMaker Inference embedding endpoint."""
# replace newlines, which can negatively affect performance.
texts = list(map(lambda x: x.replace("\n", " "), texts))
_model_kwargs = self.model_kwargs or {}
_endpoint_kwargs = self.endpoint_kwargs or {}
body = self.content_handler.transform_input(texts, _model_kwargs)
content_type = self.content_handler.content_type
accepts = self.content_handler.accepts
# send request
try:
response = self.client.invoke_endpoint(
EndpointName=self.endpoint_name,
Body=body,
ContentType=content_type,
Accept=accepts,
**_endpoint_kwargs,
)
except Exception as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
return self.content_handler.transform_output(response["Body"])
[docs] def embed_documents(
self, texts: List[str], chunk_size: int = 64
) -> List[List[float]]:
"""Compute doc embeddings using a SageMaker Inference Endpoint.
Args:
texts: The list of texts to embed.
chunk_size: The chunk size defines how many input texts will
be grouped together as request. If None, will use the
chunk size specified by the class.
Returns:
List of embeddings, one for each text.
"""
results = []
_chunk_size = len(texts) if chunk_size > len(texts) else chunk_size
for i in range(0, len(texts), _chunk_size):
response = self._embedding_func(texts[i : i + _chunk_size])
results.extend(response)
return results
[docs] def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a SageMaker inference endpoint. | https://python.langchain.com/en/latest/_modules/langchain/embeddings/sagemaker_endpoint.html |
a2fa231208fd-4 | """Compute query embeddings using a SageMaker inference endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self._embedding_func([text])[0]
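A sketch of a concrete content handler and endpoint wiring; the endpoint name, region, profile, and JSON keys ("inputs", "vectors") are placeholders that depend on how the model was deployed:

import json
from typing import Dict, List

from langchain.embeddings import SagemakerEndpointEmbeddings
from langchain.embeddings.sagemaker_endpoint import EmbeddingsContentHandler


class ContentHandler(EmbeddingsContentHandler):
    content_type = "application/json"
    accepts = "application/json"

    def transform_input(self, prompts: List[str], model_kwargs: Dict) -> bytes:
        # The request key is model-specific; "inputs" is a placeholder.
        return json.dumps({"inputs": prompts, **model_kwargs}).encode("utf-8")

    def transform_output(self, output: bytes) -> List[List[float]]:
        # The response key is likewise model-specific.
        return json.loads(output.read().decode("utf-8"))["vectors"]


embedder = SagemakerEndpointEmbeddings(
    endpoint_name="my-embedding-endpoint",  # placeholder
    region_name="us-west-2",                # placeholder
    credentials_profile_name="default",     # placeholder
    content_handler=ContentHandler(),
)
vectors = embedder.embed_documents(["first text", "second text"], chunk_size=2)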
Source code for langchain.embeddings.mosaicml
"""Wrapper around MosaicML APIs."""
from typing import Any, Dict, List, Mapping, Optional, Tuple
import requests
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
[docs]class MosaicMLInstructorEmbeddings(BaseModel, Embeddings):
"""Wrapper around MosaicML's embedding inference service.
To use, you should have the
environment variable ``MOSAICML_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.embeddings import MosaicMLInstructorEmbeddings
endpoint_url = (
"https://models.hosted-on.mosaicml.hosting/instructor-large/v1/predict"
)
mosaic_llm = MosaicMLInstructorEmbeddings(
endpoint_url=endpoint_url,
mosaicml_api_token="my-api-key"
)
"""
endpoint_url: str = (
"https://models.hosted-on.mosaicml.hosting/instructor-large/v1/predict"
)
"""Endpoint URL to use."""
embed_instruction: str = "Represent the document for retrieval: "
"""Instruction used to embed documents."""
query_instruction: str = (
"Represent the question for retrieving supporting documents: "
)
"""Instruction used to embed the query."""
retry_sleep: float = 1.0
"""How long to try sleeping for if a rate limit is encountered"""
mosaicml_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
mosaicml_api_token = get_from_dict_or_env(
values, "mosaicml_api_token", "MOSAICML_API_TOKEN"
)
values["mosaicml_api_token"] = mosaicml_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {"endpoint_url": self.endpoint_url}
def _embed(
self, input: List[Tuple[str, str]], is_retry: bool = False
) -> List[List[float]]:
payload = {"input_strings": input}
# HTTP headers for authorization
headers = {
"Authorization": f"{self.mosaicml_api_token}",
"Content-Type": "application/json",
}
# send request
try:
response = requests.post(self.endpoint_url, headers=headers, json=payload)
except requests.exceptions.RequestException as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
try:
parsed_response = response.json()
if "error" in parsed_response:
# if we get rate limited, try sleeping for 1 second
if (
not is_retry
and "rate limit exceeded" in parsed_response["error"].lower()
):
import time
time.sleep(self.retry_sleep)
return self._embed(input, is_retry=True)
raise ValueError(
f"Error raised by inference API: {parsed_response['error']}"
)
if "data" not in parsed_response:
raise ValueError(
f"Error raised by inference API, no key data: {parsed_response}"
)
embeddings = parsed_response["data"]
except requests.exceptions.JSONDecodeError as e:
raise ValueError(
f"Error raised by inference API: {e}.\nResponse: {response.text}"
)
return embeddings
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed documents using a MosaicML deployed instructor embedding model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
instruction_pairs = [(self.embed_instruction, text) for text in texts]
embeddings = self._embed(instruction_pairs)
return embeddings
[docs] def embed_query(self, text: str) -> List[float]:
"""Embed a query using a MosaicML deployed instructor embedding model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
instruction_pair = (self.query_instruction, text)
embedding = self._embed([instruction_pair])[0]
return embedding
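A sketch using the hosted instructor-large endpoint above; the token is a placeholder (it can also be supplied via MOSAICML_API_TOKEN):

from langchain.embeddings import MosaicMLInstructorEmbeddings

embedder = MosaicMLInstructorEmbeddings(mosaicml_api_token="<mosaicml-token>")  # placeholder token
doc_vectors = embedder.embed_documents(["MosaicML hosts instructor-large for embeddings."])
query_vector = embedder.embed_query("Which embedding models does MosaicML host?")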
Source code for langchain.embeddings.openai
"""Wrapper around OpenAI embedding models."""
from __future__ import annotations
import logging
from typing import (
Any,
Callable,
Dict,
List,
Literal,
Optional,
Sequence,
Set,
Tuple,
Union,
)
import numpy as np
from pydantic import BaseModel, Extra, root_validator
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
def _create_retry_decorator(embeddings: OpenAIEmbeddings) -> Callable[[Any], Any]:
import openai
min_seconds = 4
max_seconds = 10
# Wait 2^x * 1 second between each retry starting with
# 4 seconds, then up to 10 seconds, then 10 seconds afterwards
return retry(
reraise=True,
stop=stop_after_attempt(embeddings.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout)
| retry_if_exception_type(openai.error.APIError)
| retry_if_exception_type(openai.error.APIConnectionError)
| retry_if_exception_type(openai.error.RateLimitError)
| retry_if_exception_type(openai.error.ServiceUnavailableError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def embed_with_retry(embeddings: OpenAIEmbeddings, **kwargs: Any) -> Any:
"""Use tenacity to retry the embedding call."""
retry_decorator = _create_retry_decorator(embeddings)
@retry_decorator
def _embed_with_retry(**kwargs: Any) -> Any:
return embeddings.client.create(**kwargs)
return _embed_with_retry(**kwargs)
[docs]class OpenAIEmbeddings(BaseModel, Embeddings):
"""Wrapper around OpenAI embedding models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key or pass it
as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.embeddings import OpenAIEmbeddings
openai = OpenAIEmbeddings(openai_api_key="my-api-key")
In order to use the library with Microsoft Azure endpoints, you need to set
the OPENAI_API_TYPE, OPENAI_API_BASE, OPENAI_API_KEY and OPENAI_API_VERSION.
The OPENAI_API_TYPE must be set to 'azure' and the others correspond to
the properties of your endpoint.
In addition, the deployment name must be passed as the model parameter.
Example:
.. code-block:: python
import os
os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_BASE"] = "https://<your-endpoint.openai.azure.com/"
os.environ["OPENAI_API_KEY"] = "your AzureOpenAI key"
os.environ["OPENAI_API_VERSION"] = "2023-03-15-preview"
os.environ["OPENAI_PROXY"] = "http://your-corporate-proxy:8080" | https://python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html |
8a61c0c4fc15-2 | from langchain.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings(
deployment="your-embeddings-deployment-name",
model="your-embeddings-model-name",
api_base="https://your-endpoint.openai.azure.com/",
api_type="azure",
)
text = "This is a test query."
query_result = embeddings.embed_query(text)
"""
client: Any #: :meta private:
model: str = "text-embedding-ada-002"
deployment: str = model # to support Azure OpenAI Service custom deployment names
openai_api_version: Optional[str] = None
# to support Azure OpenAI Service custom endpoints
openai_api_base: Optional[str] = None
# to support Azure OpenAI Service custom endpoints
openai_api_type: Optional[str] = None
# to support explicit proxy for OpenAI
openai_proxy: Optional[str] = None
embedding_ctx_length: int = 8191
openai_api_key: Optional[str] = None
openai_organization: Optional[str] = None
allowed_special: Union[Literal["all"], Set[str]] = set()
disallowed_special: Union[Literal["all"], Set[str], Sequence[str]] = "all"
chunk_size: int = 1000
"""Maximum number of texts to embed in each batch"""
max_retries: int = 6
"""Maximum number of retries to make when generating."""
request_timeout: Optional[Union[float, Tuple[float, float]]] = None
"""Timeout in seconds for the OpenAPI request."""
headers: Any = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
openai_api_base = get_from_dict_or_env(
values,
"openai_api_base",
"OPENAI_API_BASE",
default="",
)
openai_api_type = get_from_dict_or_env(
values,
"openai_api_type",
"OPENAI_API_TYPE",
default="",
)
openai_proxy = get_from_dict_or_env(
values,
"openai_proxy",
"OPENAI_PROXY",
default="",
)
if openai_api_type in ("azure", "azure_ad", "azuread"):
default_api_version = "2022-12-01"
else:
default_api_version = ""
openai_api_version = get_from_dict_or_env(
values,
"openai_api_version",
"OPENAI_API_VERSION",
default=default_api_version,
)
openai_organization = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
try:
import openai
openai.api_key = openai_api_key
if openai_organization:
openai.organization = openai_organization
if openai_api_base:
openai.api_base = openai_api_base
if openai_api_type:
openai.api_version = openai_api_version
if openai_api_type:
openai.api_type = openai_api_type
if openai_proxy:
openai.proxy = {"http": openai_proxy, "https": openai_proxy} # type: ignore[assignment] # noqa: E501
values["client"] = openai.Embedding
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
return values
# please refer to
# https://github.com/openai/openai-cookbook/blob/main/examples/Embedding_long_inputs.ipynb
def _get_len_safe_embeddings(
self, texts: List[str], *, engine: str, chunk_size: Optional[int] = None
) -> List[List[float]]:
embeddings: List[List[float]] = [[] for _ in range(len(texts))]
try:
import tiktoken
except ImportError:
raise ImportError(
"Could not import tiktoken python package. "
"This is needed in order to for OpenAIEmbeddings. "
"Please install it with `pip install tiktoken`."
)
tokens = []
indices = []
encoding = tiktoken.model.encoding_for_model(self.model)
for i, text in enumerate(texts):
if self.model.endswith("001"):
# See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500
# replace newlines, which can negatively affect performance.
text = text.replace("\n", " ")
token = encoding.encode(
text,
allowed_special=self.allowed_special,
disallowed_special=self.disallowed_special,
)
for j in range(0, len(token), self.embedding_ctx_length):
tokens += [token[j : j + self.embedding_ctx_length]]
indices += [i]
batched_embeddings = []
_chunk_size = chunk_size or self.chunk_size
for i in range(0, len(tokens), _chunk_size):
response = embed_with_retry(
self,
input=tokens[i : i + _chunk_size],
engine=self.deployment,
request_timeout=self.request_timeout,
headers=self.headers,
)
batched_embeddings += [r["embedding"] for r in response["data"]]
results: List[List[List[float]]] = [[] for _ in range(len(texts))]
num_tokens_in_batch: List[List[int]] = [[] for _ in range(len(texts))]
for i in range(len(indices)):
results[indices[i]].append(batched_embeddings[i])
num_tokens_in_batch[indices[i]].append(len(tokens[i]))
for i in range(len(texts)):
_result = results[i]
if len(_result) == 0:
average = embed_with_retry(
self,
input="",
engine=self.deployment,
request_timeout=self.request_timeout,
headers=self.headers,
)["data"][0]["embedding"]
else:
average = np.average(_result, axis=0, weights=num_tokens_in_batch[i])
embeddings[i] = (average / np.linalg.norm(average)).tolist()
return embeddings
def _embedding_func(self, text: str, *, engine: str) -> List[float]:
"""Call out to OpenAI's embedding endpoint."""
# handle large input text
if len(text) > self.embedding_ctx_length:
return self._get_len_safe_embeddings([text], engine=engine)[0]
else:
if self.model.endswith("001"):
# See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500
# replace newlines, which can negatively affect performance.
text = text.replace("\n", " ")
return embed_with_retry(
self,
input=[text],
engine=engine,
request_timeout=self.request_timeout,
headers=self.headers,
)["data"][0]["embedding"]
[docs] def embed_documents(
self, texts: List[str], chunk_size: Optional[int] = 0
) -> List[List[float]]:
"""Call out to OpenAI's embedding endpoint for embedding search docs.
Args:
texts: The list of texts to embed.
chunk_size: The chunk size of embeddings. If None, will use the chunk size
specified by the class.
Returns:
List of embeddings, one for each text.
"""
# NOTE: to keep things simple, we assume the list may contain texts longer
# than the maximum context and use length-safe embedding function.
return self._get_len_safe_embeddings(texts, engine=self.deployment)
[docs] def embed_query(self, text: str) -> List[float]:
"""Call out to OpenAI's embedding endpoint for embedding query text.
Args:
text: The text to embed.
Returns:
Embedding for the text.
"""
embedding = self._embedding_func(text, engine=self.deployment)
return embedding
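A minimal usage sketch for OpenAIEmbeddings, assuming the ``openai`` and ``tiktoken`` packages are installed and ``OPENAI_API_KEY`` is set (the sample texts are placeholders):
from langchain.embeddings import OpenAIEmbeddings

# Default model is "text-embedding-ada-002"; texts longer than
# embedding_ctx_length are split into chunks and the chunk vectors averaged.
embeddings = OpenAIEmbeddings(chunk_size=1000)
doc_vectors = embeddings.embed_documents(["first document", "second document"])
query_vector = embeddings.embed_query("a short query")
print(len(doc_vectors), len(query_vector))  # 2 documents; ada-002 vectors have 1536 dimensions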
Source code for langchain.embeddings.modelscope_hub
"""Wrapper around ModelScopeHub embedding models."""
from typing import Any, List
from pydantic import BaseModel, Extra
from langchain.embeddings.base import Embeddings
[docs]class ModelScopeEmbeddings(BaseModel, Embeddings):
"""Wrapper around modelscope_hub embedding models.
To use, you should have the ``modelscope`` python package installed.
Example:
.. code-block:: python
from langchain.embeddings import ModelScopeEmbeddings
model_id = "damo/nlp_corom_sentence-embedding_english-base"
embed = ModelScopeEmbeddings(model_id=model_id)
"""
embed: Any
model_id: str = "damo/nlp_corom_sentence-embedding_english-base"
"""Model name to use."""
def __init__(self, **kwargs: Any):
"""Initialize the modelscope"""
super().__init__(**kwargs)
try:
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
self.embed = pipeline(Tasks.sentence_embedding, model=self.model_id)
except ImportError as e:
raise ImportError(
"Could not import some python packages."
"Please install it with `pip install modelscope`."
) from e
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a modelscope embedding model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
texts = list(map(lambda x: x.replace("\n", " "), texts))
inputs = {"source_sentence": texts}
embeddings = self.embed(input=inputs)["text_embedding"]
return embeddings.tolist()
[docs] def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a modelscope embedding model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
text = text.replace("\n", " ")
inputs = {"source_sentence": [text]}
embedding = self.embed(input=inputs)["text_embedding"][0]
return embedding.tolist()
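A usage sketch, assuming the ``modelscope`` package is installed (the model id shown is the class default):
from langchain.embeddings import ModelScopeEmbeddings

embed = ModelScopeEmbeddings(model_id="damo/nlp_corom_sentence-embedding_english-base")
doc_vectors = embed.embed_documents(["first document", "second document"])
query_vector = embed.embed_query("a short query")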
Source code for langchain.embeddings.cohere
"""Wrapper around Cohere embedding models."""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
[docs]class CohereEmbeddings(BaseModel, Embeddings):
"""Wrapper around Cohere embedding models.
To use, you should have the ``cohere`` python package installed, and the
environment variable ``COHERE_API_KEY`` set with your API key or pass it
as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.embeddings import CohereEmbeddings
cohere = CohereEmbeddings(
model="embed-english-light-v2.0", cohere_api_key="my-api-key"
)
"""
client: Any #: :meta private:
model: str = "embed-english-v2.0"
"""Model name to use."""
truncate: Optional[str] = None
"""Truncate embeddings that are too long from start or end ("NONE"|"START"|"END")"""
cohere_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
cohere_api_key = get_from_dict_or_env(
values, "cohere_api_key", "COHERE_API_KEY"
)
try:
import cohere
values["client"] = cohere.Client(cohere_api_key)
except ImportError:
raise ImportError(
"Could not import cohere python package. "
"Please install it with `pip install cohere`."
)
return values
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to Cohere's embedding endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = self.client.embed(
model=self.model, texts=texts, truncate=self.truncate
).embeddings
return [list(map(float, e)) for e in embeddings]
[docs] def embed_query(self, text: str) -> List[float]:
"""Call out to Cohere's embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
embedding = self.client.embed(
model=self.model, texts=[text], truncate=self.truncate
).embeddings[0]
return list(map(float, embedding))
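A usage sketch, assuming the ``cohere`` package is installed; "my-api-key" is a placeholder:
from langchain.embeddings import CohereEmbeddings

# truncate="END" asks the API to clip over-long inputs instead of erroring.
cohere = CohereEmbeddings(cohere_api_key="my-api-key", truncate="END")
doc_vectors = cohere.embed_documents(["first document", "second document"])
query_vector = cohere.embed_query("a short query")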
Source code for langchain.embeddings.tensorflow_hub
"""Wrapper around TensorflowHub embedding models."""
from typing import Any, List
from pydantic import BaseModel, Extra
from langchain.embeddings.base import Embeddings
DEFAULT_MODEL_URL = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"
[docs]class TensorflowHubEmbeddings(BaseModel, Embeddings):
"""Wrapper around tensorflow_hub embedding models.
To use, you should have the ``tensorflow_text`` python package installed.
Example:
.. code-block:: python
from langchain.embeddings import TensorflowHubEmbeddings
url = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"
tf = TensorflowHubEmbeddings(model_url=url)
"""
embed: Any #: :meta private:
model_url: str = DEFAULT_MODEL_URL
"""Model name to use."""
def __init__(self, **kwargs: Any):
"""Initialize the tensorflow_hub and tensorflow_text."""
super().__init__(**kwargs)
try:
import tensorflow_hub
except ImportError:
raise ImportError(
"Could not import tensorflow-hub python package. "
"Please install it with `pip install tensorflow-hub``."
)
try:
import tensorflow_text # noqa
except ImportError:
raise ImportError(
"Could not import tensorflow_text python package. "
"Please install it with `pip install tensorflow_text``."
)
self.embed = tensorflow_hub.load(self.model_url)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a TensorflowHub embedding model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
texts = list(map(lambda x: x.replace("\n", " "), texts))
embeddings = self.embed(texts).numpy()
return embeddings.tolist()
[docs] def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a TensorflowHub embedding model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
text = text.replace("\n", " ")
embedding = self.embed([text]).numpy()[0]
return embedding.tolist()
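A usage sketch, assuming ``tensorflow``, ``tensorflow_hub`` and ``tensorflow_text`` are installed (the default model URL is used):
from langchain.embeddings import TensorflowHubEmbeddings

tf_embed = TensorflowHubEmbeddings()  # downloads the universal-sentence-encoder model on first use
doc_vectors = tf_embed.embed_documents(["first document", "second document"])
query_vector = tf_embed.embed_query("a short query")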
Source code for langchain.embeddings.self_hosted
"""Running custom embedding models on self-hosted remote hardware."""
from typing import Any, Callable, List
from pydantic import Extra
from langchain.embeddings.base import Embeddings
from langchain.llms import SelfHostedPipeline
def _embed_documents(pipeline: Any, *args: Any, **kwargs: Any) -> List[List[float]]:
"""Inference function to send to the remote hardware.
Accepts a sentence_transformer model_id and
returns a list of embeddings for each document in the batch.
"""
return pipeline(*args, **kwargs)
[docs]class SelfHostedEmbeddings(SelfHostedPipeline, Embeddings):
"""Runs custom embedding models on self-hosted remote hardware.
Supported hardware includes auto-launched instances on AWS, GCP, Azure,
and Lambda, as well as servers specified
by IP address and SSH credentials (such as on-prem, or another
cloud like Paperspace, Coreweave, etc.).
To use, you should have the ``runhouse`` python package installed.
Example using a model load function:
.. code-block:: python
from langchain.embeddings import SelfHostedEmbeddings
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import runhouse as rh
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
def get_pipeline():
model_id = "facebook/bart-large"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
return pipeline("feature-extraction", model=model, tokenizer=tokenizer)
embeddings = SelfHostedEmbeddings(
model_load_fn=get_pipeline,
hardware=gpu,
model_reqs=["./", "torch", "transformers"],
)
Example passing in a pipeline path:
.. code-block:: python
from langchain.embeddings import SelfHostedHFEmbeddings
import runhouse as rh
from transformers import pipeline
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
pipeline = pipeline(model="bert-base-uncased", task="feature-extraction")
rh.blob(pickle.dumps(pipeline),
path="models/pipeline.pkl").save().to(gpu, path="models")
embeddings = SelfHostedHFEmbeddings.from_pipeline(
pipeline="models/pipeline.pkl",
hardware=gpu,
model_reqs=["./", "torch", "transformers"],
)
"""
inference_fn: Callable = _embed_documents
"""Inference function to extract the embeddings on the remote hardware."""
inference_kwargs: Any = None
"""Any kwargs to pass to the model's inference function."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a HuggingFace transformer model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
texts = list(map(lambda x: x.replace("\n", " "), texts))
embeddings = self.client(self.pipeline_ref, texts)
if not isinstance(embeddings, list):
return embeddings.tolist()
return embeddings
[docs] def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a HuggingFace transformer model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
text = text.replace("\n", " ")
embeddings = self.client(self.pipeline_ref, text)
if not isinstance(embeddings, list):
return embeddings.tolist()
return embeddings
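A self-contained usage sketch mirroring the first docstring example above (assumes ``runhouse`` and ``transformers`` are installed; the cluster name and instance type are placeholders for real hardware):
import runhouse as rh
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from langchain.embeddings import SelfHostedEmbeddings

gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")  # placeholder cluster

def get_pipeline():
    tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large")
    model = AutoModelForCausalLM.from_pretrained("facebook/bart-large")
    return pipeline("feature-extraction", model=model, tokenizer=tokenizer)

embeddings = SelfHostedEmbeddings(
    model_load_fn=get_pipeline,
    hardware=gpu,
    model_reqs=["./", "torch", "transformers"],
)
query_vector = embeddings.embed_query("a short query")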
Source code for langchain.embeddings.elasticsearch
from __future__ import annotations
from typing import TYPE_CHECKING, List, Optional
from langchain.utils import get_from_env
if TYPE_CHECKING:
from elasticsearch.client import MlClient
from langchain.embeddings.base import Embeddings
[docs]class ElasticsearchEmbeddings(Embeddings):
"""
Wrapper around Elasticsearch embedding models.
This class provides an interface to generate embeddings using a model deployed
in an Elasticsearch cluster. It requires an Elasticsearch connection object
and the model_id of the model deployed in the cluster.
In Elasticsearch you need to have an embedding model loaded and deployed.
- https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-trained-model.html
- https://www.elastic.co/guide/en/machine-learning/current/ml-nlp-deploy-models.html
""" # noqa: E501
def __init__(
self,
client: MlClient,
model_id: str,
*,
input_field: str = "text_field",
):
"""
Initialize the ElasticsearchEmbeddings instance.
Args:
client (MlClient): An Elasticsearch ML client object.
model_id (str): The model_id of the model deployed in the Elasticsearch
cluster.
input_field (str): The name of the key for the input text field in the
document. Defaults to 'text_field'.
"""
self.client = client
self.model_id = model_id
self.input_field = input_field
[docs] @classmethod
def from_credentials(
cls,
model_id: str,
*,
es_cloud_id: Optional[str] = None,
es_user: Optional[str] = None,
es_password: Optional[str] = None,
input_field: str = "text_field",
) -> ElasticsearchEmbeddings:
"""Instantiate embeddings from Elasticsearch credentials.
Args:
model_id (str): The model_id of the model deployed in the Elasticsearch
cluster.
input_field (str): The name of the key for the input text field in the
document. Defaults to 'text_field'.
es_cloud_id: (str, optional): The Elasticsearch cloud ID to connect to.
es_user: (str, optional): Elasticsearch username.
es_password: (str, optional): Elasticsearch password.
Example Usage:
from langchain.embeddings import ElasticsearchEmbeddings
# Define the model ID and input field name (if different from default)
model_id = "your_model_id"
# Optional, only if different from 'text_field'
input_field = "your_input_field"
# Credentials can be passed in two ways. Either set the env vars
# ES_CLOUD_ID, ES_USER, ES_PASSWORD and they will be automatically pulled
# in, or pass them in directly as kwargs.
embeddings = ElasticsearchEmbeddings.from_credentials(
model_id,
input_field=input_field,
# es_cloud_id="foo",
# es_user="bar",
# es_password="baz",
)
documents = [
"This is an example document.",
"Another example document to generate embeddings for.",
]
embeddings.embed_documents(documents)
"""
try:
from elasticsearch import Elasticsearch
from elasticsearch.client import MlClient
except ImportError:
raise ImportError(
"elasticsearch package not found, please install with 'pip install " | https://python.langchain.com/en/latest/_modules/langchain/embeddings/elasticsearch.html |
0ce55d92a1b1-2 | raise ImportError(
"elasticsearch package not found, please install with 'pip install "
"elasticsearch'"
)
es_cloud_id = es_cloud_id or get_from_env("es_cloud_id", "ES_CLOUD_ID")
es_user = es_user or get_from_env("es_user", "ES_USER")
es_password = es_password or get_from_env("es_password", "ES_PASSWORD")
# Connect to Elasticsearch
es_connection = Elasticsearch(
cloud_id=es_cloud_id, basic_auth=(es_user, es_password)
)
client = MlClient(es_connection)
return cls(client, model_id, input_field=input_field)
def _embedding_func(self, texts: List[str]) -> List[List[float]]:
"""
Generate embeddings for the given texts using the Elasticsearch model.
Args:
texts (List[str]): A list of text strings to generate embeddings for.
Returns:
List[List[float]]: A list of embeddings, one for each text in the input
list.
"""
response = self.client.infer_trained_model(
model_id=self.model_id, docs=[{self.input_field: text} for text in texts]
)
embeddings = [doc["predicted_value"] for doc in response["inference_results"]]
return embeddings
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""
Generate embeddings for a list of documents.
Args:
texts (List[str]): A list of document text strings to generate embeddings
for.
Returns:
List[List[float]]: A list of embeddings, one for each document in the input
list.
"""
return self._embedding_func(texts)
[docs] def embed_query(self, text: str) -> List[float]:
"""
Generate an embedding for a single query text.
Args:
text (str): The query text to generate an embedding for.
Returns:
List[float]: The embedding for the input query text.
"""
return self._embedding_func([text])[0]
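A usage sketch, assuming ``ES_CLOUD_ID``, ``ES_USER`` and ``ES_PASSWORD`` are set and a text embedding model is already deployed in the cluster ("your_model_id" is a placeholder):
from langchain.embeddings import ElasticsearchEmbeddings

embeddings = ElasticsearchEmbeddings.from_credentials("your_model_id")
doc_vectors = embeddings.embed_documents(["first document", "second document"])
query_vector = embeddings.embed_query("a short query")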
Source code for langchain.embeddings.self_hosted_hugging_face
"""Wrapper around HuggingFace embedding models for self-hosted remote hardware."""
import importlib
import logging
from typing import Any, Callable, List, Optional
from langchain.embeddings.self_hosted import SelfHostedEmbeddings
DEFAULT_MODEL_NAME = "sentence-transformers/all-mpnet-base-v2"
DEFAULT_INSTRUCT_MODEL = "hkunlp/instructor-large"
DEFAULT_EMBED_INSTRUCTION = "Represent the document for retrieval: "
DEFAULT_QUERY_INSTRUCTION = (
"Represent the question for retrieving supporting documents: "
)
logger = logging.getLogger(__name__)
def _embed_documents(client: Any, *args: Any, **kwargs: Any) -> List[List[float]]:
"""Inference function to send to the remote hardware.
Accepts a sentence_transformer model_id and
returns a list of embeddings for each document in the batch.
"""
return client.encode(*args, **kwargs)
def load_embedding_model(model_id: str, instruct: bool = False, device: int = 0) -> Any:
"""Load the embedding model."""
if not instruct:
import sentence_transformers
client = sentence_transformers.SentenceTransformer(model_id)
else:
from InstructorEmbedding import INSTRUCTOR
client = INSTRUCTOR(model_id)
if importlib.util.find_spec("torch") is not None:
import torch
cuda_device_count = torch.cuda.device_count()
if device < -1 or (device >= cuda_device_count):
raise ValueError(
f"Got device=={device}, "
f"device is required to be within [-1, {cuda_device_count})"
)
if device < 0 and cuda_device_count > 0:
logger.warning(
"Device has %d GPUs available. "
"Provide device={deviceId} to `from_model_id` to use available"
"GPUs for execution. deviceId is -1 for CPU and "
"can be a positive integer associated with CUDA device id.",
cuda_device_count,
)
client = client.to(device)
return client
[docs]class SelfHostedHuggingFaceEmbeddings(SelfHostedEmbeddings):
"""Runs sentence_transformers embedding models on self-hosted remote hardware.
Supported hardware includes auto-launched instances on AWS, GCP, Azure,
and Lambda, as well as servers specified
by IP address and SSH credentials (such as on-prem, or another cloud
like Paperspace, Coreweave, etc.).
To use, you should have the ``runhouse`` python package installed.
Example:
.. code-block:: python
from langchain.embeddings import SelfHostedHuggingFaceEmbeddings
import runhouse as rh
model_name = "sentence-transformers/all-mpnet-base-v2"
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
hf = SelfHostedHuggingFaceEmbeddings(model_name=model_name, hardware=gpu)
"""
client: Any #: :meta private:
model_id: str = DEFAULT_MODEL_NAME
"""Model name to use."""
model_reqs: List[str] = ["./", "sentence_transformers", "torch"]
"""Requirements to install on hardware to inference the model."""
hardware: Any
"""Remote hardware to send the inference function to."""
model_load_fn: Callable = load_embedding_model
"""Function to load the model remotely on the server."""
load_fn_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model load function."""
inference_fn: Callable = _embed_documents
"""Inference function to extract the embeddings."""
def __init__(self, **kwargs: Any):
"""Initialize the remote inference function."""
load_fn_kwargs = kwargs.pop("load_fn_kwargs", {})
load_fn_kwargs["model_id"] = load_fn_kwargs.get("model_id", DEFAULT_MODEL_NAME)
load_fn_kwargs["instruct"] = load_fn_kwargs.get("instruct", False)
load_fn_kwargs["device"] = load_fn_kwargs.get("device", 0)
super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs)
[docs]class SelfHostedHuggingFaceInstructEmbeddings(SelfHostedHuggingFaceEmbeddings):
"""Runs InstructorEmbedding embedding models on self-hosted remote hardware.
Supported hardware includes auto-launched instances on AWS, GCP, Azure,
and Lambda, as well as servers specified
by IP address and SSH credentials (such as on-prem, or another
cloud like Paperspace, Coreweave, etc.).
To use, you should have the ``runhouse`` python package installed.
Example:
.. code-block:: python
from langchain.embeddings import SelfHostedHuggingFaceInstructEmbeddings
import runhouse as rh
model_name = "hkunlp/instructor-large"
gpu = rh.cluster(name='rh-a10x', instance_type='A100:1')
hf = SelfHostedHuggingFaceInstructEmbeddings(
model_name=model_name, hardware=gpu)
""" | https://python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted_hugging_face.html |
db21e677fe5a-3 | model_name=model_name, hardware=gpu)
"""
model_id: str = DEFAULT_INSTRUCT_MODEL
"""Model name to use."""
embed_instruction: str = DEFAULT_EMBED_INSTRUCTION
"""Instruction to use for embedding documents."""
query_instruction: str = DEFAULT_QUERY_INSTRUCTION
"""Instruction to use for embedding query."""
model_reqs: List[str] = ["./", "InstructorEmbedding", "torch"]
"""Requirements to install on hardware to inference the model."""
def __init__(self, **kwargs: Any):
"""Initialize the remote inference function."""
load_fn_kwargs = kwargs.pop("load_fn_kwargs", {})
load_fn_kwargs["model_id"] = load_fn_kwargs.get(
"model_id", DEFAULT_INSTRUCT_MODEL
)
load_fn_kwargs["instruct"] = load_fn_kwargs.get("instruct", True)
load_fn_kwargs["device"] = load_fn_kwargs.get("device", 0)
super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs)
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a HuggingFace instruct model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
instruction_pairs = []
for text in texts:
instruction_pairs.append([self.embed_instruction, text])
embeddings = self.client(self.pipeline_ref, instruction_pairs)
return embeddings.tolist()
[docs] def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a HuggingFace instruct model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
instruction_pair = [self.query_instruction, text]
embedding = self.client(self.pipeline_ref, [instruction_pair])[0]
return embedding.tolist()
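A usage sketch, assuming ``runhouse`` is installed; the cluster name and instance type below are placeholders for real hardware:
import runhouse as rh
from langchain.embeddings import SelfHostedHuggingFaceEmbeddings

gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
hf = SelfHostedHuggingFaceEmbeddings(hardware=gpu)  # default sentence-transformers model
doc_vectors = hf.embed_documents(["first document", "second document"])
query_vector = hf.embed_query("a short query")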
Source code for langchain.embeddings.minimax
"""Wrapper around MiniMax APIs."""
from __future__ import annotations
import logging
from typing import Any, Callable, Dict, List, Optional
import requests
from pydantic import BaseModel, Extra, root_validator
from tenacity import (
before_sleep_log,
retry,
stop_after_attempt,
wait_exponential,
)
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
def _create_retry_decorator() -> Callable[[Any], Any]:
"""Returns a tenacity retry decorator."""
multiplier = 1
min_seconds = 1
max_seconds = 4
max_retries = 6
return retry(
reraise=True,
stop=stop_after_attempt(max_retries),
wait=wait_exponential(multiplier=multiplier, min=min_seconds, max=max_seconds),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def embed_with_retry(embeddings: MiniMaxEmbeddings, *args: Any, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator()
@retry_decorator
def _embed_with_retry(*args: Any, **kwargs: Any) -> Any:
return embeddings.embed(*args, **kwargs)
return _embed_with_retry(*args, **kwargs)
[docs]class MiniMaxEmbeddings(BaseModel, Embeddings):
"""Wrapper around MiniMax's embedding inference service.
To use, you should have the environment variable ``MINIMAX_GROUP_ID`` and
``MINIMAX_API_KEY`` set with your API token, or pass it as a named parameter to
the constructor.
Example:
.. code-block:: python
from langchain.embeddings import MiniMaxEmbeddings
embeddings = MiniMaxEmbeddings()
query_text = "This is a test query."
query_result = embeddings.embed_query(query_text)
document_text = "This is a test document."
document_result = embeddings.embed_documents([document_text])
"""
endpoint_url: str = "https://api.minimax.chat/v1/embeddings"
"""Endpoint URL to use."""
model: str = "embo-01"
"""Embeddings model name to use."""
embed_type_db: str = "db"
"""For embed_documents"""
embed_type_query: str = "query"
"""For embed_query"""
minimax_group_id: Optional[str] = None
"""Group ID for MiniMax API."""
minimax_api_key: Optional[str] = None
"""API Key for MiniMax API."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that group id and api key exists in environment."""
minimax_group_id = get_from_dict_or_env(
values, "minimax_group_id", "MINIMAX_GROUP_ID"
)
minimax_api_key = get_from_dict_or_env(
values, "minimax_api_key", "MINIMAX_API_KEY"
)
values["minimax_group_id"] = minimax_group_id
values["minimax_api_key"] = minimax_api_key
return values
def embed(
self,
texts: List[str],
embed_type: str,
) -> List[List[float]]:
payload = {
"model": self.model,
"type": embed_type,
"texts": texts,
}
# HTTP headers for authorization
headers = {
"Authorization": f"Bearer {self.minimax_api_key}",
"Content-Type": "application/json",
}
params = {
"GroupId": self.minimax_group_id,
}
# send request
response = requests.post(
self.endpoint_url, params=params, headers=headers, json=payload
)
parsed_response = response.json()
# check for errors
if parsed_response["base_resp"]["status_code"] != 0:
raise ValueError(
f"MiniMax API returned an error: {parsed_response['base_resp']}"
)
embeddings = parsed_response["vectors"]
return embeddings
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed documents using a MiniMax embedding endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = embed_with_retry(self, texts=texts, embed_type=self.embed_type_db)
return embeddings
[docs] def embed_query(self, text: str) -> List[float]:
"""Embed a query using a MiniMax embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
embeddings = embed_with_retry(
self, texts=[text], embed_type=self.embed_type_query
)
return embeddings[0]
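A usage sketch, assuming ``MINIMAX_GROUP_ID`` and ``MINIMAX_API_KEY`` are set in the environment:
from langchain.embeddings import MiniMaxEmbeddings

embeddings = MiniMaxEmbeddings()
doc_vectors = embeddings.embed_documents(["This is a test document."])  # sent with type "db"
query_vector = embeddings.embed_query("This is a test query.")  # sent with type "query"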
Source code for langchain.embeddings.huggingface
"""Wrapper around HuggingFace embedding models."""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, Field
from langchain.embeddings.base import Embeddings
DEFAULT_MODEL_NAME = "sentence-transformers/all-mpnet-base-v2"
DEFAULT_INSTRUCT_MODEL = "hkunlp/instructor-large"
DEFAULT_EMBED_INSTRUCTION = "Represent the document for retrieval: "
DEFAULT_QUERY_INSTRUCTION = (
"Represent the question for retrieving supporting documents: "
)
[docs]class HuggingFaceEmbeddings(BaseModel, Embeddings):
"""Wrapper around sentence_transformers embedding models.
To use, you should have the ``sentence_transformers`` python package installed.
Example:
.. code-block:: python
from langchain.embeddings import HuggingFaceEmbeddings
model_name = "sentence-transformers/all-mpnet-base-v2"
model_kwargs = {'device': 'cpu'}
hf = HuggingFaceEmbeddings(model_name=model_name, model_kwargs=model_kwargs)
"""
client: Any #: :meta private:
model_name: str = DEFAULT_MODEL_NAME
"""Model name to use."""
cache_folder: Optional[str] = None
"""Path to store models.
Can be also set by SENTENCE_TRANSFORMERS_HOME environment variable."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Key word arguments to pass to the model."""
encode_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Key word arguments to pass when calling the `encode` method of the model."""
def __init__(self, **kwargs: Any):
"""Initialize the sentence_transformer."""
super().__init__(**kwargs)
try:
import sentence_transformers
except ImportError as exc:
raise ImportError(
"Could not import sentence_transformers python package. "
"Please install it with `pip install sentence_transformers`."
) from exc
self.client = sentence_transformers.SentenceTransformer(
self.model_name, cache_folder=self.cache_folder, **self.model_kwargs
)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a HuggingFace transformer model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
texts = list(map(lambda x: x.replace("\n", " "), texts))
embeddings = self.client.encode(texts, **self.encode_kwargs)
return embeddings.tolist()
[docs] def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a HuggingFace transformer model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
text = text.replace("\n", " ")
embedding = self.client.encode(text, **self.encode_kwargs)
return embedding.tolist()
[docs]class HuggingFaceInstructEmbeddings(BaseModel, Embeddings):
"""Wrapper around sentence_transformers embedding models.
To use, you should have the ``sentence_transformers``
and ``InstructorEmbedding`` python packages installed.
Example:
.. code-block:: python
from langchain.embeddings import HuggingFaceInstructEmbeddings
model_name = "hkunlp/instructor-large"
model_kwargs = {'device': 'cpu'}
hf = HuggingFaceInstructEmbeddings(
model_name=model_name, model_kwargs=model_kwargs
)
"""
client: Any #: :meta private:
model_name: str = DEFAULT_INSTRUCT_MODEL
"""Model name to use."""
cache_folder: Optional[str] = None
"""Path to store models.
Can be also set by SENTENCE_TRANSFORMERS_HOME environment variable."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Key word arguments to pass to the model."""
embed_instruction: str = DEFAULT_EMBED_INSTRUCTION
"""Instruction to use for embedding documents."""
query_instruction: str = DEFAULT_QUERY_INSTRUCTION
"""Instruction to use for embedding query."""
def __init__(self, **kwargs: Any):
"""Initialize the sentence_transformer."""
super().__init__(**kwargs)
try:
from InstructorEmbedding import INSTRUCTOR
self.client = INSTRUCTOR(
self.model_name, cache_folder=self.cache_folder, **self.model_kwargs
)
except ImportError as e:
raise ValueError("Dependencies for InstructorEmbedding not found.") from e
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a HuggingFace instruct model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
""" | https://python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface.html |
f7e5465878df-3 | Returns:
List of embeddings, one for each text.
"""
instruction_pairs = [[self.embed_instruction, text] for text in texts]
embeddings = self.client.encode(instruction_pairs)
return embeddings.tolist()
[docs] def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a HuggingFace instruct model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
instruction_pair = [self.query_instruction, text]
embedding = self.client.encode([instruction_pair])[0]
return embedding.tolist()
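A usage sketch, assuming ``sentence_transformers`` and ``InstructorEmbedding`` are installed (both model names are the class defaults):
from langchain.embeddings import HuggingFaceEmbeddings, HuggingFaceInstructEmbeddings

hf = HuggingFaceEmbeddings(model_kwargs={"device": "cpu"})
instruct = HuggingFaceInstructEmbeddings(model_kwargs={"device": "cpu"})
doc_vectors = hf.embed_documents(["first document", "second document"])
query_vector = instruct.embed_query("a short query")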
Source code for langchain.embeddings.llamacpp
"""Wrapper around llama.cpp embedding models."""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, Field, root_validator
from langchain.embeddings.base import Embeddings
[docs]class LlamaCppEmbeddings(BaseModel, Embeddings):
"""Wrapper around llama.cpp embedding models.
To use, you should have the llama-cpp-python library installed, and provide the
path to the Llama model as a named parameter to the constructor.
Check out: https://github.com/abetlen/llama-cpp-python
Example:
.. code-block:: python
from langchain.embeddings import LlamaCppEmbeddings
llama = LlamaCppEmbeddings(model_path="/path/to/model.bin")
"""
client: Any #: :meta private:
model_path: str
n_ctx: int = Field(512, alias="n_ctx")
"""Token context window."""
n_parts: int = Field(-1, alias="n_parts")
"""Number of parts to split the model into.
If -1, the number of parts is automatically determined."""
seed: int = Field(-1, alias="seed")
"""Seed. If -1, a random seed is used."""
f16_kv: bool = Field(False, alias="f16_kv")
"""Use half-precision for key/value cache."""
logits_all: bool = Field(False, alias="logits_all")
"""Return logits for all tokens, not just the last token."""
vocab_only: bool = Field(False, alias="vocab_only")
"""Only load the vocabulary, no weights."""
use_mlock: bool = Field(False, alias="use_mlock")
"""Force system to keep model in RAM."""
n_threads: Optional[int] = Field(None, alias="n_threads")
"""Number of threads to use. If None, the number
of threads is automatically determined."""
n_batch: Optional[int] = Field(8, alias="n_batch")
"""Number of tokens to process in parallel.
Should be a number between 1 and n_ctx."""
n_gpu_layers: Optional[int] = Field(None, alias="n_gpu_layers")
"""Number of layers to be loaded into gpu memory. Default None."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that llama-cpp-python library is installed."""
model_path = values["model_path"]
model_param_names = [
"n_ctx",
"n_parts",
"seed",
"f16_kv",
"logits_all",
"vocab_only",
"use_mlock",
"n_threads",
"n_batch",
]
model_params = {k: values[k] for k in model_param_names}
# For backwards compatibility, only include if non-null.
if values["n_gpu_layers"] is not None:
model_params["n_gpu_layers"] = values["n_gpu_layers"]
try:
from llama_cpp import Llama
values["client"] = Llama(model_path, embedding=True, **model_params)
except ImportError:
raise ModuleNotFoundError(
"Could not import llama-cpp-python library. " | https://python.langchain.com/en/latest/_modules/langchain/embeddings/llamacpp.html |
ae81466257de-2 | raise ModuleNotFoundError(
"Could not import llama-cpp-python library. "
"Please install the llama-cpp-python library to "
"use this embedding model: pip install llama-cpp-python"
)
except Exception as e:
raise ValueError(
f"Could not load Llama model from path: {model_path}. "
f"Received error {e}"
)
return values
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed a list of documents using the Llama model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = [self.client.embed(text) for text in texts]
return [list(map(float, e)) for e in embeddings]
[docs] def embed_query(self, text: str) -> List[float]:
"""Embed a query using the Llama model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
embedding = self.client.embed(text)
return list(map(float, embedding))
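A usage sketch, assuming ``llama-cpp-python`` is installed; the model path is a placeholder for a local model file:
from langchain.embeddings import LlamaCppEmbeddings

llama = LlamaCppEmbeddings(model_path="/path/to/model.bin", n_ctx=512, n_threads=4)
doc_vectors = llama.embed_documents(["first document", "second document"])
query_vector = llama.embed_query("a short query")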
Source code for langchain.embeddings.huggingface_hub
"""Wrapper around HuggingFace Hub embedding models."""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
DEFAULT_REPO_ID = "sentence-transformers/all-mpnet-base-v2"
VALID_TASKS = ("feature-extraction",)
[docs]class HuggingFaceHubEmbeddings(BaseModel, Embeddings):
"""Wrapper around HuggingFaceHub embedding models.
To use, you should have the ``huggingface_hub`` python package installed, and the
environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.embeddings import HuggingFaceHubEmbeddings
repo_id = "sentence-transformers/all-mpnet-base-v2"
hf = HuggingFaceHubEmbeddings(
repo_id=repo_id,
task="feature-extraction",
huggingfacehub_api_token="my-api-key",
)
"""
client: Any #: :meta private:
repo_id: str = DEFAULT_REPO_ID
"""Model name to use."""
task: Optional[str] = "feature-extraction"
"""Task to call the model with."""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
huggingfacehub_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingfacehub_api_token = get_from_dict_or_env(
values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
)
try:
from huggingface_hub.inference_api import InferenceApi
repo_id = values["repo_id"]
if not repo_id.startswith("sentence-transformers"):
raise ValueError(
"Currently only 'sentence-transformers' embedding models "
f"are supported. Got invalid 'repo_id' {repo_id}."
)
client = InferenceApi(
repo_id=repo_id,
token=huggingfacehub_api_token,
task=values.get("task"),
)
if client.task not in VALID_TASKS:
raise ValueError(
f"Got invalid task {client.task}, "
f"currently only {VALID_TASKS} are supported"
)
values["client"] = client
except ImportError:
raise ValueError(
"Could not import huggingface_hub python package. "
"Please install it with `pip install huggingface_hub`."
)
return values
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to HuggingFaceHub's embedding endpoint for embedding search docs.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
# replace newlines, which can negatively affect performance.
texts = [text.replace("\n", " ") for text in texts]
_model_kwargs = self.model_kwargs or {}
responses = self.client(inputs=texts, params=_model_kwargs)
return responses
[docs] def embed_query(self, text: str) -> List[float]:
"""Call out to HuggingFaceHub's embedding endpoint for embedding query text.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
response = self.embed_documents([text])[0]
return response
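A usage sketch, assuming ``huggingface_hub`` is installed and ``HUGGINGFACEHUB_API_TOKEN`` is set (only sentence-transformers repos are accepted by the validator above):
from langchain.embeddings import HuggingFaceHubEmbeddings

hf_hub = HuggingFaceHubEmbeddings(repo_id="sentence-transformers/all-mpnet-base-v2")
doc_vectors = hf_hub.embed_documents(["first document", "second document"])
query_vector = hf_hub.embed_query("a short query")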
Source code for langchain.embeddings.fake
from typing import List
import numpy as np
from pydantic import BaseModel
from langchain.embeddings.base import Embeddings
[docs]class FakeEmbeddings(Embeddings, BaseModel):
size: int
def _get_embedding(self) -> List[float]:
return list(np.random.normal(size=self.size))
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
return [self._get_embedding() for _ in texts]
[docs] def embed_query(self, text: str) -> List[float]:
return self._get_embedding()
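A sketch of how the fake implementation can stand in for a real embedder in tests (the dimensionality is arbitrary):
from langchain.embeddings.fake import FakeEmbeddings

fake = FakeEmbeddings(size=1536)  # any positive size works; vectors are random normal draws
doc_vectors = fake.embed_documents(["anything", "at all"])
query_vector = fake.embed_query("anything")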
Source code for langchain.utilities.google_serper
"""Util that calls Google Search using the Serper.dev API."""
from typing import Any, Dict, List, Optional
import aiohttp
import requests
from pydantic.class_validators import root_validator
from pydantic.main import BaseModel
from typing_extensions import Literal
from langchain.utils import get_from_dict_or_env
[docs]class GoogleSerperAPIWrapper(BaseModel):
"""Wrapper around the Serper.dev Google Search API.
You can create a free API key at https://serper.dev.
To use, you should have the environment variable ``SERPER_API_KEY``
set with your API key, or pass `serper_api_key` as a named parameter
to the constructor.
Example:
.. code-block:: python
from langchain import GoogleSerperAPIWrapper
google_serper = GoogleSerperAPIWrapper()
"""
k: int = 10
gl: str = "us"
hl: str = "en"
# "places" and "images" is available from Serper but not implemented in the
# parser of run(). They can be used in results()
type: Literal["news", "search", "places", "images"] = "search"
result_key_for_type = {
"news": "news",
"places": "places",
"images": "images",
"search": "organic",
}
tbs: Optional[str] = None
serper_api_key: Optional[str] = None
aiosession: Optional[aiohttp.ClientSession] = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
serper_api_key = get_from_dict_or_env(
values, "serper_api_key", "SERPER_API_KEY"
)
values["serper_api_key"] = serper_api_key
return values
[docs] def results(self, query: str, **kwargs: Any) -> Dict:
"""Run query through GoogleSearch."""
return self._google_serper_api_results(
query,
gl=self.gl,
hl=self.hl,
num=self.k,
tbs=self.tbs,
search_type=self.type,
**kwargs,
)
[docs] def run(self, query: str, **kwargs: Any) -> str:
"""Run query through GoogleSearch and parse result."""
results = self._google_serper_api_results(
query,
gl=self.gl,
hl=self.hl,
num=self.k,
tbs=self.tbs,
search_type=self.type,
**kwargs,
)
return self._parse_results(results)
[docs] async def aresults(self, query: str, **kwargs: Any) -> Dict:
"""Run query through GoogleSearch."""
results = await self._async_google_serper_search_results(
query,
gl=self.gl,
hl=self.hl,
num=self.k,
search_type=self.type,
tbs=self.tbs,
**kwargs,
)
return results
[docs] async def arun(self, query: str, **kwargs: Any) -> str:
"""Run query through GoogleSearch and parse result async."""
results = await self._async_google_serper_search_results(
query,
gl=self.gl,
hl=self.hl,
num=self.k,
search_type=self.type,
tbs=self.tbs,
**kwargs,
)
return self._parse_results(results)
def _parse_snippets(self, results: dict) -> List[str]:
snippets = []
if results.get("answerBox"):
answer_box = results.get("answerBox", {})
if answer_box.get("answer"):
return [answer_box.get("answer")]
elif answer_box.get("snippet"):
return [answer_box.get("snippet").replace("\n", " ")]
elif answer_box.get("snippetHighlighted"):
return answer_box.get("snippetHighlighted")
if results.get("knowledgeGraph"):
kg = results.get("knowledgeGraph", {})
title = kg.get("title")
entity_type = kg.get("type")
if entity_type:
snippets.append(f"{title}: {entity_type}.")
description = kg.get("description")
if description:
snippets.append(description)
for attribute, value in kg.get("attributes", {}).items():
snippets.append(f"{title} {attribute}: {value}.")
for result in results[self.result_key_for_type[self.type]][: self.k]:
if "snippet" in result:
snippets.append(result["snippet"])
for attribute, value in result.get("attributes", {}).items():
snippets.append(f"{attribute}: {value}.")
if len(snippets) == 0:
return ["No good Google Search Result was found"]
return snippets
def _parse_results(self, results: dict) -> str:
return " ".join(self._parse_snippets(results))
def _google_serper_api_results(
self, search_term: str, search_type: str = "search", **kwargs: Any
) -> dict:
headers = {
"X-API-KEY": self.serper_api_key or "",
"Content-Type": "application/json",
}
params = {
"q": search_term,
**{key: value for key, value in kwargs.items() if value is not None},
}
response = requests.post(
f"https://google.serper.dev/{search_type}", headers=headers, params=params
)
response.raise_for_status()
search_results = response.json()
return search_results
async def _async_google_serper_search_results(
self, search_term: str, search_type: str = "search", **kwargs: Any
) -> dict:
headers = {
"X-API-KEY": self.serper_api_key or "",
"Content-Type": "application/json",
}
url = f"https://google.serper.dev/{search_type}"
params = {
"q": search_term,
**{key: value for key, value in kwargs.items() if value is not None},
}
if not self.aiosession:
async with aiohttp.ClientSession() as session:
async with session.post(
url, params=params, headers=headers, raise_for_status=False
) as response:
search_results = await response.json()
else:
async with self.aiosession.post(
url, params=params, headers=headers, raise_for_status=True
) as response:
search_results = await response.json()
return search_results
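A usage sketch, assuming ``SERPER_API_KEY`` is set (import path as in the docstring above):
from langchain import GoogleSerperAPIWrapper

search = GoogleSerperAPIWrapper(k=5, type="search")
snippets = search.run("langchain python library")  # parsed snippets joined into one string
raw = search.results("langchain python library")  # full JSON response as a dict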
Source code for langchain.utilities.openweathermap
"""Util that calls OpenWeatherMap using PyOWM."""
from typing import Any, Dict, Optional
from pydantic import Extra, root_validator
from langchain.tools.base import BaseModel
from langchain.utils import get_from_dict_or_env
[docs]class OpenWeatherMapAPIWrapper(BaseModel):
"""Wrapper for OpenWeatherMap API using PyOWM.
Docs for using:
1. Go to OpenWeatherMap and sign up for an API key
2. Save your API KEY into OPENWEATHERMAP_API_KEY env variable
3. pip install pyowm
"""
owm: Any
openweathermap_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
openweathermap_api_key = get_from_dict_or_env(
values, "openweathermap_api_key", "OPENWEATHERMAP_API_KEY"
)
try:
import pyowm
except ImportError:
raise ImportError(
"pyowm is not installed. Please install it with `pip install pyowm`"
)
owm = pyowm.OWM(openweathermap_api_key)
values["owm"] = owm
return values
def _format_weather_info(self, location: str, w: Any) -> str:
detailed_status = w.detailed_status
wind = w.wind()
humidity = w.humidity
temperature = w.temperature("celsius")
rain = w.rain
heat_index = w.heat_index
clouds = w.clouds
return (
f"In {location}, the current weather is as follows:\n"
f"Detailed status: {detailed_status}\n"
f"Wind speed: {wind['speed']} m/s, direction: {wind['deg']}°\n"
f"Humidity: {humidity}%\n"
f"Temperature: \n"
f" - Current: {temperature['temp']}°C\n"
f" - High: {temperature['temp_max']}°C\n"
f" - Low: {temperature['temp_min']}°C\n"
f" - Feels like: {temperature['feels_like']}°C\n"
f"Rain: {rain}\n"
f"Heat index: {heat_index}\n"
f"Cloud cover: {clouds}%"
)
[docs] def run(self, location: str) -> str:
"""Get the current weather information for a specified location."""
mgr = self.owm.weather_manager()
observation = mgr.weather_at_place(location)
w = observation.weather
return self._format_weather_info(location, w)
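A usage sketch, assuming ``pyowm`` is installed and ``OPENWEATHERMAP_API_KEY`` is set:
from langchain.utilities.openweathermap import OpenWeatherMapAPIWrapper

weather = OpenWeatherMapAPIWrapper()
print(weather.run("London,GB"))  # formatted report: status, wind, humidity, temperature, rain, clouds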
Source code for langchain.utilities.arxiv
"""Util that calls Arxiv."""
import logging
import os
from typing import Any, Dict, List
from pydantic import BaseModel, Extra, root_validator
from langchain.schema import Document
logger = logging.getLogger(__name__)
[docs]class ArxivAPIWrapper(BaseModel):
"""Wrapper around ArxivAPI.
To use, you should have the ``arxiv`` python package installed.
https://lukasschwab.me/arxiv.py/index.html
This wrapper will use the Arxiv API to conduct searches and
fetch document summaries. By default, it will return the document summaries
of the top-k results.
It limits the Document content by doc_content_chars_max.
Set doc_content_chars_max=None if you don't want to limit the content size.
Parameters:
top_k_results: number of the top-scored document used for the arxiv tool
ARXIV_MAX_QUERY_LENGTH: the cut limit on the query used for the arxiv tool.
load_max_docs: a limit to the number of loaded documents
load_all_available_meta:
if True: the `metadata` of the loaded Documents gets all available meta info
(see https://lukasschwab.me/arxiv.py/index.html#Result),
if False: the `metadata` gets only the most informative fields.
"""
arxiv_client: Any #: :meta private:
arxiv_exceptions: Any # :meta private:
top_k_results: int = 3
ARXIV_MAX_QUERY_LENGTH = 300
load_max_docs: int = 100
load_all_available_meta: bool = False
doc_content_chars_max: int = 4000
class Config:
"""Configuration for this pydantic object.""" | https://python.langchain.com/en/latest/_modules/langchain/utilities/arxiv.html |
0ff573975013-1 | class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in environment."""
try:
import arxiv
values["arxiv_search"] = arxiv.Search
values["arxiv_exceptions"] = (
arxiv.ArxivError,
arxiv.UnexpectedEmptyPageError,
arxiv.HTTPError,
)
values["arxiv_result"] = arxiv.Result
except ImportError:
raise ImportError(
"Could not import arxiv python package. "
"Please install it with `pip install arxiv`."
)
return values
[docs] def run(self, query: str) -> str:
"""
Run Arxiv search and get the article meta information.
See https://lukasschwab.me/arxiv.py/index.html#Search
See https://lukasschwab.me/arxiv.py/index.html#Result
It uses only the most informative fields of article meta information.
"""
try:
results = self.arxiv_search( # type: ignore
query[: self.ARXIV_MAX_QUERY_LENGTH], max_results=self.top_k_results
).results()
except self.arxiv_exceptions as ex:
return f"Arxiv exception: {ex}"
docs = [
f"Published: {result.updated.date()}\nTitle: {result.title}\n"
f"Authors: {', '.join(a.name for a in result.authors)}\n"
f"Summary: {result.summary}"
for result in results
]
if docs:
return "\n\n".join(docs)[: self.doc_content_chars_max]
else:
return "No good Arxiv Result was found"
[docs] def load(self, query: str) -> List[Document]:
"""
Run Arxiv search and get the article texts plus the article meta information.
See https://lukasschwab.me/arxiv.py/index.html#Search
Returns: a list of documents with the document.page_content in text format
"""
try:
import fitz
except ImportError:
raise ImportError(
"PyMuPDF package not found, please install it with "
"`pip install pymupdf`"
)
try:
results = self.arxiv_search( # type: ignore
query[: self.ARXIV_MAX_QUERY_LENGTH], max_results=self.load_max_docs
).results()
except self.arxiv_exceptions as ex:
logger.debug("Error on arxiv: %s", ex)
return []
docs: List[Document] = []
for result in results:
try:
doc_file_name: str = result.download_pdf()
with fitz.open(doc_file_name) as doc_file:
text: str = "".join(page.get_text() for page in doc_file)
except FileNotFoundError as f_ex:
logger.debug(f_ex)
continue
if self.load_all_available_meta:
extra_metadata = {
"entry_id": result.entry_id,
"published_first_time": str(result.published.date()),
"comment": result.comment,
"journal_ref": result.journal_ref,
"doi": result.doi,
"primary_category": result.primary_category, | https://python.langchain.com/en/latest/_modules/langchain/utilities/arxiv.html |
0ff573975013-3 | "doi": result.doi,
"primary_category": result.primary_category,
"categories": result.categories,
"links": [link.href for link in result.links],
}
else:
extra_metadata = {}
metadata = {
"Published": str(result.updated.date()),
"Title": result.title,
"Authors": ", ".join(a.name for a in result.authors),
"Summary": result.summary,
**extra_metadata,
}
doc = Document(
page_content=text[: self.doc_content_chars_max], metadata=metadata
)
docs.append(doc)
os.remove(doc_file_name)
return docs
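A minimal usage sketch (not part of the module source): the query string is illustrative, and it assumes the arxiv package is installed (plus pymupdf if load() is used).
from langchain.utilities.arxiv import ArxivAPIWrapper
arxiv_wrapper = ArxivAPIWrapper(top_k_results=2, load_max_docs=2)
# run() returns concatenated summaries of the top-k results, or a fallback message on failure.
print(arxiv_wrapper.run("quantum error correction"))
# load() downloads the PDFs and returns Documents whose page_content is the extracted text.
docs = arxiv_wrapper.load("quantum error correction")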
Source code for langchain.utilities.spark_sql
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Iterable, List, Optional
if TYPE_CHECKING:
from pyspark.sql import DataFrame, Row, SparkSession
[docs]class SparkSQL:
def __init__(
self,
spark_session: Optional[SparkSession] = None,
catalog: Optional[str] = None,
schema: Optional[str] = None,
ignore_tables: Optional[List[str]] = None,
include_tables: Optional[List[str]] = None,
sample_rows_in_table_info: int = 3,
):
try:
from pyspark.sql import SparkSession
except ImportError:
raise ValueError(
"pyspark is not installed. Please install it with `pip install pyspark`"
)
self._spark = (
spark_session if spark_session else SparkSession.builder.getOrCreate()
)
if catalog is not None:
self._spark.catalog.setCurrentCatalog(catalog)
if schema is not None:
self._spark.catalog.setCurrentDatabase(schema)
self._all_tables = set(self._get_all_table_names())
self._include_tables = set(include_tables) if include_tables else set()
if self._include_tables:
missing_tables = self._include_tables - self._all_tables
if missing_tables:
raise ValueError(
f"include_tables {missing_tables} not found in database"
)
self._ignore_tables = set(ignore_tables) if ignore_tables else set()
if self._ignore_tables:
missing_tables = self._ignore_tables - self._all_tables
if missing_tables:
raise ValueError(
f"ignore_tables {missing_tables} not found in database"
)
usable_tables = self.get_usable_table_names()
self._usable_tables = set(usable_tables) if usable_tables else self._all_tables
if not isinstance(sample_rows_in_table_info, int):
raise TypeError("sample_rows_in_table_info must be an integer")
self._sample_rows_in_table_info = sample_rows_in_table_info
[docs] @classmethod
def from_uri(
cls, database_uri: str, engine_args: Optional[dict] = None, **kwargs: Any
) -> SparkSQL:
"""Creating a remote Spark Session via Spark connect.
For example: SparkSQL.from_uri("sc://localhost:15002")
"""
try:
from pyspark.sql import SparkSession
except ImportError:
raise ValueError(
"pyspark is not installed. Please install it with `pip install pyspark`"
)
spark = SparkSession.builder.remote(database_uri).getOrCreate()
return cls(spark, **kwargs)
[docs] def get_usable_table_names(self) -> Iterable[str]:
"""Get names of tables available."""
if self._include_tables:
return self._include_tables
# Sorting the result can help the LLM understand it.
return sorted(self._all_tables - self._ignore_tables)
def _get_all_table_names(self) -> Iterable[str]:
rows = self._spark.sql("SHOW TABLES").select("tableName").collect()
return list(map(lambda row: row.tableName, rows))
def _get_create_table_stmt(self, table: str) -> str:
statement = (
self._spark.sql(f"SHOW CREATE TABLE {table}").collect()[0].createtab_stmt | https://python.langchain.com/en/latest/_modules/langchain/utilities/spark_sql.html |
2a08ae63fc8f-2 | )
# Ignore the data source provider and options to reduce the number of tokens.
using_clause_index = statement.find("USING")
return statement[:using_clause_index] + ";"
[docs] def get_table_info(self, table_names: Optional[List[str]] = None) -> str:
all_table_names = self.get_usable_table_names()
if table_names is not None:
missing_tables = set(table_names).difference(all_table_names)
if missing_tables:
raise ValueError(f"table_names {missing_tables} not found in database")
all_table_names = table_names
tables = []
for table_name in all_table_names:
table_info = self._get_create_table_stmt(table_name)
if self._sample_rows_in_table_info:
table_info += "\n\n/*"
table_info += f"\n{self._get_sample_spark_rows(table_name)}\n"
table_info += "*/"
tables.append(table_info)
final_str = "\n\n".join(tables)
return final_str
def _get_sample_spark_rows(self, table: str) -> str:
query = f"SELECT * FROM {table} LIMIT {self._sample_rows_in_table_info}"
df = self._spark.sql(query)
columns_str = "\t".join(list(map(lambda f: f.name, df.schema.fields)))
try:
sample_rows = self._get_dataframe_results(df)
# save the sample rows in string format
sample_rows_str = "\n".join(["\t".join(row) for row in sample_rows])
except Exception:
sample_rows_str = ""
return (
f"{self._sample_rows_in_table_info} rows from {table} table:\n" | https://python.langchain.com/en/latest/_modules/langchain/utilities/spark_sql.html |
2a08ae63fc8f-3 | f"{self._sample_rows_in_table_info} rows from {table} table:\n"
f"{columns_str}\n"
f"{sample_rows_str}"
)
def _convert_row_as_tuple(self, row: Row) -> tuple:
return tuple(map(str, row.asDict().values()))
def _get_dataframe_results(self, df: DataFrame) -> list:
return list(map(self._convert_row_as_tuple, df.collect()))
[docs] def run(self, command: str, fetch: str = "all") -> str:
df = self._spark.sql(command)
if fetch == "one":
df = df.limit(1)
return str(self._get_dataframe_results(df))
[docs] def get_table_info_no_throw(self, table_names: Optional[List[str]] = None) -> str:
"""Get information about specified tables.
Follows best practices as specified in: Rajkumar et al, 2022
(https://arxiv.org/abs/2204.00498)
If `sample_rows_in_table_info`, the specified number of sample rows will be
appended to each table description. This can increase performance as
demonstrated in the paper.
"""
try:
return self.get_table_info(table_names)
except ValueError as e:
"""Format the error message"""
return f"Error: {e}"
[docs] def run_no_throw(self, command: str, fetch: str = "all") -> str:
"""Execute a SQL command and return a string representing the results.
If the statement returns rows, a string of the results is returned.
If the statement returns no rows, an empty string is returned.
If the statement throws an error, the error message is returned.
"""
try:
from pyspark.errors import PySparkException
except ImportError:
raise ValueError(
"pyspark is not installed. Please install it with `pip install pyspark`"
)
try:
return self.run(command, fetch)
except PySparkException as e:
"""Format the error message"""
return f"Error: {e}"
Source code for langchain.utilities.duckduckgo_search
"""Util that calls DuckDuckGo Search.
No setup required. Free.
https://pypi.org/project/duckduckgo-search/
"""
from typing import Dict, List, Optional
from pydantic import BaseModel, Extra
from pydantic.class_validators import root_validator
[docs]class DuckDuckGoSearchAPIWrapper(BaseModel):
"""Wrapper for DuckDuckGo Search API.
Free and does not require any setup
"""
k: int = 10
region: Optional[str] = "wt-wt"
safesearch: str = "moderate"
time: Optional[str] = "y"
max_results: int = 5
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that python package exists in environment."""
try:
from duckduckgo_search import ddg # noqa: F401
except ImportError:
raise ValueError(
"Could not import duckduckgo-search python package. "
"Please install it with `pip install duckduckgo-search`."
)
return values
[docs] def get_snippets(self, query: str) -> List[str]:
"""Run query through DuckDuckGo and return concatenated results."""
from duckduckgo_search import ddg
results = ddg(
query,
region=self.region,
safesearch=self.safesearch,
time=self.time,
max_results=self.max_results,
)
if results is None or len(results) == 0: