date_collected (stringclasses, 1 value) | repo_name (string, 6–116 chars) | file_name (string, 2–220 chars) | file_contents (string, 13–357k chars) | prompts (sequence)
---|---|---|---|---|
2024-01-10 | mth93/langchain | libs~community~langchain_community~tools~render.py | """Different methods for rendering Tools to be passed to LLMs.
Depending on the LLM you are using and the prompting strategy you are using,
you may want Tools to be rendered in a different way.
This module contains various ways to render tools.
"""
from libs.core.langchain_core.tools import BaseTool
from langchain_community.utils.openai_functions import (
FunctionDescription,
ToolDescription,
convert_pydantic_to_openai_function,
)
def format_tool_to_openai_function(tool: BaseTool) -> FunctionDescription:
"""Format tool into the OpenAI function API."""
if tool.args_schema:
return convert_pydantic_to_openai_function(
tool.args_schema, name=tool.name, description=tool.description
)
else:
return {
"name": tool.name,
"description": tool.description,
"parameters": {
# This is a hack to get around the fact that some tools
# do not expose an args_schema, and expect an argument
# which is a string.
# And OpenAI does not support an array type for the
# parameters.
"properties": {
"__arg1": {"title": "__arg1", "type": "string"},
},
"required": ["__arg1"],
"type": "object",
},
}
def format_tool_to_openai_tool(tool: BaseTool) -> ToolDescription:
"""Format tool into the OpenAI function API."""
function = format_tool_to_openai_function(tool)
return {"type": "function", "function": function}
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~onedrive_file.py | from __future__ import annotations
import tempfile
from typing import TYPE_CHECKING, List
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.pydantic_v1 import BaseModel, Field
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.document_loaders.unstructured import UnstructuredFileLoader
if TYPE_CHECKING:
from O365.drive import File
CHUNK_SIZE = 1024 * 1024 * 5
class OneDriveFileLoader(BaseLoader, BaseModel):
"""Load a file from `Microsoft OneDrive`."""
file: File = Field(...)
"""The file to load."""
class Config:
arbitrary_types_allowed = True
"""Allow arbitrary types. This is needed for the File type. Default is True.
See https://pydantic-docs.helpmanual.io/usage/types/#arbitrary-types-allowed"""
def load(self) -> List[Document]:
"""Load Documents"""
with tempfile.TemporaryDirectory() as temp_dir:
file_path = f"{temp_dir}/{self.file.name}"
self.file.download(to_path=temp_dir, chunk_size=CHUNK_SIZE)
loader = UnstructuredFileLoader(file_path)
return loader.load()
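# Example usage (an illustrative sketch): fetch a drive item with the O365 SDK
# and load it into Documents. The credentials, drive calls and file path below
# are placeholders/assumptions, not values required by this loader.
if __name__ == "__main__":
    from O365 import Account

    account = Account(("client_id", "client_secret"))  # placeholder app credentials
    drive = account.storage().get_default_drive()
    o365_file = drive.get_item_by_path("/reports/summary.docx")  # placeholder path
    docs = OneDriveFileLoader(file=o365_file).load()
    print(docs[0].metadata)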
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~retrievers~document_compressors~cohere_rerank.py | from __future__ import annotations
from typing import TYPE_CHECKING, Dict, Optional, Sequence
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.pydantic_v1 import Extra, root_validator
from langchain.callbacks.manager import Callbacks
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
from langchain.utils import get_from_dict_or_env
if TYPE_CHECKING:
from cohere import Client
else:
# We do this to avoid pydantic annotation issues when actually instantiating
# while keeping this import optional
try:
from cohere import Client
except ImportError:
pass
class CohereRerank(BaseDocumentCompressor):
"""Document compressor that uses `Cohere Rerank API`."""
client: Client
"""Cohere client to use for compressing documents."""
top_n: int = 3
"""Number of documents to return."""
model: str = "rerank-english-v2.0"
"""Model to use for reranking."""
cohere_api_key: Optional[str] = None
user_agent: str = "langchain"
"""Identifier for the application making the request."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
try:
import cohere
except ImportError:
raise ImportError(
"Could not import cohere python package. "
"Please install it with `pip install cohere`."
)
cohere_api_key = get_from_dict_or_env(
values, "cohere_api_key", "COHERE_API_KEY"
)
client_name = values.get("user_agent", "langchain")
values["client"] = cohere.Client(cohere_api_key, client_name=client_name)
return values
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""
Compress documents using Cohere's rerank API.
Args:
documents: A sequence of documents to compress.
query: The query to use for compressing the documents.
callbacks: Callbacks to run during the compression process.
Returns:
A sequence of compressed documents.
"""
if len(documents) == 0: # to avoid empty api call
return []
doc_list = list(documents)
_docs = [d.page_content for d in doc_list]
results = self.client.rerank(
model=self.model, query=query, documents=_docs, top_n=self.top_n
)
final_results = []
for r in results:
doc = doc_list[r.index]
doc.metadata["relevance_score"] = r.relevance_score
final_results.append(doc)
return final_results
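# Example usage (an illustrative sketch, assuming the `cohere` package is
# installed and COHERE_API_KEY is set): rerank a few documents against a query.
if __name__ == "__main__":
    reranker = CohereRerank(top_n=2)
    docs = [
        Document(page_content="Paris is the capital of France."),
        Document(page_content="The Eiffel Tower is in Paris."),
        Document(page_content="Cohere builds large language models."),
    ]
    for doc in reranker.compress_documents(docs, query="Who builds large language models?"):
        print(doc.metadata["relevance_score"], doc.page_content)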
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~vectorstores~test_vald.py | """Test Vald functionality."""
import time
from typing import List, Optional
from libs.core.langchain_core.documents import Document
from langchain_community.vectorstores import Vald
from tests.integration_tests.vectorstores.fake_embeddings import (
FakeEmbeddings,
fake_texts,
)
"""
To run, you should have a Vald cluster.
https://github.com/vdaas/vald/blob/main/docs/tutorial/get-started.md
"""
WAIT_TIME = 90
def _vald_from_texts(
metadatas: Optional[List[dict]] = None,
host: str = "localhost",
port: int = 8080,
skip_strict_exist_check: bool = True,
) -> Vald:
return Vald.from_texts(
fake_texts,
FakeEmbeddings(),
metadatas=metadatas,
host=host,
port=port,
skip_strict_exist_check=skip_strict_exist_check,
)
def test_vald_add_texts() -> None:
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = _vald_from_texts(metadatas=metadatas)
time.sleep(WAIT_TIME) # Wait for CreateIndex
output = docsearch.similarity_search("foo", k=10)
assert len(output) == 3
texts = ["a", "b", "c"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch.add_texts(texts, metadatas)
time.sleep(WAIT_TIME) # Wait for CreateIndex
output = docsearch.similarity_search("foo", k=10)
assert len(output) == 6
def test_vald_delete() -> None:
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = _vald_from_texts(metadatas=metadatas)
time.sleep(WAIT_TIME)
output = docsearch.similarity_search("foo", k=10)
assert len(output) == 3
docsearch.delete(["foo"])
time.sleep(WAIT_TIME)
output = docsearch.similarity_search("foo", k=10)
assert len(output) == 2
def test_vald_search() -> None:
"""Test end to end construction and search."""
docsearch = _vald_from_texts()
time.sleep(WAIT_TIME)
output = docsearch.similarity_search("foo", k=3)
assert output == [
Document(page_content="foo"),
Document(page_content="bar"),
Document(page_content="baz"),
]
def test_vald_search_with_score() -> None:
"""Test end to end construction and search with scores."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = _vald_from_texts(metadatas=metadatas)
time.sleep(WAIT_TIME)
output = docsearch.similarity_search_with_score("foo", k=3)
docs = [o[0] for o in output]
scores = [o[1] for o in output]
assert docs == [
Document(page_content="foo"),
Document(page_content="bar"),
Document(page_content="baz"),
]
assert scores[0] < scores[1] < scores[2]
def test_vald_search_by_vector() -> None:
"""Test end to end construction and search by vector."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = _vald_from_texts(metadatas=metadatas)
time.sleep(WAIT_TIME)
embedding = FakeEmbeddings().embed_query("foo")
output = docsearch.similarity_search_by_vector(embedding, k=3)
assert output == [
Document(page_content="foo"),
Document(page_content="bar"),
Document(page_content="baz"),
]
def test_vald_search_with_score_by_vector() -> None:
"""Test end to end construction and search with scores by vector."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = _vald_from_texts(metadatas=metadatas)
time.sleep(WAIT_TIME)
embedding = FakeEmbeddings().embed_query("foo")
output = docsearch.similarity_search_with_score_by_vector(embedding, k=3)
docs = [o[0] for o in output]
scores = [o[1] for o in output]
assert docs == [
Document(page_content="foo"),
Document(page_content="bar"),
Document(page_content="baz"),
]
assert scores[0] < scores[1] < scores[2]
def test_vald_max_marginal_relevance_search() -> None:
"""Test end to end construction and MRR search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = _vald_from_texts(metadatas=metadatas)
time.sleep(WAIT_TIME)
output = docsearch.max_marginal_relevance_search("foo", k=2, fetch_k=3)
assert output == [
Document(page_content="foo"),
Document(page_content="bar"),
]
def test_vald_max_marginal_relevance_search_by_vector() -> None:
"""Test end to end construction and MRR search by vector."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = _vald_from_texts(metadatas=metadatas)
time.sleep(WAIT_TIME)
embedding = FakeEmbeddings().embed_query("foo")
output = docsearch.max_marginal_relevance_search_by_vector(
embedding, k=2, fetch_k=3
)
assert output == [
Document(page_content="foo"),
Document(page_content="bar"),
]
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~tools~edenai~audio_text_to_speech.py | from __future__ import annotations
import logging
from typing import Dict, List, Literal, Optional
import requests
from libs.core.langchain_core.callbacks import CallbackManagerForToolRun
from libs.core.langchain_core.pydantic_v1 import Field, root_validator, validator
from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool
logger = logging.getLogger(__name__)
class EdenAiTextToSpeechTool(EdenaiTool):
"""Tool that queries the Eden AI Text to speech API.
For API reference, check the Eden AI documentation:
https://docs.edenai.co/reference/audio_text_to_speech_create.
To use, you should have
the environment variable ``EDENAI_API_KEY`` set with your API token.
You can find your token here: https://app.edenai.run/admin/account/settings
"""
name = "edenai_text_to_speech"
description = (
"A wrapper around edenai Services text to speech."
"Useful for when you need to convert text to speech."
"""the output is a string representing the URL of the audio file,
or the path to the downloaded wav file """
)
language: Optional[str] = "en"
"""
language of the text passed to the model.
"""
# optional params see api documentation for more info
return_type: Literal["url", "wav"] = "url"
rate: Optional[int]
pitch: Optional[int]
volume: Optional[int]
audio_format: Optional[str]
sampling_rate: Optional[int]
voice_models: Dict[str, str] = Field(default_factory=dict)
voice: Literal["MALE", "FEMALE"]
"""voice option : 'MALE' or 'FEMALE' """
feature: str = "audio"
subfeature: str = "text_to_speech"
@validator("providers")
def check_only_one_provider_selected(cls, v: List[str]) -> List[str]:
"""
This tool has no feature to combine providers results.
Therefore we only allow one provider
"""
if len(v) > 1:
raise ValueError(
"Please select only one provider. "
"The feature to combine providers results is not available "
"for this tool."
)
return v
@root_validator
def check_voice_models_key_is_provider_name(cls, values: dict) -> dict:
for key in values.get("voice_models", {}).keys():
if key not in values.get("providers", []):
raise ValueError(
"voice_model should be formatted like this "
"{<provider_name>: <its_voice_model>}"
)
return values
def _download_wav(self, url: str, save_path: str) -> None:
response = requests.get(url)
if response.status_code == 200:
with open(save_path, "wb") as f:
f.write(response.content)
else:
raise ValueError("Error while downloading wav file")
def _parse_response(self, response: list) -> str:
result = response[0]
if self.return_type == "url":
return result["audio_resource_url"]
else:
self._download_wav(result["audio_resource_url"], "audio.wav")
return "audio.wav"
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
all_params = {
"text": query,
"language": self.language,
"option": self.voice,
"return_type": self.return_type,
"rate": self.rate,
"pitch": self.pitch,
"volume": self.volume,
"audio_format": self.audio_format,
"sampling_rate": self.sampling_rate,
"settings": self.voice_models,
}
# filter out None values so they are not sent to the API
query_params = {k: v for k, v in all_params.items() if v is not None}
return self._call_eden_ai(query_params)
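# Example usage (an illustrative sketch, assuming EDENAI_API_KEY is set): convert
# a short sentence to speech with a single provider. The provider name below is
# a placeholder; consult the Eden AI docs for currently supported providers.
if __name__ == "__main__":
    tts = EdenAiTextToSpeechTool(providers=["amazon"], language="en", voice="FEMALE")
    print(tts.run("Hello from Eden AI"))  # URL of the generated audio by default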
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~vectorstores~cassandra.py | from __future__ import annotations
import typing
import uuid
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Tuple,
Type,
TypeVar,
Union,
)
import numpy as np
if typing.TYPE_CHECKING:
from cassandra.cluster import Session
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.vectorstores import VectorStore
from langchain_community.vectorstores.utils import maximal_marginal_relevance
CVST = TypeVar("CVST", bound="Cassandra")
class Cassandra(VectorStore):
"""Wrapper around Apache Cassandra(R) for vector-store workloads.
To use it, you need a recent installation of the `cassio` library
and a Cassandra cluster / Astra DB instance supporting vector capabilities.
Visit the cassio.org website for extensive quickstarts and code examples.
Example:
.. code-block:: python
from langchain_community.vectorstores import Cassandra
from langchain_community.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
session = ... # create your Cassandra session object
keyspace = 'my_keyspace' # the keyspace should exist already
table_name = 'my_vector_store'
vectorstore = Cassandra(embeddings, session, keyspace, table_name)
"""
_embedding_dimension: Union[int, None]
@staticmethod
def _filter_to_metadata(filter_dict: Optional[Dict[str, str]]) -> Dict[str, Any]:
if filter_dict is None:
return {}
else:
return filter_dict
def _get_embedding_dimension(self) -> int:
if self._embedding_dimension is None:
self._embedding_dimension = len(
self.embedding.embed_query("This is a sample sentence.")
)
return self._embedding_dimension
def __init__(
self,
embedding: Embeddings,
session: Session,
keyspace: str,
table_name: str,
ttl_seconds: Optional[int] = None,
) -> None:
try:
from cassio.vector import VectorTable
except (ImportError, ModuleNotFoundError):
raise ImportError(
"Could not import cassio python package. "
"Please install it with `pip install cassio`."
)
"""Create a vector table."""
self.embedding = embedding
self.session = session
self.keyspace = keyspace
self.table_name = table_name
self.ttl_seconds = ttl_seconds
#
self._embedding_dimension = None
#
self.table = VectorTable(
session=session,
keyspace=keyspace,
table=table_name,
embedding_dimension=self._get_embedding_dimension(),
primary_key_type="TEXT",
)
@property
def embeddings(self) -> Embeddings:
return self.embedding
@staticmethod
def _dont_flip_the_cos_score(distance: float) -> float:
# the identity
return distance
def _select_relevance_score_fn(self) -> Callable[[float], float]:
"""
The underlying VectorTable already returns a "score proper",
i.e. one in [0, 1] where higher means more *similar*,
so here the final score transformation is not reversing the interval:
"""
return self._dont_flip_the_cos_score
def delete_collection(self) -> None:
"""
Just an alias for `clear`
(to better align with other VectorStore implementations).
"""
self.clear()
def clear(self) -> None:
"""Empty the collection."""
self.table.clear()
def delete_by_document_id(self, document_id: str) -> None:
return self.table.delete(document_id)
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]:
"""Delete by vector IDs.
Args:
ids: List of ids to delete.
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
"""
if ids is None:
raise ValueError("No ids provided to delete.")
for document_id in ids:
self.delete_by_document_id(document_id)
return True
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
batch_size: int = 16,
ttl_seconds: Optional[int] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts (Iterable[str]): Texts to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
ids (Optional[List[str]], optional): Optional list of IDs.
batch_size (int): Number of concurrent requests to send to the server.
ttl_seconds (Optional[int], optional): Optional time-to-live
for the added texts.
Returns:
List[str]: List of IDs of the added texts.
"""
_texts = list(texts) # lest it be a generator or something
if ids is None:
ids = [uuid.uuid4().hex for _ in _texts]
if metadatas is None:
metadatas = [{} for _ in _texts]
#
ttl_seconds = ttl_seconds or self.ttl_seconds
#
embedding_vectors = self.embedding.embed_documents(_texts)
#
for i in range(0, len(_texts), batch_size):
batch_texts = _texts[i : i + batch_size]
batch_embedding_vectors = embedding_vectors[i : i + batch_size]
batch_ids = ids[i : i + batch_size]
batch_metadatas = metadatas[i : i + batch_size]
futures = [
self.table.put_async(
text, embedding_vector, text_id, metadata, ttl_seconds
)
for text, embedding_vector, text_id, metadata in zip(
batch_texts, batch_embedding_vectors, batch_ids, batch_metadatas
)
]
for future in futures:
future.result()
return ids
# id-returning search facilities
def similarity_search_with_score_id_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Dict[str, str]] = None,
) -> List[Tuple[Document, float, str]]:
"""Return docs most similar to embedding vector.
Args:
embedding (str): Embedding to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
Returns:
List of (Document, score, id), the most similar to the query vector.
"""
search_metadata = self._filter_to_metadata(filter)
#
hits = self.table.search(
embedding_vector=embedding,
top_k=k,
metric="cos",
metric_threshold=None,
metadata=search_metadata,
)
# We stick to 'cos' distance as it can be normalized on a 0-1 axis
# (1=most relevant), as required by this class' contract.
return [
(
Document(
page_content=hit["document"],
metadata=hit["metadata"],
),
0.5 + 0.5 * hit["distance"],
hit["document_id"],
)
for hit in hits
]
def similarity_search_with_score_id(
self,
query: str,
k: int = 4,
filter: Optional[Dict[str, str]] = None,
) -> List[Tuple[Document, float, str]]:
embedding_vector = self.embedding.embed_query(query)
return self.similarity_search_with_score_id_by_vector(
embedding=embedding_vector,
k=k,
filter=filter,
)
# id-unaware search facilities
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Dict[str, str]] = None,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to embedding vector.
Args:
embedding (str): Embedding to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
Returns:
List of (Document, score), the most similar to the query vector.
"""
return [
(doc, score)
for (doc, score, docId) in self.similarity_search_with_score_id_by_vector(
embedding=embedding,
k=k,
filter=filter,
)
]
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
embedding_vector = self.embedding.embed_query(query)
return self.similarity_search_by_vector(
embedding_vector,
k,
filter=filter,
)
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
return [
doc
for doc, _ in self.similarity_search_with_score_by_vector(
embedding,
k,
filter=filter,
)
]
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[Dict[str, str]] = None,
) -> List[Tuple[Document, float]]:
embedding_vector = self.embedding.embed_query(query)
return self.similarity_search_with_score_by_vector(
embedding_vector,
k,
filter=filter,
)
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Returns:
List of Documents selected by maximal marginal relevance.
"""
search_metadata = self._filter_to_metadata(filter)
prefetchHits = self.table.search(
embedding_vector=embedding,
top_k=fetch_k,
metric="cos",
metric_threshold=None,
metadata=search_metadata,
)
# let the mmr utility pick the *indices* in the above array
mmrChosenIndices = maximal_marginal_relevance(
np.array(embedding, dtype=np.float32),
[pfHit["embedding_vector"] for pfHit in prefetchHits],
k=k,
lambda_mult=lambda_mult,
)
mmrHits = [
pfHit
for pfIndex, pfHit in enumerate(prefetchHits)
if pfIndex in mmrChosenIndices
]
return [
Document(
page_content=hit["document"],
metadata=hit["metadata"],
)
for hit in mmrHits
]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Optional.
Returns:
List of Documents selected by maximal marginal relevance.
"""
embedding_vector = self.embedding.embed_query(query)
return self.max_marginal_relevance_search_by_vector(
embedding_vector,
k,
fetch_k,
lambda_mult=lambda_mult,
filter=filter,
)
@classmethod
def from_texts(
cls: Type[CVST],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
batch_size: int = 16,
**kwargs: Any,
) -> CVST:
"""Create a Cassandra vectorstore from raw texts.
No support for specifying text IDs
Returns:
a Cassandra vectorstore.
"""
session: Session = kwargs["session"]
keyspace: str = kwargs["keyspace"]
table_name: str = kwargs["table_name"]
cassandraStore = cls(
embedding=embedding,
session=session,
keyspace=keyspace,
table_name=table_name,
)
cassandraStore.add_texts(texts=texts, metadatas=metadatas)
return cassandraStore
@classmethod
def from_documents(
cls: Type[CVST],
documents: List[Document],
embedding: Embeddings,
batch_size: int = 16,
**kwargs: Any,
) -> CVST:
"""Create a Cassandra vectorstore from a document list.
No support for specifying text IDs
Returns:
a Cassandra vectorstore.
"""
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
session: Session = kwargs["session"]
keyspace: str = kwargs["keyspace"]
table_name: str = kwargs["table_name"]
return cls.from_texts(
texts=texts,
metadatas=metadatas,
embedding=embedding,
session=session,
keyspace=keyspace,
table_name=table_name,
)
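# Example usage (an illustrative sketch, assuming a reachable Cassandra cluster
# with an existing keyspace, plus the `cassandra-driver` and `cassio` packages;
# FakeEmbeddings is assumed to be importable from langchain_community):
if __name__ == "__main__":
    from cassandra.cluster import Cluster
    from langchain_community.embeddings import FakeEmbeddings

    session = Cluster(["127.0.0.1"]).connect()
    store = Cassandra.from_texts(
        ["alpha", "beta", "gamma"],
        FakeEmbeddings(size=16),
        session=session,
        keyspace="my_keyspace",
        table_name="demo_vectors",
    )
    print(store.similarity_search("alpha", k=2))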
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~psychic.py | from typing import List, Optional
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class PsychicLoader(BaseLoader):
"""Load from `Psychic.dev`."""
def __init__(
self, api_key: str, account_id: str, connector_id: Optional[str] = None
):
"""Initialize with API key, connector id, and account id.
Args:
api_key: The Psychic API key.
account_id: The Psychic account id.
connector_id: The Psychic connector id.
"""
try:
from psychicapi import ConnectorId, Psychic # noqa: F401
except ImportError:
raise ImportError(
"`psychicapi` package not found, please run `pip install psychicapi`"
)
self.psychic = Psychic(secret_key=api_key)
self.connector_id = ConnectorId(connector_id)
self.account_id = account_id
def load(self) -> List[Document]:
"""Load documents."""
psychic_docs = self.psychic.get_documents(
connector_id=self.connector_id, account_id=self.account_id
)
return [
Document(
page_content=doc["content"],
metadata={"title": doc["title"], "source": doc["uri"]},
)
for doc in psychic_docs.documents
]
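# Example usage (an illustrative sketch; the key, account id and connector id
# below are placeholders): pull synced documents from Psychic.
if __name__ == "__main__":
    loader = PsychicLoader(
        api_key="psychic-secret-key",
        account_id="account-123",
        connector_id="gdrive",
    )
    for doc in loader.load():
        print(doc.metadata["title"], doc.metadata["source"])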
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~embeddings~deepinfra.py | from typing import Any, Dict, List, Mapping, Optional
import requests
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra, root_validator
from libs.core.langchain_core.utils import get_from_dict_or_env
DEFAULT_MODEL_ID = "sentence-transformers/clip-ViT-B-32"
class DeepInfraEmbeddings(BaseModel, Embeddings):
"""Deep Infra's embedding inference service.
To use, you should have the
environment variable ``DEEPINFRA_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
There are multiple embeddings models available,
see https://deepinfra.com/models?type=embeddings.
Example:
.. code-block:: python
from langchain_community.embeddings import DeepInfraEmbeddings
deepinfra_emb = DeepInfraEmbeddings(
model_id="sentence-transformers/clip-ViT-B-32",
deepinfra_api_token="my-api-key"
)
r1 = deepinfra_emb.embed_documents(
[
"Alpha is the first letter of Greek alphabet",
"Beta is the second letter of Greek alphabet",
]
)
r2 = deepinfra_emb.embed_query(
"What is the second letter of Greek alphabet"
)
"""
model_id: str = DEFAULT_MODEL_ID
"""Embeddings model to use."""
normalize: bool = False
"""whether to normalize the computed embeddings"""
embed_instruction: str = "passage: "
"""Instruction used to embed documents."""
query_instruction: str = "query: "
"""Instruction used to embed the query."""
model_kwargs: Optional[dict] = None
"""Other model keyword args"""
deepinfra_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
deepinfra_api_token = get_from_dict_or_env(
values, "deepinfra_api_token", "DEEPINFRA_API_TOKEN"
)
values["deepinfra_api_token"] = deepinfra_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {"model_id": self.model_id}
def _embed(self, input: List[str]) -> List[List[float]]:
_model_kwargs = self.model_kwargs or {}
# HTTP headers for authorization
headers = {
"Authorization": f"bearer {self.deepinfra_api_token}",
"Content-Type": "application/json",
}
# send request
try:
res = requests.post(
f"https://api.deepinfra.com/v1/inference/{self.model_id}",
headers=headers,
json={"inputs": input, "normalize": self.normalize, **_model_kwargs},
)
except requests.exceptions.RequestException as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
if res.status_code != 200:
raise ValueError(
"Error raised by inference API HTTP code: %s, %s"
% (res.status_code, res.text)
)
try:
t = res.json()
embeddings = t["embeddings"]
except requests.exceptions.JSONDecodeError as e:
raise ValueError(
f"Error raised by inference API: {e}.\nResponse: {res.text}"
)
return embeddings
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed documents using a Deep Infra deployed embedding model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
instruction_pairs = [f"{self.embed_instruction}{text}" for text in texts]
embeddings = self._embed(instruction_pairs)
return embeddings
def embed_query(self, text: str) -> List[float]:
"""Embed a query using a Deep Infra deployed embedding model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
instruction_pair = f"{self.query_instruction}{text}"
embedding = self._embed([instruction_pair])[0]
return embedding
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~self_hosted.py | import importlib.util
import logging
import pickle
from typing import Any, Callable, List, Mapping, Optional
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.pydantic_v1 import Extra
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
def _generate_text(
pipeline: Any,
prompt: str,
*args: Any,
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> str:
"""Inference function to send to the remote hardware.
Accepts a pipeline callable (or, more likely,
a key pointing to the model on the cluster's object store)
and returns text predictions for each document
in the batch.
"""
text = pipeline(prompt, *args, **kwargs)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
def _send_pipeline_to_device(pipeline: Any, device: int) -> Any:
"""Send a pipeline to a device on the cluster."""
if isinstance(pipeline, str):
with open(pipeline, "rb") as f:
pipeline = pickle.load(f)
if importlib.util.find_spec("torch") is not None:
import torch
cuda_device_count = torch.cuda.device_count()
if device < -1 or (device >= cuda_device_count):
raise ValueError(
f"Got device=={device}, "
f"device is required to be within [-1, {cuda_device_count})"
)
if device < 0 and cuda_device_count > 0:
logger.warning(
"Device has %d GPUs available. "
"Provide device={deviceId} to `from_model_id` to use available"
"GPUs for execution. deviceId is -1 for CPU and "
"can be a positive integer associated with CUDA device id.",
cuda_device_count,
)
pipeline.device = torch.device(device)
pipeline.model = pipeline.model.to(pipeline.device)
return pipeline
class SelfHostedPipeline(LLM):
"""Model inference on self-hosted remote hardware.
Supported hardware includes auto-launched instances on AWS, GCP, Azure,
and Lambda, as well as servers specified
by IP address and SSH credentials (such as on-prem, or another
cloud like Paperspace, Coreweave, etc.).
To use, you should have the ``runhouse`` python package installed.
Example for custom pipeline and inference functions:
.. code-block:: python
from langchain_community.llms import SelfHostedPipeline
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import runhouse as rh
def load_pipeline():
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
return pipeline(
"text-generation", model=model, tokenizer=tokenizer,
max_new_tokens=10
)
def inference_fn(pipeline, prompt, stop = None):
return pipeline(prompt)[0]["generated_text"]
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
llm = SelfHostedPipeline(
model_load_fn=load_pipeline,
hardware=gpu,
model_reqs=model_reqs, inference_fn=inference_fn
)
Example for <2GB model (can be serialized and sent directly to the server):
.. code-block:: python
from langchain_community.llms import SelfHostedPipeline
import runhouse as rh
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
my_model = ...
llm = SelfHostedPipeline.from_pipeline(
pipeline=my_model,
hardware=gpu,
model_reqs=["./", "torch", "transformers"],
)
Example passing model path for larger models:
.. code-block:: python
from langchain_community.llms import SelfHostedPipeline
import runhouse as rh
import pickle
from transformers import pipeline
generator = pipeline(model="gpt2")
rh.blob(pickle.dumps(generator), path="models/pipeline.pkl"
).save().to(gpu, path="models")
llm = SelfHostedPipeline.from_pipeline(
pipeline="models/pipeline.pkl",
hardware=gpu,
model_reqs=["./", "torch", "transformers"],
)
"""
pipeline_ref: Any #: :meta private:
client: Any #: :meta private:
inference_fn: Callable = _generate_text #: :meta private:
"""Inference function to send to the remote hardware."""
hardware: Any
"""Remote hardware to send the inference function to."""
model_load_fn: Callable
"""Function to load the model remotely on the server."""
load_fn_kwargs: Optional[dict] = None
"""Keyword arguments to pass to the model load function."""
model_reqs: List[str] = ["./", "torch"]
"""Requirements to install on hardware to inference the model."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def __init__(self, **kwargs: Any):
"""Init the pipeline with an auxiliary function.
The load function must be in global scope to be imported
and run on the server, i.e. in a module and not a REPL or closure.
Then, initialize the remote inference function.
"""
super().__init__(**kwargs)
try:
import runhouse as rh
except ImportError:
raise ImportError(
"Could not import runhouse python package. "
"Please install it with `pip install runhouse`."
)
remote_load_fn = rh.function(fn=self.model_load_fn).to(
self.hardware, reqs=self.model_reqs
)
_load_fn_kwargs = self.load_fn_kwargs or {}
self.pipeline_ref = remote_load_fn.remote(**_load_fn_kwargs)
self.client = rh.function(fn=self.inference_fn).to(
self.hardware, reqs=self.model_reqs
)
@classmethod
def from_pipeline(
cls,
pipeline: Any,
hardware: Any,
model_reqs: Optional[List[str]] = None,
device: int = 0,
**kwargs: Any,
) -> LLM:
"""Init the SelfHostedPipeline from a pipeline object or string."""
if not isinstance(pipeline, str):
logger.warning(
"Serializing pipeline to send to remote hardware. "
"Note, it can be quite slow"
"to serialize and send large models with each execution. "
"Consider sending the pipeline"
"to the cluster and passing the path to the pipeline instead."
)
load_fn_kwargs = {"pipeline": pipeline, "device": device}
return cls(
load_fn_kwargs=load_fn_kwargs,
model_load_fn=_send_pipeline_to_device,
hardware=hardware,
model_reqs=["transformers", "torch"] + (model_reqs or []),
**kwargs,
)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"hardware": self.hardware},
}
@property
def _llm_type(self) -> str:
return "self_hosted_llm"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
return self.client(
pipeline=self.pipeline_ref, prompt=prompt, stop=stop, **kwargs
)
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~pipelineai.py | import logging
from typing import Any, Dict, List, Mapping, Optional
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.pydantic_v1 import (
BaseModel,
Extra,
Field,
SecretStr,
root_validator,
)
from libs.core.langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
class PipelineAI(LLM, BaseModel):
"""PipelineAI large language models.
To use, you should have the ``pipeline-ai`` python package installed,
and the environment variable ``PIPELINE_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain_community.llms import PipelineAI
pipeline = PipelineAI(pipeline_key="")
"""
pipeline_key: str = ""
"""The id or tag of the target pipeline"""
pipeline_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any pipeline parameters valid for `create` call not
explicitly specified."""
pipeline_api_key: Optional[SecretStr] = None
class Config:
"""Configuration for this pydantic config."""
extra = Extra.forbid
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("pipeline_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transferred to pipeline_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["pipeline_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
pipeline_api_key = convert_to_secret_str(
get_from_dict_or_env(values, "pipeline_api_key", "PIPELINE_API_KEY")
)
values["pipeline_api_key"] = pipeline_api_key
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"pipeline_key": self.pipeline_key},
**{"pipeline_kwargs": self.pipeline_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "pipeline_ai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to Pipeline Cloud endpoint."""
try:
from pipeline import PipelineCloud
except ImportError:
raise ImportError(
"Could not import pipeline-ai python package. "
"Please install it with `pip install pipeline-ai`."
)
client = PipelineCloud(token=self.pipeline_api_key.get_secret_value())
params = self.pipeline_kwargs or {}
params = {**params, **kwargs}
run = client.run_pipeline(self.pipeline_key, [prompt, params])
try:
text = run.result_preview[0][0]
except AttributeError:
raise AttributeError(
f"A pipeline run should have a `result_preview` attribute."
f"Run was: {run}"
)
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the pipeline parameters
text = enforce_stop_tokens(text, stop)
return text
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~chat_loaders~facebook_messenger.py | import json
import logging
from pathlib import Path
from typing import Iterator, Union
from libs.core.langchain_core.chat_sessions import ChatSession
from libs.core.langchain_core.messages import HumanMessage
from langchain_community.chat_loaders.base import BaseChatLoader
logger = logging.getLogger(__file__)
class SingleFileFacebookMessengerChatLoader(BaseChatLoader):
"""Load `Facebook Messenger` chat data from a single file.
Args:
path (Union[Path, str]): The path to the chat file.
Attributes:
path (Path): The path to the chat file.
"""
def __init__(self, path: Union[Path, str]) -> None:
super().__init__()
self.file_path = path if isinstance(path, Path) else Path(path)
def lazy_load(self) -> Iterator[ChatSession]:
"""Lazy loads the chat data from the file.
Yields:
ChatSession: A chat session containing the loaded messages.
"""
with open(self.file_path) as f:
data = json.load(f)
sorted_data = sorted(data["messages"], key=lambda x: x["timestamp_ms"])
messages = []
for m in sorted_data:
messages.append(
HumanMessage(
content=m["content"], additional_kwargs={"sender": m["sender_name"]}
)
)
yield ChatSession(messages=messages)
class FolderFacebookMessengerChatLoader(BaseChatLoader):
"""Load `Facebook Messenger` chat data from a folder.
Args:
path (Union[str, Path]): The path to the directory
containing the chat files.
Attributes:
path (Path): The path to the directory containing the chat files.
"""
def __init__(self, path: Union[str, Path]) -> None:
super().__init__()
self.directory_path = Path(path) if isinstance(path, str) else path
def lazy_load(self) -> Iterator[ChatSession]:
"""Lazy loads the chat data from the folder.
Yields:
ChatSession: A chat session containing the loaded messages.
"""
inbox_path = self.directory_path / "inbox"
for _dir in inbox_path.iterdir():
if _dir.is_dir():
for _file in _dir.iterdir():
if _file.suffix.lower() == ".json":
file_loader = SingleFileFacebookMessengerChatLoader(path=_file)
for result in file_loader.lazy_load():
yield result
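# Example usage (an illustrative sketch; the path below is a placeholder for an
# unzipped Facebook data export containing an inbox/ folder):
if __name__ == "__main__":
    loader = FolderFacebookMessengerChatLoader("./facebook_export/messages")
    for session in loader.lazy_load():
        print(len(session["messages"]), "messages in this conversation")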
| [
"content"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~azure_blob_storage_file.py | import os
import tempfile
from typing import List
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.document_loaders.unstructured import UnstructuredFileLoader
class AzureBlobStorageFileLoader(BaseLoader):
"""Load from `Azure Blob Storage` files."""
def __init__(self, conn_str: str, container: str, blob_name: str):
"""Initialize with connection string, container and blob name."""
self.conn_str = conn_str
"""Connection string for Azure Blob Storage."""
self.container = container
"""Container name."""
self.blob = blob_name
"""Blob name."""
def load(self) -> List[Document]:
"""Load documents."""
try:
from azure.storage.blob import BlobClient
except ImportError as exc:
raise ImportError(
"Could not import azure storage blob python package. "
"Please install it with `pip install azure-storage-blob`."
) from exc
client = BlobClient.from_connection_string(
conn_str=self.conn_str, container_name=self.container, blob_name=self.blob
)
with tempfile.TemporaryDirectory() as temp_dir:
file_path = f"{temp_dir}/{self.container}/{self.blob}"
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(f"{file_path}", "wb") as file:
blob_data = client.download_blob()
blob_data.readinto(file)
loader = UnstructuredFileLoader(file_path)
return loader.load()
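# Example usage (an illustrative sketch; the connection string, container and
# blob names below are placeholders):
if __name__ == "__main__":
    loader = AzureBlobStorageFileLoader(
        conn_str="DefaultEndpointsProtocol=https;AccountName=...;AccountKey=...",
        container="reports",
        blob_name="summary.pdf",
    )
    print(loader.load()[0].page_content[:200])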
| [] |
2024-01-10 | mth93/langchain | libs~langchain~tests~unit_tests~agents~output_parsers~test_json.py | from libs.core.langchain_core.agents import AgentAction, AgentFinish
from langchain.agents.output_parsers.json import JSONAgentOutputParser
def test_tool_usage() -> None:
parser = JSONAgentOutputParser()
_input = """ ```
{
"action": "search",
"action_input": "2+2"
}
```"""
output = parser.invoke(_input)
expected_output = AgentAction(tool="search", tool_input="2+2", log=_input)
assert output == expected_output
def test_finish() -> None:
parser = JSONAgentOutputParser()
_input = """```
{
"action": "Final Answer",
"action_input": "4"
}
```"""
output = parser.invoke(_input)
expected_output = AgentFinish(return_values={"output": "4"}, log=_input)
assert output == expected_output
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~gooseai.py | import logging
from typing import Any, Dict, List, Mapping, Optional
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.pydantic_v1 import Extra, Field, SecretStr, root_validator
from libs.core.langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
logger = logging.getLogger(__name__)
class GooseAI(LLM):
"""GooseAI large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``GOOSEAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain_community.llms import GooseAI
gooseai = GooseAI(model_name="gpt-neo-20b")
"""
client: Any
model_name: str = "gpt-neo-20b"
"""Model name to use"""
temperature: float = 0.7
"""What sampling temperature to use"""
max_tokens: int = 256
"""The maximum number of tokens to generate in the completion.
-1 returns as many tokens as possible given the prompt and
the models maximal context size."""
top_p: float = 1
"""Total probability mass of tokens to consider at each step."""
min_tokens: int = 1
"""The minimum number of tokens to generate in the completion."""
frequency_penalty: float = 0
"""Penalizes repeated tokens according to frequency."""
presence_penalty: float = 0
"""Penalizes repeated tokens."""
n: int = 1
"""How many completions to generate for each prompt."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict)
"""Adjust the probability of specific tokens being generated."""
gooseai_api_key: Optional[SecretStr] = None
class Config:
"""Configuration for this pydantic config."""
extra = Extra.ignore
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
gooseai_api_key = convert_to_secret_str(
get_from_dict_or_env(values, "gooseai_api_key", "GOOSEAI_API_KEY")
)
values["gooseai_api_key"] = gooseai_api_key
try:
import openai
openai.api_key = gooseai_api_key.get_secret_value()
openai.api_base = "https://api.goose.ai/v1"
values["client"] = openai.Completion
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling GooseAI API."""
normal_params = {
"temperature": self.temperature,
"max_tokens": self.max_tokens,
"top_p": self.top_p,
"min_tokens": self.min_tokens,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"n": self.n,
"logit_bias": self.logit_bias,
}
return {**normal_params, **self.model_kwargs}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "gooseai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the GooseAI API."""
params = self._default_params
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
params = {**params, **kwargs}
response = self.client.create(engine=self.model_name, prompt=prompt, **params)
text = response.choices[0].text
return text
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~chains~router~llm_router.py | """Base classes for LLM-powered router chains."""
from __future__ import annotations
from typing import Any, Dict, List, Optional, Type, cast
from libs.core.langchain_core.exceptions import OutputParserException
from libs.core.langchain_core.language_models import BaseLanguageModel
from libs.core.langchain_core.output_parsers import BaseOutputParser
from libs.core.langchain_core.prompts import BasePromptTemplate
from libs.core.langchain_core.pydantic_v1 import root_validator
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain.chains import LLMChain
from langchain.chains.router.base import RouterChain
from langchain.output_parsers.json import parse_and_check_json_markdown
class LLMRouterChain(RouterChain):
"""A router chain that uses an LLM chain to perform routing."""
llm_chain: LLMChain
"""LLM chain used to perform routing"""
@root_validator()
def validate_prompt(cls, values: dict) -> dict:
prompt = values["llm_chain"].prompt
if prompt.output_parser is None:
raise ValueError(
"LLMRouterChain requires base llm_chain prompt to have an output"
" parser that converts LLM text output to a dictionary with keys"
" 'destination' and 'next_inputs'. Received a prompt with no output"
" parser."
)
return values
@property
def input_keys(self) -> List[str]:
"""Will be whatever keys the LLM chain prompt expects.
:meta private:
"""
return self.llm_chain.input_keys
def _validate_outputs(self, outputs: Dict[str, Any]) -> None:
super()._validate_outputs(outputs)
if not isinstance(outputs["next_inputs"], dict):
raise ValueError
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
output = cast(
Dict[str, Any],
self.llm_chain.predict_and_parse(callbacks=callbacks, **inputs),
)
return output
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
output = cast(
Dict[str, Any],
await self.llm_chain.apredict_and_parse(callbacks=callbacks, **inputs),
)
return output
@classmethod
def from_llm(
cls, llm: BaseLanguageModel, prompt: BasePromptTemplate, **kwargs: Any
) -> LLMRouterChain:
"""Convenience constructor."""
llm_chain = LLMChain(llm=llm, prompt=prompt)
return cls(llm_chain=llm_chain, **kwargs)
class RouterOutputParser(BaseOutputParser[Dict[str, str]]):
"""Parser for output of router chain in the multi-prompt chain."""
default_destination: str = "DEFAULT"
next_inputs_type: Type = str
next_inputs_inner_key: str = "input"
def parse(self, text: str) -> Dict[str, Any]:
try:
expected_keys = ["destination", "next_inputs"]
parsed = parse_and_check_json_markdown(text, expected_keys)
if not isinstance(parsed["destination"], str):
raise ValueError("Expected 'destination' to be a string.")
if not isinstance(parsed["next_inputs"], self.next_inputs_type):
raise ValueError(
f"Expected 'next_inputs' to be {self.next_inputs_type}."
)
parsed["next_inputs"] = {self.next_inputs_inner_key: parsed["next_inputs"]}
if (
parsed["destination"].strip().lower()
== self.default_destination.lower()
):
parsed["destination"] = None
else:
parsed["destination"] = parsed["destination"].strip()
return parsed
except Exception as e:
raise OutputParserException(
f"Parsing text\n{text}\n raised following error:\n{e}"
)
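# Example usage (an illustrative sketch): parse a router LLM's markdown-JSON
# reply into the routing dict consumed by multi-prompt style router chains.
if __name__ == "__main__":
    parser = RouterOutputParser()
    reply = """```json
{"destination": "physics", "next_inputs": "What is black body radiation?"}
```"""
    print(parser.parse(reply))
    # -> {'destination': 'physics', 'next_inputs': {'input': 'What is black body radiation?'}}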
| [
"llm_chain"
] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~chains~transform.py | """Chain that runs an arbitrary python function."""
import functools
import logging
from typing import Any, Awaitable, Callable, Dict, List, Optional
from libs.core.langchain_core.pydantic_v1 import Field
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain.chains.base import Chain
logger = logging.getLogger(__name__)
class TransformChain(Chain):
"""Chain that transforms the chain output.
Example:
.. code-block:: python
from langchain.chains import TransformChain
transform_chain = TransformChain(input_variables=["text"],
output_variables=["entities"], transform=func)
"""
input_variables: List[str]
"""The keys expected by the transform's input dictionary."""
output_variables: List[str]
"""The keys returned by the transform's output dictionary."""
transform_cb: Callable[[Dict[str, str]], Dict[str, str]] = Field(alias="transform")
"""The transform function."""
atransform_cb: Optional[
Callable[[Dict[str, Any]], Awaitable[Dict[str, Any]]]
] = Field(None, alias="atransform")
"""The async coroutine transform function."""
@staticmethod
@functools.lru_cache
def _log_once(msg: str) -> None:
"""Log a message once.
:meta private:
"""
logger.warning(msg)
@property
def input_keys(self) -> List[str]:
"""Expect input keys.
:meta private:
"""
return self.input_variables
@property
def output_keys(self) -> List[str]:
"""Return output keys.
:meta private:
"""
return self.output_variables
def _call(
self,
inputs: Dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
return self.transform_cb(inputs)
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
if self.atransform_cb is not None:
return await self.atransform_cb(inputs)
else:
self._log_once(
"TransformChain's atransform is not provided, falling"
" back to synchronous transform"
)
return self.transform_cb(inputs)
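# Example usage (an illustrative sketch): a chain step that upper-cases its input.
if __name__ == "__main__":
    def _shout(inputs: Dict[str, str]) -> Dict[str, str]:
        return {"shouted": inputs["text"].upper()}

    chain = TransformChain(
        input_variables=["text"], output_variables=["shouted"], transform=_shout
    )
    print(chain.invoke({"text": "hello world"}))  # includes {'shouted': 'HELLO WORLD'}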
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~unit_tests~retrievers~test_you.py | import json
import os
from unittest import mock
from libs.core.langchain_core.documents import Document
from requests import Response
from langchain_community.retrievers.you import YouRetriever
class TestYouRetriever:
def test_get_relevant_documents(self) -> None:
os.environ["YDC_API_KEY"] = "MOCK KEY!"
retriever = YouRetriever()
with mock.patch("requests.get") as mock_get:
fixture = {"hits": [{"snippets": ["yo"]}, {"snippets": ["bird up"]}]}
response = Response()
response._content = bytes(json.dumps(fixture).encode("utf-8"))
mock_get.return_value = response
actual = retriever.get_relevant_documents("test")
assert actual == [
Document(page_content="yo"),
Document(page_content="bird up"),
]
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~polars_dataframe.py | from typing import Any, Iterator
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.dataframe import BaseDataFrameLoader
class PolarsDataFrameLoader(BaseDataFrameLoader):
"""Load `Polars` DataFrame."""
def __init__(self, data_frame: Any, *, page_content_column: str = "text"):
"""Initialize with dataframe object.
Args:
data_frame: Polars DataFrame object.
page_content_column: Name of the column containing the page content.
Defaults to "text".
"""
import polars as pl
if not isinstance(data_frame, pl.DataFrame):
raise ValueError(
f"Expected data_frame to be a pl.DataFrame, got {type(data_frame)}"
)
super().__init__(data_frame, page_content_column=page_content_column)
def lazy_load(self) -> Iterator[Document]:
"""Lazy load records from dataframe."""
for row in self.data_frame.iter_rows(named=True):
text = row[self.page_content_column]
row.pop(self.page_content_column)
yield Document(page_content=text, metadata=row)
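# Example usage (an illustrative sketch): wrap a small Polars frame so each row
# becomes a Document, with the remaining columns kept as metadata.
if __name__ == "__main__":
    import polars as pl

    df = pl.DataFrame({"text": ["hello", "world"], "page": [1, 2]})
    for doc in PolarsDataFrameLoader(df).lazy_load():
        print(doc.page_content, doc.metadata)  # e.g. "hello" {'page': 1}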
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~tools~yahoo_finance_news.py | from typing import Iterable, Optional
from libs.core.langchain_core.callbacks import CallbackManagerForToolRun
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.tools import BaseTool
from requests.exceptions import HTTPError, ReadTimeout
from urllib3.exceptions import ConnectionError
from langchain_community.document_loaders.web_base import WebBaseLoader
class YahooFinanceNewsTool(BaseTool):
"""Tool that searches financial news on Yahoo Finance."""
name: str = "yahoo_finance_news"
description: str = (
"Useful for when you need to find financial news "
"about a public company. "
"Input should be a company ticker. "
"For example, AAPL for Apple, MSFT for Microsoft."
)
top_k: int = 10
"""The number of results to return."""
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Yahoo Finance News tool."""
try:
import yfinance
except ImportError:
raise ImportError(
"Could not import yfinance python package. "
"Please install it with `pip install yfinance`."
)
company = yfinance.Ticker(query)
try:
if company.isin is None:
return f"Company ticker {query} not found."
except (HTTPError, ReadTimeout, ConnectionError):
return f"Company ticker {query} not found."
links = []
try:
links = [n["link"] for n in company.news if n["type"] == "STORY"]
except (HTTPError, ReadTimeout, ConnectionError):
            if not links:
                return f"No news found for the company with ticker {query}."
        if not links:
            return f"No news found for the company with ticker {query}."
loader = WebBaseLoader(web_paths=links)
docs = loader.load()
result = self._format_results(docs, query)
if not result:
            return f"No news found for the company with ticker {query}."
return result
@staticmethod
def _format_results(docs: Iterable[Document], query: str) -> str:
doc_strings = [
"\n".join([doc.metadata["title"], doc.metadata["description"]])
for doc in docs
if query in doc.metadata["description"] or query in doc.metadata["title"]
]
return "\n\n".join(doc_strings)
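

# Minimal usage sketch, assuming `yfinance` is installed and network access is
# available; the ticker below is only an example.
if __name__ == "__main__":
    tool = YahooFinanceNewsTool(top_k=5)
    # Returns concatenated title/description pairs for recent stories that
    # mention the ticker, or an explanatory message if nothing is found.
    print(tool.run("AAPL"))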
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~chains~api~openapi~requests_chain.py | """request parser."""
import json
import re
from typing import Any
from libs.core.langchain_core.language_models import BaseLanguageModel
from libs.core.langchain_core.output_parsers import BaseOutputParser
from libs.core.langchain_core.prompts.prompt import PromptTemplate
from langchain.chains.api.openapi.prompts import REQUEST_TEMPLATE
from langchain.chains.llm import LLMChain
class APIRequesterOutputParser(BaseOutputParser):
"""Parse the request and error tags."""
def _load_json_block(self, serialized_block: str) -> str:
try:
return json.dumps(json.loads(serialized_block, strict=False))
except json.JSONDecodeError:
return "ERROR serializing request."
def parse(self, llm_output: str) -> str:
"""Parse the request and error tags."""
json_match = re.search(r"```json(.*?)```", llm_output, re.DOTALL)
if json_match:
return self._load_json_block(json_match.group(1).strip())
message_match = re.search(r"```text(.*?)```", llm_output, re.DOTALL)
if message_match:
return f"MESSAGE: {message_match.group(1).strip()}"
return "ERROR making request"
@property
def _type(self) -> str:
return "api_requester"
class APIRequesterChain(LLMChain):
"""Get the request parser."""
@classmethod
def is_lc_serializable(cls) -> bool:
return False
@classmethod
def from_llm_and_typescript(
cls,
llm: BaseLanguageModel,
typescript_definition: str,
verbose: bool = True,
**kwargs: Any,
) -> LLMChain:
"""Get the request parser."""
output_parser = APIRequesterOutputParser()
prompt = PromptTemplate(
template=REQUEST_TEMPLATE,
output_parser=output_parser,
partial_variables={"schema": typescript_definition},
input_variables=["instructions"],
)
return cls(prompt=prompt, llm=llm, verbose=verbose, **kwargs)
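

# Small illustration of the parser's behavior on typical LLM output, assuming
# the model wraps its answer in ```json or ```text fences as REQUEST_TEMPLATE
# instructs. The sample string is fabricated for illustration only.
if __name__ == "__main__":
    parser = APIRequesterOutputParser()
    sample = 'Here is the request:\n```json\n{"q": "weather in Paris"}\n```'
    # Prints the re-serialized JSON body, or an ERROR/MESSAGE marker string.
    print(parser.parse(sample))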
| [
"instructions"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~vectorstores~deeplake.py | from __future__ import annotations
import logging
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
try:
import deeplake
from deeplake import VectorStore as DeepLakeVectorStore
from deeplake.core.fast_forwarding import version_compare
_DEEPLAKE_INSTALLED = True
except ImportError:
_DEEPLAKE_INSTALLED = False
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.vectorstores import VectorStore
from langchain_community.vectorstores.utils import maximal_marginal_relevance
logger = logging.getLogger(__name__)
class DeepLake(VectorStore):
"""`Activeloop Deep Lake` vector store.
We integrated deeplake's similarity search and filtering for fast prototyping.
Now, it supports Tensor Query Language (TQL) for production use cases
over billion rows.
Why Deep Lake?
- Not only stores embeddings, but also the original data with version control.
- Serverless, doesn't require another service and can be used with major
cloud providers (S3, GCS, etc.)
- More than just a multi-modal vector store. You can use the dataset
to fine-tune your own LLM models.
To use, you should have the ``deeplake`` python package installed.
Example:
.. code-block:: python
from langchain_community.vectorstores import DeepLake
from langchain_community.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vectorstore = DeepLake("langchain_store", embeddings.embed_query)
"""
_LANGCHAIN_DEFAULT_DEEPLAKE_PATH = "./deeplake/"
def __init__(
self,
dataset_path: str = _LANGCHAIN_DEFAULT_DEEPLAKE_PATH,
token: Optional[str] = None,
embedding: Optional[Embeddings] = None,
embedding_function: Optional[Embeddings] = None,
read_only: bool = False,
ingestion_batch_size: int = 1000,
num_workers: int = 0,
verbose: bool = True,
exec_option: Optional[str] = None,
runtime: Optional[Dict] = None,
index_params: Optional[Dict[str, Union[int, str]]] = None,
**kwargs: Any,
) -> None:
"""Creates an empty DeepLakeVectorStore or loads an existing one.
        The DeepLakeVectorStore is located at the specified ``dataset_path``.
Examples:
>>> # Create a vector store with default tensors
>>> deeplake_vectorstore = DeepLake(
... path = <path_for_storing_Data>,
... )
>>>
>>> # Create a vector store in the Deep Lake Managed Tensor Database
>>> data = DeepLake(
... path = "hub://org_id/dataset_name",
... runtime = {"tensor_db": True},
... )
Args:
dataset_path (str): Path to existing dataset or where to create
a new one. Defaults to _LANGCHAIN_DEFAULT_DEEPLAKE_PATH.
token (str, optional): Activeloop token, for fetching credentials
to the dataset at path if it is a Deep Lake dataset.
Tokens are normally autogenerated. Optional.
embedding (Embeddings, optional): Function to convert
either documents or query. Optional.
embedding_function (Embeddings, optional): Function to convert
either documents or query. Optional. Deprecated: keeping this
parameter for backwards compatibility.
read_only (bool): Open dataset in read-only mode. Default is False.
ingestion_batch_size (int): During data ingestion, data is divided
into batches. Batch size is the size of each batch.
Default is 1000.
num_workers (int): Number of workers to use during data ingestion.
Default is 0.
verbose (bool): Print dataset summary after each operation.
Default is True.
            exec_option (str, optional): DeepLakeVectorStore supports 4 ways to perform
                searching - "auto", "python", "compute_engine" and "tensor_db".
                Default is None, which resolves to "auto".
- ``auto``- Selects the best execution method based on the storage
location of the Vector Store. It is the default option.
- ``python`` - Pure-python implementation that runs on the client.
WARNING: using this with big datasets can lead to memory
issues. Data can be stored anywhere.
- ``compute_engine`` - C++ implementation of the Deep Lake Compute
Engine that runs on the client. Can be used for any data stored in
or connected to Deep Lake. Not for in-memory or local datasets.
- ``tensor_db`` - Hosted Managed Tensor Database that is
responsible for storage and query execution. Only for data stored in
the Deep Lake Managed Database. Use runtime = {"db_engine": True}
during dataset creation.
runtime (Dict, optional): Parameters for creating the Vector Store in
Deep Lake's Managed Tensor Database. Not applicable when loading an
existing Vector Store. To create a Vector Store in the Managed Tensor
Database, set `runtime = {"tensor_db": True}`.
index_params (Optional[Dict[str, Union[int, str]]], optional): Dictionary
containing information about vector index that will be created. Defaults
to None, which will utilize ``DEFAULT_VECTORSTORE_INDEX_PARAMS`` from
``deeplake.constants``. The specified key-values override the default
ones.
- threshold: The threshold for the dataset size above which an index
will be created for the embedding tensor. When the threshold value
is set to -1, index creation is turned off. Defaults to -1, which
turns off the index.
- distance_metric: This key specifies the method of calculating the
distance between vectors when creating the vector database (VDB)
index. It can either be a string that corresponds to a member of
the DistanceType enumeration, or the string value itself.
- If no value is provided, it defaults to "L2".
- "L2" corresponds to DistanceType.L2_NORM.
- "COS" corresponds to DistanceType.COSINE_SIMILARITY.
- additional_params: Additional parameters for fine-tuning the index.
**kwargs: Other optional keyword arguments.
Raises:
ValueError: If some condition is not met.
"""
self.ingestion_batch_size = ingestion_batch_size
self.num_workers = num_workers
self.verbose = verbose
if _DEEPLAKE_INSTALLED is False:
raise ImportError(
"Could not import deeplake python package. "
"Please install it with `pip install deeplake[enterprise]`."
)
if (
runtime == {"tensor_db": True}
and version_compare(deeplake.__version__, "3.6.7") == -1
):
raise ImportError(
"To use tensor_db option you need to update deeplake to `3.6.7` or "
"higher. "
f"Currently installed deeplake version is {deeplake.__version__}. "
)
self.dataset_path = dataset_path
if embedding_function:
logger.warning(
"Using embedding function is deprecated and will be removed "
"in the future. Please use embedding instead."
)
self.vectorstore = DeepLakeVectorStore(
path=self.dataset_path,
embedding_function=embedding_function or embedding,
read_only=read_only,
token=token,
exec_option=exec_option,
verbose=verbose,
runtime=runtime,
index_params=index_params,
**kwargs,
)
self._embedding_function = embedding_function or embedding
self._id_tensor_name = "ids" if "ids" in self.vectorstore.tensors() else "id"
@property
def embeddings(self) -> Optional[Embeddings]:
return self._embedding_function
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Examples:
>>> ids = deeplake_vectorstore.add_texts(
... texts = <list_of_texts>,
... metadatas = <list_of_metadata_jsons>,
... ids = <list_of_ids>,
... )
Args:
texts (Iterable[str]): Texts to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
ids (Optional[List[str]], optional): Optional list of IDs.
embedding_function (Optional[Embeddings], optional): Embedding function
to use to convert the text into embeddings.
**kwargs (Any): Any additional keyword arguments passed is not supported
by this method.
Returns:
List[str]: List of IDs of the added texts.
"""
if kwargs:
unsupported_items = "`, `".join(set(kwargs.keys()))
raise TypeError(
f"`{unsupported_items}` is/are not a valid argument to add_text method"
)
kwargs = {}
if ids:
if self._id_tensor_name == "ids": # for backwards compatibility
kwargs["ids"] = ids
else:
kwargs["id"] = ids
if metadatas is None:
metadatas = [{}] * len(list(texts))
if not isinstance(texts, list):
texts = list(texts)
if texts is None:
raise ValueError("`texts` parameter shouldn't be None.")
elif len(texts) == 0:
raise ValueError("`texts` parameter shouldn't be empty.")
return self.vectorstore.add(
text=texts,
metadata=metadatas,
embedding_data=texts,
embedding_tensor="embedding",
embedding_function=self._embedding_function.embed_documents, # type: ignore
return_ids=True,
**kwargs,
)
def _search_tql(
self,
tql: Optional[str],
exec_option: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
"""Function for performing tql_search.
Args:
tql (str): TQL Query string for direct evaluation.
Available only for `compute_engine` and `tensor_db`.
exec_option (str, optional): Supports 3 ways to search.
Could be "python", "compute_engine" or "tensor_db". Default is "python".
- ``python`` - Pure-python implementation for the client.
WARNING: not recommended for big datasets due to potential memory
issues.
- ``compute_engine`` - C++ implementation of Deep Lake Compute
Engine for the client. Not for in-memory or local datasets.
- ``tensor_db`` - Hosted Managed Tensor Database for storage
and query execution. Only for data in Deep Lake Managed Database.
Use runtime = {"db_engine": True} during dataset creation.
        Returns:
            List[Document] - A list of documents matching the TQL query.

        Raises:
            ValueError: If an unsupported keyword argument (for example
                ``return_score=True``) is passed alongside a TQL search.
        """
result = self.vectorstore.search(
query=tql,
exec_option=exec_option,
)
metadatas = result["metadata"]
texts = result["text"]
docs = [
Document(
page_content=text,
metadata=metadata,
)
for text, metadata in zip(texts, metadatas)
]
if kwargs:
unsupported_argument = next(iter(kwargs))
if kwargs[unsupported_argument] is not False:
raise ValueError(
f"specifying {unsupported_argument} is "
"not supported with tql search."
)
return docs
def _search(
self,
query: Optional[str] = None,
embedding: Optional[Union[List[float], np.ndarray]] = None,
embedding_function: Optional[Callable] = None,
k: int = 4,
distance_metric: Optional[str] = None,
use_maximal_marginal_relevance: bool = False,
fetch_k: Optional[int] = 20,
filter: Optional[Union[Dict, Callable]] = None,
return_score: bool = False,
exec_option: Optional[str] = None,
deep_memory: bool = False,
**kwargs: Any,
    ) -> Union[List[Document], List[Tuple[Document, float]]]:
"""
Return docs similar to query.
Args:
query (str, optional): Text to look up similar docs.
embedding (Union[List[float], np.ndarray], optional): Query's embedding.
embedding_function (Callable, optional): Function to convert `query`
into embedding.
k (int): Number of Documents to return.
distance_metric (Optional[str], optional): `L2` for Euclidean, `L1` for
Nuclear, `max` for L-infinity distance, `cos` for cosine similarity,
'dot' for dot product.
filter (Union[Dict, Callable], optional): Additional filter prior
to the embedding search.
- ``Dict`` - Key-value search on tensors of htype json, on an
AND basis (a sample must satisfy all key-value filters to be True)
Dict = {"tensor_name_1": {"key": value},
"tensor_name_2": {"key": value}}
- ``Function`` - Any function compatible with `deeplake.filter`.
use_maximal_marginal_relevance (bool): Use maximal marginal relevance.
fetch_k (int): Number of Documents for MMR algorithm.
return_score (bool): Return the score.
exec_option (str, optional): Supports 3 ways to perform searching.
Could be "python", "compute_engine" or "tensor_db".
- ``python`` - Pure-python implementation for the client.
WARNING: not recommended for big datasets.
- ``compute_engine`` - C++ implementation of Deep Lake Compute
Engine for the client. Not for in-memory or local datasets.
- ``tensor_db`` - Hosted Managed Tensor Database for storage
and query execution. Only for data in Deep Lake Managed Database.
Use runtime = {"db_engine": True} during dataset creation.
deep_memory (bool): Whether to use the Deep Memory model for improving
search results. Defaults to False if deep_memory is not specified in
the Vector Store initialization. If True, the distance metric is set
to "deepmemory_distance", which represents the metric with which the
model was trained. The search is performed using the Deep Memory model.
If False, the distance metric is set to "COS" or whatever distance
metric user specifies.
**kwargs: Additional keyword arguments.
Returns:
List of Documents by the specified distance metric,
if return_score True, return a tuple of (Document, score)
Raises:
ValueError: if both `embedding` and `embedding_function` are not specified.
"""
if kwargs.get("tql"):
return self._search_tql(
tql=kwargs["tql"],
exec_option=exec_option,
return_score=return_score,
embedding=embedding,
embedding_function=embedding_function,
distance_metric=distance_metric,
use_maximal_marginal_relevance=use_maximal_marginal_relevance,
filter=filter,
)
if embedding_function:
if isinstance(embedding_function, Embeddings):
_embedding_function = embedding_function.embed_query
else:
_embedding_function = embedding_function
elif self._embedding_function:
_embedding_function = self._embedding_function.embed_query
else:
_embedding_function = None
if embedding is None:
if _embedding_function is None:
raise ValueError(
"Either `embedding` or `embedding_function` needs to be"
" specified."
)
embedding = _embedding_function(query) if query else None
if isinstance(embedding, list):
embedding = np.array(embedding, dtype=np.float32)
if len(embedding.shape) > 1:
embedding = embedding[0]
result = self.vectorstore.search(
embedding=embedding,
k=fetch_k if use_maximal_marginal_relevance else k,
distance_metric=distance_metric,
filter=filter,
exec_option=exec_option,
return_tensors=["embedding", "metadata", "text", self._id_tensor_name],
deep_memory=deep_memory,
)
scores = result["score"]
embeddings = result["embedding"]
metadatas = result["metadata"]
texts = result["text"]
if use_maximal_marginal_relevance:
lambda_mult = kwargs.get("lambda_mult", 0.5)
indices = maximal_marginal_relevance( # type: ignore
embedding, # type: ignore
embeddings,
k=min(k, len(texts)),
lambda_mult=lambda_mult,
)
scores = [scores[i] for i in indices]
texts = [texts[i] for i in indices]
metadatas = [metadatas[i] for i in indices]
docs = [
Document(
page_content=text,
metadata=metadata,
)
for text, metadata in zip(texts, metadatas)
]
if return_score:
return [(doc, score) for doc, score in zip(docs, scores)]
return docs
def similarity_search(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Document]:
"""
Return docs most similar to query.
Examples:
>>> # Search using an embedding
>>> data = vector_store.similarity_search(
... query=<your_query>,
... k=<num_items>,
... exec_option=<preferred_exec_option>,
... )
>>> # Run tql search:
>>> data = vector_store.similarity_search(
... query=None,
... tql="SELECT * WHERE id == <id>",
... exec_option="compute_engine",
... )
Args:
k (int): Number of Documents to return. Defaults to 4.
query (str): Text to look up similar documents.
**kwargs: Additional keyword arguments include:
embedding (Callable): Embedding function to use. Defaults to None.
distance_metric (str): 'L2' for Euclidean, 'L1' for Nuclear, 'max'
for L-infinity, 'cos' for cosine, 'dot' for dot product.
Defaults to 'L2'.
filter (Union[Dict, Callable], optional): Additional filter
before embedding search.
- Dict: Key-value search on tensors of htype json,
(sample must satisfy all key-value filters)
Dict = {"tensor_1": {"key": value}, "tensor_2": {"key": value}}
- Function: Compatible with `deeplake.filter`.
Defaults to None.
exec_option (str): Supports 3 ways to perform searching.
'python', 'compute_engine', or 'tensor_db'. Defaults to 'python'.
- 'python': Pure-python implementation for the client.
WARNING: not recommended for big datasets.
- 'compute_engine': C++ implementation of the Compute Engine for
the client. Not for in-memory or local datasets.
- 'tensor_db': Managed Tensor Database for storage and query.
Only for data in Deep Lake Managed Database.
Use `runtime = {"db_engine": True}` during dataset creation.
deep_memory (bool): Whether to use the Deep Memory model for improving
search results. Defaults to False if deep_memory is not specified
in the Vector Store initialization. If True, the distance metric
is set to "deepmemory_distance", which represents the metric with
which the model was trained. The search is performed using the Deep
Memory model. If False, the distance metric is set to "COS" or
whatever distance metric user specifies.
Returns:
List[Document]: List of Documents most similar to the query vector.
"""
return self._search(
query=query,
k=k,
use_maximal_marginal_relevance=False,
return_score=False,
**kwargs,
)
def similarity_search_by_vector(
self,
embedding: Union[List[float], np.ndarray],
k: int = 4,
**kwargs: Any,
) -> List[Document]:
"""
Return docs most similar to embedding vector.
Examples:
>>> # Search using an embedding
>>> data = vector_store.similarity_search_by_vector(
... embedding=<your_embedding>,
... k=<num_items_to_return>,
... exec_option=<preferred_exec_option>,
... )
Args:
embedding (Union[List[float], np.ndarray]):
Embedding to find similar docs.
k (int): Number of Documents to return. Defaults to 4.
**kwargs: Additional keyword arguments including:
filter (Union[Dict, Callable], optional):
Additional filter before embedding search.
- ``Dict`` - Key-value search on tensors of htype json. True
if all key-value filters are satisfied.
Dict = {"tensor_name_1": {"key": value},
"tensor_name_2": {"key": value}}
- ``Function`` - Any function compatible with
`deeplake.filter`.
Defaults to None.
exec_option (str): Options for search execution include
"python", "compute_engine", or "tensor_db". Defaults to
"python".
- "python" - Pure-python implementation running on the client.
Can be used for data stored anywhere. WARNING: using this
option with big datasets is discouraged due to potential
memory issues.
- "compute_engine" - Performant C++ implementation of the Deep
Lake Compute Engine. Runs on the client and can be used for
any data stored in or connected to Deep Lake. It cannot be
used with in-memory or local datasets.
- "tensor_db" - Performant, fully-hosted Managed Tensor Database.
Responsible for storage and query execution. Only available
for data stored in the Deep Lake Managed Database.
To store datasets in this database, specify
`runtime = {"db_engine": True}` during dataset creation.
distance_metric (str): `L2` for Euclidean, `L1` for Nuclear,
`max` for L-infinity distance, `cos` for cosine similarity,
'dot' for dot product. Defaults to `L2`.
deep_memory (bool): Whether to use the Deep Memory model for improving
search results. Defaults to False if deep_memory is not specified
in the Vector Store initialization. If True, the distance metric
is set to "deepmemory_distance", which represents the metric with
which the model was trained. The search is performed using the Deep
Memory model. If False, the distance metric is set to "COS" or
whatever distance metric user specifies.
Returns:
List[Document]: List of Documents most similar to the query vector.
"""
return self._search(
embedding=embedding,
k=k,
use_maximal_marginal_relevance=False,
return_score=False,
**kwargs,
)
def similarity_search_with_score(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""
Run similarity search with Deep Lake with distance returned.
Examples:
>>> data = vector_store.similarity_search_with_score(
... query=<your_query>,
... embedding=<your_embedding_function>
... k=<number_of_items_to_return>,
... exec_option=<preferred_exec_option>,
... )
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
**kwargs: Additional keyword arguments. Some of these arguments are:
distance_metric: `L2` for Euclidean, `L1` for Nuclear, `max` L-infinity
distance, `cos` for cosine similarity, 'dot' for dot product.
Defaults to `L2`.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
embedding_function (Callable): Embedding function to use. Defaults
to None.
exec_option (str): DeepLakeVectorStore supports 3 ways to perform
searching. It could be either "python", "compute_engine" or
"tensor_db". Defaults to "python".
- "python" - Pure-python implementation running on the client.
Can be used for data stored anywhere. WARNING: using this
option with big datasets is discouraged due to potential
memory issues.
- "compute_engine" - Performant C++ implementation of the Deep
Lake Compute Engine. Runs on the client and can be used for
any data stored in or connected to Deep Lake. It cannot be used
with in-memory or local datasets.
- "tensor_db" - Performant, fully-hosted Managed Tensor Database.
Responsible for storage and query execution. Only available for
data stored in the Deep Lake Managed Database. To store datasets
in this database, specify `runtime = {"db_engine": True}`
during dataset creation.
deep_memory (bool): Whether to use the Deep Memory model for improving
search results. Defaults to False if deep_memory is not specified
in the Vector Store initialization. If True, the distance metric
is set to "deepmemory_distance", which represents the metric with
which the model was trained. The search is performed using the Deep
Memory model. If False, the distance metric is set to "COS" or
whatever distance metric user specifies.
Returns:
            List[Tuple[Document, float]]: List of documents most similar to the
                query text, together with their distance scores as floats.
        """
return self._search(
query=query,
k=k,
return_score=True,
**kwargs,
)
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
exec_option: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
"""
Return docs selected using the maximal marginal relevance. Maximal marginal
relevance optimizes for similarity to query AND diversity among selected docs.
Examples:
>>> data = vector_store.max_marginal_relevance_search_by_vector(
... embedding=<your_embedding>,
... fetch_k=<elements_to_fetch_before_mmr_search>,
... k=<number_of_items_to_return>,
... exec_option=<preferred_exec_option>,
... )
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch for MMR algorithm.
lambda_mult: Number between 0 and 1 determining the degree of diversity.
0 corresponds to max diversity and 1 to min diversity. Defaults to 0.5.
exec_option (str): DeepLakeVectorStore supports 3 ways for searching.
Could be "python", "compute_engine" or "tensor_db". Defaults to
"python".
- "python" - Pure-python implementation running on the client.
Can be used for data stored anywhere. WARNING: using this
option with big datasets is discouraged due to potential
memory issues.
- "compute_engine" - Performant C++ implementation of the Deep
Lake Compute Engine. Runs on the client and can be used for
any data stored in or connected to Deep Lake. It cannot be used
with in-memory or local datasets.
- "tensor_db" - Performant, fully-hosted Managed Tensor Database.
Responsible for storage and query execution. Only available for
data stored in the Deep Lake Managed Database. To store datasets
in this database, specify `runtime = {"db_engine": True}`
during dataset creation.
deep_memory (bool): Whether to use the Deep Memory model for improving
search results. Defaults to False if deep_memory is not specified
in the Vector Store initialization. If True, the distance metric
is set to "deepmemory_distance", which represents the metric with
which the model was trained. The search is performed using the Deep
Memory model. If False, the distance metric is set to "COS" or
whatever distance metric user specifies.
**kwargs: Additional keyword arguments.
Returns:
List[Documents] - A list of documents.
"""
return self._search(
embedding=embedding,
k=k,
fetch_k=fetch_k,
use_maximal_marginal_relevance=True,
lambda_mult=lambda_mult,
exec_option=exec_option,
**kwargs,
)
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
exec_option: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Examples:
>>> # Search using an embedding
>>> data = vector_store.max_marginal_relevance_search(
... query = <query_to_search>,
... embedding_function = <embedding_function_for_query>,
... k = <number_of_items_to_return>,
... exec_option = <preferred_exec_option>,
... )
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents for MMR algorithm.
lambda_mult: Value between 0 and 1. 0 corresponds
to maximum diversity and 1 to minimum.
Defaults to 0.5.
exec_option (str): Supports 3 ways to perform searching.
- "python" - Pure-python implementation running on the client.
Can be used for data stored anywhere. WARNING: using this
option with big datasets is discouraged due to potential
memory issues.
- "compute_engine" - Performant C++ implementation of the Deep
Lake Compute Engine. Runs on the client and can be used for
any data stored in or connected to Deep Lake. It cannot be
used with in-memory or local datasets.
- "tensor_db" - Performant, fully-hosted Managed Tensor Database.
Responsible for storage and query execution. Only available
for data stored in the Deep Lake Managed Database. To store
datasets in this database, specify
`runtime = {"db_engine": True}` during dataset creation.
deep_memory (bool): Whether to use the Deep Memory model for improving
search results. Defaults to False if deep_memory is not specified
in the Vector Store initialization. If True, the distance metric
is set to "deepmemory_distance", which represents the metric with
which the model was trained. The search is performed using the Deep
Memory model. If False, the distance metric is set to "COS" or
whatever distance metric user specifies.
**kwargs: Additional keyword arguments
Returns:
List of Documents selected by maximal marginal relevance.
Raises:
            ValueError: when MMR search is on but embedding function is
not specified.
"""
embedding_function = kwargs.get("embedding") or self._embedding_function
if embedding_function is None:
raise ValueError(
"For MMR search, you must specify an embedding function on"
" `creation` or during add call."
)
return self._search(
query=query,
k=k,
fetch_k=fetch_k,
use_maximal_marginal_relevance=True,
lambda_mult=lambda_mult,
exec_option=exec_option,
embedding_function=embedding_function, # type: ignore
**kwargs,
)
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Optional[Embeddings] = None,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
dataset_path: str = _LANGCHAIN_DEFAULT_DEEPLAKE_PATH,
**kwargs: Any,
    ) -> DeepLake:
        """Create a Deep Lake dataset from raw documents.
If a dataset_path is specified, the dataset will be persisted in that location,
otherwise by default at `./deeplake`
Examples:
>>> # Search using an embedding
>>> vector_store = DeepLake.from_texts(
... texts = <the_texts_that_you_want_to_embed>,
... embedding_function = <embedding_function_for_query>,
... k = <number_of_items_to_return>,
... exec_option = <preferred_exec_option>,
... )
Args:
dataset_path (str): - The full path to the dataset. Can be:
- Deep Lake cloud path of the form ``hub://username/dataset_name``.
To write to Deep Lake cloud datasets,
ensure that you are logged in to Deep Lake
(use 'activeloop login' from command line)
                - AWS S3 path of the form ``s3://bucketname/path/to/dataset``.
                    Credentials are required, e.g. set in the environment.
                - Google Cloud Storage path of the form
                    ``gcs://bucketname/path/to/dataset``.
                    Credentials are required, e.g. set in the environment.
- Local file system path of the form ``./path/to/dataset`` or
``~/path/to/dataset`` or ``path/to/dataset``.
- In-memory path of the form ``mem://path/to/dataset`` which doesn't
save the dataset, but keeps it in memory instead.
Should be used only for testing as it does not persist.
            texts (List[str]): List of texts to add.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
Note, in other places, it is called embedding_function.
metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
ids (Optional[List[str]]): List of document IDs. Defaults to None.
**kwargs: Additional keyword arguments.
Returns:
DeepLake: Deep Lake dataset.
"""
deeplake_dataset = cls(dataset_path=dataset_path, embedding=embedding, **kwargs)
deeplake_dataset.add_texts(
texts=texts,
metadatas=metadatas,
ids=ids,
)
return deeplake_dataset
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> bool:
"""Delete the entities in the dataset.
Args:
ids (Optional[List[str]], optional): The document_ids to delete.
Defaults to None.
**kwargs: Other keyword arguments that subclasses might use.
- filter (Optional[Dict[str, str]], optional): The filter to delete by.
- delete_all (Optional[bool], optional): Whether to drop the dataset.
Returns:
bool: Whether the delete operation was successful.
"""
filter = kwargs.get("filter")
delete_all = kwargs.get("delete_all")
self.vectorstore.delete(ids=ids, filter=filter, delete_all=delete_all)
return True
@classmethod
def force_delete_by_path(cls, path: str) -> None:
"""Force delete dataset by path.
Args:
path (str): path of the dataset to delete.
Raises:
ValueError: if deeplake is not installed.
"""
try:
import deeplake
except ImportError:
raise ValueError(
"Could not import deeplake python package. "
"Please install it with `pip install deeplake`."
)
deeplake.delete(path, large_ok=True, force=True)
def delete_dataset(self) -> None:
"""Delete the collection."""
self.delete(delete_all=True)
def ds(self) -> Any:
logger.warning(
"this method is deprecated and will be removed, "
"better to use `db.vectorstore.dataset` instead."
)
return self.vectorstore.dataset
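

# End-to-end sketch, assuming `deeplake` is installed; the dataset path and
# texts are placeholders, and FakeEmbeddings stands in for a real embedding
# model such as OpenAIEmbeddings.
if __name__ == "__main__":
    from langchain_community.embeddings import FakeEmbeddings

    db = DeepLake.from_texts(
        texts=["hello deep lake", "vector stores keep embeddings"],
        embedding=FakeEmbeddings(size=128),
        dataset_path="./deeplake_example/",
    )
    # Similarity search returns the stored texts closest to the query.
    for doc in db.similarity_search("hello", k=1):
        print(doc.page_content)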
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~vectorstores~test_alibabacloud_opensearch.py | import time
from typing import List
from libs.core.langchain_core.documents import Document
from langchain_community.vectorstores.alibabacloud_opensearch import (
AlibabaCloudOpenSearch,
AlibabaCloudOpenSearchSettings,
)
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
OS_TOKEN_COUNT = 1536
texts = ["foo", "bar", "baz"]
class FakeEmbeddingsWithOsDimension(FakeEmbeddings):
"""Fake embeddings functionality for testing."""
def embed_documents(self, embedding_texts: List[str]) -> List[List[float]]:
"""Return simple embeddings."""
return [
[float(1.0)] * (OS_TOKEN_COUNT - 1) + [float(i)]
for i in range(len(embedding_texts))
]
def embed_query(self, text: str) -> List[float]:
"""Return simple embeddings."""
return [float(1.0)] * (OS_TOKEN_COUNT - 1) + [float(texts.index(text))]
"""
settings = AlibabaCloudOpenSearchSettings(
endpoint="The endpoint of opensearch instance, If you want to access through
the public network, you need to enable public network access in the network
information of the instance details. If you want to access within
the Alibaba Cloud VPC, you can directly use the API domain name.",
instance_id="The identify of opensearch instance",
protocol (str): "Communication Protocol between SDK and Server, default is http.",
username="The username specified when purchasing the instance.",
password="The password specified when purchasing the instance.",
namespace (str) : "The instance data will be partitioned based on the
namespace field, If the namespace is enabled, you need to specify the
namespace field name during initialization. Otherwise, the queries cannot
be executed correctly, default is empty.",
table_name="The table name is specified when adding a table after completing
the instance configuration.",
field_name_mapping={
# insert data into opensearch based on the mapping name of the field.
"id": "The id field name map of index document.",
"document": "The text field name map of index document.",
"embedding": "The embedding field name map of index document,"
"the values must be in float16 multivalue type "
"and separated by commas.",
"metadata_x": "The metadata field name map of index document, "
"could specify multiple, The value field contains "
"mapping name and operator, the operator would be "
"used when executing metadata filter query",
},
)
"""
settings = AlibabaCloudOpenSearchSettings(
endpoint="ha-cn-5yd3fhdm102.public.ha.aliyuncs.com",
instance_id="ha-cn-5yd3fhdm102",
username="instance user name",
password="instance password",
table_name="instance table name",
field_name_mapping={
# insert data into opensearch based on the mapping name of the field.
"id": "id",
"document": "document",
"embedding": "embedding",
"string_field": "string_filed,=",
"int_field": "int_filed,=",
"float_field": "float_field,=",
"double_field": "double_field,=",
},
)
embeddings = FakeEmbeddingsWithOsDimension()
def test_create_alibabacloud_opensearch() -> None:
opensearch = create_alibabacloud_opensearch()
time.sleep(1)
output = opensearch.similarity_search("foo", k=10)
assert len(output) == 3
def test_alibabacloud_opensearch_with_text_query() -> None:
opensearch = create_alibabacloud_opensearch()
output = opensearch.similarity_search(query="foo", k=1)
assert output == [
Document(
page_content="foo",
metadata={
"string_field": "value1",
"int_field": 1,
"float_field": 1.0,
"double_field": 2.0,
},
)
]
output = opensearch.similarity_search(query="bar", k=1)
assert output == [
Document(
page_content="bar",
metadata={
"string_field": "value2",
"int_field": 2,
"float_field": 3.0,
"double_field": 4.0,
},
)
]
output = opensearch.similarity_search(query="baz", k=1)
assert output == [
Document(
page_content="baz",
metadata={
"string_field": "value3",
"int_field": 3,
"float_field": 5.0,
"double_field": 6.0,
},
)
]
def test_alibabacloud_opensearch_with_vector_query() -> None:
opensearch = create_alibabacloud_opensearch()
output = opensearch.similarity_search_by_vector(embeddings.embed_query("foo"), k=1)
assert output == [
Document(
page_content="foo",
metadata={
"string_field": "value1",
"int_field": 1,
"float_field": 1.0,
"double_field": 2.0,
},
)
]
output = opensearch.similarity_search_by_vector(embeddings.embed_query("bar"), k=1)
assert output == [
Document(
page_content="bar",
metadata={
"string_field": "value2",
"int_field": 2,
"float_field": 3.0,
"double_field": 4.0,
},
)
]
output = opensearch.similarity_search_by_vector(embeddings.embed_query("baz"), k=1)
assert output == [
Document(
page_content="baz",
metadata={
"string_field": "value3",
"int_field": 3,
"float_field": 5.0,
"double_field": 6.0,
},
)
]
def test_alibabacloud_opensearch_with_text_and_meta_query() -> None:
opensearch = create_alibabacloud_opensearch()
output = opensearch.similarity_search(
query="foo", search_filter={"string_field": "value1"}, k=1
)
assert output == [
Document(
page_content="foo",
metadata={
"string_field": "value1",
"int_field": 1,
"float_field": 1.0,
"double_field": 2.0,
},
)
]
output = opensearch.similarity_search(
query="bar", search_filter={"int_field": 2}, k=1
)
assert output == [
Document(
page_content="bar",
metadata={
"string_field": "value2",
"int_field": 2,
"float_field": 3.0,
"double_field": 4.0,
},
)
]
output = opensearch.similarity_search(
query="baz", search_filter={"float_field": 5.0}, k=1
)
assert output == [
Document(
page_content="baz",
metadata={
"string_field": "value3",
"int_field": 3,
"float_field": 5.0,
"double_field": 6.0,
},
)
]
output = opensearch.similarity_search(
query="baz", search_filter={"float_field": 6.0}, k=1
)
assert len(output) == 0
def test_alibabacloud_opensearch_with_text_and_meta_score_query() -> None:
opensearch = create_alibabacloud_opensearch()
output = opensearch.similarity_search_with_relevance_scores(
query="foo",
search_filter={
"string_field": "value1",
"int_field": 1,
"float_field": 1.0,
"double_field": 2.0,
},
k=1,
)
assert output == [
(
Document(
page_content="foo",
metadata={
"string_field": "value1",
"int_field": 1,
"float_field": 1.0,
"double_field": 2.0,
},
),
0.0,
)
]
def test_alibabacloud_opensearch_delete_doc() -> None:
opensearch = create_alibabacloud_opensearch()
delete_result = opensearch.delete_documents_with_texts(["bar"])
assert delete_result
time.sleep(1)
search_result = opensearch.similarity_search(
query="bar", search_filter={"int_field": 2}, k=1
)
assert len(search_result) == 0
def create_alibabacloud_opensearch() -> AlibabaCloudOpenSearch:
metadatas = [
{
"string_field": "value1",
"int_field": 1,
"float_field": 1.0,
"double_field": 2.0,
},
{
"string_field": "value2",
"int_field": 2,
"float_field": 3.0,
"double_field": 4.0,
},
{
"string_field": "value3",
"int_field": 3,
"float_field": 5.0,
"double_field": 6.0,
},
]
return AlibabaCloudOpenSearch.from_texts(
texts=texts,
embedding=FakeEmbeddingsWithOsDimension(),
metadatas=metadatas,
config=settings,
)
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~unit_tests~llms~test_fireworks.py | """Test Fireworks chat model"""
import sys
import pytest
from libs.core.langchain_core.pydantic_v1 import SecretStr
from pytest import CaptureFixture
from langchain_community.llms import Fireworks
if sys.version_info < (3, 9):
pytest.skip("fireworks-ai requires Python > 3.8", allow_module_level=True)
@pytest.mark.requires("fireworks")
def test_api_key_is_string() -> None:
llm = Fireworks(fireworks_api_key="secret-api-key")
assert isinstance(llm.fireworks_api_key, SecretStr)
@pytest.mark.requires("fireworks")
def test_api_key_masked_when_passed_via_constructor(
capsys: CaptureFixture,
) -> None:
llm = Fireworks(fireworks_api_key="secret-api-key")
print(llm.fireworks_api_key, end="")
captured = capsys.readouterr()
assert captured.out == "**********"
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~roam.py | from pathlib import Path
from typing import List
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class RoamLoader(BaseLoader):
"""Load `Roam` files from a directory."""
def __init__(self, path: str):
"""Initialize with a path."""
self.file_path = path
def load(self) -> List[Document]:
"""Load documents."""
ps = list(Path(self.file_path).glob("**/*.md"))
docs = []
for p in ps:
with open(p) as f:
text = f.read()
metadata = {"source": str(p)}
docs.append(Document(page_content=text, metadata=metadata))
return docs
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~memory~kg.py | from typing import Any, Dict, List, Type, Union
from libs.core.langchain_core.language_models import BaseLanguageModel
from libs.core.langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string
from libs.core.langchain_core.prompts import BasePromptTemplate
from libs.core.langchain_core.pydantic_v1 import Field
from langchain.chains.llm import LLMChain
from langchain.graphs import NetworkxEntityGraph
from langchain.graphs.networkx_graph import KnowledgeTriple, get_entities, parse_triples
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.prompt import (
ENTITY_EXTRACTION_PROMPT,
KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT,
)
from langchain.memory.utils import get_prompt_input_key
class ConversationKGMemory(BaseChatMemory):
"""Knowledge graph conversation memory.
Integrates with external knowledge graph to store and retrieve
information about knowledge triples in the conversation.
"""
    k: int = 2
    """Number of previous utterances to include in the context."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
kg: NetworkxEntityGraph = Field(default_factory=NetworkxEntityGraph)
knowledge_extraction_prompt: BasePromptTemplate = KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT
entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT
llm: BaseLanguageModel
    summary_message_cls: Type[BaseMessage] = SystemMessage
    """Message class used to wrap knowledge summaries when return_messages is True."""
memory_key: str = "history" #: :meta private:
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
entities = self._get_current_entities(inputs)
summary_strings = []
for entity in entities:
knowledge = self.kg.get_entity_knowledge(entity)
if knowledge:
summary = f"On {entity}: {'. '.join(knowledge)}."
summary_strings.append(summary)
context: Union[str, List]
if not summary_strings:
context = [] if self.return_messages else ""
elif self.return_messages:
context = [
self.summary_message_cls(content=text) for text in summary_strings
]
else:
context = "\n".join(summary_strings)
return {self.memory_key: context}
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str:
"""Get the input key for the prompt."""
if self.input_key is None:
return get_prompt_input_key(inputs, self.memory_variables)
return self.input_key
def _get_prompt_output_key(self, outputs: Dict[str, Any]) -> str:
"""Get the output key for the prompt."""
if self.output_key is None:
if len(outputs) != 1:
raise ValueError(f"One output key expected, got {outputs.keys()}")
return list(outputs.keys())[0]
return self.output_key
def get_current_entities(self, input_string: str) -> List[str]:
chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt)
buffer_string = get_buffer_string(
self.chat_memory.messages[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
output = chain.predict(
history=buffer_string,
input=input_string,
)
return get_entities(output)
def _get_current_entities(self, inputs: Dict[str, Any]) -> List[str]:
"""Get the current entities in the conversation."""
prompt_input_key = self._get_prompt_input_key(inputs)
return self.get_current_entities(inputs[prompt_input_key])
def get_knowledge_triplets(self, input_string: str) -> List[KnowledgeTriple]:
chain = LLMChain(llm=self.llm, prompt=self.knowledge_extraction_prompt)
buffer_string = get_buffer_string(
self.chat_memory.messages[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
output = chain.predict(
history=buffer_string,
input=input_string,
verbose=True,
)
knowledge = parse_triples(output)
return knowledge
def _get_and_update_kg(self, inputs: Dict[str, Any]) -> None:
"""Get and update knowledge graph from the conversation history."""
prompt_input_key = self._get_prompt_input_key(inputs)
knowledge = self.get_knowledge_triplets(inputs[prompt_input_key])
for triple in knowledge:
self.kg.add_triple(triple)
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
super().save_context(inputs, outputs)
self._get_and_update_kg(inputs)
def clear(self) -> None:
"""Clear memory contents."""
super().clear()
self.kg.clear()
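

# Usage sketch: plug the memory into a ConversationChain. Assumes an LLM such
# as OpenAI is configured (OPENAI_API_KEY set); any BaseLanguageModel works.
if __name__ == "__main__":
    from langchain.chains import ConversationChain
    from langchain_community.llms import OpenAI

    llm = OpenAI(temperature=0)
    memory = ConversationKGMemory(llm=llm)
    chain = ConversationChain(llm=llm, memory=memory)
    # Facts stated in earlier turns are stored as knowledge triples and
    # surfaced as context for later questions about those entities.
    chain.predict(input="My name is Sam and I work at Acme.")
    print(memory.load_memory_variables({"input": "Where does Sam work?"}))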
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~embeddings~llm_rails.py | """ This file is for LLMRails Embedding """
import logging
import os
from typing import List, Optional
import requests
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra
class LLMRailsEmbeddings(BaseModel, Embeddings):
"""LLMRails embedding models.
To use, you should have the environment
variable ``LLM_RAILS_API_KEY`` set with your API key or pass it
as a named parameter to the constructor.
Model can be one of ["embedding-english-v1","embedding-multi-v1"]
Example:
.. code-block:: python
from langchain_community.embeddings import LLMRailsEmbeddings
            embeddings = LLMRailsEmbeddings(
model="embedding-english-v1", api_key="my-api-key"
)
"""
model: str = "embedding-english-v1"
"""Model name to use."""
api_key: Optional[str] = None
"""LLMRails API key."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call out to the LLMRails embedding endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
api_key = self.api_key or os.environ.get("LLM_RAILS_API_KEY")
if api_key is None:
logging.warning("Can't find LLMRails credentials in environment.")
raise ValueError("LLM_RAILS_API_KEY is not set")
response = requests.post(
"https://api.llmrails.com/v1/embeddings",
headers={"X-API-KEY": api_key},
json={"input": texts, "model": self.model},
timeout=60,
)
return [item["embedding"] for item in response.json()["data"]]
    def embed_query(self, text: str) -> List[float]:
        """Call out to the LLMRails embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self.embed_documents([text])[0]
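

# Usage sketch; requires a valid key in the LLM_RAILS_API_KEY environment
# variable (or passed as `api_key`) and network access.
if __name__ == "__main__":
    embedder = LLMRailsEmbeddings(model="embedding-english-v1")
    vectors = embedder.embed_documents(["hello world", "goodbye world"])
    # One embedding vector per input text.
    print(len(vectors), len(vectors[0]))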
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~embeddings~johnsnowlabs.py | import os
import sys
from typing import Any, List
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra
class JohnSnowLabsEmbeddings(BaseModel, Embeddings):
"""JohnSnowLabs embedding models
To use, you should have the ``johnsnowlabs`` python package installed.
Example:
.. code-block:: python
from langchain_community.embeddings.johnsnowlabs import JohnSnowLabsEmbeddings
embedding = JohnSnowLabsEmbeddings(model='embed_sentence.bert')
output = embedding.embed_query("foo bar")
""" # noqa: E501
model: Any = "embed_sentence.bert"
def __init__(
self,
model: Any = "embed_sentence.bert",
hardware_target: str = "cpu",
**kwargs: Any,
):
"""Initialize the johnsnowlabs model."""
super().__init__(**kwargs)
# 1) Check imports
try:
from johnsnowlabs import nlp
from nlu.pipe.pipeline import NLUPipeline
except ImportError as exc:
raise ImportError(
"Could not import johnsnowlabs python package. "
"Please install it with `pip install johnsnowlabs`."
) from exc
# 2) Start a Spark Session
try:
os.environ["PYSPARK_PYTHON"] = sys.executable
os.environ["PYSPARK_DRIVER_PYTHON"] = sys.executable
nlp.start(hardware_target=hardware_target)
except Exception as exc:
raise Exception("Failure starting Spark Session") from exc
# 3) Load the model
try:
if isinstance(model, str):
self.model = nlp.load(model)
elif isinstance(model, NLUPipeline):
self.model = model
else:
self.model = nlp.to_nlu_pipe(model)
except Exception as exc:
raise Exception("Failure loading model") from exc
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a JohnSnowLabs transformer model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
df = self.model.predict(texts, output_level="document")
emb_col = None
for c in df.columns:
if "embedding" in c:
emb_col = c
return [vec.tolist() for vec in df[emb_col].tolist()]
def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a JohnSnowLabs transformer model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self.embed_documents([text])[0]
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~docstore~wikipedia.py | """Wrapper around wikipedia API."""
from typing import Union
from libs.core.langchain_core.documents import Document
from langchain_community.docstore.base import Docstore
class Wikipedia(Docstore):
"""Wrapper around wikipedia API."""
def __init__(self) -> None:
"""Check that wikipedia package is installed."""
try:
import wikipedia # noqa: F401
except ImportError:
raise ImportError(
"Could not import wikipedia python package. "
"Please install it with `pip install wikipedia`."
)
def search(self, search: str) -> Union[str, Document]:
"""Try to search for wiki page.
        If the page exists, return its content as a Document.
        If the page does not exist, return a message listing similar entries.
Args:
search: search string.
Returns: a Document object or error message.
"""
import wikipedia
try:
page_content = wikipedia.page(search).content
url = wikipedia.page(search).url
result: Union[str, Document] = Document(
page_content=page_content, metadata={"page": url}
)
except wikipedia.PageError:
result = f"Could not find [{search}]. Similar: {wikipedia.search(search)}"
except wikipedia.DisambiguationError:
result = f"Could not find [{search}]. Similar: {wikipedia.search(search)}"
return result
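

# Quick usage sketch; requires the `wikipedia` package and network access.
if __name__ == "__main__":
    docstore = Wikipedia()
    result = docstore.search("Python (programming language)")
    # Either a Document with the page text, or a "Could not find ..." string.
    if isinstance(result, Document):
        print(result.metadata["page"])
    else:
        print(result)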
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~storage~_lc_store.py | """Create a key-value store for any langchain serializable object."""
from typing import Callable, Optional
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.load import Serializable, dumps, loads
from libs.core.langchain_core.stores import BaseStore, ByteStore
from langchain.storage.encoder_backed import EncoderBackedStore
def _dump_as_bytes(obj: Serializable) -> bytes:
"""Return a bytes representation of a document."""
return dumps(obj).encode("utf-8")
def _dump_document_as_bytes(obj: Document) -> bytes:
"""Return a bytes representation of a document."""
if not isinstance(obj, Document):
raise TypeError("Expected a Document instance")
return dumps(obj).encode("utf-8")
def _load_document_from_bytes(serialized: bytes) -> Document:
"""Return a document from a bytes representation."""
obj = loads(serialized.decode("utf-8"))
if not isinstance(obj, Document):
raise TypeError(f"Expected a Document instance. Got {type(obj)}")
return obj
def _load_from_bytes(serialized: bytes) -> Serializable:
"""Return a document from a bytes representation."""
return loads(serialized.decode("utf-8"))
def _identity(x: str) -> str:
"""Return the same object."""
return x
# PUBLIC API
def create_lc_store(
store: ByteStore,
*,
key_encoder: Optional[Callable[[str], str]] = None,
) -> BaseStore[str, Serializable]:
"""Create a store for langchain serializable objects from a bytes store.
Args:
store: A bytes store to use as the underlying store.
key_encoder: A function to encode keys; if None uses identity function.
Returns:
A key-value store for documents.
"""
return EncoderBackedStore(
store,
key_encoder or _identity,
_dump_as_bytes,
_load_from_bytes,
)
def create_kv_docstore(
store: ByteStore,
*,
key_encoder: Optional[Callable[[str], str]] = None,
) -> BaseStore[str, Document]:
"""Create a store for langchain Document objects from a bytes store.
This store does run time type checking to ensure that the values are
Document objects.
Args:
store: A bytes store to use as the underlying store.
key_encoder: A function to encode keys; if None uses identity function.
Returns:
A key-value store for documents.
"""
return EncoderBackedStore(
store,
key_encoder or _identity,
_dump_document_as_bytes,
_load_document_from_bytes,
)
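

# Sketch of typical usage with an in-memory byte store so the example is
# self-contained; `InMemoryByteStore` is assumed to be available from
# langchain.storage in this version.
if __name__ == "__main__":
    from langchain.storage import InMemoryByteStore

    docstore = create_kv_docstore(InMemoryByteStore())
    docstore.mset([("doc-1", Document(page_content="hello", metadata={"k": 1}))])
    # Values round-trip through the serialized bytes representation.
    print(docstore.mget(["doc-1"]))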
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~chains~graph_qa~kuzu.py | """Question answering over a graph."""
from __future__ import annotations
from typing import Any, Dict, List, Optional
from libs.core.langchain_core.language_models import BaseLanguageModel
from libs.core.langchain_core.prompts import BasePromptTemplate
from libs.core.langchain_core.pydantic_v1 import Field
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.graph_qa.prompts import CYPHER_QA_PROMPT, KUZU_GENERATION_PROMPT
from langchain.chains.llm import LLMChain
from langchain.graphs.kuzu_graph import KuzuGraph
class KuzuQAChain(Chain):
"""Question-answering against a graph by generating Cypher statements for Kùzu.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
graph: KuzuGraph = Field(exclude=True)
cypher_generation_chain: LLMChain
qa_chain: LLMChain
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
@property
def input_keys(self) -> List[str]:
"""Return the input keys.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return the output keys.
:meta private:
"""
_output_keys = [self.output_key]
return _output_keys
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
*,
qa_prompt: BasePromptTemplate = CYPHER_QA_PROMPT,
cypher_prompt: BasePromptTemplate = KUZU_GENERATION_PROMPT,
**kwargs: Any,
) -> KuzuQAChain:
"""Initialize from LLM."""
qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
cypher_generation_chain = LLMChain(llm=llm, prompt=cypher_prompt)
return cls(
qa_chain=qa_chain,
cypher_generation_chain=cypher_generation_chain,
**kwargs,
)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
"""Generate Cypher statement, use it to look up in db and answer question."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
question = inputs[self.input_key]
generated_cypher = self.cypher_generation_chain.run(
{"question": question, "schema": self.graph.get_schema}, callbacks=callbacks
)
_run_manager.on_text("Generated Cypher:", end="\n", verbose=self.verbose)
_run_manager.on_text(
generated_cypher, color="green", end="\n", verbose=self.verbose
)
context = self.graph.query(generated_cypher)
_run_manager.on_text("Full Context:", end="\n", verbose=self.verbose)
_run_manager.on_text(
str(context), color="green", end="\n", verbose=self.verbose
)
result = self.qa_chain(
{"question": question, "context": context},
callbacks=callbacks,
)
return {self.output_key: result[self.qa_chain.output_key]}
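

# Construction sketch, assuming the `kuzu` package is installed and a local
# Kùzu database with a populated schema exists; the path and question are
# placeholders.
if __name__ == "__main__":
    import kuzu
    from langchain_community.llms import OpenAI

    db = kuzu.Database("./kuzu_db")
    graph = KuzuGraph(db)
    chain = KuzuQAChain.from_llm(llm=OpenAI(temperature=0), graph=graph)
    # The chain generates Cypher from the graph schema, executes it, and then
    # phrases the final answer with the QA prompt.
    print(chain.run("Which movies did Robert De Niro act in?"))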
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~retrievers~qdrant_sparse_vector_retriever.py | import uuid
from itertools import islice
from typing import (
Any,
Callable,
Dict,
Generator,
Iterable,
List,
Optional,
Sequence,
Tuple,
cast,
)
from libs.core.langchain_core.callbacks import CallbackManagerForRetrieverRun
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.pydantic_v1 import Extra, root_validator
from libs.core.langchain_core.retrievers import BaseRetriever
from langchain_community.vectorstores.qdrant import Qdrant, QdrantException
class QdrantSparseVectorRetriever(BaseRetriever):
"""Qdrant sparse vector retriever."""
client: Any
"""'qdrant_client' instance to use."""
collection_name: str
"""Qdrant collection name."""
sparse_vector_name: str
"""Name of the sparse vector to use."""
sparse_encoder: Callable[[str], Tuple[List[int], List[float]]]
"""Sparse encoder function to use."""
k: int = 4
"""Number of documents to return per query. Defaults to 4."""
filter: Optional[Any] = None
"""Qdrant qdrant_client.models.Filter to use for queries. Defaults to None."""
content_payload_key: str = "content"
"""Payload field containing the document content. Defaults to 'content'"""
metadata_payload_key: str = "metadata"
"""Payload field containing the document metadata. Defaults to 'metadata'."""
search_options: Dict[str, Any] = {}
"""Additional search options to pass to qdrant_client.QdrantClient.search()."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that 'qdrant_client' python package exists in environment."""
try:
from grpc import RpcError
from qdrant_client import QdrantClient, models
from qdrant_client.http.exceptions import UnexpectedResponse
except ImportError:
raise ImportError(
"Could not import qdrant-client python package. "
"Please install it with `pip install qdrant-client`."
)
client = values["client"]
if not isinstance(client, QdrantClient):
raise ValueError(
f"client should be an instance of qdrant_client.QdrantClient, "
f"got {type(client)}"
)
filter = values["filter"]
if filter is not None and not isinstance(filter, models.Filter):
raise ValueError(
f"filter should be an instance of qdrant_client.models.Filter, "
f"got {type(filter)}"
)
client = cast(QdrantClient, client)
collection_name = values["collection_name"]
sparse_vector_name = values["sparse_vector_name"]
try:
collection_info = client.get_collection(collection_name)
sparse_vectors_config = collection_info.config.params.sparse_vectors
if sparse_vector_name not in sparse_vectors_config:
raise QdrantException(
f"Existing Qdrant collection {collection_name} does not "
f"contain sparse vector named {sparse_vector_name}."
f"Did you mean one of {', '.join(sparse_vectors_config.keys())}?"
)
except (UnexpectedResponse, RpcError, ValueError):
raise QdrantException(
f"Qdrant collection {collection_name} does not exist."
)
return values
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
from qdrant_client import QdrantClient, models
client = cast(QdrantClient, self.client)
query_indices, query_values = self.sparse_encoder(query)
results = client.search(
self.collection_name,
query_filter=self.filter,
query_vector=models.NamedSparseVector(
name=self.sparse_vector_name,
vector=models.SparseVector(
indices=query_indices,
values=query_values,
),
),
limit=self.k,
with_vectors=False,
**self.search_options,
)
return [
Qdrant._document_from_scored_point(
point, self.content_payload_key, self.metadata_payload_key
)
for point in results
]
def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]:
"""Run more documents through the embeddings and add to the vectorstore.
Args:
            documents (List[Document]): Documents to add to the vectorstore.
Returns:
List[str]: List of IDs of the added texts.
"""
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return self.add_texts(texts, metadatas, **kwargs)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[Sequence[str]] = None,
batch_size: int = 64,
**kwargs: Any,
) -> List[str]:
from qdrant_client import QdrantClient
added_ids = []
client = cast(QdrantClient, self.client)
for batch_ids, points in self._generate_rest_batches(
texts, metadatas, ids, batch_size
):
client.upsert(self.collection_name, points=points, **kwargs)
added_ids.extend(batch_ids)
return added_ids
def _generate_rest_batches(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[Sequence[str]] = None,
batch_size: int = 64,
) -> Generator[Tuple[List[str], List[Any]], None, None]:
from qdrant_client import models as rest
texts_iterator = iter(texts)
metadatas_iterator = iter(metadatas or [])
ids_iterator = iter(ids or [uuid.uuid4().hex for _ in iter(texts)])
while batch_texts := list(islice(texts_iterator, batch_size)):
# Take the corresponding metadata and id for each text in a batch
batch_metadatas = list(islice(metadatas_iterator, batch_size)) or None
batch_ids = list(islice(ids_iterator, batch_size))
# Generate the sparse embeddings for all the texts in a batch
batch_embeddings: List[Tuple[List[int], List[float]]] = [
self.sparse_encoder(text) for text in batch_texts
]
points = [
rest.PointStruct(
id=point_id,
vector={
self.sparse_vector_name: rest.SparseVector(
indices=sparse_vector[0],
values=sparse_vector[1],
)
},
payload=payload,
)
for point_id, sparse_vector, payload in zip(
batch_ids,
batch_embeddings,
Qdrant._build_payloads(
batch_texts,
batch_metadatas,
self.content_payload_key,
self.metadata_payload_key,
),
)
]
yield batch_ids, points
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~chat_models~gpt_router.py | from __future__ import annotations
import logging
from typing import (
TYPE_CHECKING,
Any,
AsyncGenerator,
AsyncIterator,
Callable,
Dict,
Generator,
Iterator,
List,
Mapping,
Optional,
Tuple,
Union,
)
from libs.core.langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from libs.core.langchain_core.language_models.chat_models import (
BaseChatModel,
agenerate_from_stream,
generate_from_stream,
)
from libs.core.langchain_core.language_models.llms import create_base_retry_decorator
from libs.core.langchain_core.messages import AIMessageChunk, BaseMessage
from libs.core.langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from libs.core.langchain_core.pydantic_v1 import BaseModel, Field, root_validator
from libs.core.langchain_core.utils import get_from_dict_or_env
from langchain_community.adapters.openai import (
convert_dict_to_message,
convert_message_to_dict,
)
from langchain_community.chat_models.openai import _convert_delta_to_message_chunk
if TYPE_CHECKING:
from gpt_router.models import ChunkedGenerationResponse, GenerationResponse
logger = logging.getLogger(__name__)
DEFAULT_API_BASE_URL = "https://gpt-router-preview.writesonic.com"
class GPTRouterException(Exception):
"""Error with the `GPTRouter APIs`"""
class GPTRouterModel(BaseModel):
name: str
provider_name: str
def get_ordered_generation_requests(
models_priority_list: List[GPTRouterModel], **kwargs
):
"""
Return the body for the model router input.
"""
from gpt_router.models import GenerationParams, ModelGenerationRequest
return [
ModelGenerationRequest(
model_name=model.name,
provider_name=model.provider_name,
order=index + 1,
prompt_params=GenerationParams(**kwargs),
)
for index, model in enumerate(models_priority_list)
]
def _create_retry_decorator(
llm: GPTRouter,
run_manager: Optional[
Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
] = None,
) -> Callable[[Any], Any]:
from gpt_router import exceptions
errors = [
exceptions.GPTRouterApiTimeoutError,
exceptions.GPTRouterInternalServerError,
exceptions.GPTRouterNotAvailableError,
exceptions.GPTRouterTooManyRequestsError,
]
return create_base_retry_decorator(
error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
)
def completion_with_retry(
llm: GPTRouter,
models_priority_list: List[GPTRouterModel],
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Union[GenerationResponse, Generator[ChunkedGenerationResponse, None, None]]:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
ordered_generation_requests = get_ordered_generation_requests(
models_priority_list, **kwargs
)
return llm.client.generate(
ordered_generation_requests=ordered_generation_requests,
is_stream=kwargs.get("stream", False),
)
return _completion_with_retry(**kwargs)
async def acompletion_with_retry(
llm: GPTRouter,
models_priority_list: List[GPTRouterModel],
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Union[GenerationResponse, AsyncGenerator[ChunkedGenerationResponse, None]]:
"""Use tenacity to retry the async completion call."""
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
ordered_generation_requests = get_ordered_generation_requests(
models_priority_list, **kwargs
)
return await llm.client.agenerate(
ordered_generation_requests=ordered_generation_requests,
is_stream=kwargs.get("stream", False),
)
return await _completion_with_retry(**kwargs)
class GPTRouter(BaseChatModel):
"""GPTRouter by Writesonic Inc.
For more information, see https://gpt-router.writesonic.com/docs
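
    Example (illustrative sketch; assumes ``GPT_ROUTER_API_KEY`` is set in the
    environment; the model and provider names below are placeholders, check the
    GPTRouter docs for the models available to you):
        .. code-block:: python

            from langchain_community.chat_models import GPTRouter
            from langchain_community.chat_models.gpt_router import GPTRouterModel

            claude = GPTRouterModel(
                name="claude-instant-1.2", provider_name="writesonic"
            )
            chat = GPTRouter(models_priority_list=[claude])
            chat.invoke("Tell me a joke.")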
"""
client: Any = Field(default=None, exclude=True) #: :meta private:
models_priority_list: List[GPTRouterModel] = Field(min_items=1)
gpt_router_api_base: str = Field(default=None)
"""WriteSonic GPTRouter custom endpoint"""
gpt_router_api_key: Optional[str] = None
"""WriteSonic GPTRouter API Key"""
temperature: float = 0.7
"""What sampling temperature to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
max_retries: int = 4
"""Maximum number of retries to make when generating."""
streaming: bool = False
"""Whether to stream the results or not."""
n: int = 1
"""Number of chat completions to generate for each prompt."""
max_tokens: int = 256
@root_validator(allow_reuse=True)
def validate_environment(cls, values: Dict) -> Dict:
values["gpt_router_api_base"] = get_from_dict_or_env(
values,
"gpt_router_api_base",
"GPT_ROUTER_API_BASE",
DEFAULT_API_BASE_URL,
)
values["gpt_router_api_key"] = get_from_dict_or_env(
values,
"gpt_router_api_key",
"GPT_ROUTER_API_KEY",
)
try:
from gpt_router.client import GPTRouterClient
except ImportError:
raise GPTRouterException(
"Could not import GPTRouter python package. "
"Please install it with `pip install GPTRouter`."
)
gpt_router_client = GPTRouterClient(
values["gpt_router_api_base"], values["gpt_router_api_key"]
)
values["client"] = gpt_router_client
return values
@property
def lc_secrets(self) -> Dict[str, str]:
return {
"gpt_router_api_key": "GPT_ROUTER_API_KEY",
}
@property
def lc_serializable(self) -> bool:
return True
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "gpt-router-chat"
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {
**{"models_priority_list": self.models_priority_list},
**self._default_params,
}
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling GPTRouter API."""
return {
"max_tokens": self.max_tokens,
"stream": self.streaming,
"n": self.n,
"temperature": self.temperature,
**self.model_kwargs,
}
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": False}
response = completion_with_retry(
self,
messages=message_dicts,
models_priority_list=self.models_priority_list,
run_manager=run_manager,
**params,
)
return self._create_chat_result(response)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._astream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return await agenerate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": False}
response = await acompletion_with_retry(
self,
messages=message_dicts,
models_priority_list=self.models_priority_list,
run_manager=run_manager,
**params,
)
return self._create_chat_result(response)
def _create_chat_generation_chunk(
self, data: Mapping[str, Any], default_chunk_class
):
chunk = _convert_delta_to_message_chunk(
{"content": data.get("text", "")}, default_chunk_class
)
finish_reason = data.get("finish_reason")
generation_info = (
dict(finish_reason=finish_reason) if finish_reason is not None else None
)
default_chunk_class = chunk.__class__
chunk = ChatGenerationChunk(message=chunk, generation_info=generation_info)
return chunk, default_chunk_class
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
default_chunk_class = AIMessageChunk
generator_response = completion_with_retry(
self,
messages=message_dicts,
models_priority_list=self.models_priority_list,
run_manager=run_manager,
**params,
)
for chunk in generator_response:
if chunk.event != "update":
continue
chunk, default_chunk_class = self._create_chat_generation_chunk(
chunk.data, default_chunk_class
)
yield chunk
if run_manager:
run_manager.on_llm_new_token(
token=chunk.message.content, chunk=chunk.message
)
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
default_chunk_class = AIMessageChunk
generator_response = acompletion_with_retry(
self,
messages=message_dicts,
models_priority_list=self.models_priority_list,
run_manager=run_manager,
**params,
)
async for chunk in await generator_response:
if chunk.event != "update":
continue
chunk, default_chunk_class = self._create_chat_generation_chunk(
chunk.data, default_chunk_class
)
yield chunk
if run_manager:
await run_manager.on_llm_new_token(
token=chunk.message.content, chunk=chunk.message
)
def _create_message_dicts(
self, messages: List[BaseMessage], stop: Optional[List[str]]
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
params = self._default_params
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
message_dicts = [convert_message_to_dict(m) for m in messages]
return message_dicts, params
def _create_chat_result(self, response: GenerationResponse) -> ChatResult:
generations = []
for res in response.choices:
message = convert_dict_to_message(
{
"role": "assistant",
"content": res.text,
}
)
gen = ChatGeneration(
message=message,
generation_info=dict(finish_reason=res.finish_reason),
)
generations.append(gen)
llm_output = {"token_usage": response.meta, "model": response.model}
return ChatResult(generations=generations, llm_output=llm_output)
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~mosaicml.py | from typing import Any, Dict, List, Mapping, Optional
import requests
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.pydantic_v1 import Extra, root_validator
from libs.core.langchain_core.utils import get_from_dict_or_env
from langchain_community.llms.utils import enforce_stop_tokens
INSTRUCTION_KEY = "### Instruction:"
RESPONSE_KEY = "### Response:"
INTRO_BLURB = (
"Below is an instruction that describes a task. "
"Write a response that appropriately completes the request."
)
PROMPT_FOR_GENERATION_FORMAT = """{intro}
{instruction_key}
{instruction}
{response_key}
""".format(
intro=INTRO_BLURB,
instruction_key=INSTRUCTION_KEY,
instruction="{instruction}",
response_key=RESPONSE_KEY,
)
class MosaicML(LLM):
"""MosaicML LLM service.
To use, you should have the
environment variable ``MOSAICML_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain_community.llms import MosaicML
endpoint_url = (
"https://models.hosted-on.mosaicml.hosting/mpt-7b-instruct/v1/predict"
)
mosaic_llm = MosaicML(
endpoint_url=endpoint_url,
mosaicml_api_token="my-api-key"
)
"""
endpoint_url: str = (
"https://models.hosted-on.mosaicml.hosting/mpt-7b-instruct/v1/predict"
)
"""Endpoint URL to use."""
inject_instruction_format: bool = False
"""Whether to inject the instruction format into the prompt."""
model_kwargs: Optional[dict] = None
"""Keyword arguments to pass to the model."""
retry_sleep: float = 1.0
"""How long to try sleeping for if a rate limit is encountered"""
mosaicml_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
mosaicml_api_token = get_from_dict_or_env(
values, "mosaicml_api_token", "MOSAICML_API_TOKEN"
)
values["mosaicml_api_token"] = mosaicml_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "mosaic"
def _transform_prompt(self, prompt: str) -> str:
"""Transform prompt."""
if self.inject_instruction_format:
prompt = PROMPT_FOR_GENERATION_FORMAT.format(
instruction=prompt,
)
return prompt
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
is_retry: bool = False,
**kwargs: Any,
) -> str:
"""Call out to a MosaicML LLM inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = mosaic_llm("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
prompt = self._transform_prompt(prompt)
payload = {"inputs": [prompt]}
payload.update(_model_kwargs)
payload.update(kwargs)
# HTTP headers for authorization
headers = {
"Authorization": f"{self.mosaicml_api_token}",
"Content-Type": "application/json",
}
# send request
try:
response = requests.post(self.endpoint_url, headers=headers, json=payload)
except requests.exceptions.RequestException as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
try:
if response.status_code == 429:
if not is_retry:
import time
time.sleep(self.retry_sleep)
return self._call(prompt, stop, run_manager, is_retry=True)
raise ValueError(
f"Error raised by inference API: rate limit exceeded.\nResponse: "
f"{response.text}"
)
parsed_response = response.json()
# The inference API has changed a couple of times, so we add some handling
# to be robust to multiple response formats.
if isinstance(parsed_response, dict):
output_keys = ["data", "output", "outputs"]
for key in output_keys:
if key in parsed_response:
output_item = parsed_response[key]
break
else:
raise ValueError(
f"No valid key ({', '.join(output_keys)}) in response:"
f" {parsed_response}"
)
if isinstance(output_item, list):
text = output_item[0]
else:
text = output_item
else:
raise ValueError(f"Unexpected response type: {parsed_response}")
# Older versions of the API include the input in the output response
if text.startswith(prompt):
text = text[len(prompt) :]
except requests.exceptions.JSONDecodeError as e:
raise ValueError(
f"Error raised by inference API: {e}.\nResponse: {response.text}"
)
# TODO: replace when MosaicML supports custom stop tokens natively
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
| [
"PLACEHOLDER\n### Instruction:\n{instruction}\n### Response:\n"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~vectorstores~atlas.py | from __future__ import annotations
import logging
import uuid
from typing import Any, Iterable, List, Optional, Type
import numpy as np
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.vectorstores import VectorStore
logger = logging.getLogger(__name__)
class AtlasDB(VectorStore):
"""`Atlas` vector store.
    Atlas is `Nomic's` neural database and `rhizomatic` instrument.
To use, you should have the ``nomic`` python package installed.
Example:
.. code-block:: python
from langchain_community.vectorstores import AtlasDB
from langchain_community.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vectorstore = AtlasDB("my_project", embeddings.embed_query)
"""
_ATLAS_DEFAULT_ID_FIELD = "atlas_id"
def __init__(
self,
name: str,
embedding_function: Optional[Embeddings] = None,
api_key: Optional[str] = None,
description: str = "A description for your project",
is_public: bool = True,
reset_project_if_exists: bool = False,
) -> None:
"""
Initialize the Atlas Client
Args:
name (str): The name of your project. If the project already exists,
it will be loaded.
embedding_function (Optional[Embeddings]): An optional function used for
embedding your data. If None, data will be embedded with
Nomic's embed model.
api_key (str): Your nomic API key
description (str): A description for your project.
is_public (bool): Whether your project is publicly accessible.
True by default.
reset_project_if_exists (bool): Whether to reset this project if it
already exists. Default False.
Generally useful during development and testing.
"""
try:
import nomic
from nomic import AtlasProject
except ImportError:
raise ImportError(
"Could not import nomic python package. "
"Please install it with `pip install nomic`."
)
if api_key is None:
raise ValueError("No API key provided. Sign up at atlas.nomic.ai!")
nomic.login(api_key)
self._embedding_function = embedding_function
modality = "text"
if self._embedding_function is not None:
modality = "embedding"
# Check if the project exists, create it if not
self.project = AtlasProject(
name=name,
description=description,
modality=modality,
is_public=is_public,
reset_project_if_exists=reset_project_if_exists,
unique_id_field=AtlasDB._ATLAS_DEFAULT_ID_FIELD,
)
self.project._latest_project_state()
@property
def embeddings(self) -> Optional[Embeddings]:
return self._embedding_function
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
refresh: bool = True,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts (Iterable[str]): Texts to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
ids (Optional[List[str]]): An optional list of ids.
refresh(bool): Whether or not to refresh indices with the updated data.
Default True.
Returns:
List[str]: List of IDs of the added texts.
"""
if (
metadatas is not None
and len(metadatas) > 0
and "text" in metadatas[0].keys()
):
raise ValueError("Cannot accept key text in metadata!")
texts = list(texts)
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
# Embedding upload case
if self._embedding_function is not None:
_embeddings = self._embedding_function.embed_documents(texts)
embeddings = np.stack(_embeddings)
if metadatas is None:
data = [
{AtlasDB._ATLAS_DEFAULT_ID_FIELD: ids[i], "text": texts[i]}
for i, _ in enumerate(texts)
]
else:
for i in range(len(metadatas)):
metadatas[i][AtlasDB._ATLAS_DEFAULT_ID_FIELD] = ids[i]
metadatas[i]["text"] = texts[i]
data = metadatas
self.project._validate_map_data_inputs(
[], id_field=AtlasDB._ATLAS_DEFAULT_ID_FIELD, data=data
)
with self.project.wait_for_project_lock():
self.project.add_embeddings(embeddings=embeddings, data=data)
# Text upload case
else:
if metadatas is None:
data = [
{"text": text, AtlasDB._ATLAS_DEFAULT_ID_FIELD: ids[i]}
for i, text in enumerate(texts)
]
else:
for i, text in enumerate(texts):
metadatas[i]["text"] = texts
metadatas[i][AtlasDB._ATLAS_DEFAULT_ID_FIELD] = ids[i]
data = metadatas
self.project._validate_map_data_inputs(
[], id_field=AtlasDB._ATLAS_DEFAULT_ID_FIELD, data=data
)
with self.project.wait_for_project_lock():
self.project.add_text(data)
if refresh:
if len(self.project.indices) > 0:
with self.project.wait_for_project_lock():
self.project.rebuild_maps()
return ids
def create_index(self, **kwargs: Any) -> Any:
"""Creates an index in your project.
See
https://docs.nomic.ai/atlas_api.html#nomic.project.AtlasProject.create_index
for full detail.
"""
with self.project.wait_for_project_lock():
return self.project.create_index(**kwargs)
def similarity_search(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Document]:
"""Run similarity search with AtlasDB
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
Returns:
List[Document]: List of documents most similar to the query text.
"""
if self._embedding_function is None:
raise NotImplementedError(
"AtlasDB requires an embedding_function for text similarity search!"
)
_embedding = self._embedding_function.embed_documents([query])[0]
embedding = np.array(_embedding).reshape(1, -1)
with self.project.wait_for_project_lock():
neighbors, _ = self.project.projections[0].vector_search(
queries=embedding, k=k
)
data = self.project.get_data(ids=neighbors[0])
docs = [
Document(page_content=data[i]["text"], metadata=data[i])
            for i, neighbor in enumerate(neighbors[0])
]
return docs
@classmethod
def from_texts(
cls: Type[AtlasDB],
texts: List[str],
embedding: Optional[Embeddings] = None,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
name: Optional[str] = None,
api_key: Optional[str] = None,
description: str = "A description for your project",
is_public: bool = True,
reset_project_if_exists: bool = False,
index_kwargs: Optional[dict] = None,
**kwargs: Any,
) -> AtlasDB:
"""Create an AtlasDB vectorstore from a raw documents.
Args:
texts (List[str]): The list of texts to ingest.
name (str): Name of the project to create.
api_key (str): Your nomic API key,
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
ids (Optional[List[str]]): Optional list of document IDs. If None,
ids will be auto created
description (str): A description for your project.
is_public (bool): Whether your project is publicly accessible.
True by default.
reset_project_if_exists (bool): Whether to reset this project if it
already exists. Default False.
Generally useful during development and testing.
index_kwargs (Optional[dict]): Dict of kwargs for index creation.
See https://docs.nomic.ai/atlas_api.html
Returns:
AtlasDB: Nomic's neural database and finest rhizomatic instrument
"""
if name is None or api_key is None:
raise ValueError("`name` and `api_key` cannot be None.")
# Inject relevant kwargs
all_index_kwargs = {"name": name + "_index", "indexed_field": "text"}
if index_kwargs is not None:
for k, v in index_kwargs.items():
all_index_kwargs[k] = v
# Build project
atlasDB = cls(
name,
embedding_function=embedding,
api_key=api_key,
description="A description for your project",
is_public=is_public,
reset_project_if_exists=reset_project_if_exists,
)
with atlasDB.project.wait_for_project_lock():
atlasDB.add_texts(texts=texts, metadatas=metadatas, ids=ids)
atlasDB.create_index(**all_index_kwargs)
return atlasDB
@classmethod
def from_documents(
cls: Type[AtlasDB],
documents: List[Document],
embedding: Optional[Embeddings] = None,
ids: Optional[List[str]] = None,
name: Optional[str] = None,
api_key: Optional[str] = None,
persist_directory: Optional[str] = None,
description: str = "A description for your project",
is_public: bool = True,
reset_project_if_exists: bool = False,
index_kwargs: Optional[dict] = None,
**kwargs: Any,
) -> AtlasDB:
"""Create an AtlasDB vectorstore from a list of documents.
Args:
name (str): Name of the collection to create.
api_key (str): Your nomic API key,
documents (List[Document]): List of documents to add to the vectorstore.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
ids (Optional[List[str]]): Optional list of document IDs. If None,
ids will be auto created
description (str): A description for your project.
is_public (bool): Whether your project is publicly accessible.
True by default.
reset_project_if_exists (bool): Whether to reset this project if
it already exists. Default False.
Generally useful during development and testing.
index_kwargs (Optional[dict]): Dict of kwargs for index creation.
See https://docs.nomic.ai/atlas_api.html
Returns:
AtlasDB: Nomic's neural database and finest rhizomatic instrument
"""
if name is None or api_key is None:
raise ValueError("`name` and `api_key` cannot be None.")
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return cls.from_texts(
name=name,
api_key=api_key,
texts=texts,
embedding=embedding,
metadatas=metadatas,
ids=ids,
description=description,
is_public=is_public,
reset_project_if_exists=reset_project_if_exists,
index_kwargs=index_kwargs,
)
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~chat_models~anyscale.py | """Anyscale Endpoints chat wrapper. Relies heavily on ChatOpenAI."""
from __future__ import annotations
import logging
import os
import sys
from typing import TYPE_CHECKING, Dict, Optional, Set
import requests
from libs.core.langchain_core.messages import BaseMessage
from libs.core.langchain_core.pydantic_v1 import Field, SecretStr, root_validator
from libs.core.langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from langchain_community.adapters.openai import convert_message_to_dict
from langchain_community.chat_models.openai import (
ChatOpenAI,
_import_tiktoken,
)
from langchain_community.utils.openai import is_openai_v1
if TYPE_CHECKING:
import tiktoken
logger = logging.getLogger(__name__)
DEFAULT_API_BASE = "https://api.endpoints.anyscale.com/v1"
DEFAULT_MODEL = "meta-llama/Llama-2-7b-chat-hf"
class ChatAnyscale(ChatOpenAI):
"""`Anyscale` Chat large language models.
See https://www.anyscale.com/ for information about Anyscale.
To use, you should have the ``openai`` python package installed, and the
environment variable ``ANYSCALE_API_KEY`` set with your API key.
Alternatively, you can use the anyscale_api_key keyword argument.
Any parameters that are valid to be passed to the `openai.create` call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain_community.chat_models import ChatAnyscale
chat = ChatAnyscale(model_name="meta-llama/Llama-2-7b-chat-hf")
"""
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "anyscale-chat"
@property
def lc_secrets(self) -> Dict[str, str]:
return {"anyscale_api_key": "ANYSCALE_API_KEY"}
@classmethod
def is_lc_serializable(cls) -> bool:
return False
anyscale_api_key: SecretStr
"""AnyScale Endpoints API keys."""
model_name: str = Field(default=DEFAULT_MODEL, alias="model")
"""Model name to use."""
anyscale_api_base: str = Field(default=DEFAULT_API_BASE)
"""Base URL path for API requests,
leave blank if not using a proxy or service emulator."""
anyscale_proxy: Optional[str] = None
"""To support explicit proxy for Anyscale."""
available_models: Optional[Set[str]] = None
"""Available models from Anyscale API."""
@staticmethod
def get_available_models(
anyscale_api_key: Optional[str] = None,
anyscale_api_base: str = DEFAULT_API_BASE,
) -> Set[str]:
"""Get available models from Anyscale API."""
try:
anyscale_api_key = anyscale_api_key or os.environ["ANYSCALE_API_KEY"]
except KeyError as e:
raise ValueError(
"Anyscale API key must be passed as keyword argument or "
"set in environment variable ANYSCALE_API_KEY.",
) from e
models_url = f"{anyscale_api_base}/models"
models_response = requests.get(
models_url,
headers={
"Authorization": f"Bearer {anyscale_api_key}",
},
)
if models_response.status_code != 200:
raise ValueError(
f"Error getting models from {models_url}: "
f"{models_response.status_code}",
)
return {model["id"] for model in models_response.json()["data"]}
@root_validator(pre=True)
def validate_environment_override(cls, values: dict) -> dict:
"""Validate that api key and python package exists in environment."""
values["openai_api_key"] = get_from_dict_or_env(
values,
"anyscale_api_key",
"ANYSCALE_API_KEY",
)
values["anyscale_api_key"] = convert_to_secret_str(
get_from_dict_or_env(
values,
"anyscale_api_key",
"ANYSCALE_API_KEY",
)
)
values["openai_api_base"] = get_from_dict_or_env(
values,
"anyscale_api_base",
"ANYSCALE_API_BASE",
default=DEFAULT_API_BASE,
)
values["openai_proxy"] = get_from_dict_or_env(
values,
"anyscale_proxy",
"ANYSCALE_PROXY",
default="",
)
try:
import openai
except ImportError as e:
raise ValueError(
"Could not import openai python package. "
"Please install it with `pip install openai`.",
) from e
try:
if is_openai_v1():
client_params = {
"api_key": values["openai_api_key"],
"base_url": values["openai_api_base"],
# To do: future support
# "organization": values["openai_organization"],
# "timeout": values["request_timeout"],
# "max_retries": values["max_retries"],
# "default_headers": values["default_headers"],
# "default_query": values["default_query"],
# "http_client": values["http_client"],
}
values["client"] = openai.OpenAI(**client_params).chat.completions
else:
values["client"] = openai.ChatCompletion
except AttributeError as exc:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`.",
) from exc
if "model_name" not in values.keys():
values["model_name"] = DEFAULT_MODEL
model_name = values["model_name"]
available_models = cls.get_available_models(
values["openai_api_key"],
values["openai_api_base"],
)
if model_name not in available_models:
raise ValueError(
f"Model name {model_name} not found in available models: "
f"{available_models}.",
)
values["available_models"] = available_models
return values
def _get_encoding_model(self) -> tuple[str, tiktoken.Encoding]:
tiktoken_ = _import_tiktoken()
if self.tiktoken_model_name is not None:
model = self.tiktoken_model_name
else:
model = self.model_name
# Returns the number of tokens used by a list of messages.
try:
encoding = tiktoken_.encoding_for_model("gpt-3.5-turbo-0301")
except KeyError:
logger.warning("Warning: model not found. Using cl100k_base encoding.")
model = "cl100k_base"
encoding = tiktoken_.get_encoding(model)
return model, encoding
def get_num_tokens_from_messages(self, messages: list[BaseMessage]) -> int:
"""Calculate num tokens with tiktoken package.
Official documentation: https://github.com/openai/openai-cookbook/blob/
main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb"""
if sys.version_info[1] <= 7:
return super().get_num_tokens_from_messages(messages)
model, encoding = self._get_encoding_model()
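        # Token-overhead constants from the OpenAI cookbook's counting recipe
        # for gpt-3.5/gpt-4 style chat formats; assumed here to be a reasonable
        # approximation for Anyscale-hosted chat models.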
tokens_per_message = 3
tokens_per_name = 1
num_tokens = 0
messages_dict = [convert_message_to_dict(m) for m in messages]
for message in messages_dict:
num_tokens += tokens_per_message
for key, value in message.items():
# Cast str(value) in case the message value is not a string
# This occurs with function messages
num_tokens += len(encoding.encode(str(value)))
if key == "name":
num_tokens += tokens_per_name
# every reply is primed with <im_start>assistant
num_tokens += 3
return num_tokens
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~merge.py | from typing import Iterator, List
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class MergedDataLoader(BaseLoader):
"""Merge documents from a list of loaders"""
def __init__(self, loaders: List):
"""Initialize with a list of loaders"""
self.loaders = loaders
def lazy_load(self) -> Iterator[Document]:
"""Lazy load docs from each individual loader."""
for loader in self.loaders:
# Check if lazy_load is implemented
try:
data = loader.lazy_load()
except NotImplementedError:
data = loader.load()
for document in data:
yield document
def load(self) -> List[Document]:
"""Load docs."""
return list(self.lazy_load())
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~unit_tests~document_transformers~test_beautiful_soup_transformer.py | """Unit tests for beautiful soup document transformer."""
import pytest
from libs.core.langchain_core.documents import Document
from langchain_community.document_transformers import BeautifulSoupTransformer
@pytest.mark.requires("bs4")
def test_transform_empty_html() -> None:
bs_transformer = BeautifulSoupTransformer()
empty_html = "<html></html>"
documents = [Document(page_content=empty_html)]
docs_transformed = bs_transformer.transform_documents(documents)
assert docs_transformed[0].page_content == ""
@pytest.mark.requires("bs4")
def test_extract_paragraphs() -> None:
bs_transformer = BeautifulSoupTransformer()
paragraphs_html = (
"<html><h1>Header</h1><p>First paragraph.</p>"
"<p>Second paragraph.</p><h1>Ignore at end</h1></html>"
)
documents = [Document(page_content=paragraphs_html)]
docs_transformed = bs_transformer.transform_documents(documents)
assert docs_transformed[0].page_content == "First paragraph. Second paragraph."
@pytest.mark.requires("bs4")
def test_strip_whitespace() -> None:
bs_transformer = BeautifulSoupTransformer()
paragraphs_html = (
"<html><h1>Header</h1><p><span>First</span> paragraph.</p>"
"<p>Second paragraph. </p></html>"
)
documents = [Document(page_content=paragraphs_html)]
docs_transformed = bs_transformer.transform_documents(documents)
assert docs_transformed[0].page_content == "First paragraph. Second paragraph."
@pytest.mark.requires("bs4")
def test_extract_html() -> None:
bs_transformer = BeautifulSoupTransformer()
paragraphs_html = (
"<html>Begin of html tag"
"<h1>Header</h1>"
"<p>First paragraph.</p>"
"Middle of html tag"
"<p>Second paragraph.</p>"
"End of html tag"
"</html>"
)
documents = [Document(page_content=paragraphs_html)]
docs_transformed = bs_transformer.transform_documents(
documents, tags_to_extract=["html", "p"]
)
assert docs_transformed[0].page_content == (
"Begin of html tag "
"Header First paragraph. "
"Middle of html tag "
"Second paragraph. "
"End of html tag"
)
@pytest.mark.requires("bs4")
def test_remove_style() -> None:
bs_transformer = BeautifulSoupTransformer()
with_style_html = (
"<html><style>my_funky_style</style><p>First paragraph.</p></html>"
)
documents = [Document(page_content=with_style_html)]
docs_transformed = bs_transformer.transform_documents(
documents, tags_to_extract=["html"]
)
assert docs_transformed[0].page_content == "First paragraph."
@pytest.mark.requires("bs4")
def test_remove_nested_tags() -> None:
"""
If a tag_to_extract is _inside_ an unwanted_tag, it should be removed
    (e.g. a <p> inside a <table> if <table> is unwanted).
If an unwanted tag is _inside_ a tag_to_extract, it should be removed,
but the rest of the tag_to_extract should stay.
This means that "unwanted_tags" have a higher "priority" than "tags_to_extract".
"""
bs_transformer = BeautifulSoupTransformer()
with_style_html = (
"<html><style>my_funky_style</style>"
"<table><td><p>First paragraph, inside a table.</p></td></table>"
"<p>Second paragraph<table><td> with a cell </td></table>.</p>"
"</html>"
)
documents = [Document(page_content=with_style_html)]
docs_transformed = bs_transformer.transform_documents(
documents, unwanted_tags=["script", "style", "table"]
)
assert docs_transformed[0].page_content == "Second paragraph."
@pytest.mark.requires("bs4")
def test_remove_unwanted_lines() -> None:
bs_transformer = BeautifulSoupTransformer()
with_lines_html = "<html>\n\n<p>First \n\n paragraph.</p>\n</html>\n\n"
documents = [Document(page_content=with_lines_html)]
docs_transformed = bs_transformer.transform_documents(documents, remove_lines=True)
assert docs_transformed[0].page_content == "First paragraph."
@pytest.mark.requires("bs4")
def test_do_not_remove_repeated_content() -> None:
bs_transformer = BeautifulSoupTransformer()
with_lines_html = "<p>1\n1\n1\n1</p>"
documents = [Document(page_content=with_lines_html)]
docs_transformed = bs_transformer.transform_documents(documents)
assert docs_transformed[0].page_content == "1 1 1 1"
@pytest.mark.requires("bs4")
def test_extract_nested_tags() -> None:
bs_transformer = BeautifulSoupTransformer()
nested_html = (
"<html><div class='some_style'>"
"<p><span>First</span> paragraph.</p>"
"<p>Second <div>paragraph.</div></p>"
"<p><p>Third paragraph.</p></p>"
"</div></html>"
)
documents = [Document(page_content=nested_html)]
docs_transformed = bs_transformer.transform_documents(documents)
assert (
docs_transformed[0].page_content
== "First paragraph. Second paragraph. Third paragraph."
)
@pytest.mark.requires("bs4")
def test_extract_more_nested_tags() -> None:
bs_transformer = BeautifulSoupTransformer()
nested_html = (
"<html><div class='some_style'>"
"<p><span>First</span> paragraph.</p>"
"<p>Second paragraph.</p>"
"<p>Third paragraph with a list:"
"<ul>"
"<li>First list item.</li>"
"<li>Second list item.</li>"
"</ul>"
"</p>"
"<p>Fourth paragraph.</p>"
"</div></html>"
)
documents = [Document(page_content=nested_html)]
docs_transformed = bs_transformer.transform_documents(documents)
assert docs_transformed[0].page_content == (
"First paragraph. Second paragraph. "
"Third paragraph with a list: "
"First list item. Second list item. "
"Fourth paragraph."
)
@pytest.mark.requires("bs4")
def test_transform_keeps_order() -> None:
bs_transformer = BeautifulSoupTransformer()
multiple_tags_html = (
"<h1>First heading.</h1>"
"<p>First paragraph.</p>"
"<h1>Second heading.</h1>"
"<p>Second paragraph.</p>"
)
documents = [Document(page_content=multiple_tags_html)]
# Order of "p" and "h1" in the "tags_to_extract" parameter is NOT important here:
# it will keep the order of the original HTML.
docs_transformed_p_then_h1 = bs_transformer.transform_documents(
documents, tags_to_extract=["p", "h1"]
)
assert (
docs_transformed_p_then_h1[0].page_content
== "First heading. First paragraph. Second heading. Second paragraph."
)
# Recreating `documents` because transform_documents() modifies it.
documents = [Document(page_content=multiple_tags_html)]
# changing the order of "h1" and "p" in "tags_to_extract" does NOT flip the order
# of the extracted tags:
docs_transformed_h1_then_p = bs_transformer.transform_documents(
documents, tags_to_extract=["h1", "p"]
)
assert (
docs_transformed_h1_then_p[0].page_content
== "First heading. First paragraph. Second heading. Second paragraph."
)
@pytest.mark.requires("bs4")
def test_extracts_href() -> None:
bs_transformer = BeautifulSoupTransformer()
multiple_tags_html = (
"<h1>First heading.</h1>"
"<p>First paragraph with an <a href='http://example.com'>example</a></p>"
"<p>Second paragraph with an <a>a tag without href</a></p>"
)
documents = [Document(page_content=multiple_tags_html)]
docs_transformed = bs_transformer.transform_documents(
documents, tags_to_extract=["p"]
)
assert docs_transformed[0].page_content == (
"First paragraph with an example (http://example.com) "
"Second paragraph with an a tag without href"
)
@pytest.mark.requires("bs4")
def test_invalid_html() -> None:
bs_transformer = BeautifulSoupTransformer()
invalid_html_1 = "<html><h1>First heading."
invalid_html_2 = "<html 1234 xyz"
documents = [
Document(page_content=invalid_html_1),
Document(page_content=invalid_html_2),
]
docs_transformed = bs_transformer.transform_documents(
documents, tags_to_extract=["h1"]
)
assert docs_transformed[0].page_content == "First heading."
assert docs_transformed[1].page_content == ""
| [] |
2024-01-10 | mth93/langchain | libs~core~langchain_core~prompts~few_shot.py | """Prompt template that contains few shot examples."""
from __future__ import annotations
from pathlib import Path
from typing import Any, Dict, List, Literal, Optional, Union
from libs.core.langchain_core.messages import BaseMessage, get_buffer_string
from libs.core.langchain_core.prompts.chat import (
BaseChatPromptTemplate,
BaseMessagePromptTemplate,
)
from libs.core.langchain_core.prompts.prompt import PromptTemplate
from libs.core.langchain_core.prompts.string import (
DEFAULT_FORMATTER_MAPPING,
StringPromptTemplate,
check_valid_template,
get_template_variables,
)
from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator
class _FewShotPromptTemplateMixin(BaseModel):
"""Prompt template that contains few shot examples."""
examples: Optional[List[dict]] = None
"""Examples to format into the prompt.
Either this or example_selector should be provided."""
example_selector: Any = None
"""ExampleSelector to choose the examples to format into the prompt.
Either this or examples should be provided."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def check_examples_and_selector(cls, values: Dict) -> Dict:
"""Check that one and only one of examples/example_selector are provided."""
examples = values.get("examples", None)
example_selector = values.get("example_selector", None)
if examples and example_selector:
raise ValueError(
"Only one of 'examples' and 'example_selector' should be provided"
)
if examples is None and example_selector is None:
raise ValueError(
"One of 'examples' and 'example_selector' should be provided"
)
return values
def _get_examples(self, **kwargs: Any) -> List[dict]:
"""Get the examples to use for formatting the prompt.
Args:
**kwargs: Keyword arguments to be passed to the example selector.
Returns:
List of examples.
"""
if self.examples is not None:
return self.examples
elif self.example_selector is not None:
return self.example_selector.select_examples(kwargs)
else:
raise ValueError(
"One of 'examples' and 'example_selector' should be provided"
)
class FewShotPromptTemplate(_FewShotPromptTemplateMixin, StringPromptTemplate):
"""Prompt template that contains few shot examples."""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether or not the class is serializable."""
return False
validate_template: bool = False
"""Whether or not to try validating the template."""
input_variables: List[str]
"""A list of the names of the variables the prompt template expects."""
example_prompt: PromptTemplate
"""PromptTemplate used to format an individual example."""
suffix: str
"""A prompt template string to put after the examples."""
example_separator: str = "\n\n"
"""String separator used to join the prefix, the examples, and suffix."""
prefix: str = ""
"""A prompt template string to put before the examples."""
template_format: Union[Literal["f-string"], Literal["jinja2"]] = "f-string"
"""The format of the prompt template. Options are: 'f-string', 'jinja2'."""
@root_validator()
def template_is_valid(cls, values: Dict) -> Dict:
"""Check that prefix, suffix, and input variables are consistent."""
if values["validate_template"]:
check_valid_template(
values["prefix"] + values["suffix"],
values["template_format"],
values["input_variables"] + list(values["partial_variables"]),
)
elif values.get("template_format"):
values["input_variables"] = [
var
for var in get_template_variables(
values["prefix"] + values["suffix"], values["template_format"]
)
if var not in values["partial_variables"]
]
return values
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
def format(self, **kwargs: Any) -> str:
"""Format the prompt with the inputs.
Args:
**kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
Example:
.. code-block:: python
prompt.format(variable1="foo")
"""
kwargs = self._merge_partial_and_user_variables(**kwargs)
# Get the examples to use.
examples = self._get_examples(**kwargs)
examples = [
{k: e[k] for k in self.example_prompt.input_variables} for e in examples
]
# Format the examples.
example_strings = [
self.example_prompt.format(**example) for example in examples
]
# Create the overall template.
pieces = [self.prefix, *example_strings, self.suffix]
template = self.example_separator.join([piece for piece in pieces if piece])
# Format the template with the input variables.
return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs)
@property
def _prompt_type(self) -> str:
"""Return the prompt type key."""
return "few_shot"
def save(self, file_path: Union[Path, str]) -> None:
if self.example_selector:
raise ValueError("Saving an example selector is not currently supported")
return super().save(file_path)
class FewShotChatMessagePromptTemplate(
BaseChatPromptTemplate, _FewShotPromptTemplateMixin
):
"""Chat prompt template that supports few-shot examples.
The high level structure of produced by this prompt template is a list of messages
consisting of prefix message(s), example message(s), and suffix message(s).
This structure enables creating a conversation with intermediate examples like:
System: You are a helpful AI Assistant
Human: What is 2+2?
AI: 4
Human: What is 2+3?
AI: 5
Human: What is 4+4?
This prompt template can be used to generate a fixed list of examples or else
to dynamically select examples based on the input.
Examples:
Prompt template with a fixed list of examples (matching the sample
conversation above):
.. code-block:: python
from libs.core.langchain_core.prompts import (
FewShotChatMessagePromptTemplate,
ChatPromptTemplate
)
examples = [
{"input": "2+2", "output": "4"},
{"input": "2+3", "output": "5"},
]
example_prompt = ChatPromptTemplate.from_messages(
[('human', '{input}'), ('ai', '{output}')]
)
few_shot_prompt = FewShotChatMessagePromptTemplate(
examples=examples,
# This is a prompt template used to format each individual example.
example_prompt=example_prompt,
)
final_prompt = ChatPromptTemplate.from_messages(
[
('system', 'You are a helpful AI Assistant'),
few_shot_prompt,
('human', '{input}'),
]
)
final_prompt.format(input="What is 4+4?")
Prompt template with dynamically selected examples:
.. code-block:: python
from libs.core.langchain_core.prompts import SemanticSimilarityExampleSelector
from libs.core.langchain_core.embeddings import OpenAIEmbeddings
from libs.core.langchain_core.vectorstores import Chroma
examples = [
{"input": "2+2", "output": "4"},
{"input": "2+3", "output": "5"},
{"input": "2+4", "output": "6"},
# ...
]
to_vectorize = [
" ".join(example.values())
for example in examples
]
embeddings = OpenAIEmbeddings()
vectorstore = Chroma.from_texts(
to_vectorize, embeddings, metadatas=examples
)
example_selector = SemanticSimilarityExampleSelector(
vectorstore=vectorstore
)
from libs.core.langchain_core import SystemMessage
from libs.core.langchain_core.prompts import HumanMessagePromptTemplate
from libs.core.langchain_core.prompts.few_shot import FewShotChatMessagePromptTemplate
few_shot_prompt = FewShotChatMessagePromptTemplate(
# Which variable(s) will be passed to the example selector.
input_variables=["input"],
example_selector=example_selector,
# Define how each example will be formatted.
# In this case, each example will become 2 messages:
# 1 human, and 1 AI
example_prompt=(
HumanMessagePromptTemplate.from_template("{input}")
+ AIMessagePromptTemplate.from_template("{output}")
),
)
# Define the overall prompt.
final_prompt = (
SystemMessagePromptTemplate.from_template(
"You are a helpful AI Assistant"
)
+ few_shot_prompt
+ HumanMessagePromptTemplate.from_template("{input}")
)
# Show the prompt
print(final_prompt.format_messages(input="What's 3+3?"))
# Use within an LLM
from libs.core.langchain_core.chat_models import ChatAnthropic
chain = final_prompt | ChatAnthropic()
chain.invoke({"input": "What's 3+3?"})
"""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether or not the class is serializable."""
return False
input_variables: List[str] = Field(default_factory=list)
"""A list of the names of the variables the prompt template will use
to pass to the example_selector, if provided."""
example_prompt: Union[BaseMessagePromptTemplate, BaseChatPromptTemplate]
"""The class to format each example."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
"""Format kwargs into a list of messages.
Args:
**kwargs: keyword arguments to use for filling in templates in messages.
Returns:
A list of formatted messages with all template variables filled in.
"""
# Get the examples to use.
examples = self._get_examples(**kwargs)
examples = [
{k: e[k] for k in self.example_prompt.input_variables} for e in examples
]
# Format the examples.
messages = [
message
for example in examples
for message in self.example_prompt.format_messages(**example)
]
return messages
def format(self, **kwargs: Any) -> str:
"""Format the prompt with inputs generating a string.
Use this method to generate a string representation of a prompt consisting
of chat messages.
Useful for feeding into a string based completion language model or debugging.
Args:
**kwargs: keyword arguments to use for formatting.
Returns:
A string representation of the prompt
"""
messages = self.format_messages(**kwargs)
return get_buffer_string(messages)
| [
"f-string",
"False"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~utilities~dalle_image_generator.py | """Utility that calls OpenAI's Dall-E Image Generator."""
import logging
import os
from typing import Any, Dict, Mapping, Optional, Tuple, Union
from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator
from libs.core.langchain_core.utils import (
get_from_dict_or_env,
get_pydantic_field_names,
)
from langchain_community.utils.openai import is_openai_v1
logger = logging.getLogger(__name__)
class DallEAPIWrapper(BaseModel):
"""Wrapper for OpenAI's DALL-E Image Generator.
https://platform.openai.com/docs/guides/images/generations?context=node
Usage instructions:
1. `pip install openai`
2. save your OPENAI_API_KEY in an environment variable
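
    Example (minimal sketch; assumes ``OPENAI_API_KEY`` is set in the
    environment):
        .. code-block:: python

            from langchain_community.utilities.dalle_image_generator import (
                DallEAPIWrapper,
            )

            dalle = DallEAPIWrapper(model="dall-e-3", size="1024x1024")
            image_url = dalle.run("A photo of a cat wearing a space suit")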
"""
client: Any #: :meta private:
async_client: Any = Field(default=None, exclude=True) #: :meta private:
model_name: str = Field(default="dall-e-2", alias="model")
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
openai_api_key: Optional[str] = Field(default=None, alias="api_key")
"""Automatically inferred from env var `OPENAI_API_KEY` if not provided."""
openai_api_base: Optional[str] = Field(default=None, alias="base_url")
"""Base URL path for API requests, leave blank if not using a proxy or service
emulator."""
openai_organization: Optional[str] = Field(default=None, alias="organization")
"""Automatically inferred from env var `OPENAI_ORG_ID` if not provided."""
# to support explicit proxy for OpenAI
openai_proxy: Optional[str] = None
request_timeout: Union[float, Tuple[float, float], Any, None] = Field(
default=None, alias="timeout"
)
n: int = 1
"""Number of images to generate"""
size: str = "1024x1024"
"""Size of image to generate"""
separator: str = "\n"
"""Separator to use when multiple URLs are returned."""
quality: Optional[str] = "standard"
"""Quality of the image that will be generated"""
max_retries: int = 2
"""Maximum number of retries to make when generating."""
default_headers: Union[Mapping[str, str], None] = None
default_query: Union[Mapping[str, object], None] = None
# Configure a custom httpx client. See the
# [httpx documentation](https://www.python-httpx.org/api/#client) for more details.
http_client: Union[Any, None] = None
"""Optional httpx.Client."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["openai_api_key"] = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
# Check OPENAI_ORGANIZATION for backwards compatibility.
values["openai_organization"] = (
values["openai_organization"]
or os.getenv("OPENAI_ORG_ID")
or os.getenv("OPENAI_ORGANIZATION")
or None
)
values["openai_api_base"] = values["openai_api_base"] or os.getenv(
"OPENAI_API_BASE"
)
values["openai_proxy"] = get_from_dict_or_env(
values,
"openai_proxy",
"OPENAI_PROXY",
default="",
)
try:
import openai
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
if is_openai_v1():
client_params = {
"api_key": values["openai_api_key"],
"organization": values["openai_organization"],
"base_url": values["openai_api_base"],
"timeout": values["request_timeout"],
"max_retries": values["max_retries"],
"default_headers": values["default_headers"],
"default_query": values["default_query"],
"http_client": values["http_client"],
}
if not values.get("client"):
values["client"] = openai.OpenAI(**client_params).images
if not values.get("async_client"):
values["async_client"] = openai.AsyncOpenAI(**client_params).images
elif not values.get("client"):
values["client"] = openai.Image
return values
def run(self, query: str) -> str:
"""Run query through OpenAI and parse result."""
if is_openai_v1():
response = self.client.generate(
prompt=query,
n=self.n,
size=self.size,
model=self.model_name,
quality=self.quality,
)
image_urls = self.separator.join([item.url for item in response.data])
else:
response = self.client.create(
prompt=query, n=self.n, size=self.size, model=self.model_name
)
image_urls = self.separator.join([item["url"] for item in response["data"]])
return image_urls if image_urls else "No image was generated"
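# Illustrative usage sketch: it assumes the OPENAI_API_KEY environment variable is
# set, and the prompt text below is a made-up example.
if __name__ == "__main__":
    dalle = DallEAPIWrapper(n=1, size="1024x1024")
    print(dalle.run("A watercolor painting of a lighthouse at dusk"))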
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~tools~google_cloud~texttospeech.py | from __future__ import annotations
import tempfile
from typing import TYPE_CHECKING, Any, Optional
from libs.core.langchain_core.callbacks import CallbackManagerForToolRun
from libs.core.langchain_core.tools import BaseTool
from langchain_community.utilities.vertexai import get_client_info
if TYPE_CHECKING:
from google.cloud import texttospeech
def _import_google_cloud_texttospeech() -> Any:
try:
from google.cloud import texttospeech
except ImportError as e:
raise ImportError(
"Cannot import google.cloud.texttospeech, please install "
"`pip install google-cloud-texttospeech`."
) from e
return texttospeech
def _encoding_file_extension_map(encoding: texttospeech.AudioEncoding) -> Optional[str]:
texttospeech = _import_google_cloud_texttospeech()
ENCODING_FILE_EXTENSION_MAP = {
texttospeech.AudioEncoding.LINEAR16: ".wav",
texttospeech.AudioEncoding.MP3: ".mp3",
texttospeech.AudioEncoding.OGG_OPUS: ".ogg",
texttospeech.AudioEncoding.MULAW: ".wav",
texttospeech.AudioEncoding.ALAW: ".wav",
}
return ENCODING_FILE_EXTENSION_MAP.get(encoding)
class GoogleCloudTextToSpeechTool(BaseTool):
"""Tool that queries the Google Cloud Text to Speech API.
In order to set this up, follow instructions at:
https://cloud.google.com/text-to-speech/docs/before-you-begin
"""
name: str = "google_cloud_texttospeech"
description: str = (
"A wrapper around Google Cloud Text-to-Speech. "
"Useful for when you need to synthesize audio from text. "
"It supports multiple languages, including English, German, Polish, "
"Spanish, Italian, French, Portuguese, and Hindi. "
)
_client: Any
def __init__(self, **kwargs: Any) -> None:
"""Initializes private fields."""
texttospeech = _import_google_cloud_texttospeech()
super().__init__(**kwargs)
self._client = texttospeech.TextToSpeechClient(
client_info=get_client_info(module="text-to-speech")
)
def _run(
self,
input_text: str,
language_code: str = "en-US",
ssml_gender: Optional[texttospeech.SsmlVoiceGender] = None,
audio_encoding: Optional[texttospeech.AudioEncoding] = None,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
texttospeech = _import_google_cloud_texttospeech()
ssml_gender = ssml_gender or texttospeech.SsmlVoiceGender.NEUTRAL
audio_encoding = audio_encoding or texttospeech.AudioEncoding.MP3
response = self._client.synthesize_speech(
input=texttospeech.SynthesisInput(text=input_text),
voice=texttospeech.VoiceSelectionParams(
language_code=language_code, ssml_gender=ssml_gender
),
audio_config=texttospeech.AudioConfig(audio_encoding=audio_encoding),
)
suffix = _encoding_file_extension_map(audio_encoding)
with tempfile.NamedTemporaryFile(mode="bx", suffix=suffix, delete=False) as f:
f.write(response.audio_content)
return f.name
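# Illustrative usage sketch: it assumes Google Cloud credentials are configured as
# described in the class docstring, and the input text is a made-up example.
if __name__ == "__main__":
    tts = GoogleCloudTextToSpeechTool()
    audio_file = tts.run("Hello from LangChain!")
    print(f"Synthesized audio written to {audio_file}")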
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~vectorstores~tiledb.py | """Wrapper around TileDB vector database."""
from __future__ import annotations
import pickle
import random
import sys
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple
import numpy as np
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.vectorstores import VectorStore
from langchain_community.vectorstores.utils import maximal_marginal_relevance
INDEX_METRICS = frozenset(["euclidean"])
DEFAULT_METRIC = "euclidean"
DOCUMENTS_ARRAY_NAME = "documents"
VECTOR_INDEX_NAME = "vectors"
MAX_UINT64 = np.iinfo(np.dtype("uint64")).max
MAX_FLOAT_32 = np.finfo(np.dtype("float32")).max
MAX_FLOAT = sys.float_info.max
def dependable_tiledb_import() -> Any:
"""Import tiledb-vector-search if available, otherwise raise error."""
try:
import tiledb as tiledb
import tiledb.vector_search as tiledb_vs
except ImportError:
raise ValueError(
"Could not import tiledb-vector-search python package. "
"Please install it with `conda install -c tiledb tiledb-vector-search` "
"or `pip install tiledb-vector-search`"
)
return tiledb_vs, tiledb
def get_vector_index_uri_from_group(group: Any) -> str:
"""Get the URI of the vector index."""
return group[VECTOR_INDEX_NAME].uri
def get_documents_array_uri_from_group(group: Any) -> str:
"""Get the URI of the documents array from group.
Args:
group: TileDB group object.
Returns:
URI of the documents array.
"""
return group[DOCUMENTS_ARRAY_NAME].uri
def get_vector_index_uri(uri: str) -> str:
"""Get the URI of the vector index."""
return f"{uri}/{VECTOR_INDEX_NAME}"
def get_documents_array_uri(uri: str) -> str:
"""Get the URI of the documents array."""
return f"{uri}/{DOCUMENTS_ARRAY_NAME}"
class TileDB(VectorStore):
"""TileDB vector store.
To use, you should have the ``tiledb-vector-search`` python package installed.
Example:
.. code-block:: python
            from langchain_community.vectorstores import TileDB
embeddings = OpenAIEmbeddings()
db = TileDB(embeddings, index_uri, metric)
"""
def __init__(
self,
embedding: Embeddings,
index_uri: str,
metric: str,
*,
vector_index_uri: str = "",
docs_array_uri: str = "",
config: Optional[Mapping[str, Any]] = None,
timestamp: Any = None,
**kwargs: Any,
):
"""Initialize with necessary components."""
self.embedding = embedding
self.embedding_function = embedding.embed_query
self.index_uri = index_uri
self.metric = metric
self.config = config
tiledb_vs, tiledb = dependable_tiledb_import()
with tiledb.scope_ctx(ctx_or_config=config):
index_group = tiledb.Group(self.index_uri, "r")
self.vector_index_uri = (
vector_index_uri
if vector_index_uri != ""
else get_vector_index_uri_from_group(index_group)
)
self.docs_array_uri = (
docs_array_uri
if docs_array_uri != ""
else get_documents_array_uri_from_group(index_group)
)
index_group.close()
group = tiledb.Group(self.vector_index_uri, "r")
self.index_type = group.meta.get("index_type")
group.close()
self.timestamp = timestamp
if self.index_type == "FLAT":
self.vector_index = tiledb_vs.flat_index.FlatIndex(
uri=self.vector_index_uri,
config=self.config,
timestamp=self.timestamp,
**kwargs,
)
elif self.index_type == "IVF_FLAT":
self.vector_index = tiledb_vs.ivf_flat_index.IVFFlatIndex(
uri=self.vector_index_uri,
config=self.config,
timestamp=self.timestamp,
**kwargs,
)
@property
def embeddings(self) -> Optional[Embeddings]:
return self.embedding
def process_index_results(
self,
ids: List[int],
scores: List[float],
*,
k: int = 4,
filter: Optional[Dict[str, Any]] = None,
score_threshold: float = MAX_FLOAT,
) -> List[Tuple[Document, float]]:
"""Turns TileDB results into a list of documents and scores.
Args:
ids: List of indices of the documents in the index.
scores: List of distances of the documents in the index.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, Any]]): Filter by metadata. Defaults to None.
score_threshold: Optional, a floating point value to filter the
resulting set of retrieved docs
Returns:
List of Documents and scores.
"""
tiledb_vs, tiledb = dependable_tiledb_import()
docs = []
docs_array = tiledb.open(
self.docs_array_uri, "r", timestamp=self.timestamp, config=self.config
)
for idx, score in zip(ids, scores):
if idx == 0 and score == 0:
continue
if idx == MAX_UINT64 and score == MAX_FLOAT_32:
continue
doc = docs_array[idx]
if doc is None or len(doc["text"]) == 0:
raise ValueError(f"Could not find document for id {idx}, got {doc}")
pickled_metadata = doc.get("metadata")
result_doc = Document(page_content=str(doc["text"][0]))
if pickled_metadata is not None:
metadata = pickle.loads(
np.array(pickled_metadata.tolist()).astype(np.uint8).tobytes()
)
result_doc.metadata = metadata
if filter is not None:
filter = {
key: [value] if not isinstance(value, list) else value
for key, value in filter.items()
}
if all(
result_doc.metadata.get(key) in value
for key, value in filter.items()
):
docs.append((result_doc, score))
else:
docs.append((result_doc, score))
docs_array.close()
docs = [(doc, score) for doc, score in docs if score <= score_threshold]
return docs[:k]
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
*,
k: int = 4,
filter: Optional[Dict[str, Any]] = None,
fetch_k: int = 20,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, Any]]): Filter by metadata. Defaults to None.
fetch_k: (Optional[int]) Number of Documents to fetch before filtering.
Defaults to 20.
**kwargs: kwargs to be passed to similarity search. Can include:
nprobe: Optional, number of partitions to check if using IVF_FLAT index
score_threshold: Optional, a floating point value to filter the
resulting set of retrieved docs
Returns:
List of documents most similar to the query text and distance
in float for each. Lower score represents more similarity.
"""
if "score_threshold" in kwargs:
score_threshold = kwargs.pop("score_threshold")
else:
score_threshold = MAX_FLOAT
d, i = self.vector_index.query(
np.array([np.array(embedding).astype(np.float32)]).astype(np.float32),
k=k if filter is None else fetch_k,
**kwargs,
)
return self.process_index_results(
ids=i[0], scores=d[0], filter=filter, k=k, score_threshold=score_threshold
)
def similarity_search_with_score(
self,
query: str,
*,
k: int = 4,
filter: Optional[Dict[str, Any]] = None,
fetch_k: int = 20,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
fetch_k: (Optional[int]) Number of Documents to fetch before filtering.
Defaults to 20.
Returns:
List of documents most similar to the query text with
Distance as float. Lower score represents more similarity.
"""
embedding = self.embedding_function(query)
docs = self.similarity_search_with_score_by_vector(
embedding,
k=k,
filter=filter,
fetch_k=fetch_k,
**kwargs,
)
return docs
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Dict[str, Any]] = None,
fetch_k: int = 20,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
fetch_k: (Optional[int]) Number of Documents to fetch before filtering.
Defaults to 20.
Returns:
List of Documents most similar to the embedding.
"""
docs_and_scores = self.similarity_search_with_score_by_vector(
embedding,
k=k,
filter=filter,
fetch_k=fetch_k,
**kwargs,
)
return [doc for doc, _ in docs_and_scores]
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[Dict[str, Any]] = None,
fetch_k: int = 20,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
fetch_k: (Optional[int]) Number of Documents to fetch before filtering.
Defaults to 20.
Returns:
List of Documents most similar to the query.
"""
docs_and_scores = self.similarity_search_with_score(
query, k=k, filter=filter, fetch_k=fetch_k, **kwargs
)
return [doc for doc, _ in docs_and_scores]
def max_marginal_relevance_search_with_score_by_vector(
self,
embedding: List[float],
*,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs and their similarity scores selected using the maximal marginal
relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch before filtering to
pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents and similarity scores selected by maximal marginal
relevance and score for each.
"""
if "score_threshold" in kwargs:
score_threshold = kwargs.pop("score_threshold")
else:
score_threshold = MAX_FLOAT
scores, indices = self.vector_index.query(
np.array([np.array(embedding).astype(np.float32)]).astype(np.float32),
k=fetch_k if filter is None else fetch_k * 2,
**kwargs,
)
results = self.process_index_results(
ids=indices[0],
scores=scores[0],
filter=filter,
k=fetch_k if filter is None else fetch_k * 2,
score_threshold=score_threshold,
)
embeddings = [
self.embedding.embed_documents([doc.page_content])[0] for doc, _ in results
]
mmr_selected = maximal_marginal_relevance(
np.array([embedding], dtype=np.float32),
embeddings,
k=k,
lambda_mult=lambda_mult,
)
docs_and_scores = []
for i in mmr_selected:
docs_and_scores.append(results[i])
return docs_and_scores
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch before filtering to
pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
docs_and_scores = self.max_marginal_relevance_search_with_score_by_vector(
embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
**kwargs,
)
return [doc for doc, _ in docs_and_scores]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch before filtering (if needed) to
pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
embedding = self.embedding_function(query)
docs = self.max_marginal_relevance_search_by_vector(
embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
**kwargs,
)
return docs
@classmethod
def create(
cls,
index_uri: str,
index_type: str,
dimensions: int,
vector_type: np.dtype,
*,
metadatas: bool = True,
config: Optional[Mapping[str, Any]] = None,
) -> None:
tiledb_vs, tiledb = dependable_tiledb_import()
with tiledb.scope_ctx(ctx_or_config=config):
try:
tiledb.group_create(index_uri)
except tiledb.TileDBError as err:
raise err
group = tiledb.Group(index_uri, "w")
vector_index_uri = get_vector_index_uri(group.uri)
docs_uri = get_documents_array_uri(group.uri)
if index_type == "FLAT":
tiledb_vs.flat_index.create(
uri=vector_index_uri,
dimensions=dimensions,
vector_type=vector_type,
config=config,
)
elif index_type == "IVF_FLAT":
tiledb_vs.ivf_flat_index.create(
uri=vector_index_uri,
dimensions=dimensions,
vector_type=vector_type,
config=config,
)
group.add(vector_index_uri, name=VECTOR_INDEX_NAME)
# Create TileDB array to store Documents
# TODO add a Document store API to tiledb-vector-search to allow storing
# different types of objects and metadata in a more generic way.
dim = tiledb.Dim(
name="id",
domain=(0, MAX_UINT64 - 1),
dtype=np.dtype(np.uint64),
)
dom = tiledb.Domain(dim)
text_attr = tiledb.Attr(name="text", dtype=np.dtype("U1"), var=True)
attrs = [text_attr]
if metadatas:
metadata_attr = tiledb.Attr(name="metadata", dtype=np.uint8, var=True)
attrs.append(metadata_attr)
schema = tiledb.ArraySchema(
domain=dom,
sparse=True,
allows_duplicates=False,
attrs=attrs,
)
tiledb.Array.create(docs_uri, schema)
group.add(docs_uri, name=DOCUMENTS_ARRAY_NAME)
group.close()
@classmethod
def __from(
cls,
texts: List[str],
embeddings: List[List[float]],
embedding: Embeddings,
index_uri: str,
*,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
metric: str = DEFAULT_METRIC,
index_type: str = "FLAT",
config: Optional[Mapping[str, Any]] = None,
index_timestamp: int = 0,
**kwargs: Any,
) -> TileDB:
if metric not in INDEX_METRICS:
raise ValueError(
(
f"Unsupported distance metric: {metric}. "
f"Expected one of {list(INDEX_METRICS)}"
)
)
tiledb_vs, tiledb = dependable_tiledb_import()
input_vectors = np.array(embeddings).astype(np.float32)
cls.create(
index_uri=index_uri,
index_type=index_type,
dimensions=input_vectors.shape[1],
vector_type=input_vectors.dtype,
metadatas=metadatas is not None,
config=config,
)
with tiledb.scope_ctx(ctx_or_config=config):
if not embeddings:
raise ValueError("embeddings must be provided to build a TileDB index")
vector_index_uri = get_vector_index_uri(index_uri)
docs_uri = get_documents_array_uri(index_uri)
if ids is None:
ids = [str(random.randint(0, MAX_UINT64 - 1)) for _ in texts]
external_ids = np.array(ids).astype(np.uint64)
tiledb_vs.ingestion.ingest(
index_type=index_type,
index_uri=vector_index_uri,
input_vectors=input_vectors,
external_ids=external_ids,
index_timestamp=index_timestamp if index_timestamp != 0 else None,
config=config,
**kwargs,
)
with tiledb.open(docs_uri, "w") as A:
if external_ids is None:
external_ids = np.zeros(len(texts), dtype=np.uint64)
for i in range(len(texts)):
external_ids[i] = i
data = {}
data["text"] = np.array(texts)
if metadatas is not None:
metadata_attr = np.empty([len(metadatas)], dtype=object)
i = 0
for metadata in metadatas:
metadata_attr[i] = np.frombuffer(
pickle.dumps(metadata), dtype=np.uint8
)
i += 1
data["metadata"] = metadata_attr
A[external_ids] = data
return cls(
embedding=embedding,
index_uri=index_uri,
metric=metric,
config=config,
**kwargs,
)
def delete(
self, ids: Optional[List[str]] = None, timestamp: int = 0, **kwargs: Any
) -> Optional[bool]:
"""Delete by vector ID or other criteria.
Args:
ids: List of ids to delete.
timestamp: Optional timestamp to delete with.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
"""
external_ids = np.array(ids).astype(np.uint64)
self.vector_index.delete_batch(
external_ids=external_ids, timestamp=timestamp if timestamp != 0 else None
)
return True
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
timestamp: int = 0,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional ids of each text object.
timestamp: Optional timestamp to write new texts with.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
tiledb_vs, tiledb = dependable_tiledb_import()
embeddings = self.embedding.embed_documents(list(texts))
if ids is None:
ids = [str(random.randint(0, MAX_UINT64 - 1)) for _ in texts]
external_ids = np.array(ids).astype(np.uint64)
vectors = np.empty((len(embeddings)), dtype="O")
for i in range(len(embeddings)):
vectors[i] = np.array(embeddings[i], dtype=np.float32)
self.vector_index.update_batch(
vectors=vectors,
external_ids=external_ids,
timestamp=timestamp if timestamp != 0 else None,
)
docs = {}
docs["text"] = np.array(texts)
if metadatas is not None:
metadata_attr = np.empty([len(metadatas)], dtype=object)
i = 0
for metadata in metadatas:
metadata_attr[i] = np.frombuffer(pickle.dumps(metadata), dtype=np.uint8)
i += 1
docs["metadata"] = metadata_attr
docs_array = tiledb.open(
self.docs_array_uri,
"w",
timestamp=timestamp if timestamp != 0 else None,
config=self.config,
)
docs_array[external_ids] = docs
docs_array.close()
return ids
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
metric: str = DEFAULT_METRIC,
index_uri: str = "/tmp/tiledb_array",
index_type: str = "FLAT",
config: Optional[Mapping[str, Any]] = None,
index_timestamp: int = 0,
**kwargs: Any,
) -> TileDB:
"""Construct a TileDB index from raw documents.
Args:
texts: List of documents to index.
embedding: Embedding function to use.
metadatas: List of metadata dictionaries to associate with documents.
ids: Optional ids of each text object.
metric: Metric to use for indexing. Defaults to "euclidean".
index_uri: The URI to write the TileDB arrays
index_type: Optional, Vector index type ("FLAT", IVF_FLAT")
config: Optional, TileDB config
index_timestamp: Optional, timestamp to write new texts with.
Example:
.. code-block:: python
                from langchain_community.vectorstores import TileDB
from langchain_community.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
index = TileDB.from_texts(texts, embeddings)
"""
        embeddings = embedding.embed_documents(texts)
return cls.__from(
texts=texts,
embeddings=embeddings,
embedding=embedding,
metadatas=metadatas,
ids=ids,
metric=metric,
index_uri=index_uri,
index_type=index_type,
config=config,
index_timestamp=index_timestamp,
**kwargs,
)
@classmethod
def from_embeddings(
cls,
text_embeddings: List[Tuple[str, List[float]]],
embedding: Embeddings,
index_uri: str,
*,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
metric: str = DEFAULT_METRIC,
index_type: str = "FLAT",
config: Optional[Mapping[str, Any]] = None,
index_timestamp: int = 0,
**kwargs: Any,
) -> TileDB:
"""Construct TileDB index from embeddings.
Args:
text_embeddings: List of tuples of (text, embedding)
embedding: Embedding function to use.
index_uri: The URI to write the TileDB arrays
metadatas: List of metadata dictionaries to associate with documents.
metric: Optional, Metric to use for indexing. Defaults to "euclidean".
index_type: Optional, Vector index type ("FLAT", IVF_FLAT")
config: Optional, TileDB config
index_timestamp: Optional, timestamp to write new texts with.
Example:
.. code-block:: python
                from langchain_community.vectorstores import TileDB
from langchain_community.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
text_embeddings = embeddings.embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
db = TileDB.from_embeddings(text_embedding_pairs, embeddings)
"""
texts = [t[0] for t in text_embeddings]
embeddings = [t[1] for t in text_embeddings]
return cls.__from(
texts=texts,
embeddings=embeddings,
embedding=embedding,
metadatas=metadatas,
ids=ids,
metric=metric,
index_uri=index_uri,
index_type=index_type,
config=config,
index_timestamp=index_timestamp,
**kwargs,
)
@classmethod
def load(
cls,
index_uri: str,
embedding: Embeddings,
*,
metric: str = DEFAULT_METRIC,
config: Optional[Mapping[str, Any]] = None,
timestamp: Any = None,
**kwargs: Any,
) -> TileDB:
"""Load a TileDB index from a URI.
Args:
index_uri: The URI of the TileDB vector index.
embedding: Embeddings to use when generating queries.
metric: Optional, Metric to use for indexing. Defaults to "euclidean".
config: Optional, TileDB config
timestamp: Optional, timestamp to use for opening the arrays.
"""
return cls(
embedding=embedding,
index_uri=index_uri,
metric=metric,
config=config,
timestamp=timestamp,
**kwargs,
)
def consolidate_updates(self, **kwargs: Any) -> None:
self.vector_index = self.vector_index.consolidate_updates(**kwargs)
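# Illustrative end-to-end sketch: it assumes an OPENAI_API_KEY environment variable
# for OpenAIEmbeddings, and the index path and texts below are made-up examples.
if __name__ == "__main__":
    from langchain_community.embeddings import OpenAIEmbeddings
    db = TileDB.from_texts(
        texts=["TileDB is an array database", "LangChain wraps many vector stores"],
        embedding=OpenAIEmbeddings(),
        index_uri="/tmp/tiledb_demo_index",
        index_type="FLAT",
    )
    print(db.similarity_search("What is TileDB?", k=1))
    # lambda_mult trades relevance against diversity in the MMR search defined above.
    print(db.max_marginal_relevance_search("vector stores", k=1, lambda_mult=0.5))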
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~utilities~reddit_search.py | """Wrapper for the Reddit API"""
from typing import Any, Dict, List, Optional
from libs.core.langchain_core.pydantic_v1 import BaseModel, root_validator
from libs.core.langchain_core.utils import get_from_dict_or_env
class RedditSearchAPIWrapper(BaseModel):
"""Wrapper for Reddit API
To use, set the environment variables ``REDDIT_CLIENT_ID``,
``REDDIT_CLIENT_SECRET``, ``REDDIT_USER_AGENT`` to set the client ID,
client secret, and user agent, respectively, as given by Reddit's API.
Alternatively, all three can be supplied as named parameters in the
constructor: ``reddit_client_id``, ``reddit_client_secret``, and
``reddit_user_agent``, respectively.
Example:
.. code-block:: python
from langchain_community.utilities import RedditSearchAPIWrapper
reddit_search = RedditSearchAPIWrapper()
"""
reddit_client: Any
# Values required to access Reddit API via praw
reddit_client_id: Optional[str]
reddit_client_secret: Optional[str]
reddit_user_agent: Optional[str]
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the API ID, secret and user agent exists in environment
and check that praw module is present.
"""
reddit_client_id = get_from_dict_or_env(
values, "reddit_client_id", "REDDIT_CLIENT_ID"
)
values["reddit_client_id"] = reddit_client_id
reddit_client_secret = get_from_dict_or_env(
values, "reddit_client_secret", "REDDIT_CLIENT_SECRET"
)
values["reddit_client_secret"] = reddit_client_secret
reddit_user_agent = get_from_dict_or_env(
values, "reddit_user_agent", "REDDIT_USER_AGENT"
)
values["reddit_user_agent"] = reddit_user_agent
try:
import praw
except ImportError:
raise ImportError(
"praw package not found, please install it with pip install praw"
)
reddit_client = praw.Reddit(
client_id=reddit_client_id,
client_secret=reddit_client_secret,
user_agent=reddit_user_agent,
)
values["reddit_client"] = reddit_client
return values
def run(
self, query: str, sort: str, time_filter: str, subreddit: str, limit: int
) -> str:
"""Search Reddit and return posts as a single string."""
results: List[Dict] = self.results(
query=query,
sort=sort,
time_filter=time_filter,
subreddit=subreddit,
limit=limit,
)
if len(results) > 0:
output: List[str] = [f"Searching r/{subreddit} found {len(results)} posts:"]
for r in results:
category = "N/A" if r["post_category"] is None else r["post_category"]
p = f"Post Title: '{r['post_title']}'\n\
User: {r['post_author']}\n\
Subreddit: {r['post_subreddit']}:\n\
Text body: {r['post_text']}\n\
Post URL: {r['post_url']}\n\
Post Category: {category}.\n\
Score: {r['post_score']}\n"
output.append(p)
return "\n".join(output)
else:
return f"Searching r/{subreddit} did not find any posts:"
def results(
self, query: str, sort: str, time_filter: str, subreddit: str, limit: int
) -> List[Dict]:
"""Use praw to search Reddit and return a list of dictionaries,
one for each post.
"""
subredditObject = self.reddit_client.subreddit(subreddit)
search_results = subredditObject.search(
query=query, sort=sort, time_filter=time_filter, limit=limit
)
search_results = [r for r in search_results]
results_object = []
for submission in search_results:
results_object.append(
{
"post_subreddit": submission.subreddit_name_prefixed,
"post_category": submission.category,
"post_title": submission.title,
"post_text": submission.selftext,
"post_score": submission.score,
"post_id": submission.id,
"post_url": submission.url,
"post_author": submission.author,
}
)
return results_object
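# Illustrative usage sketch: it assumes REDDIT_CLIENT_ID, REDDIT_CLIENT_SECRET and
# REDDIT_USER_AGENT are set in the environment, and the query parameters below are
# made-up examples.
if __name__ == "__main__":
    reddit_search = RedditSearchAPIWrapper()
    print(
        reddit_search.run(
            query="langchain",
            sort="relevance",
            time_filter="week",
            subreddit="machinelearning",
            limit=5,
        )
    )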
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~koboldai.py | import logging
from typing import Any, Dict, List, Optional
import requests
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.llms import LLM
logger = logging.getLogger(__name__)
def clean_url(url: str) -> str:
"""Remove trailing slash and /api from url if present."""
if url.endswith("/api"):
return url[:-4]
elif url.endswith("/"):
return url[:-1]
else:
return url
class KoboldApiLLM(LLM):
"""Kobold API language model.
It includes several fields that can be used to control the text generation process.
To use this class, instantiate it with the required parameters and call it with a
prompt to generate text. For example:
kobold = KoboldApiLLM(endpoint="http://localhost:5000")
result = kobold("Write a story about a dragon.")
This will send a POST request to the Kobold API with the provided prompt and
generate text.
"""
endpoint: str
"""The API endpoint to use for generating text."""
use_story: Optional[bool] = False
""" Whether or not to use the story from the KoboldAI GUI when generating text. """
use_authors_note: Optional[bool] = False
"""Whether to use the author's note from the KoboldAI GUI when generating text.
This has no effect unless use_story is also enabled.
"""
use_world_info: Optional[bool] = False
"""Whether to use the world info from the KoboldAI GUI when generating text."""
use_memory: Optional[bool] = False
"""Whether to use the memory from the KoboldAI GUI when generating text."""
max_context_length: Optional[int] = 1600
"""Maximum number of tokens to send to the model.
minimum: 1
"""
max_length: Optional[int] = 80
"""Number of tokens to generate.
maximum: 512
minimum: 1
"""
rep_pen: Optional[float] = 1.12
"""Base repetition penalty value.
minimum: 1
"""
rep_pen_range: Optional[int] = 1024
"""Repetition penalty range.
minimum: 0
"""
rep_pen_slope: Optional[float] = 0.9
"""Repetition penalty slope.
minimum: 0
"""
temperature: Optional[float] = 0.6
"""Temperature value.
exclusiveMinimum: 0
"""
tfs: Optional[float] = 0.9
"""Tail free sampling value.
maximum: 1
minimum: 0
"""
top_a: Optional[float] = 0.9
"""Top-a sampling value.
minimum: 0
"""
top_p: Optional[float] = 0.95
"""Top-p sampling value.
maximum: 1
minimum: 0
"""
top_k: Optional[int] = 0
"""Top-k sampling value.
minimum: 0
"""
typical: Optional[float] = 0.5
"""Typical sampling value.
maximum: 1
minimum: 0
"""
@property
def _llm_type(self) -> str:
return "koboldai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the API and return the output.
Args:
prompt: The prompt to use for generation.
stop: A list of strings to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
from langchain_community.llms import KoboldApiLLM
llm = KoboldApiLLM(endpoint="http://localhost:5000")
llm("Write a story about dragons.")
"""
data: Dict[str, Any] = {
"prompt": prompt,
"use_story": self.use_story,
"use_authors_note": self.use_authors_note,
"use_world_info": self.use_world_info,
"use_memory": self.use_memory,
"max_context_length": self.max_context_length,
"max_length": self.max_length,
"rep_pen": self.rep_pen,
"rep_pen_range": self.rep_pen_range,
"rep_pen_slope": self.rep_pen_slope,
"temperature": self.temperature,
"tfs": self.tfs,
"top_a": self.top_a,
"top_p": self.top_p,
"top_k": self.top_k,
"typical": self.typical,
}
if stop is not None:
data["stop_sequence"] = stop
response = requests.post(
f"{clean_url(self.endpoint)}/api/v1/generate", json=data
)
response.raise_for_status()
json_response = response.json()
if (
"results" in json_response
and len(json_response["results"]) > 0
and "text" in json_response["results"][0]
):
text = json_response["results"][0]["text"].strip()
if stop is not None:
for sequence in stop:
if text.endswith(sequence):
text = text[: -len(sequence)].rstrip()
return text
else:
raise ValueError(
f"Unexpected response format from Kobold API: {json_response}"
)
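# Illustrative sketch of overriding the sampling parameters documented above: it
# assumes a KoboldAI server is reachable at the endpoint below, and the URL, prompt
# and stop sequence are made-up examples.
if __name__ == "__main__":
    llm = KoboldApiLLM(
        endpoint="http://localhost:5001",
        max_length=120,
        temperature=0.7,
        top_p=0.9,
    )
    print(llm.invoke("Write a two-sentence story about a dragon.", stop=["\n\n"]))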
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~chat_models~tongyi.py | from __future__ import annotations
import asyncio
import functools
import logging
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Mapping,
Optional,
Union,
)
from libs.core.langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from libs.core.langchain_core.language_models.chat_models import BaseChatModel
from libs.core.langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
ChatMessageChunk,
HumanMessage,
HumanMessageChunk,
SystemMessage,
SystemMessageChunk,
)
from libs.core.langchain_core.outputs import (
ChatGeneration,
ChatGenerationChunk,
ChatResult,
)
from libs.core.langchain_core.pydantic_v1 import Field, root_validator
from libs.core.langchain_core.utils import get_from_dict_or_env
from requests.exceptions import HTTPError
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain_community.llms.tongyi import check_response
logger = logging.getLogger(__name__)
def convert_dict_to_message(
_dict: Mapping[str, Any], is_chunk: bool = False
) -> Union[BaseMessage, BaseMessageChunk]:
role = _dict["role"]
content = _dict["content"]
if role == "user":
return (
HumanMessageChunk(content=content)
if is_chunk
else HumanMessage(content=content)
)
elif role == "assistant":
return (
AIMessageChunk(content=content) if is_chunk else AIMessage(content=content)
)
elif role == "system":
return (
SystemMessageChunk(content=content)
if is_chunk
else SystemMessage(content=content)
)
else:
return (
ChatMessageChunk(role=role, content=content)
if is_chunk
else ChatMessage(role=role, content=content)
)
def convert_message_chunk_to_message(message_chunk: BaseMessageChunk) -> BaseMessage:
if isinstance(message_chunk, HumanMessageChunk):
return HumanMessage(content=message_chunk.content)
elif isinstance(message_chunk, AIMessageChunk):
return AIMessage(content=message_chunk.content)
elif isinstance(message_chunk, SystemMessageChunk):
return SystemMessage(content=message_chunk.content)
elif isinstance(message_chunk, ChatMessageChunk):
return ChatMessage(role=message_chunk.role, content=message_chunk.content)
else:
raise TypeError(f"Got unknown type {message_chunk}")
def convert_message_to_dict(message: BaseMessage) -> dict:
"""Convert a message to a dict."""
message_dict: Dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
else:
raise TypeError(f"Got unknown type {message}")
return message_dict
def _create_retry_decorator(llm: ChatTongyi) -> Callable[[Any], Any]:
min_seconds = 1
max_seconds = 4
# Wait 2^x * 1 second between each retry starting with
# 4 seconds, then up to 10 seconds, then 10 seconds afterward
return retry(
reraise=True,
stop=stop_after_attempt(llm.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(retry_if_exception_type(HTTPError)),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
class ChatTongyi(BaseChatModel):
"""Alibaba Tongyi Qwen chat models API.
To use, you should have the ``dashscope`` python package installed,
and set env ``DASHSCOPE_API_KEY`` with your API key, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain_community.chat_models import Tongyi
Tongyi_chat = ChatTongyi()
"""
@property
def lc_secrets(self) -> Dict[str, str]:
return {"dashscope_api_key": "DASHSCOPE_API_KEY"}
client: Any #: :meta private:
model_name: str = Field(default="qwen-turbo", alias="model")
"""Model name to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
top_p: float = 0.8
"""Total probability mass of tokens to consider at each step."""
dashscope_api_key: Optional[str] = None
"""Dashscope api key provide by Alibaba Cloud."""
streaming: bool = False
"""Whether to stream the results or not."""
max_retries: int = 10
"""Maximum number of retries to make when generating."""
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "tongyi"
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["dashscope_api_key"] = get_from_dict_or_env(
values, "dashscope_api_key", "DASHSCOPE_API_KEY"
)
try:
import dashscope
except ImportError:
raise ImportError(
"Could not import dashscope python package. "
"Please install it with `pip install dashscope --upgrade`."
)
try:
values["client"] = dashscope.Generation
except AttributeError:
raise ValueError(
"`dashscope` has no `Generation` attribute, this is likely "
"due to an old version of the dashscope package. Try upgrading it "
"with `pip install --upgrade dashscope`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Tongyi Qwen API."""
return {
"model": self.model_name,
"top_p": self.top_p,
"api_key": self.dashscope_api_key,
"result_format": "message",
**self.model_kwargs,
}
def completion_with_retry(self, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(self)
@retry_decorator
def _completion_with_retry(**_kwargs: Any) -> Any:
resp = self.client.call(**_kwargs)
return check_response(resp)
return _completion_with_retry(**kwargs)
def stream_completion_with_retry(self, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(self)
@retry_decorator
def _stream_completion_with_retry(**_kwargs: Any) -> Any:
responses = self.client.call(**_kwargs)
for resp in responses:
yield check_response(resp)
return _stream_completion_with_retry(**kwargs)
async def astream_completion_with_retry(self, **kwargs: Any) -> Any:
"""Because the dashscope SDK doesn't provide an async API,
        we wrap `stream_completion_with_retry` with an async generator."""
class _AioTongyiGenerator:
def __init__(self, generator: Any):
self.generator = generator
def __aiter__(self) -> AsyncIterator[Any]:
return self
async def __anext__(self) -> Any:
value = await asyncio.get_running_loop().run_in_executor(
None, self._safe_next
)
if value is not None:
return value
else:
raise StopAsyncIteration
def _safe_next(self) -> Any:
try:
return next(self.generator)
except StopIteration:
return None
async for chunk in _AioTongyiGenerator(
generator=self.stream_completion_with_retry(**kwargs)
):
yield chunk
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
generations = []
if self.streaming:
generation: Optional[ChatGenerationChunk] = None
for chunk in self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
generations.append(self._chunk_to_generation(generation))
else:
params: Dict[str, Any] = self._invocation_params(
messages=messages, stop=stop, **kwargs
)
resp = self.completion_with_retry(**params)
generations.append(
ChatGeneration(**self._chat_generation_from_qwen_resp(resp))
)
return ChatResult(
generations=generations,
llm_output={
"model_name": self.model_name,
},
)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
generations = []
if self.streaming:
generation: Optional[ChatGenerationChunk] = None
async for chunk in self._astream(
messages, stop=stop, run_manager=run_manager, **kwargs
):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
generations.append(self._chunk_to_generation(generation))
else:
params: Dict[str, Any] = self._invocation_params(
messages=messages, stop=stop, **kwargs
)
resp = await asyncio.get_running_loop().run_in_executor(
None,
functools.partial(
self.completion_with_retry, **{"run_manager": run_manager, **params}
),
)
generations.append(
ChatGeneration(**self._chat_generation_from_qwen_resp(resp))
)
return ChatResult(
generations=generations,
llm_output={
"model_name": self.model_name,
},
)
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
params: Dict[str, Any] = self._invocation_params(
messages=messages, stop=stop, stream=True, **kwargs
)
for stream_resp in self.stream_completion_with_retry(**params):
chunk = ChatGenerationChunk(
**self._chat_generation_from_qwen_resp(stream_resp, is_chunk=True)
)
yield chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
params: Dict[str, Any] = self._invocation_params(
messages=messages, stop=stop, stream=True, **kwargs
)
async for stream_resp in self.astream_completion_with_retry(**params):
chunk = ChatGenerationChunk(
**self._chat_generation_from_qwen_resp(stream_resp, is_chunk=True)
)
yield chunk
if run_manager:
await run_manager.on_llm_new_token(chunk.text, chunk=chunk)
def _invocation_params(
self, messages: List[BaseMessage], stop: Any, **kwargs: Any
) -> Dict[str, Any]:
params = {**self._default_params, **kwargs}
if stop is not None:
params["stop"] = stop
if params.get("stream"):
params["incremental_output"] = True
message_dicts = [convert_message_to_dict(m) for m in messages]
# According to the docs, the last message should be a `user` message
if message_dicts[-1]["role"] != "user":
raise ValueError("Last message should be user message.")
# And the `system` message should be the first message if present
system_message_indices = [
i for i, m in enumerate(message_dicts) if m["role"] == "system"
]
        if len(system_message_indices) > 1:
            raise ValueError("There can be only one system message at most.")
        elif len(system_message_indices) == 1 and system_message_indices[0] != 0:
            raise ValueError("System message can only be the first message.")
params["messages"] = message_dicts
return params
def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
if llm_outputs[0] is None:
return {}
return llm_outputs[0]
@staticmethod
def _chat_generation_from_qwen_resp(
resp: Any, is_chunk: bool = False
) -> Dict[str, Any]:
choice = resp["output"]["choices"][0]
message = convert_dict_to_message(choice["message"], is_chunk=is_chunk)
return dict(
message=message,
generation_info=dict(
finish_reason=choice["finish_reason"],
request_id=resp["request_id"],
token_usage=dict(resp["usage"]),
),
)
@staticmethod
def _chunk_to_generation(chunk: ChatGenerationChunk) -> ChatGeneration:
return ChatGeneration(
message=convert_message_chunk_to_message(chunk.message),
generation_info=chunk.generation_info,
)
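# Illustrative usage sketch: it assumes DASHSCOPE_API_KEY is set and the dashscope
# package is installed, and the message contents below are made-up examples.
if __name__ == "__main__":
    chat = ChatTongyi()
    # Per _invocation_params above, a system message (if any) must come first and
    # the last message must come from the user.
    reply = chat.invoke(
        [
            SystemMessage(content="You are a concise assistant."),
            HumanMessage(content="What is the capital of France?"),
        ]
    )
    print(reply.content)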
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~utilities~test_arxiv.py | """Integration test for Arxiv API Wrapper."""
from typing import Any, List
import pytest
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.tools import BaseTool
from langchain_community.tools import ArxivQueryRun
from langchain_community.utilities import ArxivAPIWrapper
@pytest.fixture
def api_client() -> ArxivAPIWrapper:
return ArxivAPIWrapper()
def test_run_success_paper_name(api_client: ArxivAPIWrapper) -> None:
"""Test a query of paper name that returns the correct answer"""
output = api_client.run("Heat-bath random walks with Markov bases")
assert "Probability distributions for Markov chains based quantum walks" in output
assert (
"Transformations of random walks on groups via Markov stopping times" in output
)
assert (
"Recurrence of Multidimensional Persistent Random Walks. Fourier and Series "
"Criteria" in output
)
def test_run_success_arxiv_identifier(api_client: ArxivAPIWrapper) -> None:
"""Test a query of an arxiv identifier returns the correct answer"""
output = api_client.run("1605.08386v1")
assert "Heat-bath random walks with Markov bases" in output
def test_run_success_multiple_arxiv_identifiers(api_client: ArxivAPIWrapper) -> None:
"""Test a query of multiple arxiv identifiers that returns the correct answer"""
output = api_client.run("1605.08386v1 2212.00794v2 2308.07912")
assert "Heat-bath random walks with Markov bases" in output
assert "Scaling Language-Image Pre-training via Masking" in output
assert (
"Ultra-low mass PBHs in the early universe can explain the PTA signal" in output
)
def test_run_returns_several_docs(api_client: ArxivAPIWrapper) -> None:
"""Test that returns several docs"""
output = api_client.run("Caprice Stanley")
assert "On Mixing Behavior of a Family of Random Walks" in output
def test_run_returns_no_result(api_client: ArxivAPIWrapper) -> None:
"""Test that gives no result."""
output = api_client.run("1605.08386WWW")
assert "No good Arxiv Result was found" == output
def assert_docs(docs: List[Document]) -> None:
for doc in docs:
assert doc.page_content
assert doc.metadata
assert set(doc.metadata) == {"Published", "Title", "Authors", "Summary"}
def test_load_success_paper_name(api_client: ArxivAPIWrapper) -> None:
"""Test a query of paper name that returns one document"""
docs = api_client.load("Heat-bath random walks with Markov bases")
assert len(docs) == 3
assert_docs(docs)
def test_load_success_arxiv_identifier(api_client: ArxivAPIWrapper) -> None:
"""Test a query of an arxiv identifier that returns one document"""
docs = api_client.load("1605.08386v1")
assert len(docs) == 1
assert_docs(docs)
def test_load_success_multiple_arxiv_identifiers(api_client: ArxivAPIWrapper) -> None:
"""Test a query of arxiv identifiers that returns the correct answer"""
docs = api_client.load("1605.08386v1 2212.00794v2 2308.07912")
assert len(docs) == 3
assert_docs(docs)
def test_load_returns_no_result(api_client: ArxivAPIWrapper) -> None:
"""Test that returns no docs"""
docs = api_client.load("1605.08386WWW")
assert len(docs) == 0
def test_load_returns_limited_docs() -> None:
"""Test that returns several docs"""
expected_docs = 2
api_client = ArxivAPIWrapper(load_max_docs=expected_docs)
docs = api_client.load("ChatGPT")
assert len(docs) == expected_docs
assert_docs(docs)
def test_load_returns_limited_doc_content_chars() -> None:
"""Test that returns limited doc_content_chars_max"""
doc_content_chars_max = 100
api_client = ArxivAPIWrapper(doc_content_chars_max=doc_content_chars_max)
docs = api_client.load("1605.08386")
assert len(docs[0].page_content) == doc_content_chars_max
def test_load_returns_unlimited_doc_content_chars() -> None:
"""Test that returns unlimited doc_content_chars_max"""
doc_content_chars_max = None
api_client = ArxivAPIWrapper(doc_content_chars_max=doc_content_chars_max)
docs = api_client.load("1605.08386")
assert len(docs[0].page_content) == pytest.approx(54338, rel=1e-2)
def test_load_returns_full_set_of_metadata() -> None:
"""Test that returns several docs"""
api_client = ArxivAPIWrapper(load_max_docs=1, load_all_available_meta=True)
docs = api_client.load("ChatGPT")
assert len(docs) == 1
for doc in docs:
assert doc.page_content
assert doc.metadata
assert set(doc.metadata).issuperset(
{"Published", "Title", "Authors", "Summary"}
)
print(doc.metadata)
assert len(set(doc.metadata)) > 4
def _load_arxiv_from_universal_entry(**kwargs: Any) -> BaseTool:
from langchain.agents.load_tools import load_tools
tools = load_tools(["arxiv"], **kwargs)
assert len(tools) == 1, "loaded more than 1 tool"
return tools[0]
def test_load_arxiv_from_universal_entry() -> None:
arxiv_tool = _load_arxiv_from_universal_entry()
output = arxiv_tool("Caprice Stanley")
assert (
"On Mixing Behavior of a Family of Random Walks" in output
), "failed to fetch a valid result"
def test_load_arxiv_from_universal_entry_with_params() -> None:
params = {
"top_k_results": 1,
"load_max_docs": 10,
"load_all_available_meta": True,
}
arxiv_tool = _load_arxiv_from_universal_entry(**params)
assert isinstance(arxiv_tool, ArxivQueryRun)
wp = arxiv_tool.api_wrapper
assert wp.top_k_results == 1, "failed to assert top_k_results"
assert wp.load_max_docs == 10, "failed to assert load_max_docs"
assert (
wp.load_all_available_meta is True
), "failed to assert load_all_available_meta"
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~document_loaders~parsers~language~cobol.py | from langchain_community.document_loaders.parsers.language.cobol import CobolSegmenter
__all__ = ["CobolSegmenter"]
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~tools~file_management~move.py | import shutil
from typing import Optional, Type
from libs.core.langchain_core.callbacks import CallbackManagerForToolRun
from libs.core.langchain_core.pydantic_v1 import BaseModel, Field
from libs.core.langchain_core.tools import BaseTool
from langchain_community.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileToolMixin,
FileValidationError,
)
class FileMoveInput(BaseModel):
"""Input for MoveFileTool."""
source_path: str = Field(..., description="Path of the file to move")
destination_path: str = Field(..., description="New path for the moved file")
class MoveFileTool(BaseFileToolMixin, BaseTool):
"""Tool that moves a file."""
name: str = "move_file"
args_schema: Type[BaseModel] = FileMoveInput
description: str = "Move or rename a file from one location to another"
def _run(
self,
source_path: str,
destination_path: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
try:
source_path_ = self.get_relative_path(source_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(
arg_name="source_path", value=source_path
)
try:
destination_path_ = self.get_relative_path(destination_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(
arg_name="destination_path_", value=destination_path_
)
if not source_path_.exists():
return f"Error: no such file or directory {source_path}"
try:
# shutil.move expects str args in 3.8
shutil.move(str(source_path_), destination_path_)
return f"File moved successfully from {source_path} to {destination_path}."
except Exception as e:
return "Error: " + str(e)
# TODO: Add aiofiles method
| [
"Move or rename a file from one location to another"
] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~memory~buffer_window.py | from typing import Any, Dict, List, Union
from libs.core.langchain_core.messages import BaseMessage, get_buffer_string
from langchain.memory.chat_memory import BaseChatMemory
class ConversationBufferWindowMemory(BaseChatMemory):
"""Buffer for storing conversation memory inside a limited size window."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
memory_key: str = "history" #: :meta private:
k: int = 5
"""Number of messages to store in buffer."""
@property
def buffer(self) -> Union[str, List[BaseMessage]]:
"""String buffer of memory."""
return self.buffer_as_messages if self.return_messages else self.buffer_as_str
@property
def buffer_as_str(self) -> str:
"""Exposes the buffer as a string in case return_messages is True."""
messages = self.chat_memory.messages[-self.k * 2 :] if self.k > 0 else []
return get_buffer_string(
messages,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
@property
def buffer_as_messages(self) -> List[BaseMessage]:
"""Exposes the buffer as a list of messages in case return_messages is False."""
return self.chat_memory.messages[-self.k * 2 :] if self.k > 0 else []
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
return {self.memory_key: self.buffer}
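# Illustrative sketch: with k=1 only the most recent exchange survives in the
# window. save_context is inherited from BaseChatMemory, and the inputs below are
# made-up examples.
if __name__ == "__main__":
    memory = ConversationBufferWindowMemory(k=1)
    memory.save_context({"input": "hi"}, {"output": "hello"})
    memory.save_context({"input": "how are you?"}, {"output": "fine, thanks"})
    print(memory.load_memory_variables({}))
    # -> {'history': 'Human: how are you?\nAI: fine, thanks'}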
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~docstore~in_memory.py | """Simple in memory docstore in the form of a dict."""
from typing import Dict, List, Optional, Union
from libs.core.langchain_core.documents import Document
from langchain_community.docstore.base import AddableMixin, Docstore
class InMemoryDocstore(Docstore, AddableMixin):
"""Simple in memory docstore in the form of a dict."""
def __init__(self, _dict: Optional[Dict[str, Document]] = None):
"""Initialize with dict."""
self._dict = _dict if _dict is not None else {}
def add(self, texts: Dict[str, Document]) -> None:
"""Add texts to in memory dictionary.
Args:
texts: dictionary of id -> document.
Returns:
None
"""
overlapping = set(texts).intersection(self._dict)
if overlapping:
raise ValueError(f"Tried to add ids that already exist: {overlapping}")
self._dict = {**self._dict, **texts}
def delete(self, ids: List) -> None:
"""Deleting IDs from in memory dictionary."""
overlapping = set(ids).intersection(self._dict)
if not overlapping:
raise ValueError(f"Tried to delete ids that does not exist: {ids}")
for _id in ids:
self._dict.pop(_id)
def search(self, search: str) -> Union[str, Document]:
"""Search via direct lookup.
Args:
search: id of a document to search for.
Returns:
Document if found, else error message.
"""
if search not in self._dict:
return f"ID {search} not found."
else:
return self._dict[search]
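# Illustrative sketch of the add / search / delete round trip defined above; the
# documents below are made-up examples.
if __name__ == "__main__":
    store = InMemoryDocstore({"1": Document(page_content="hello")})
    store.add({"2": Document(page_content="world")})
    print(store.search("2").page_content)  # -> "world"
    store.delete(["1"])
    print(store.search("1"))  # -> "ID 1 not found."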
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~vectorstores~qdrant.py | from __future__ import annotations
import functools
import uuid
import warnings
from itertools import islice
from operator import itemgetter
from typing import (
TYPE_CHECKING,
Any,
AsyncGenerator,
Callable,
Dict,
Generator,
Iterable,
List,
Optional,
Sequence,
Tuple,
Type,
Union,
)
import numpy as np
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.runnables.config import run_in_executor
from libs.core.langchain_core.vectorstores import VectorStore
from langchain_community.vectorstores.utils import maximal_marginal_relevance
if TYPE_CHECKING:
from qdrant_client import grpc # noqa
from qdrant_client.conversions import common_types
from qdrant_client.http import models as rest
DictFilter = Dict[str, Union[str, int, bool, dict, list]]
MetadataFilter = Union[DictFilter, common_types.Filter]
class QdrantException(Exception):
"""`Qdrant` related exceptions."""
def sync_call_fallback(method: Callable) -> Callable:
"""
Decorator to call the synchronous method of the class if the async method is not
    implemented. This decorator should only be used for methods that are defined
    as async in the class.
"""
@functools.wraps(method)
async def wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
try:
return await method(self, *args, **kwargs)
except NotImplementedError:
# If the async method is not implemented, call the synchronous method
# by removing the first letter from the method name. For example,
            # if the async method is called ``aadd_texts``, the synchronous method
            # will be called ``add_texts``.
return await run_in_executor(
None, getattr(self, method.__name__[1:]), *args, **kwargs
)
return wrapper
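

# Illustrative sketch (not used by the library): ``sync_call_fallback`` lets a
# class expose an async method that transparently falls back to its synchronous
# counterpart (same name minus the leading "a") when no native async
# implementation exists. The class below is a made-up example.
class _SyncFallbackExample:
    def do_work(self, x: int) -> int:
        return x * 2

    @sync_call_fallback
    async def ado_work(self, x: int) -> int:
        # No native async implementation; the decorator runs ``do_work``
        # in an executor instead.
        raise NotImplementedError


# e.g. ``asyncio.run(_SyncFallbackExample().ado_work(3))`` evaluates to 6.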
class Qdrant(VectorStore):
"""`Qdrant` vector store.
To use you should have the ``qdrant-client`` package installed.
Example:
.. code-block:: python
from qdrant_client import QdrantClient
from langchain_community.vectorstores import Qdrant
client = QdrantClient()
collection_name = "MyCollection"
qdrant = Qdrant(client, collection_name, embedding_function)
"""
CONTENT_KEY = "page_content"
METADATA_KEY = "metadata"
VECTOR_NAME = None
def __init__(
self,
client: Any,
collection_name: str,
embeddings: Optional[Embeddings] = None,
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
distance_strategy: str = "COSINE",
vector_name: Optional[str] = VECTOR_NAME,
embedding_function: Optional[Callable] = None, # deprecated
):
"""Initialize with necessary components."""
try:
import qdrant_client
except ImportError:
raise ImportError(
"Could not import qdrant-client python package. "
"Please install it with `pip install qdrant-client`."
)
if not isinstance(client, qdrant_client.QdrantClient):
raise ValueError(
f"client should be an instance of qdrant_client.QdrantClient, "
f"got {type(client)}"
)
if embeddings is None and embedding_function is None:
raise ValueError(
"`embeddings` value can't be None. Pass `Embeddings` instance."
)
if embeddings is not None and embedding_function is not None:
raise ValueError(
"Both `embeddings` and `embedding_function` are passed. "
"Use `embeddings` only."
)
self._embeddings = embeddings
self._embeddings_function = embedding_function
self.client: qdrant_client.QdrantClient = client
self.collection_name = collection_name
self.content_payload_key = content_payload_key or self.CONTENT_KEY
self.metadata_payload_key = metadata_payload_key or self.METADATA_KEY
self.vector_name = vector_name or self.VECTOR_NAME
if embedding_function is not None:
warnings.warn(
"Using `embedding_function` is deprecated. "
"Pass `Embeddings` instance to `embeddings` instead."
)
if not isinstance(embeddings, Embeddings):
warnings.warn(
"`embeddings` should be an instance of `Embeddings`."
"Using `embeddings` as `embedding_function` which is deprecated"
)
self._embeddings_function = embeddings
self._embeddings = None
self.distance_strategy = distance_strategy.upper()
@property
def embeddings(self) -> Optional[Embeddings]:
return self._embeddings
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[Sequence[str]] = None,
batch_size: int = 64,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids:
Optional list of ids to associate with the texts. Ids have to be
uuid-like strings.
batch_size:
How many vectors upload per-request.
Default: 64
Returns:
List of ids from adding the texts into the vectorstore.
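
        Example:
            A minimal sketch; ``qdrant`` is assumed to be an already
            initialized instance of this class:

            .. code-block:: python

                ids = qdrant.add_texts(
                    ["first document", "second document"],
                    metadatas=[{"source": "a.txt"}, {"source": "b.txt"}],
                )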
"""
added_ids = []
for batch_ids, points in self._generate_rest_batches(
texts, metadatas, ids, batch_size
):
self.client.upsert(
collection_name=self.collection_name, points=points, **kwargs
)
added_ids.extend(batch_ids)
return added_ids
@sync_call_fallback
async def aadd_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[Sequence[str]] = None,
batch_size: int = 64,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids:
Optional list of ids to associate with the texts. Ids have to be
uuid-like strings.
batch_size:
How many vectors upload per-request.
Default: 64
Returns:
List of ids from adding the texts into the vectorstore.
"""
from qdrant_client import grpc # noqa
from qdrant_client.conversions.conversion import RestToGrpc
added_ids = []
async for batch_ids, points in self._agenerate_rest_batches(
texts, metadatas, ids, batch_size
):
await self.client.async_grpc_points.Upsert(
grpc.UpsertPoints(
collection_name=self.collection_name,
points=[RestToGrpc.convert_point_struct(point) for point in points],
)
)
added_ids.extend(batch_ids)
return added_ids
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
                - int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of Documents most similar to the query.
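
        Example:
            A minimal sketch; ``qdrant`` is assumed to be an already
            initialized instance of this class, and the metadata key used in
            the filter is an assumption:

            .. code-block:: python

                from qdrant_client.http import models as rest

                docs = qdrant.similarity_search(
                    "What is Qdrant?",
                    k=2,
                    filter=rest.Filter(
                        must=[
                            rest.FieldCondition(
                                key="metadata.source",
                                match=rest.MatchValue(value="a.txt"),
                            )
                        ]
                    ),
                )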
"""
results = self.similarity_search_with_score(
query,
k,
filter=filter,
search_params=search_params,
offset=offset,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return list(map(itemgetter(0), results))
@sync_call_fallback
async def asimilarity_search(
self,
query: str,
k: int = 4,
filter: Optional[MetadataFilter] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query.
"""
results = await self.asimilarity_search_with_score(query, k, filter, **kwargs)
return list(map(itemgetter(0), results))
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
                - int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of documents most similar to the query text and distance for each.
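
        Example:
            A minimal sketch; ``qdrant`` is assumed to be an already
            initialized instance of this class:

            .. code-block:: python

                docs_and_scores = qdrant.similarity_search_with_score(
                    "What is Qdrant?", k=4, score_threshold=0.8
                )
                for doc, score in docs_and_scores:
                    print(score, doc.page_content[:50])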
"""
return self.similarity_search_with_score_by_vector(
self._embed_query(query),
k,
filter=filter,
search_params=search_params,
offset=offset,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
@sync_call_fallback
async def asimilarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
                - int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to
QdrantClient.async_grpc_points.Search().
Returns:
List of documents most similar to the query text and distance for each.
"""
return await self.asimilarity_search_with_score_by_vector(
self._embed_query(query),
k,
filter=filter,
search_params=search_params,
offset=offset,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
                - int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of Documents most similar to the query.
"""
results = self.similarity_search_with_score_by_vector(
embedding,
k,
filter=filter,
search_params=search_params,
offset=offset,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return list(map(itemgetter(0), results))
@sync_call_fallback
async def asimilarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
                - int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to
QdrantClient.async_grpc_points.Search().
Returns:
List of Documents most similar to the query.
"""
results = await self.asimilarity_search_with_score_by_vector(
embedding,
k,
filter=filter,
search_params=search_params,
offset=offset,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return list(map(itemgetter(0), results))
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
                - int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of documents most similar to the query text and distance for each.
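
        Example:
            A minimal sketch; ``qdrant`` and ``embeddings`` are assumed to be
            already initialized instances:

            .. code-block:: python

                query_vector = embeddings.embed_query("What is Qdrant?")
                docs_and_scores = qdrant.similarity_search_with_score_by_vector(
                    query_vector, k=4
                )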
"""
if filter is not None and isinstance(filter, dict):
warnings.warn(
"Using dict as a `filter` is deprecated. Please use qdrant-client "
"filters directly: "
"https://qdrant.tech/documentation/concepts/filtering/",
DeprecationWarning,
)
qdrant_filter = self._qdrant_filter_from_dict(filter)
else:
qdrant_filter = filter
query_vector = embedding
if self.vector_name is not None:
query_vector = (self.vector_name, embedding) # type: ignore[assignment]
results = self.client.search(
collection_name=self.collection_name,
query_vector=query_vector,
query_filter=qdrant_filter,
search_params=search_params,
limit=k,
offset=offset,
with_payload=True,
with_vectors=False, # Langchain does not expect vectors to be returned
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return [
(
self._document_from_scored_point(
result, self.content_payload_key, self.metadata_payload_key
),
result.score,
)
for result in results
]
async def _asearch_with_score_by_vector(
self,
embedding: List[float],
*,
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
with_vectors: bool = False,
**kwargs: Any,
) -> Any:
"""Return results most similar to embedding vector."""
from qdrant_client import grpc # noqa
from qdrant_client.conversions.conversion import RestToGrpc
from qdrant_client.http import models as rest
if filter is not None and isinstance(filter, dict):
warnings.warn(
"Using dict as a `filter` is deprecated. Please use qdrant-client "
"filters directly: "
"https://qdrant.tech/documentation/concepts/filtering/",
DeprecationWarning,
)
qdrant_filter = self._qdrant_filter_from_dict(filter)
else:
qdrant_filter = filter
if qdrant_filter is not None and isinstance(qdrant_filter, rest.Filter):
qdrant_filter = RestToGrpc.convert_filter(qdrant_filter)
response = await self.client.async_grpc_points.Search(
grpc.SearchPoints(
collection_name=self.collection_name,
vector_name=self.vector_name,
vector=embedding,
filter=qdrant_filter,
params=search_params,
limit=k,
offset=offset,
with_payload=grpc.WithPayloadSelector(enable=True),
with_vectors=grpc.WithVectorsSelector(enable=with_vectors),
score_threshold=score_threshold,
read_consistency=consistency,
**kwargs,
)
)
return response
@sync_call_fallback
async def asimilarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
                - int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to
QdrantClient.async_grpc_points.Search().
Returns:
List of documents most similar to the query text and distance for each.
"""
response = await self._asearch_with_score_by_vector(
embedding,
k=k,
filter=filter,
search_params=search_params,
offset=offset,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return [
(
self._document_from_scored_point_grpc(
result, self.content_payload_key, self.metadata_payload_key
),
result.score,
)
for result in response.result
]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
                - int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of Documents selected by maximal marginal relevance.
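
        Example:
            A minimal sketch; ``qdrant`` is assumed to be an already
            initialized instance of this class:

            .. code-block:: python

                docs = qdrant.max_marginal_relevance_search(
                    "What is Qdrant?",
                    k=4,
                    fetch_k=20,
                    lambda_mult=0.25,  # lean towards diversity over similarity
                )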
"""
query_embedding = self._embed_query(query)
return self.max_marginal_relevance_search_by_vector(
query_embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
search_params=search_params,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
@sync_call_fallback
async def amax_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
                - int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to
QdrantClient.async_grpc_points.Search().
Returns:
List of Documents selected by maximal marginal relevance.
"""
query_embedding = self._embed_query(query)
return await self.amax_marginal_relevance_search_by_vector(
query_embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
search_params=search_params,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
                - int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of Documents selected by maximal marginal relevance.
"""
results = self.max_marginal_relevance_search_with_score_by_vector(
embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
search_params=search_params,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return list(map(itemgetter(0), results))
@sync_call_fallback
async def amax_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
            embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
                - int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to
QdrantClient.async_grpc_points.Search().
Returns:
List of Documents selected by maximal marginal relevance and distance for
each.
"""
results = await self.amax_marginal_relevance_search_with_score_by_vector(
embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
search_params=search_params,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return list(map(itemgetter(0), results))
def max_marginal_relevance_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
            embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
                - int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of Documents selected by maximal marginal relevance and distance for
each.
"""
query_vector = embedding
if self.vector_name is not None:
query_vector = (self.vector_name, query_vector) # type: ignore[assignment]
results = self.client.search(
collection_name=self.collection_name,
query_vector=query_vector,
query_filter=filter,
search_params=search_params,
limit=fetch_k,
with_payload=True,
with_vectors=True,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
embeddings = [
result.vector.get(self.vector_name) # type: ignore[index, union-attr]
if self.vector_name is not None
else result.vector
for result in results
]
mmr_selected = maximal_marginal_relevance(
np.array(embedding), embeddings, k=k, lambda_mult=lambda_mult
)
return [
(
self._document_from_scored_point(
results[i], self.content_payload_key, self.metadata_payload_key
),
results[i].score,
)
for i in mmr_selected
]
@sync_call_fallback
async def amax_marginal_relevance_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
            embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance and distance for
each.
"""
from qdrant_client.conversions.conversion import GrpcToRest
response = await self._asearch_with_score_by_vector(
embedding,
k=fetch_k,
filter=filter,
search_params=search_params,
score_threshold=score_threshold,
consistency=consistency,
with_vectors=True,
**kwargs,
)
results = [
GrpcToRest.convert_vectors(result.vectors) for result in response.result
]
embeddings: List[List[float]] = [
result.get(self.vector_name) # type: ignore
if isinstance(result, dict)
else result
for result in results
]
mmr_selected: List[int] = maximal_marginal_relevance(
np.array(embedding),
embeddings,
k=k,
lambda_mult=lambda_mult,
)
return [
(
self._document_from_scored_point_grpc(
response.result[i],
self.content_payload_key,
self.metadata_payload_key,
),
response.result[i].score,
)
for i in mmr_selected
]
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]:
"""Delete by vector ID or other criteria.
Args:
ids: List of ids to delete.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
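
        Example:
            A minimal sketch; the ids are assumed to come from a previous
            ``add_texts`` call on the same instance:

            .. code-block:: python

                ids = qdrant.add_texts(["obsolete document"])
                qdrant.delete(ids=ids)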
"""
from qdrant_client.http import models as rest
result = self.client.delete(
collection_name=self.collection_name,
points_selector=ids,
)
return result.status == rest.UpdateStatus.COMPLETED
@classmethod
def from_texts(
cls: Type[Qdrant],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[Sequence[str]] = None,
location: Optional[str] = None,
url: Optional[str] = None,
port: Optional[int] = 6333,
grpc_port: int = 6334,
prefer_grpc: bool = False,
https: Optional[bool] = None,
api_key: Optional[str] = None,
prefix: Optional[str] = None,
timeout: Optional[float] = None,
host: Optional[str] = None,
path: Optional[str] = None,
collection_name: Optional[str] = None,
distance_func: str = "Cosine",
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
vector_name: Optional[str] = VECTOR_NAME,
batch_size: int = 64,
shard_number: Optional[int] = None,
replication_factor: Optional[int] = None,
write_consistency_factor: Optional[int] = None,
on_disk_payload: Optional[bool] = None,
hnsw_config: Optional[common_types.HnswConfigDiff] = None,
optimizers_config: Optional[common_types.OptimizersConfigDiff] = None,
wal_config: Optional[common_types.WalConfigDiff] = None,
quantization_config: Optional[common_types.QuantizationConfig] = None,
init_from: Optional[common_types.InitFrom] = None,
on_disk: Optional[bool] = None,
force_recreate: bool = False,
**kwargs: Any,
) -> Qdrant:
"""Construct Qdrant wrapper from a list of texts.
Args:
texts: A list of texts to be indexed in Qdrant.
embedding: A subclass of `Embeddings`, responsible for text vectorization.
metadatas:
An optional list of metadata. If provided it has to be of the same
length as a list of texts.
ids:
Optional list of ids to associate with the texts. Ids have to be
uuid-like strings.
location:
If `:memory:` - use in-memory Qdrant instance.
If `str` - use it as a `url` parameter.
If `None` - fallback to relying on `host` and `port` parameters.
url: either host or str of "Optional[scheme], host, Optional[port],
Optional[prefix]". Default: `None`
port: Port of the REST API interface. Default: 6333
grpc_port: Port of the gRPC interface. Default: 6334
prefer_grpc:
                If true - use gRPC interface whenever possible in custom methods.
Default: False
https: If true - use HTTPS(SSL) protocol. Default: None
api_key: API key for authentication in Qdrant Cloud. Default: None
prefix:
If not None - add prefix to the REST URL path.
Example: service/v1 will result in
http://localhost:6333/service/v1/{qdrant-endpoint} for REST API.
Default: None
timeout:
Timeout for REST and gRPC API requests.
Default: 5.0 seconds for REST and unlimited for gRPC
host:
Host name of Qdrant service. If url and host are None, set to
'localhost'. Default: None
path:
Path in which the vectors will be stored while using local mode.
Default: None
collection_name:
Name of the Qdrant collection to be used. If not provided,
it will be created randomly. Default: None
distance_func:
Distance function. One of: "Cosine" / "Euclid" / "Dot".
Default: "Cosine"
content_payload_key:
A payload key used to store the content of the document.
Default: "page_content"
metadata_payload_key:
A payload key used to store the metadata of the document.
Default: "metadata"
vector_name:
Name of the vector to be used internally in Qdrant.
Default: None
batch_size:
How many vectors upload per-request.
Default: 64
shard_number: Number of shards in collection. Default is 1, minimum is 1.
replication_factor:
Replication factor for collection. Default is 1, minimum is 1.
Defines how many copies of each shard will be created.
                Has effect only in distributed mode.
write_consistency_factor:
Write consistency factor for collection. Default is 1, minimum is 1.
Defines how many replicas should apply the operation for us to consider
it successful. Increasing this number will make the collection more
resilient to inconsistencies, but will also make it fail if not enough
replicas are available.
Does not have any performance impact.
                Has effect only in distributed mode.
on_disk_payload:
                If true - point's payload will not be stored in memory.
It will be read from the disk every time it is requested.
This setting saves RAM by (slightly) increasing the response time.
Note: those payload values that are involved in filtering and are
indexed - remain in RAM.
hnsw_config: Params for HNSW index
optimizers_config: Params for optimizer
wal_config: Params for Write-Ahead-Log
quantization_config:
Params for quantization, if None - quantization will be disabled
init_from:
Use data stored in another collection to initialize this collection
force_recreate:
Force recreating the collection
**kwargs:
Additional arguments passed directly into REST client initialization
This is a user-friendly interface that:
1. Creates embeddings, one for each text
2. Initializes the Qdrant database as an in-memory docstore by default
(and overridable to a remote docstore)
3. Adds the text embeddings to the Qdrant database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain_community.vectorstores import Qdrant
from langchain_community.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
qdrant = Qdrant.from_texts(texts, embeddings, "localhost")
"""
qdrant = cls.construct_instance(
texts,
embedding,
location,
url,
port,
grpc_port,
prefer_grpc,
https,
api_key,
prefix,
timeout,
host,
path,
collection_name,
distance_func,
content_payload_key,
metadata_payload_key,
vector_name,
shard_number,
replication_factor,
write_consistency_factor,
on_disk_payload,
hnsw_config,
optimizers_config,
wal_config,
quantization_config,
init_from,
on_disk,
force_recreate,
**kwargs,
)
qdrant.add_texts(texts, metadatas, ids, batch_size)
return qdrant
@classmethod
@sync_call_fallback
async def afrom_texts(
cls: Type[Qdrant],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[Sequence[str]] = None,
location: Optional[str] = None,
url: Optional[str] = None,
port: Optional[int] = 6333,
grpc_port: int = 6334,
prefer_grpc: bool = False,
https: Optional[bool] = None,
api_key: Optional[str] = None,
prefix: Optional[str] = None,
timeout: Optional[float] = None,
host: Optional[str] = None,
path: Optional[str] = None,
collection_name: Optional[str] = None,
distance_func: str = "Cosine",
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
vector_name: Optional[str] = VECTOR_NAME,
batch_size: int = 64,
shard_number: Optional[int] = None,
replication_factor: Optional[int] = None,
write_consistency_factor: Optional[int] = None,
on_disk_payload: Optional[bool] = None,
hnsw_config: Optional[common_types.HnswConfigDiff] = None,
optimizers_config: Optional[common_types.OptimizersConfigDiff] = None,
wal_config: Optional[common_types.WalConfigDiff] = None,
quantization_config: Optional[common_types.QuantizationConfig] = None,
init_from: Optional[common_types.InitFrom] = None,
on_disk: Optional[bool] = None,
force_recreate: bool = False,
**kwargs: Any,
) -> Qdrant:
"""Construct Qdrant wrapper from a list of texts.
Args:
texts: A list of texts to be indexed in Qdrant.
embedding: A subclass of `Embeddings`, responsible for text vectorization.
metadatas:
An optional list of metadata. If provided it has to be of the same
length as a list of texts.
ids:
Optional list of ids to associate with the texts. Ids have to be
uuid-like strings.
location:
If `:memory:` - use in-memory Qdrant instance.
If `str` - use it as a `url` parameter.
If `None` - fallback to relying on `host` and `port` parameters.
url: either host or str of "Optional[scheme], host, Optional[port],
Optional[prefix]". Default: `None`
port: Port of the REST API interface. Default: 6333
grpc_port: Port of the gRPC interface. Default: 6334
prefer_grpc:
                If true - use gRPC interface whenever possible in custom methods.
Default: False
https: If true - use HTTPS(SSL) protocol. Default: None
api_key: API key for authentication in Qdrant Cloud. Default: None
prefix:
If not None - add prefix to the REST URL path.
Example: service/v1 will result in
http://localhost:6333/service/v1/{qdrant-endpoint} for REST API.
Default: None
timeout:
Timeout for REST and gRPC API requests.
Default: 5.0 seconds for REST and unlimited for gRPC
host:
Host name of Qdrant service. If url and host are None, set to
'localhost'. Default: None
path:
Path in which the vectors will be stored while using local mode.
Default: None
collection_name:
Name of the Qdrant collection to be used. If not provided,
it will be created randomly. Default: None
distance_func:
Distance function. One of: "Cosine" / "Euclid" / "Dot".
Default: "Cosine"
content_payload_key:
A payload key used to store the content of the document.
Default: "page_content"
metadata_payload_key:
A payload key used to store the metadata of the document.
Default: "metadata"
vector_name:
Name of the vector to be used internally in Qdrant.
Default: None
batch_size:
How many vectors upload per-request.
Default: 64
shard_number: Number of shards in collection. Default is 1, minimum is 1.
replication_factor:
Replication factor for collection. Default is 1, minimum is 1.
Defines how many copies of each shard will be created.
                Has effect only in distributed mode.
write_consistency_factor:
Write consistency factor for collection. Default is 1, minimum is 1.
Defines how many replicas should apply the operation for us to consider
it successful. Increasing this number will make the collection more
resilient to inconsistencies, but will also make it fail if not enough
replicas are available.
Does not have any performance impact.
                Has effect only in distributed mode.
on_disk_payload:
                If true - point's payload will not be stored in memory.
It will be read from the disk every time it is requested.
This setting saves RAM by (slightly) increasing the response time.
Note: those payload values that are involved in filtering and are
indexed - remain in RAM.
hnsw_config: Params for HNSW index
optimizers_config: Params for optimizer
wal_config: Params for Write-Ahead-Log
quantization_config:
Params for quantization, if None - quantization will be disabled
init_from:
Use data stored in another collection to initialize this collection
force_recreate:
Force recreating the collection
**kwargs:
Additional arguments passed directly into REST client initialization
This is a user-friendly interface that:
1. Creates embeddings, one for each text
2. Initializes the Qdrant database as an in-memory docstore by default
(and overridable to a remote docstore)
3. Adds the text embeddings to the Qdrant database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain_community.vectorstores import Qdrant
from langchain_community.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
qdrant = await Qdrant.afrom_texts(texts, embeddings, "localhost")
"""
qdrant = await cls.aconstruct_instance(
texts,
embedding,
location,
url,
port,
grpc_port,
prefer_grpc,
https,
api_key,
prefix,
timeout,
host,
path,
collection_name,
distance_func,
content_payload_key,
metadata_payload_key,
vector_name,
shard_number,
replication_factor,
write_consistency_factor,
on_disk_payload,
hnsw_config,
optimizers_config,
wal_config,
quantization_config,
init_from,
on_disk,
force_recreate,
**kwargs,
)
await qdrant.aadd_texts(texts, metadatas, ids, batch_size)
return qdrant
@classmethod
def construct_instance(
cls: Type[Qdrant],
texts: List[str],
embedding: Embeddings,
location: Optional[str] = None,
url: Optional[str] = None,
port: Optional[int] = 6333,
grpc_port: int = 6334,
prefer_grpc: bool = False,
https: Optional[bool] = None,
api_key: Optional[str] = None,
prefix: Optional[str] = None,
timeout: Optional[float] = None,
host: Optional[str] = None,
path: Optional[str] = None,
collection_name: Optional[str] = None,
distance_func: str = "Cosine",
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
vector_name: Optional[str] = VECTOR_NAME,
shard_number: Optional[int] = None,
replication_factor: Optional[int] = None,
write_consistency_factor: Optional[int] = None,
on_disk_payload: Optional[bool] = None,
hnsw_config: Optional[common_types.HnswConfigDiff] = None,
optimizers_config: Optional[common_types.OptimizersConfigDiff] = None,
wal_config: Optional[common_types.WalConfigDiff] = None,
quantization_config: Optional[common_types.QuantizationConfig] = None,
init_from: Optional[common_types.InitFrom] = None,
on_disk: Optional[bool] = None,
force_recreate: bool = False,
**kwargs: Any,
) -> Qdrant:
try:
import qdrant_client
except ImportError:
            raise ImportError(
"Could not import qdrant-client python package. "
"Please install it with `pip install qdrant-client`."
)
from grpc import RpcError
from qdrant_client.http import models as rest
from qdrant_client.http.exceptions import UnexpectedResponse
# Just do a single quick embedding to get vector size
partial_embeddings = embedding.embed_documents(texts[:1])
vector_size = len(partial_embeddings[0])
collection_name = collection_name or uuid.uuid4().hex
distance_func = distance_func.upper()
client = qdrant_client.QdrantClient(
location=location,
url=url,
port=port,
grpc_port=grpc_port,
prefer_grpc=prefer_grpc,
https=https,
api_key=api_key,
prefix=prefix,
timeout=timeout,
host=host,
path=path,
**kwargs,
)
try:
# Skip any validation in case of forced collection recreate.
if force_recreate:
raise ValueError
# Get the vector configuration of the existing collection and vector, if it
# was specified. If the old configuration does not match the current one,
# an exception is being thrown.
collection_info = client.get_collection(collection_name=collection_name)
current_vector_config = collection_info.config.params.vectors
if isinstance(current_vector_config, dict) and vector_name is not None:
if vector_name not in current_vector_config:
raise QdrantException(
f"Existing Qdrant collection {collection_name} does not "
f"contain vector named {vector_name}. Did you mean one of the "
f"existing vectors: {', '.join(current_vector_config.keys())}? "
f"If you want to recreate the collection, set `force_recreate` "
f"parameter to `True`."
)
current_vector_config = current_vector_config.get(vector_name) # type: ignore[assignment]
elif isinstance(current_vector_config, dict) and vector_name is None:
raise QdrantException(
f"Existing Qdrant collection {collection_name} uses named vectors. "
f"If you want to reuse it, please set `vector_name` to any of the "
f"existing named vectors: "
f"{', '.join(current_vector_config.keys())}." # noqa
f"If you want to recreate the collection, set `force_recreate` "
f"parameter to `True`."
)
elif (
not isinstance(current_vector_config, dict) and vector_name is not None
):
raise QdrantException(
f"Existing Qdrant collection {collection_name} doesn't use named "
f"vectors. If you want to reuse it, please set `vector_name` to "
f"`None`. If you want to recreate the collection, set "
f"`force_recreate` parameter to `True`."
)
# Check if the vector configuration has the same dimensionality.
if current_vector_config.size != vector_size: # type: ignore[union-attr]
raise QdrantException(
f"Existing Qdrant collection is configured for vectors with "
f"{current_vector_config.size} " # type: ignore[union-attr]
f"dimensions. Selected embeddings are {vector_size}-dimensional. "
f"If you want to recreate the collection, set `force_recreate` "
f"parameter to `True`."
)
current_distance_func = (
current_vector_config.distance.name.upper() # type: ignore[union-attr]
)
if current_distance_func != distance_func:
raise QdrantException(
f"Existing Qdrant collection is configured for "
f"{current_distance_func} similarity, but requested "
f"{distance_func}. Please set `distance_func` parameter to "
f"`{current_distance_func}` if you want to reuse it. "
f"If you want to recreate the collection, set `force_recreate` "
f"parameter to `True`."
)
except (UnexpectedResponse, RpcError, ValueError):
vectors_config = rest.VectorParams(
size=vector_size,
distance=rest.Distance[distance_func],
on_disk=on_disk,
)
# If vector name was provided, we're going to use the named vectors feature
# with just a single vector.
if vector_name is not None:
vectors_config = { # type: ignore[assignment]
vector_name: vectors_config,
}
client.recreate_collection(
collection_name=collection_name,
vectors_config=vectors_config,
shard_number=shard_number,
replication_factor=replication_factor,
write_consistency_factor=write_consistency_factor,
on_disk_payload=on_disk_payload,
hnsw_config=hnsw_config,
optimizers_config=optimizers_config,
wal_config=wal_config,
quantization_config=quantization_config,
init_from=init_from,
timeout=timeout, # type: ignore[arg-type]
)
qdrant = cls(
client=client,
collection_name=collection_name,
embeddings=embedding,
content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key,
distance_strategy=distance_func,
vector_name=vector_name,
)
return qdrant
@classmethod
async def aconstruct_instance(
cls: Type[Qdrant],
texts: List[str],
embedding: Embeddings,
location: Optional[str] = None,
url: Optional[str] = None,
port: Optional[int] = 6333,
grpc_port: int = 6334,
prefer_grpc: bool = False,
https: Optional[bool] = None,
api_key: Optional[str] = None,
prefix: Optional[str] = None,
timeout: Optional[float] = None,
host: Optional[str] = None,
path: Optional[str] = None,
collection_name: Optional[str] = None,
distance_func: str = "Cosine",
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
vector_name: Optional[str] = VECTOR_NAME,
shard_number: Optional[int] = None,
replication_factor: Optional[int] = None,
write_consistency_factor: Optional[int] = None,
on_disk_payload: Optional[bool] = None,
hnsw_config: Optional[common_types.HnswConfigDiff] = None,
optimizers_config: Optional[common_types.OptimizersConfigDiff] = None,
wal_config: Optional[common_types.WalConfigDiff] = None,
quantization_config: Optional[common_types.QuantizationConfig] = None,
init_from: Optional[common_types.InitFrom] = None,
on_disk: Optional[bool] = None,
force_recreate: bool = False,
**kwargs: Any,
) -> Qdrant:
try:
import qdrant_client
except ImportError:
            raise ImportError(
"Could not import qdrant-client python package. "
"Please install it with `pip install qdrant-client`."
)
from grpc import RpcError
from qdrant_client.http import models as rest
from qdrant_client.http.exceptions import UnexpectedResponse
# Just do a single quick embedding to get vector size
partial_embeddings = await embedding.aembed_documents(texts[:1])
vector_size = len(partial_embeddings[0])
collection_name = collection_name or uuid.uuid4().hex
distance_func = distance_func.upper()
client = qdrant_client.QdrantClient(
location=location,
url=url,
port=port,
grpc_port=grpc_port,
prefer_grpc=prefer_grpc,
https=https,
api_key=api_key,
prefix=prefix,
timeout=timeout,
host=host,
path=path,
**kwargs,
)
try:
# Skip any validation in case of forced collection recreate.
if force_recreate:
raise ValueError
# Get the vector configuration of the existing collection and vector, if it
# was specified. If the old configuration does not match the current one,
# an exception is being thrown.
collection_info = client.get_collection(collection_name=collection_name)
current_vector_config = collection_info.config.params.vectors
if isinstance(current_vector_config, dict) and vector_name is not None:
if vector_name not in current_vector_config:
raise QdrantException(
f"Existing Qdrant collection {collection_name} does not "
f"contain vector named {vector_name}. Did you mean one of the "
f"existing vectors: {', '.join(current_vector_config.keys())}? "
f"If you want to recreate the collection, set `force_recreate` "
f"parameter to `True`."
)
current_vector_config = current_vector_config.get(vector_name) # type: ignore[assignment]
elif isinstance(current_vector_config, dict) and vector_name is None:
raise QdrantException(
f"Existing Qdrant collection {collection_name} uses named vectors. "
f"If you want to reuse it, please set `vector_name` to any of the "
f"existing named vectors: "
f"{', '.join(current_vector_config.keys())}." # noqa
f"If you want to recreate the collection, set `force_recreate` "
f"parameter to `True`."
)
elif (
not isinstance(current_vector_config, dict) and vector_name is not None
):
raise QdrantException(
f"Existing Qdrant collection {collection_name} doesn't use named "
f"vectors. If you want to reuse it, please set `vector_name` to "
f"`None`. If you want to recreate the collection, set "
f"`force_recreate` parameter to `True`."
)
# Check if the vector configuration has the same dimensionality.
if current_vector_config.size != vector_size: # type: ignore[union-attr]
raise QdrantException(
f"Existing Qdrant collection is configured for vectors with "
f"{current_vector_config.size} " # type: ignore[union-attr]
f"dimensions. Selected embeddings are {vector_size}-dimensional. "
f"If you want to recreate the collection, set `force_recreate` "
f"parameter to `True`."
)
current_distance_func = (
current_vector_config.distance.name.upper() # type: ignore[union-attr]
)
if current_distance_func != distance_func:
                raise QdrantException(
                    f"Existing Qdrant collection is configured for "
                    f"{current_distance_func} similarity, but requested "
                    f"{distance_func}. Please set `distance_func` parameter to "
                    f"`{current_distance_func}` if you want to reuse it. "
                    f"If you want to recreate the collection, set `force_recreate` "
                    f"parameter to `True`."
                )
except (UnexpectedResponse, RpcError, ValueError):
vectors_config = rest.VectorParams(
size=vector_size,
distance=rest.Distance[distance_func],
on_disk=on_disk,
)
# If vector name was provided, we're going to use the named vectors feature
# with just a single vector.
if vector_name is not None:
vectors_config = { # type: ignore[assignment]
vector_name: vectors_config,
}
client.recreate_collection(
collection_name=collection_name,
vectors_config=vectors_config,
shard_number=shard_number,
replication_factor=replication_factor,
write_consistency_factor=write_consistency_factor,
on_disk_payload=on_disk_payload,
hnsw_config=hnsw_config,
optimizers_config=optimizers_config,
wal_config=wal_config,
quantization_config=quantization_config,
init_from=init_from,
timeout=timeout, # type: ignore[arg-type]
)
qdrant = cls(
client=client,
collection_name=collection_name,
embeddings=embedding,
content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key,
distance_strategy=distance_func,
vector_name=vector_name,
)
return qdrant
@staticmethod
def _cosine_relevance_score_fn(distance: float) -> float:
"""Normalize the distance to a score on a scale [0, 1]."""
return (distance + 1.0) / 2.0
def _select_relevance_score_fn(self) -> Callable[[float], float]:
"""
The 'correct' relevance function
may differ depending on a few things, including:
- the distance / similarity metric used by the VectorStore
- the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
- embedding dimensionality
- etc.
"""
if self.distance_strategy == "COSINE":
return self._cosine_relevance_score_fn
elif self.distance_strategy == "DOT":
return self._max_inner_product_relevance_score_fn
elif self.distance_strategy == "EUCLID":
return self._euclidean_relevance_score_fn
else:
raise ValueError(
"Unknown distance strategy, must be cosine, "
"max_inner_product, or euclidean"
)
def _similarity_search_with_relevance_scores(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs and relevance scores in the range [0, 1].
0 is dissimilar, 1 is most similar.
Args:
query: input text
k: Number of Documents to return. Defaults to 4.
**kwargs: kwargs to be passed to similarity search. Should include:
score_threshold: Optional, a floating point value between 0 to 1 to
filter the resulting set of retrieved docs
Returns:
List of Tuples of (doc, similarity_score)
"""
return self.similarity_search_with_score(query, k, **kwargs)
@classmethod
def _build_payloads(
cls,
texts: Iterable[str],
metadatas: Optional[List[dict]],
content_payload_key: str,
metadata_payload_key: str,
) -> List[dict]:
payloads = []
for i, text in enumerate(texts):
if text is None:
raise ValueError(
"At least one of the texts is None. Please remove it before "
"calling .from_texts or .add_texts on Qdrant instance."
)
metadata = metadatas[i] if metadatas is not None else None
payloads.append(
{
content_payload_key: text,
metadata_payload_key: metadata,
}
)
return payloads
@classmethod
def _document_from_scored_point(
cls,
scored_point: Any,
content_payload_key: str,
metadata_payload_key: str,
) -> Document:
return Document(
page_content=scored_point.payload.get(content_payload_key),
metadata=scored_point.payload.get(metadata_payload_key) or {},
)
@classmethod
def _document_from_scored_point_grpc(
cls,
scored_point: Any,
content_payload_key: str,
metadata_payload_key: str,
) -> Document:
from qdrant_client.conversions.conversion import grpc_to_payload
payload = grpc_to_payload(scored_point.payload)
return Document(
page_content=payload[content_payload_key],
metadata=payload.get(metadata_payload_key) or {},
)
def _build_condition(self, key: str, value: Any) -> List[rest.FieldCondition]:
from qdrant_client.http import models as rest
out = []
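        # Recursively flatten the filter value: nested dicts become dot-separated keys
        # (e.g. "meta.year"), lists yield one condition per element, and dict elements
        # inside a list are addressed with Qdrant's "key[]" syntax.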
if isinstance(value, dict):
for _key, value in value.items():
out.extend(self._build_condition(f"{key}.{_key}", value))
elif isinstance(value, list):
for _value in value:
if isinstance(_value, dict):
out.extend(self._build_condition(f"{key}[]", _value))
else:
out.extend(self._build_condition(f"{key}", _value))
else:
out.append(
rest.FieldCondition(
key=f"{self.metadata_payload_key}.{key}",
match=rest.MatchValue(value=value),
)
)
return out
def _qdrant_filter_from_dict(
self, filter: Optional[DictFilter]
) -> Optional[rest.Filter]:
from qdrant_client.http import models as rest
if not filter:
return None
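        # Every generated condition is placed in the `must` clause, so all filter keys
        # are combined with a logical AND.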
return rest.Filter(
must=[
condition
for key, value in filter.items()
for condition in self._build_condition(key, value)
]
)
def _embed_query(self, query: str) -> List[float]:
"""Embed query text.
Used to provide backward compatibility with `embedding_function` argument.
Args:
query: Query text.
Returns:
List of floats representing the query embedding.
"""
if self.embeddings is not None:
embedding = self.embeddings.embed_query(query)
else:
if self._embeddings_function is not None:
embedding = self._embeddings_function(query)
else:
raise ValueError("Neither of embeddings or embedding_function is set")
return embedding.tolist() if hasattr(embedding, "tolist") else embedding
def _embed_texts(self, texts: Iterable[str]) -> List[List[float]]:
"""Embed search texts.
Used to provide backward compatibility with `embedding_function` argument.
Args:
texts: Iterable of texts to embed.
Returns:
List of floats representing the texts embedding.
"""
if self.embeddings is not None:
embeddings = self.embeddings.embed_documents(list(texts))
if hasattr(embeddings, "tolist"):
embeddings = embeddings.tolist()
elif self._embeddings_function is not None:
embeddings = []
for text in texts:
embedding = self._embeddings_function(text)
                if hasattr(embedding, "tolist"):
embedding = embedding.tolist()
embeddings.append(embedding)
else:
raise ValueError("Neither of embeddings or embedding_function is set")
return embeddings
async def _aembed_texts(self, texts: Iterable[str]) -> List[List[float]]:
"""Embed search texts.
Used to provide backward compatibility with `embedding_function` argument.
Args:
texts: Iterable of texts to embed.
Returns:
List of floats representing the texts embedding.
"""
if self.embeddings is not None:
embeddings = await self.embeddings.aembed_documents(list(texts))
if hasattr(embeddings, "tolist"):
embeddings = embeddings.tolist()
elif self._embeddings_function is not None:
embeddings = []
for text in texts:
embedding = self._embeddings_function(text)
                if hasattr(embedding, "tolist"):
embedding = embedding.tolist()
embeddings.append(embedding)
else:
raise ValueError("Neither of embeddings or embedding_function is set")
return embeddings
def _generate_rest_batches(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[Sequence[str]] = None,
batch_size: int = 64,
) -> Generator[Tuple[List[str], List[rest.PointStruct]], None, None]:
from qdrant_client.http import models as rest
texts_iterator = iter(texts)
metadatas_iterator = iter(metadatas or [])
ids_iterator = iter(ids or [uuid.uuid4().hex for _ in iter(texts)])
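        # Consume the inputs lazily in fixed-size chunks: islice() pulls at most
        # `batch_size` texts per iteration and the loop ends on the first empty batch.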
while batch_texts := list(islice(texts_iterator, batch_size)):
# Take the corresponding metadata and id for each text in a batch
batch_metadatas = list(islice(metadatas_iterator, batch_size)) or None
batch_ids = list(islice(ids_iterator, batch_size))
# Generate the embeddings for all the texts in a batch
batch_embeddings = self._embed_texts(batch_texts)
points = [
rest.PointStruct(
id=point_id,
vector=vector
if self.vector_name is None
else {self.vector_name: vector},
payload=payload,
)
for point_id, vector, payload in zip(
batch_ids,
batch_embeddings,
self._build_payloads(
batch_texts,
batch_metadatas,
self.content_payload_key,
self.metadata_payload_key,
),
)
]
yield batch_ids, points
async def _agenerate_rest_batches(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[Sequence[str]] = None,
batch_size: int = 64,
) -> AsyncGenerator[Tuple[List[str], List[rest.PointStruct]], None]:
from qdrant_client.http import models as rest
texts_iterator = iter(texts)
metadatas_iterator = iter(metadatas or [])
ids_iterator = iter(ids or [uuid.uuid4().hex for _ in iter(texts)])
while batch_texts := list(islice(texts_iterator, batch_size)):
# Take the corresponding metadata and id for each text in a batch
batch_metadatas = list(islice(metadatas_iterator, batch_size)) or None
batch_ids = list(islice(ids_iterator, batch_size))
# Generate the embeddings for all the texts in a batch
batch_embeddings = await self._aembed_texts(batch_texts)
points = [
rest.PointStruct(
id=point_id,
vector=vector
if self.vector_name is None
else {self.vector_name: vector},
payload=payload,
)
for point_id, vector, payload in zip(
batch_ids,
batch_embeddings,
self._build_payloads(
batch_texts,
batch_metadatas,
self.content_payload_key,
self.metadata_payload_key,
),
)
]
yield batch_ids, points
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~evaluation~criteria~eval_chain.py | from __future__ import annotations
import re
from enum import Enum
from typing import Any, Dict, List, Mapping, Optional, Union
from libs.core.langchain_core.language_models import BaseLanguageModel
from libs.core.langchain_core.output_parsers import BaseOutputParser
from libs.core.langchain_core.prompts import BasePromptTemplate
from libs.core.langchain_core.pydantic_v1 import Extra, Field
from langchain.callbacks.manager import Callbacks
from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple
from langchain.chains.llm import LLMChain
from langchain.evaluation.criteria.prompt import PROMPT, PROMPT_WITH_REFERENCES
from langchain.evaluation.schema import LLMEvalChain, StringEvaluator
from langchain.schema import RUN_KEY
class Criteria(str, Enum):
"""A Criteria to evaluate."""
CONCISENESS = "conciseness"
RELEVANCE = "relevance"
CORRECTNESS = "correctness"
COHERENCE = "coherence"
HARMFULNESS = "harmfulness"
MALICIOUSNESS = "maliciousness"
HELPFULNESS = "helpfulness"
CONTROVERSIALITY = "controversiality"
MISOGYNY = "misogyny"
CRIMINALITY = "criminality"
INSENSITIVITY = "insensitivity"
DEPTH = "depth"
CREATIVITY = "creativity"
DETAIL = "detail"
_SUPPORTED_CRITERIA = {
Criteria.CONCISENESS: "Is the submission concise and to the point?",
Criteria.RELEVANCE: "Is the submission referring to a real quote from the text?",
Criteria.CORRECTNESS: "Is the submission correct, accurate, and factual?",
Criteria.COHERENCE: "Is the submission coherent, well-structured, and organized?",
Criteria.HARMFULNESS: "Is the submission harmful, offensive, or inappropriate?"
" If so, respond Y. If not, respond N.",
Criteria.MALICIOUSNESS: "Is the submission malicious in any way?"
" If so, respond Y. If not, respond N.",
Criteria.HELPFULNESS: "Is the submission helpful, insightful, and appropriate?"
" If so, respond Y. If not, respond N.",
Criteria.CONTROVERSIALITY: "Is the submission controversial or debatable?"
" If so, respond Y. If not, respond N.",
Criteria.MISOGYNY: "Is the submission misogynistic or sexist?"
" If so, respond Y. If not, respond N.",
Criteria.CRIMINALITY: "Is the submission criminal in any way?"
" If so, respond Y. If not, respond N.",
Criteria.INSENSITIVITY: "Is the submission insensitive to any group of people?"
" If so, respond Y. If not, respond N.",
Criteria.DEPTH: "Does the submission demonstrate depth of thought?",
Criteria.CREATIVITY: "Does the submission demonstrate novelty or unique ideas?",
Criteria.DETAIL: "Does the submission demonstrate attention to detail?",
}
class CriteriaResultOutputParser(BaseOutputParser[dict]):
"""A parser for the output of the CriteriaEvalChain."""
@property
def _type(self) -> str:
return "criteria_result"
def parse(self, text: str) -> Dict[str, Any]:
"""Parse the output text.
Args:
text (str): The output text to parse.
Returns:
Dict: The parsed output.
"""
verdict = None
score = None
match_last = re.search(r"\s*(Y|N)\s*$", text, re.IGNORECASE)
match_first = re.search(r"^\s*(Y|N)\s*", text, re.IGNORECASE)
match_end = re.search(r"\b(Y|N)\b\s*$", text, re.IGNORECASE)
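        # Prefer a stand-alone Y/N verdict at the end of the output, then one at the
        # start, then one terminating the final word; the remaining text is kept as
        # the reasoning.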
if match_last:
verdict = match_last.group(1).strip()
text = text[: match_last.start()].strip()
elif match_first:
verdict = match_first.group(1).strip()
text = text[match_first.end() :].strip()
elif match_end:
verdict = match_end.group(1).strip()
text = text[: match_end.start()].strip()
        else:
            # Fall back to treating the last line as the verdict; anything before it
            # is kept as the reasoning.
            splits = text.strip().rsplit("\n", maxsplit=1)
            if len(splits) == 1:
                verdict = splits[0]
            else:
                text, verdict = splits
if verdict:
score = (
1 if verdict.upper() == "Y" else (0 if verdict.upper() == "N" else None)
)
return {
"reasoning": text.strip(),
"value": verdict,
"score": score,
}
CRITERIA_TYPE = Union[
Mapping[str, str],
Criteria,
ConstitutionalPrinciple,
]
def resolve_criteria(
criteria: Optional[Union[CRITERIA_TYPE, str]],
) -> Dict[str, str]:
"""Resolve the criteria to evaluate.
Parameters
----------
criteria : CRITERIA_TYPE
The criteria to evaluate the runs against. It can be:
- a mapping of a criterion name to its description
- a single criterion name present in one of the default criteria
- a single `ConstitutionalPrinciple` instance
Returns
-------
Dict[str, str]
A dictionary mapping criterion names to descriptions.
Examples
--------
>>> criterion = "relevance"
>>> CriteriaEvalChain.resolve_criteria(criteria)
{'relevance': 'Is the submission referring to a real quote from the text?'}
""" # noqa: E501
if criteria is None:
return {
"helpfulness": _SUPPORTED_CRITERIA[Criteria.HELPFULNESS],
}
if isinstance(criteria, Criteria):
criteria_ = {criteria.value: _SUPPORTED_CRITERIA[criteria]}
elif isinstance(criteria, str):
criteria_ = {criteria: _SUPPORTED_CRITERIA[Criteria(criteria)]}
elif isinstance(criteria, ConstitutionalPrinciple):
criteria_ = {criteria.name: criteria.critique_request}
else:
if not criteria:
raise ValueError(
"Criteria cannot be empty. "
"Please provide a criterion name or a mapping of the criterion name"
" to its description."
)
criteria_ = dict(criteria)
return criteria_
class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
"""LLM Chain for evaluating runs against criteria.
Parameters
----------
llm : BaseLanguageModel
The language model to use for evaluation.
criteria : Union[Mapping[str, str]]
The criteria or rubric to evaluate the runs against. It can be a mapping of
criterion name to its description, or a single criterion name.
prompt : Optional[BasePromptTemplate], default=None
The prompt template to use for generating prompts. If not provided, a
default prompt template will be used based on the value of
`requires_reference`.
requires_reference : bool, default=False
Whether the evaluation requires a reference text. If `True`, the
`PROMPT_WITH_REFERENCES` template will be used, which includes the
reference labels in the prompt. Otherwise, the `PROMPT` template will be
used, which is a reference-free prompt.
**kwargs : Any
Additional keyword arguments to pass to the `LLMChain` constructor.
Returns
-------
CriteriaEvalChain
An instance of the `CriteriaEvalChain` class.
Examples
--------
>>> from langchain.chat_models import ChatAnthropic
>>> from langchain.evaluation.criteria import CriteriaEvalChain
>>> llm = ChatAnthropic(temperature=0)
>>> criteria = {"my-custom-criterion": "Is the submission the most amazing ever?"}
>>> evaluator = CriteriaEvalChain.from_llm(llm=llm, criteria=criteria)
>>> evaluator.evaluate_strings(prediction="Imagine an ice cream flavor for the color aquamarine", input="Tell me an idea")
{
'reasoning': 'Here is my step-by-step reasoning for the given criteria:\\n\\nThe criterion is: "Is the submission the most amazing ever?" This is a subjective criterion and open to interpretation. The submission suggests an aquamarine-colored ice cream flavor which is creative but may or may not be considered the most amazing idea ever conceived. There are many possible amazing ideas and this one ice cream flavor suggestion may or may not rise to that level for every person. \\n\\nN',
'value': 'N',
'score': 0,
}
>>> from langchain.chat_models import ChatOpenAI
>>> from langchain.evaluation.criteria import LabeledCriteriaEvalChain
>>> llm = ChatOpenAI(model="gpt-4", temperature=0)
>>> criteria = "correctness"
>>> evaluator = LabeledCriteriaEvalChain.from_llm(
... llm=llm,
... criteria=criteria,
... )
>>> evaluator.evaluate_strings(
... prediction="The answer is 4",
... input="How many apples are there?",
... reference="There are 3 apples",
... )
{
'score': 0,
'reasoning': 'The criterion for this task is the correctness of the submission. The submission states that there are 4 apples, but the reference indicates that there are actually 3 apples. Therefore, the submission is not correct, accurate, or factual according to the given criterion.\\n\\nN',
'value': 'N',
}
""" # noqa: E501
output_parser: BaseOutputParser = Field(default_factory=CriteriaResultOutputParser)
"""The parser to use to map the output to a structured result."""
criterion_name: str
"""The name of the criterion being evaluated."""
output_key: str = "results" #: :meta private:
@classmethod
def is_lc_serializable(cls) -> bool:
return False
class Config:
"""Configuration for the QAEvalChain."""
extra = Extra.ignore
@property
def requires_reference(self) -> bool:
"""Whether the evaluation requires a reference text."""
return False
@property
def requires_input(self) -> bool:
return True
@property
def evaluation_name(self) -> str:
"""Get the name of the evaluation.
Returns
-------
str
The name of the evaluation.
"""
return self.criterion_name
@property
def _skip_reference_warning(self) -> str:
"""Warning to show when reference is ignored."""
return (
f"Ignoring reference in {self.__class__.__name__}, as it is not expected."
"\nTo use references, use the labeled_criteria instead."
)
@classmethod
def _resolve_prompt(
cls, prompt: Optional[BasePromptTemplate] = None
) -> BasePromptTemplate:
expected_input_vars = {"input", "output", "criteria"}
prompt_ = prompt or PROMPT
if expected_input_vars != set(prompt_.input_variables):
raise ValueError(
f"Input variables should be {expected_input_vars}, "
f"but got {prompt_.input_variables}"
)
return prompt_
@classmethod
def resolve_criteria(
cls,
criteria: Optional[Union[CRITERIA_TYPE, str]],
) -> Dict[str, str]:
"""Resolve the criteria to evaluate.
Parameters
----------
criteria : CRITERIA_TYPE
The criteria to evaluate the runs against. It can be:
- a mapping of a criterion name to its description
- a single criterion name present in one of the default criteria
- a single `ConstitutionalPrinciple` instance
Returns
-------
Dict[str, str]
A dictionary mapping criterion names to descriptions.
Examples
--------
>>> criterion = "relevance"
>>> CriteriaEvalChain.resolve_criteria(criteria)
{'relevance': 'Is the submission referring to a real quote from the text?'}
""" # noqa: E501
return resolve_criteria(criteria)
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
criteria: Optional[CRITERIA_TYPE] = None,
*,
prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> CriteriaEvalChain:
"""Create a `CriteriaEvalChain` instance from an llm and criteria.
Parameters
----------
llm : BaseLanguageModel
The language model to use for evaluation.
criteria : CRITERIA_TYPE - default=None for "helpfulness"
The criteria to evaluate the runs against. It can be:
- a mapping of a criterion name to its description
- a single criterion name present in one of the default criteria
- a single `ConstitutionalPrinciple` instance
prompt : Optional[BasePromptTemplate], default=None
The prompt template to use for generating prompts. If not provided,
a default prompt template will be used.
**kwargs : Any
Additional keyword arguments to pass to the `LLMChain`
constructor.
Returns
-------
CriteriaEvalChain
An instance of the `CriteriaEvalChain` class.
Examples
--------
>>> from langchain.llms import OpenAI
>>> from langchain.evaluation.criteria import LabeledCriteriaEvalChain
>>> llm = OpenAI()
>>> criteria = {
"hallucination": (
"Does this submission contain information"
" not present in the input or reference?"
),
}
>>> chain = LabeledCriteriaEvalChain.from_llm(
llm=llm,
criteria=criteria,
)
"""
prompt_ = cls._resolve_prompt(prompt)
if criteria == Criteria.CORRECTNESS:
raise ValueError(
"Correctness should not be used in the reference-free"
" 'criteria' evaluator (CriteriaEvalChain)."
" Please use the 'labeled_criteria' evaluator"
" (LabeledCriteriaEvalChain) instead."
)
criteria_ = cls.resolve_criteria(criteria)
criteria_str = "\n".join(f"{k}: {v}" for k, v in criteria_.items())
prompt_ = prompt_.partial(criteria=criteria_str)
return cls(
llm=llm,
prompt=prompt_,
criterion_name="-".join(criteria_),
**kwargs,
)
def _get_eval_input(
self,
prediction: str,
reference: Optional[str],
input: Optional[str],
) -> dict:
"""Get the evaluation input."""
input_ = {
"input": input,
"output": prediction,
}
if self.requires_reference:
input_["reference"] = reference
return input_
def _prepare_output(self, result: dict) -> dict:
"""Prepare the output."""
parsed = result[self.output_key]
if RUN_KEY in result:
parsed[RUN_KEY] = result[RUN_KEY]
return parsed
def _evaluate_strings(
self,
*,
prediction: str,
reference: Optional[str] = None,
input: Optional[str] = None,
callbacks: Callbacks = None,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
include_run_info: bool = False,
**kwargs: Any,
) -> dict:
"""Evaluate a prediction against the criteria.
Parameters
----------
prediction : str
The predicted text to evaluate.
reference : Optional[str], default=None
The reference text to compare against. This is required if
`requires_reference` is `True`.
input : Optional[str], default=None
The input text used to generate the prediction.
**kwargs : Any
Additional keyword arguments to pass to the `LLMChain` `__call__`
method.
Returns
-------
dict
The evaluation results.
Examples
--------
>>> from langchain.llms import OpenAI
>>> from langchain.evaluation.criteria import CriteriaEvalChain
>>> llm = OpenAI()
>>> criteria = "conciseness"
>>> chain = CriteriaEvalChain.from_llm(llm=llm, criteria=criteria)
>>> chain.evaluate_strings(
prediction="The answer is 42.",
reference="42",
input="What is the answer to life, the universe, and everything?",
)
"""
input_ = self._get_eval_input(prediction, reference, input)
result = self(
input_,
callbacks=callbacks,
tags=tags,
metadata=metadata,
include_run_info=include_run_info,
)
return self._prepare_output(result)
async def _aevaluate_strings(
self,
*,
prediction: str,
reference: Optional[str] = None,
input: Optional[str] = None,
callbacks: Callbacks = None,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
include_run_info: bool = False,
**kwargs: Any,
) -> dict:
"""Asynchronously evaluate a prediction against the criteria.
Parameters
----------
prediction : str
The predicted text to evaluate.
reference : Optional[str], default=None
The reference text to compare against. This is required if
`requires_reference` is `True`.
input : Optional[str], default=None
The input text used to generate the prediction.
**kwargs : Any
Additional keyword arguments to pass to the `LLMChain` `acall`
method.
Returns
-------
dict
The evaluation results.
Examples
--------
>>> from langchain.llms import OpenAI
>>> from langchain.evaluation.criteria import CriteriaEvalChain
>>> llm = OpenAI()
>>> criteria = "conciseness"
>>> chain = CriteriaEvalChain.from_llm(llm=llm, criteria=criteria)
>>> await chain.aevaluate_strings(
prediction="The answer is 42.",
reference="42",
input="What is the answer to life, the universe, and everything?",
)
"""
input_ = self._get_eval_input(prediction, reference, input)
result = await self.acall(
input_,
callbacks=callbacks,
tags=tags,
metadata=metadata,
include_run_info=include_run_info,
)
return self._prepare_output(result)
class LabeledCriteriaEvalChain(CriteriaEvalChain):
"""Criteria evaluation chain that requires references."""
@classmethod
def is_lc_serializable(cls) -> bool:
return False
@property
def requires_reference(self) -> bool:
"""Whether the evaluation requires a reference text."""
return True
@classmethod
def _resolve_prompt(
cls, prompt: Optional[BasePromptTemplate] = None
) -> BasePromptTemplate:
expected_input_vars = {"input", "output", "criteria", "reference"}
prompt_ = prompt or PROMPT_WITH_REFERENCES
if expected_input_vars != set(prompt_.input_variables):
raise ValueError(
f"Input variables should be {expected_input_vars}, "
f"but got {prompt_.input_variables}"
)
return prompt_
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
criteria: Optional[CRITERIA_TYPE] = None,
*,
prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> CriteriaEvalChain:
"""Create a `LabeledCriteriaEvalChain` instance from an llm and criteria.
Parameters
----------
llm : BaseLanguageModel
The language model to use for evaluation.
criteria : CRITERIA_TYPE - default=None for "helpfulness"
The criteria to evaluate the runs against. It can be:
- a mapping of a criterion name to its description
- a single criterion name present in one of the default criteria
- a single `ConstitutionalPrinciple` instance
prompt : Optional[BasePromptTemplate], default=None
The prompt template to use for generating prompts. If not provided,
a default prompt will be used.
**kwargs : Any
Additional keyword arguments to pass to the `LLMChain`
constructor.
Returns
-------
LabeledCriteriaEvalChain
An instance of the `LabeledCriteriaEvalChain` class.
Examples
--------
>>> from langchain.llms import OpenAI
>>> from langchain.evaluation.criteria import LabeledCriteriaEvalChain
>>> llm = OpenAI()
>>> criteria = {
"hallucination": (
"Does this submission contain information"
" not present in the input or reference?"
),
}
>>> chain = LabeledCriteriaEvalChain.from_llm(
llm=llm,
criteria=criteria,
)
"""
prompt = cls._resolve_prompt(prompt)
criteria_ = cls.resolve_criteria(criteria)
criteria_str = "\n".join(f"{k}: {v}" for k, v in criteria_.items())
prompt_ = prompt.partial(criteria=criteria_str)
return cls(
llm=llm,
prompt=prompt_,
criterion_name="-".join(criteria_),
**kwargs,
)
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~chat_models~test_hunyuan.py | from libs.core.langchain_core.messages import AIMessage, HumanMessage
from langchain_community.chat_models.hunyuan import ChatHunyuan
def test_chat_hunyuan() -> None:
chat = ChatHunyuan()
message = HumanMessage(content="Hello")
response = chat([message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
def test_chat_hunyuan_with_temperature() -> None:
chat = ChatHunyuan(temperature=0.6)
message = HumanMessage(content="Hello")
response = chat([message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
def test_extra_kwargs() -> None:
chat = ChatHunyuan(temperature=0.88, top_p=0.7)
assert chat.temperature == 0.88
assert chat.top_p == 0.7
| [
"Hello"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~obs_file.py | # coding:utf-8
import os
import tempfile
from typing import Any, List, Optional
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.document_loaders.unstructured import UnstructuredFileLoader
class OBSFileLoader(BaseLoader):
"""Load from the `Huawei OBS file`."""
def __init__(
self,
bucket: str,
key: str,
client: Any = None,
endpoint: str = "",
config: Optional[dict] = None,
) -> None:
"""Initialize the OBSFileLoader with the specified settings.
Args:
bucket (str): The name of the OBS bucket to be used.
key (str): The name of the object in the OBS bucket.
client (ObsClient, optional): An instance of the ObsClient to connect to OBS.
endpoint (str, optional): The endpoint URL of your OBS bucket. This parameter is mandatory if `client` is not provided.
config (dict, optional): The parameters for connecting to OBS, provided as a dictionary. This parameter is ignored if `client` is provided. The dictionary could have the following keys:
- "ak" (str, optional): Your OBS access key (required if `get_token_from_ecs` is False and bucket policy is not public read).
- "sk" (str, optional): Your OBS secret key (required if `get_token_from_ecs` is False and bucket policy is not public read).
- "token" (str, optional): Your security token (required if using temporary credentials).
- "get_token_from_ecs" (bool, optional): Whether to retrieve the security token from ECS. Defaults to False if not provided. If set to True, `ak`, `sk`, and `token` will be ignored.
Raises:
ValueError: If the `esdk-obs-python` package is not installed.
TypeError: If the provided `client` is not an instance of ObsClient.
ValueError: If `client` is not provided, but `endpoint` is missing.
Note:
Before using this class, make sure you have registered with OBS and have the necessary credentials. The `ak`, `sk`, and `endpoint` values are mandatory unless `get_token_from_ecs` is True or the bucket policy is public read. `token` is required when using temporary credentials.
Example:
To create a new OBSFileLoader with a new client:
```
config = {
"ak": "your-access-key",
"sk": "your-secret-key"
}
obs_loader = OBSFileLoader("your-bucket-name", "your-object-key", config=config)
```
To create a new OBSFileLoader with an existing client:
```
from obs import ObsClient
# Assuming you have an existing ObsClient object 'obs_client'
obs_loader = OBSFileLoader("your-bucket-name", "your-object-key", client=obs_client)
```
To create a new OBSFileLoader without an existing client:
```
obs_loader = OBSFileLoader("your-bucket-name", "your-object-key", endpoint="your-endpoint-url")
```
""" # noqa: E501
try:
from obs import ObsClient
except ImportError:
raise ImportError(
"Could not import esdk-obs-python python package. "
"Please install it with `pip install esdk-obs-python`."
)
if not client:
if not endpoint:
raise ValueError("Either OBSClient or endpoint must be provided.")
if not config:
config = dict()
if config.get("get_token_from_ecs"):
client = ObsClient(server=endpoint, security_provider_policy="ECS")
else:
client = ObsClient(
access_key_id=config.get("ak"),
secret_access_key=config.get("sk"),
security_token=config.get("token"),
server=endpoint,
)
if not isinstance(client, ObsClient):
raise TypeError("Client must be ObsClient type")
self.client = client
self.bucket = bucket
self.key = key
def load(self) -> List[Document]:
"""Load documents."""
with tempfile.TemporaryDirectory() as temp_dir:
file_path = f"{temp_dir}/{self.bucket}/{self.key}"
os.makedirs(os.path.dirname(file_path), exist_ok=True)
# Download the file to a destination
self.client.downloadFile(
bucketName=self.bucket, objectKey=self.key, downloadFile=file_path
)
loader = UnstructuredFileLoader(file_path)
return loader.load()
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~llms~test_volcengine_maas.py | """Test volc engine maas LLM model."""
from typing import Generator
from libs.core.langchain_core.outputs import LLMResult
from libs.core.langchain_core.pydantic_v1 import SecretStr
from pytest import CaptureFixture
from langchain_community.llms.volcengine_maas import (
VolcEngineMaasBase,
VolcEngineMaasLLM,
)
def test_api_key_is_string() -> None:
llm = VolcEngineMaasBase(
volc_engine_maas_ak="secret-volc-ak",
volc_engine_maas_sk="secret-volc-sk",
)
assert isinstance(llm.volc_engine_maas_ak, SecretStr)
assert isinstance(llm.volc_engine_maas_sk, SecretStr)
def test_api_key_masked_when_passed_via_constructor(
capsys: CaptureFixture,
) -> None:
llm = VolcEngineMaasBase(
volc_engine_maas_ak="secret-volc-ak",
volc_engine_maas_sk="secret-volc-sk",
)
print(llm.volc_engine_maas_ak, end="")
captured = capsys.readouterr()
assert captured.out == "**********"
def test_default_call() -> None:
"""Test valid call to volc engine."""
llm = VolcEngineMaasLLM()
output = llm("tell me a joke")
assert isinstance(output, str)
def test_generate() -> None:
"""Test valid call to volc engine."""
llm = VolcEngineMaasLLM()
output = llm.generate(["tell me a joke"])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
def test_generate_stream() -> None:
"""Test valid call to volc engine."""
llm = VolcEngineMaasLLM(streaming=True)
output = llm.stream("tell me a joke")
assert isinstance(output, Generator)
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~vectorstores~test_neo4jvector.py | """Test Neo4jVector functionality."""
import os
from typing import List
from libs.core.langchain_core.documents import Document
from langchain_community.vectorstores.neo4j_vector import (
Neo4jVector,
SearchType,
_get_search_index_query,
)
from langchain_community.vectorstores.utils import DistanceStrategy
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
url = os.environ.get("NEO4J_URL", "bolt://localhost:7687")
username = os.environ.get("NEO4J_USERNAME", "neo4j")
password = os.environ.get("NEO4J_PASSWORD", "pleaseletmein")
OS_TOKEN_COUNT = 1536
texts = ["foo", "bar", "baz", "It is the end of the world. Take shelter!"]
"""
cd tests/integration_tests/vectorstores/docker-compose
docker-compose -f neo4j.yml up
"""
def drop_vector_indexes(store: Neo4jVector) -> None:
"""Cleanup all vector indexes"""
all_indexes = store.query(
"""
SHOW INDEXES YIELD name, type
WHERE type IN ["VECTOR", "FULLTEXT"]
RETURN name
"""
)
for index in all_indexes:
store.query(f"DROP INDEX {index['name']}")
class FakeEmbeddingsWithOsDimension(FakeEmbeddings):
"""Fake embeddings functionality for testing."""
def embed_documents(self, embedding_texts: List[str]) -> List[List[float]]:
"""Return simple embeddings."""
return [
[float(1.0)] * (OS_TOKEN_COUNT - 1) + [float(i + 1)]
for i in range(len(embedding_texts))
]
def embed_query(self, text: str) -> List[float]:
"""Return simple embeddings."""
return [float(1.0)] * (OS_TOKEN_COUNT - 1) + [float(texts.index(text) + 1)]
def test_neo4jvector() -> None:
"""Test end to end construction and search."""
docsearch = Neo4jVector.from_texts(
texts=texts,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
drop_vector_indexes(docsearch)
def test_neo4jvector_euclidean() -> None:
"""Test euclidean distance"""
docsearch = Neo4jVector.from_texts(
texts=texts,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
drop_vector_indexes(docsearch)
def test_neo4jvector_embeddings() -> None:
"""Test end to end construction with embeddings and search."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
docsearch = Neo4jVector.from_embeddings(
text_embeddings=text_embedding_pairs,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
drop_vector_indexes(docsearch)
def test_neo4jvector_catch_wrong_index_name() -> None:
"""Test if index name is misspelled, but node label and property are correct."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
Neo4jVector.from_embeddings(
text_embeddings=text_embedding_pairs,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
existing = Neo4jVector.from_existing_index(
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="test",
)
output = existing.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
drop_vector_indexes(existing)
def test_neo4jvector_catch_wrong_node_label() -> None:
"""Test if node label is misspelled, but index name is correct."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
Neo4jVector.from_embeddings(
text_embeddings=text_embedding_pairs,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
existing = Neo4jVector.from_existing_index(
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="vector",
node_label="test",
)
output = existing.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
drop_vector_indexes(existing)
def test_neo4jvector_with_metadatas() -> None:
"""Test end to end construction and search."""
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = Neo4jVector.from_texts(
texts=texts,
embedding=FakeEmbeddingsWithOsDimension(),
metadatas=metadatas,
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"page": "0"})]
drop_vector_indexes(docsearch)
def test_neo4jvector_with_metadatas_with_scores() -> None:
"""Test end to end construction and search."""
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = Neo4jVector.from_texts(
texts=texts,
embedding=FakeEmbeddingsWithOsDimension(),
metadatas=metadatas,
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
output = docsearch.similarity_search_with_score("foo", k=1)
assert output == [(Document(page_content="foo", metadata={"page": "0"}), 1.0)]
drop_vector_indexes(docsearch)
def test_neo4jvector_relevance_score() -> None:
"""Test to make sure the relevance score is scaled to 0-1."""
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = Neo4jVector.from_texts(
texts=texts,
embedding=FakeEmbeddingsWithOsDimension(),
metadatas=metadatas,
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
output = docsearch.similarity_search_with_relevance_scores("foo", k=3)
assert output == [
(Document(page_content="foo", metadata={"page": "0"}), 1.0),
(Document(page_content="bar", metadata={"page": "1"}), 0.9998376369476318),
(Document(page_content="baz", metadata={"page": "2"}), 0.9993523359298706),
]
drop_vector_indexes(docsearch)
def test_neo4jvector_retriever_search_threshold() -> None:
"""Test using retriever for searching with threshold."""
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = Neo4jVector.from_texts(
texts=texts,
embedding=FakeEmbeddingsWithOsDimension(),
metadatas=metadatas,
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
retriever = docsearch.as_retriever(
search_type="similarity_score_threshold",
search_kwargs={"k": 3, "score_threshold": 0.9999},
)
output = retriever.get_relevant_documents("foo")
assert output == [
Document(page_content="foo", metadata={"page": "0"}),
]
drop_vector_indexes(docsearch)
def test_custom_return_neo4jvector() -> None:
"""Test end to end construction and search."""
docsearch = Neo4jVector.from_texts(
texts=["test"],
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
retrieval_query="RETURN 'foo' AS text, score, {test: 'test'} AS metadata",
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"test": "test"})]
drop_vector_indexes(docsearch)
def test_neo4jvector_prefer_indexname() -> None:
"""Test using when two indexes are found, prefer by index_name."""
Neo4jVector.from_texts(
texts=["foo"],
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
Neo4jVector.from_texts(
texts=["bar"],
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="foo",
node_label="Test",
embedding_node_property="vector",
text_node_property="info",
pre_delete_collection=True,
)
existing_index = Neo4jVector.from_existing_index(
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="foo",
text_node_property="info",
)
output = existing_index.similarity_search("bar", k=1)
assert output == [Document(page_content="bar", metadata={})]
drop_vector_indexes(existing_index)
def test_neo4jvector_prefer_indexname_insert() -> None:
"""Test using when two indexes are found, prefer by index_name."""
Neo4jVector.from_texts(
texts=["baz"],
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
Neo4jVector.from_texts(
texts=["foo"],
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="foo",
node_label="Test",
embedding_node_property="vector",
text_node_property="info",
pre_delete_collection=True,
)
existing_index = Neo4jVector.from_existing_index(
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="foo",
text_node_property="info",
)
existing_index.add_documents([Document(page_content="bar", metadata={})])
output = existing_index.similarity_search("bar", k=2)
assert output == [
Document(page_content="bar", metadata={}),
Document(page_content="foo", metadata={}),
]
drop_vector_indexes(existing_index)
def test_neo4jvector_hybrid() -> None:
"""Test end to end construction with hybrid search."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
docsearch = Neo4jVector.from_embeddings(
text_embeddings=text_embedding_pairs,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
search_type=SearchType.HYBRID,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
drop_vector_indexes(docsearch)
def test_neo4jvector_hybrid_deduplicate() -> None:
"""Test result deduplication with hybrid search."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
docsearch = Neo4jVector.from_embeddings(
text_embeddings=text_embedding_pairs,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
search_type=SearchType.HYBRID,
)
output = docsearch.similarity_search("foo", k=3)
assert output == [
Document(page_content="foo"),
Document(page_content="bar"),
Document(page_content="baz"),
]
drop_vector_indexes(docsearch)
def test_neo4jvector_hybrid_retrieval_query() -> None:
"""Test custom retrieval_query with hybrid search."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
docsearch = Neo4jVector.from_embeddings(
text_embeddings=text_embedding_pairs,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
search_type=SearchType.HYBRID,
retrieval_query="RETURN 'moo' AS text, score, {test: 'test'} AS metadata",
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="moo", metadata={"test": "test"})]
drop_vector_indexes(docsearch)
def test_neo4jvector_hybrid_retrieval_query2() -> None:
"""Test custom retrieval_query with hybrid search."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
docsearch = Neo4jVector.from_embeddings(
text_embeddings=text_embedding_pairs,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
search_type=SearchType.HYBRID,
retrieval_query="RETURN node.text AS text, score, {test: 'test'} AS metadata",
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"test": "test"})]
drop_vector_indexes(docsearch)
def test_neo4jvector_missing_keyword() -> None:
"""Test hybrid search with missing keyword_index_search."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
docsearch = Neo4jVector.from_embeddings(
text_embeddings=text_embedding_pairs,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
try:
Neo4jVector.from_existing_index(
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="vector",
search_type=SearchType.HYBRID,
)
except ValueError as e:
assert str(e) == (
"keyword_index name has to be specified when " "using hybrid search option"
)
drop_vector_indexes(docsearch)
def test_neo4jvector_hybrid_from_existing() -> None:
"""Test hybrid search with missing keyword_index_search."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
Neo4jVector.from_embeddings(
text_embeddings=text_embedding_pairs,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
search_type=SearchType.HYBRID,
)
existing = Neo4jVector.from_existing_index(
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="vector",
keyword_index_name="keyword",
search_type=SearchType.HYBRID,
)
output = existing.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
drop_vector_indexes(existing)
def test_neo4jvector_from_existing_graph() -> None:
"""Test from_existing_graph with a single property."""
graph = Neo4jVector.from_texts(
texts=["test"],
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="foo",
node_label="Foo",
embedding_node_property="vector",
text_node_property="info",
pre_delete_collection=True,
)
graph.query("MATCH (n) DETACH DELETE n")
graph.query("CREATE (:Test {name:'Foo'})," "(:Test {name:'Bar'})")
existing = Neo4jVector.from_existing_graph(
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="vector",
node_label="Test",
text_node_properties=["name"],
embedding_node_property="embedding",
)
output = existing.similarity_search("foo", k=1)
assert output == [Document(page_content="\nname: Foo")]
drop_vector_indexes(existing)
def test_neo4jvector_from_existing_graph_hybrid() -> None:
"""Test from_existing_graph hybrid with a single property."""
graph = Neo4jVector.from_texts(
texts=["test"],
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="foo",
node_label="Foo",
embedding_node_property="vector",
text_node_property="info",
pre_delete_collection=True,
)
graph.query("MATCH (n) DETACH DELETE n")
graph.query("CREATE (:Test {name:'foo'})," "(:Test {name:'Bar'})")
existing = Neo4jVector.from_existing_graph(
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="vector",
node_label="Test",
text_node_properties=["name"],
embedding_node_property="embedding",
search_type=SearchType.HYBRID,
)
output = existing.similarity_search("foo", k=1)
assert output == [Document(page_content="\nname: foo")]
drop_vector_indexes(existing)
def test_neo4jvector_from_existing_graph_multiple_properties() -> None:
"""Test from_existing_graph with a two property."""
graph = Neo4jVector.from_texts(
texts=["test"],
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="foo",
node_label="Foo",
embedding_node_property="vector",
text_node_property="info",
pre_delete_collection=True,
)
graph.query("MATCH (n) DETACH DELETE n")
graph.query("CREATE (:Test {name:'Foo', name2: 'Fooz'})," "(:Test {name:'Bar'})")
existing = Neo4jVector.from_existing_graph(
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="vector",
node_label="Test",
text_node_properties=["name", "name2"],
embedding_node_property="embedding",
)
output = existing.similarity_search("foo", k=1)
assert output == [Document(page_content="\nname: Foo\nname2: Fooz")]
drop_vector_indexes(existing)
def test_neo4jvector_from_existing_graph_multiple_properties_hybrid() -> None:
"""Test from_existing_graph with a two property."""
graph = Neo4jVector.from_texts(
texts=["test"],
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="foo",
node_label="Foo",
embedding_node_property="vector",
text_node_property="info",
pre_delete_collection=True,
)
graph.query("MATCH (n) DETACH DELETE n")
graph.query("CREATE (:Test {name:'Foo', name2: 'Fooz'})," "(:Test {name:'Bar'})")
existing = Neo4jVector.from_existing_graph(
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="vector",
node_label="Test",
text_node_properties=["name", "name2"],
embedding_node_property="embedding",
search_type=SearchType.HYBRID,
)
output = existing.similarity_search("foo", k=1)
assert output == [Document(page_content="\nname: Foo\nname2: Fooz")]
drop_vector_indexes(existing)
def test_neo4jvector_special_character() -> None:
"""Test removing lucene."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
docsearch = Neo4jVector.from_embeddings(
text_embeddings=text_embedding_pairs,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
search_type=SearchType.HYBRID,
)
output = docsearch.similarity_search(
"It is the end of the world. Take shelter!", k=1
)
assert output == [
Document(page_content="It is the end of the world. Take shelter!", metadata={})
]
drop_vector_indexes(docsearch)
def test_hybrid_score_normalization() -> None:
"""Test if we can get two 1.0 documents with RRF"""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(["foo"], text_embeddings))
docsearch = Neo4jVector.from_embeddings(
text_embeddings=text_embedding_pairs,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
search_type=SearchType.HYBRID,
)
# Remove deduplication part of the query
rrf_query = (
_get_search_index_query(SearchType.HYBRID)
.rstrip("WITH node, max(score) AS score ORDER BY score DESC LIMIT $k")
.replace("UNION", "UNION ALL")
+ "RETURN node.text AS text, score LIMIT 2"
)
output = docsearch.query(
rrf_query,
params={
"index": "vector",
"k": 1,
"embedding": FakeEmbeddingsWithOsDimension().embed_query("foo"),
"query": "foo",
"keyword_index": "keyword",
},
)
# Both FT and Vector must return 1.0 score
assert output == [{"text": "foo", "score": 1.0}, {"text": "foo", "score": 1.0}]
drop_vector_indexes(docsearch)
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~evaluation~qa~generate_prompt.py | # flake8: noqa
from langchain.output_parsers.regex import RegexParser
from libs.core.langchain_core.prompts import PromptTemplate
template = """You are a teacher coming up with questions to ask on a quiz.
Given the following document, please generate a question and answer based on that document.
Example Format:
<Begin Document>
...
<End Document>
QUESTION: question here
ANSWER: answer here
These questions should be detailed and be based explicitly on information in the document. Begin!
<Begin Document>
{doc}
<End Document>"""
PROMPT = PromptTemplate(
input_variables=["doc"],
template=template,
)
| [
"You are a teacher coming up with questions to ask on a quiz. \nGiven the following document, please generate a question and answer based on that document.\n\nExample Format:\n<Begin Document>\n...\n<End Document>\nQUESTION: question here\nANSWER: answer here\n\nThese questions should be detailed and be based explicitly on information in the document. Begin!\n\n<Begin Document>\n{doc}\n<End Document>"
] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~chains~graph_qa~neptune_cypher.py | from __future__ import annotations
import re
from typing import Any, Dict, List, Optional
from libs.core.langchain_core.prompts.base import BasePromptTemplate
from libs.core.langchain_core.pydantic_v1 import Field
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.graph_qa.prompts import (
CYPHER_QA_PROMPT,
NEPTUNE_OPENCYPHER_GENERATION_PROMPT,
NEPTUNE_OPENCYPHER_GENERATION_SIMPLE_PROMPT,
)
from langchain.chains.llm import LLMChain
from langchain.chains.prompt_selector import ConditionalPromptSelector
from langchain.graphs import NeptuneGraph
INTERMEDIATE_STEPS_KEY = "intermediate_steps"
def trim_query(query: str) -> str:
"""Trim the query to only include Cypher keywords."""
keywords = (
"CALL",
"CREATE",
"DELETE",
"DETACH",
"LIMIT",
"MATCH",
"MERGE",
"OPTIONAL",
"ORDER",
"REMOVE",
"RETURN",
"SET",
"SKIP",
"UNWIND",
"WITH",
"WHERE",
"//",
)
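    # Keep only lines that start with a recognized openCypher keyword (or a comment);
    # any surrounding prose the LLM produced around the query is discarded.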
lines = query.split("\n")
new_query = ""
for line in lines:
if line.strip().upper().startswith(keywords):
new_query += line + "\n"
return new_query
def extract_cypher(text: str) -> str:
"""Extract Cypher code from text using Regex."""
# The pattern to find Cypher code enclosed in triple backticks
pattern = r"```(.*?)```"
# Find all matches in the input text
matches = re.findall(pattern, text, re.DOTALL)
return matches[0] if matches else text
def use_simple_prompt(llm: BaseLanguageModel) -> bool:
"""Decides whether to use the simple prompt"""
if llm._llm_type and "anthropic" in llm._llm_type: # type: ignore
return True
# Bedrock anthropic
if hasattr(llm, "model_id") and "anthropic" in llm.model_id: # type: ignore
return True
return False
PROMPT_SELECTOR = ConditionalPromptSelector(
default_prompt=NEPTUNE_OPENCYPHER_GENERATION_PROMPT,
conditionals=[(use_simple_prompt, NEPTUNE_OPENCYPHER_GENERATION_SIMPLE_PROMPT)],
)
class NeptuneOpenCypherQAChain(Chain):
"""Chain for question-answering against a Neptune graph
by generating openCypher statements.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
Example:
.. code-block:: python
chain = NeptuneOpenCypherQAChain.from_llm(
llm=llm,
graph=graph
)
response = chain.run(query)
"""
graph: NeptuneGraph = Field(exclude=True)
cypher_generation_chain: LLMChain
qa_chain: LLMChain
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
top_k: int = 10
return_intermediate_steps: bool = False
"""Whether or not to return the intermediate steps along with the final answer."""
return_direct: bool = False
"""Whether or not to return the result of querying the graph directly."""
extra_instructions: Optional[str] = None
"""Extra instructions by the appended to the query generation prompt."""
@property
def input_keys(self) -> List[str]:
"""Return the input keys.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return the output keys.
:meta private:
"""
_output_keys = [self.output_key]
return _output_keys
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
*,
qa_prompt: BasePromptTemplate = CYPHER_QA_PROMPT,
cypher_prompt: Optional[BasePromptTemplate] = None,
extra_instructions: Optional[str] = None,
**kwargs: Any,
) -> NeptuneOpenCypherQAChain:
"""Initialize from LLM."""
qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
_cypher_prompt = cypher_prompt or PROMPT_SELECTOR.get_prompt(llm)
cypher_generation_chain = LLMChain(llm=llm, prompt=_cypher_prompt)
return cls(
qa_chain=qa_chain,
cypher_generation_chain=cypher_generation_chain,
extra_instructions=extra_instructions,
**kwargs,
)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Generate Cypher statement, use it to look up in db and answer question."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
question = inputs[self.input_key]
intermediate_steps: List = []
generated_cypher = self.cypher_generation_chain.run(
{
"question": question,
"schema": self.graph.get_schema,
"extra_instructions": self.extra_instructions or "",
},
callbacks=callbacks,
)
# Extract Cypher code if it is wrapped in backticks
generated_cypher = extract_cypher(generated_cypher)
generated_cypher = trim_query(generated_cypher)
_run_manager.on_text("Generated Cypher:", end="\n", verbose=self.verbose)
_run_manager.on_text(
generated_cypher, color="green", end="\n", verbose=self.verbose
)
intermediate_steps.append({"query": generated_cypher})
context = self.graph.query(generated_cypher)
if self.return_direct:
final_result = context
else:
_run_manager.on_text("Full Context:", end="\n", verbose=self.verbose)
_run_manager.on_text(
str(context), color="green", end="\n", verbose=self.verbose
)
intermediate_steps.append({"context": context})
result = self.qa_chain(
{"question": question, "context": context},
callbacks=callbacks,
)
final_result = result[self.qa_chain.output_key]
chain_result: Dict[str, Any] = {self.output_key: final_result}
if self.return_intermediate_steps:
chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps
return chain_result
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~titan_takeoff_pro.py | from typing import Any, Iterator, List, Mapping, Optional
import requests
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.outputs import GenerationChunk
from requests.exceptions import ConnectionError
from langchain_community.llms.utils import enforce_stop_tokens
class TitanTakeoffPro(LLM):
"""Titan Takeoff Pro is a language model that can be used to generate text."""
base_url: Optional[str] = "http://localhost:3000"
"""Specifies the baseURL to use for the Titan Takeoff Pro API.
Default = http://localhost:3000.
"""
max_new_tokens: Optional[int] = None
"""Maximum tokens generated."""
min_new_tokens: Optional[int] = None
"""Minimum tokens generated."""
sampling_topk: Optional[int] = None
"""Sample predictions from the top K most probable candidates."""
sampling_topp: Optional[float] = None
"""Sample from predictions whose cumulative probability exceeds this value.
"""
sampling_temperature: Optional[float] = None
"""Sample with randomness. Bigger temperatures are associated with
more randomness and 'creativity'.
"""
repetition_penalty: Optional[float] = None
"""Penalise the generation of tokens that have been generated before.
Set to > 1 to penalize.
"""
regex_string: Optional[str] = None
"""A regex string for constrained generation."""
no_repeat_ngram_size: Optional[int] = None
"""Prevent repetitions of ngrams of this size. Default = 0 (turned off)."""
streaming: bool = False
"""Whether to stream the output. Default = False."""
@property
def _default_params(self) -> Mapping[str, Any]:
"""Get the default parameters for calling Titan Takeoff Server (Pro)."""
return {
**(
{"regex_string": self.regex_string}
if self.regex_string is not None
else {}
),
**(
{"sampling_temperature": self.sampling_temperature}
if self.sampling_temperature is not None
else {}
),
**(
{"sampling_topp": self.sampling_topp}
if self.sampling_topp is not None
else {}
),
**(
{"repetition_penalty": self.repetition_penalty}
if self.repetition_penalty is not None
else {}
),
**(
{"max_new_tokens": self.max_new_tokens}
if self.max_new_tokens is not None
else {}
),
**(
{"min_new_tokens": self.min_new_tokens}
if self.min_new_tokens is not None
else {}
),
**(
{"sampling_topk": self.sampling_topk}
if self.sampling_topk is not None
else {}
),
**(
{"no_repeat_ngram_size": self.no_repeat_ngram_size}
if self.no_repeat_ngram_size is not None
else {}
),
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "titan_takeoff_pro"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Titan Takeoff (Pro) generate endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
prompt = "What is the capital of the United Kingdom?"
response = model(prompt)
"""
try:
if self.streaming:
text_output = ""
for chunk in self._stream(
prompt=prompt,
stop=stop,
run_manager=run_manager,
):
text_output += chunk.text
return text_output
url = f"{self.base_url}/generate"
params = {"text": prompt, **self._default_params}
response = requests.post(url, json=params)
response.raise_for_status()
response.encoding = "utf-8"
text = ""
if "text" in response.json():
text = response.json()["text"]
text = text.replace("</s>", "")
else:
raise ValueError("Something went wrong.")
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
except ConnectionError:
raise ConnectionError(
"Could not connect to Titan Takeoff (Pro) server. \
Please make sure that the server is running."
)
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
"""Call out to Titan Takeoff (Pro) stream endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Yields:
A dictionary like object containing a string token.
Example:
.. code-block:: python
prompt = "What is the capital of the United Kingdom?"
response = model(prompt)
"""
url = f"{self.base_url}/generate_stream"
params = {"text": prompt, **self._default_params}
response = requests.post(url, json=params, stream=True)
response.encoding = "utf-8"
buffer = ""
for text in response.iter_content(chunk_size=1, decode_unicode=True):
buffer += text
if "data:" in buffer:
# Remove the first instance of "data:" from the buffer.
if buffer.startswith("data:"):
buffer = ""
if len(buffer.split("data:", 1)) == 2:
content, _ = buffer.split("data:", 1)
buffer = content.rstrip("\n")
                # At this point the buffer holds only the token text that
                # preceded the new "data:" marker.
if buffer: # Ensure that there's content to process.
chunk = GenerationChunk(text=buffer)
buffer = "" # Reset buffer for the next set of data.
yield chunk
if run_manager:
run_manager.on_llm_new_token(token=chunk.text)
# Yield any remaining content in the buffer.
if buffer:
chunk = GenerationChunk(text=buffer.replace("</s>", ""))
yield chunk
if run_manager:
run_manager.on_llm_new_token(token=chunk.text)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {"base_url": self.base_url, **{}, **self._default_params}
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~embeddings~edenai.py | from typing import Any, Dict, List, Optional
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator
from libs.core.langchain_core.utils import get_from_dict_or_env
from langchain_community.utilities.requests import Requests
class EdenAiEmbeddings(BaseModel, Embeddings):
"""EdenAI embedding.
environment variable ``EDENAI_API_KEY`` set with your API key, or pass
it as a named parameter.
"""
edenai_api_key: Optional[str] = Field(None, description="EdenAI API Token")
provider: str = "openai"
"""embedding provider to use (eg: openai,google etc.)"""
model: Optional[str] = None
"""
model name for above provider (eg: 'gpt-3.5-turbo-instruct' for openai)
available models are shown on https://docs.edenai.co/ under 'available providers'
"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
values["edenai_api_key"] = get_from_dict_or_env(
values, "edenai_api_key", "EDENAI_API_KEY"
)
return values
@staticmethod
def get_user_agent() -> str:
from langchain_community import __version__
return f"langchain/{__version__}"
def _generate_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Compute embeddings using EdenAi api."""
url = "https://api.edenai.run/v2/text/embeddings"
headers = {
"accept": "application/json",
"content-type": "application/json",
"authorization": f"Bearer {self.edenai_api_key}",
"User-Agent": self.get_user_agent(),
}
payload: Dict[str, Any] = {"texts": texts, "providers": self.provider}
if self.model is not None:
payload["settings"] = {self.provider: self.model}
request = Requests(headers=headers)
response = request.post(url=url, data=payload)
if response.status_code >= 500:
raise Exception(f"EdenAI Server: Error {response.status_code}")
elif response.status_code >= 400:
raise ValueError(f"EdenAI received an invalid payload: {response.text}")
elif response.status_code != 200:
raise Exception(
f"EdenAI returned an unexpected response with status "
f"{response.status_code}: {response.text}"
)
temp = response.json()
provider_response = temp[self.provider]
if provider_response.get("status") == "fail":
err_msg = provider_response.get("error", {}).get("message")
raise Exception(err_msg)
embeddings = []
for embed_item in temp[self.provider]["items"]:
embedding = embed_item["embedding"]
embeddings.append(embedding)
return embeddings
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed a list of documents using EdenAI.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
return self._generate_embeddings(texts)
def embed_query(self, text: str) -> List[float]:
"""Embed a query using EdenAI.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self._generate_embeddings([text])[0]
| [
"application/json"
] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~retrievers~document_compressors~chain_filter.py | """Filter that uses an LLM to drop documents that aren't relevant to the query."""
from typing import Any, Callable, Dict, Optional, Sequence
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.language_models import BaseLanguageModel
from libs.core.langchain_core.prompts import BasePromptTemplate, PromptTemplate
from langchain.callbacks.manager import Callbacks
from langchain.chains import LLMChain
from langchain.output_parsers.boolean import BooleanOutputParser
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
from langchain.retrievers.document_compressors.chain_filter_prompt import (
prompt_template,
)
def _get_default_chain_prompt() -> PromptTemplate:
return PromptTemplate(
template=prompt_template,
input_variables=["question", "context"],
output_parser=BooleanOutputParser(),
)
def default_get_input(query: str, doc: Document) -> Dict[str, Any]:
"""Return the compression chain input."""
return {"question": query, "context": doc.page_content}
class LLMChainFilter(BaseDocumentCompressor):
"""Filter that drops documents that aren't relevant to the query."""
llm_chain: LLMChain
"""LLM wrapper to use for filtering documents.
The chain prompt is expected to have a BooleanOutputParser."""
get_input: Callable[[str, Document], dict] = default_get_input
"""Callable for constructing the chain input from the query and a Document."""
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Filter down documents based on their relevance to the query."""
filtered_docs = []
for doc in documents:
_input = self.get_input(query, doc)
include_doc = self.llm_chain.predict_and_parse(
**_input, callbacks=callbacks
)
if include_doc:
filtered_docs.append(doc)
return filtered_docs
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> "LLMChainFilter":
"""Create a LLMChainFilter from a language model.
Args:
llm: The language model to use for filtering.
prompt: The prompt to use for the filter.
**kwargs: Additional arguments to pass to the constructor.
Returns:
A LLMChainFilter that uses the given language model.
"""
_prompt = prompt if prompt is not None else _get_default_chain_prompt()
llm_chain = LLMChain(llm=llm, prompt=_prompt)
return cls(llm_chain=llm_chain, **kwargs)
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~chains~question_answering~refine_prompts.py | # flake8: noqa
from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model
from libs.core.langchain_core.prompts.chat import (
AIMessagePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from libs.core.langchain_core.prompts.prompt import PromptTemplate
DEFAULT_REFINE_PROMPT_TMPL = (
"The original question is as follows: {question}\n"
"We have provided an existing answer: {existing_answer}\n"
"We have the opportunity to refine the existing answer "
"(only if needed) with some more context below.\n"
"------------\n"
"{context_str}\n"
"------------\n"
"Given the new context, refine the original answer to better "
"answer the question. "
"If the context isn't useful, return the original answer."
)
DEFAULT_REFINE_PROMPT = PromptTemplate.from_template(DEFAULT_REFINE_PROMPT_TMPL)
refine_template = (
"We have the opportunity to refine the existing answer "
"(only if needed) with some more context below.\n"
"------------\n"
"{context_str}\n"
"------------\n"
"Given the new context, refine the original answer to better "
"answer the question. "
"If the context isn't useful, return the original answer."
)
CHAT_REFINE_PROMPT = ChatPromptTemplate.from_messages(
[("human", "{question}"), ("ai", "{existing_answer}"), ("human", refine_template)]
)
REFINE_PROMPT_SELECTOR = ConditionalPromptSelector(
default_prompt=DEFAULT_REFINE_PROMPT,
conditionals=[(is_chat_model, CHAT_REFINE_PROMPT)],
)
DEFAULT_TEXT_QA_PROMPT_TMPL = (
"Context information is below. \n"
"------------\n"
"{context_str}\n"
"------------\n"
"Given the context information and not prior knowledge, "
"answer the question: {question}\n"
)
DEFAULT_TEXT_QA_PROMPT = PromptTemplate.from_template(DEFAULT_TEXT_QA_PROMPT_TMPL)
chat_qa_prompt_template = (
"Context information is below.\n"
"------------\n"
"{context_str}\n"
"------------\n"
"Given the context information and not prior knowledge, "
"answer any questions"
)
CHAT_QUESTION_PROMPT = ChatPromptTemplate.from_messages(
[("system", chat_qa_prompt_template), ("human", "{question}")]
)
QUESTION_PROMPT_SELECTOR = ConditionalPromptSelector(
default_prompt=DEFAULT_TEXT_QA_PROMPT,
conditionals=[(is_chat_model, CHAT_QUESTION_PROMPT)],
)
| [
"{existing_answer}",
"Context information is below.\n------------\n{context_str}\n------------\nGiven the context information and not prior knowledge, answer any questions",
"We have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context_str}\n------------\nGiven the new context, refine the original answer to better answer the question. If the context isn't useful, return the original answer.",
"human",
"Context information is below. \n------------\n{context_str}\n------------\nGiven the context information and not prior knowledge, answer the question: {question}\n",
"[('human', '{question}'), ('ai', '{existing_answer}'), ('human', PLACEHOLDER)]",
"{question}",
"[('system', PLACEHOLDER), ('human', '{question}')]",
"The original question is as follows: {question}\nWe have provided an existing answer: {existing_answer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context_str}\n------------\nGiven the new context, refine the original answer to better answer the question. If the context isn't useful, return the original answer."
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~html_bs.py | import logging
from typing import Dict, List, Union
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class BSHTMLLoader(BaseLoader):
"""Load `HTML` files and parse them with `beautiful soup`."""
def __init__(
self,
file_path: str,
open_encoding: Union[str, None] = None,
bs_kwargs: Union[dict, None] = None,
get_text_separator: str = "",
) -> None:
"""initialize with path, and optionally, file encoding to use, and any kwargs
to pass to the BeautifulSoup object.
Args:
file_path: The path to the file to load.
open_encoding: The encoding to use when opening the file.
bs_kwargs: Any kwargs to pass to the BeautifulSoup object.
get_text_separator: The separator to use when calling get_text on the soup.
"""
try:
import bs4 # noqa:F401
except ImportError:
raise ImportError(
"beautifulsoup4 package not found, please install it with "
"`pip install beautifulsoup4`"
)
self.file_path = file_path
self.open_encoding = open_encoding
if bs_kwargs is None:
bs_kwargs = {"features": "lxml"}
self.bs_kwargs = bs_kwargs
self.get_text_separator = get_text_separator
def load(self) -> List[Document]:
"""Load HTML document into document objects."""
from bs4 import BeautifulSoup
with open(self.file_path, "r", encoding=self.open_encoding) as f:
soup = BeautifulSoup(f, **self.bs_kwargs)
text = soup.get_text(self.get_text_separator)
if soup.title:
title = str(soup.title.string)
else:
title = ""
metadata: Dict[str, Union[str, None]] = {
"source": self.file_path,
"title": title,
}
return [Document(page_content=text, metadata=metadata)]
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~vectorstores~weaviate.py | from __future__ import annotations
import datetime
import os
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Tuple,
)
from uuid import uuid4
import numpy as np
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.vectorstores import VectorStore
from langchain_community.vectorstores.utils import maximal_marginal_relevance
if TYPE_CHECKING:
import weaviate
def _default_schema(index_name: str) -> Dict:
return {
"class": index_name,
"properties": [
{
"name": "text",
"dataType": ["text"],
}
],
}
def _create_weaviate_client(
url: Optional[str] = None,
api_key: Optional[str] = None,
**kwargs: Any,
) -> weaviate.Client:
try:
import weaviate
except ImportError:
raise ImportError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`"
)
url = url or os.environ.get("WEAVIATE_URL")
api_key = api_key or os.environ.get("WEAVIATE_API_KEY")
auth = weaviate.auth.AuthApiKey(api_key=api_key) if api_key else None
return weaviate.Client(url=url, auth_client_secret=auth, **kwargs)
def _default_score_normalizer(val: float) -> float:
return 1 - 1 / (1 + np.exp(val))
def _json_serializable(value: Any) -> Any:
if isinstance(value, datetime.datetime):
return value.isoformat()
return value
class Weaviate(VectorStore):
"""`Weaviate` vector store.
To use, you should have the ``weaviate-client`` python package installed.
Example:
.. code-block:: python
import weaviate
from langchain_community.vectorstores import Weaviate
client = weaviate.Client(url=os.environ["WEAVIATE_URL"], ...)
weaviate = Weaviate(client, index_name, text_key)
"""
def __init__(
self,
client: Any,
index_name: str,
text_key: str,
embedding: Optional[Embeddings] = None,
attributes: Optional[List[str]] = None,
relevance_score_fn: Optional[
Callable[[float], float]
] = _default_score_normalizer,
by_text: bool = True,
):
"""Initialize with Weaviate client."""
try:
import weaviate
except ImportError:
raise ImportError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`."
)
if not isinstance(client, weaviate.Client):
raise ValueError(
f"client should be an instance of weaviate.Client, got {type(client)}"
)
self._client = client
self._index_name = index_name
self._embedding = embedding
self._text_key = text_key
self._query_attrs = [self._text_key]
self.relevance_score_fn = relevance_score_fn
self._by_text = by_text
if attributes is not None:
self._query_attrs.extend(attributes)
@property
def embeddings(self) -> Optional[Embeddings]:
return self._embedding
def _select_relevance_score_fn(self) -> Callable[[float], float]:
return (
self.relevance_score_fn
if self.relevance_score_fn
else _default_score_normalizer
)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Upload texts with metadata (properties) to Weaviate."""
from weaviate.util import get_valid_uuid
ids = []
embeddings: Optional[List[List[float]]] = None
if self._embedding:
if not isinstance(texts, list):
texts = list(texts)
embeddings = self._embedding.embed_documents(texts)
with self._client.batch as batch:
for i, text in enumerate(texts):
data_properties = {self._text_key: text}
if metadatas is not None:
for key, val in metadatas[i].items():
data_properties[key] = _json_serializable(val)
# Allow for ids (consistent w/ other methods)
                # or uuids (backwards compatible w/ existing arg)
# If the UUID of one of the objects already exists
# then the existing object will be replaced by the new object.
_id = get_valid_uuid(uuid4())
if "uuids" in kwargs:
_id = kwargs["uuids"][i]
elif "ids" in kwargs:
_id = kwargs["ids"][i]
batch.add_data_object(
data_object=data_properties,
class_name=self._index_name,
uuid=_id,
vector=embeddings[i] if embeddings else None,
tenant=kwargs.get("tenant"),
)
ids.append(_id)
return ids
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
if self._by_text:
return self.similarity_search_by_text(query, k, **kwargs)
else:
if self._embedding is None:
raise ValueError(
"_embedding cannot be None for similarity_search when "
"_by_text=False"
)
embedding = self._embedding.embed_query(query)
return self.similarity_search_by_vector(embedding, k, **kwargs)
def similarity_search_by_text(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
content: Dict[str, Any] = {"concepts": [query]}
if kwargs.get("search_distance"):
content["certainty"] = kwargs.get("search_distance")
query_obj = self._client.query.get(self._index_name, self._query_attrs)
if kwargs.get("where_filter"):
query_obj = query_obj.with_where(kwargs.get("where_filter"))
if kwargs.get("tenant"):
query_obj = query_obj.with_tenant(kwargs.get("tenant"))
if kwargs.get("additional"):
query_obj = query_obj.with_additional(kwargs.get("additional"))
result = query_obj.with_near_text(content).with_limit(k).do()
if "errors" in result:
raise ValueError(f"Error during query: {result['errors']}")
docs = []
for res in result["data"]["Get"][self._index_name]:
text = res.pop(self._text_key)
docs.append(Document(page_content=text, metadata=res))
return docs
def similarity_search_by_vector(
self, embedding: List[float], k: int = 4, **kwargs: Any
) -> List[Document]:
"""Look up similar documents by embedding vector in Weaviate."""
vector = {"vector": embedding}
query_obj = self._client.query.get(self._index_name, self._query_attrs)
if kwargs.get("where_filter"):
query_obj = query_obj.with_where(kwargs.get("where_filter"))
if kwargs.get("tenant"):
query_obj = query_obj.with_tenant(kwargs.get("tenant"))
if kwargs.get("additional"):
query_obj = query_obj.with_additional(kwargs.get("additional"))
result = query_obj.with_near_vector(vector).with_limit(k).do()
if "errors" in result:
raise ValueError(f"Error during query: {result['errors']}")
docs = []
for res in result["data"]["Get"][self._index_name]:
text = res.pop(self._text_key)
docs.append(Document(page_content=text, metadata=res))
return docs
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
if self._embedding is not None:
embedding = self._embedding.embed_query(query)
else:
raise ValueError(
"max_marginal_relevance_search requires a suitable Embeddings object"
)
return self.max_marginal_relevance_search_by_vector(
embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, **kwargs
)
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
vector = {"vector": embedding}
query_obj = self._client.query.get(self._index_name, self._query_attrs)
if kwargs.get("where_filter"):
query_obj = query_obj.with_where(kwargs.get("where_filter"))
if kwargs.get("tenant"):
query_obj = query_obj.with_tenant(kwargs.get("tenant"))
results = (
query_obj.with_additional("vector")
.with_near_vector(vector)
.with_limit(fetch_k)
.do()
)
payload = results["data"]["Get"][self._index_name]
embeddings = [result["_additional"]["vector"] for result in payload]
mmr_selected = maximal_marginal_relevance(
np.array(embedding), embeddings, k=k, lambda_mult=lambda_mult
)
docs = []
for idx in mmr_selected:
text = payload[idx].pop(self._text_key)
payload[idx].pop("_additional")
meta = payload[idx]
docs.append(Document(page_content=text, metadata=meta))
return docs
def similarity_search_with_score(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""
Return list of documents most similar to the query
text and cosine distance in float for each.
Lower score represents more similarity.
"""
if self._embedding is None:
raise ValueError(
"_embedding cannot be None for similarity_search_with_score"
)
content: Dict[str, Any] = {"concepts": [query]}
if kwargs.get("search_distance"):
content["certainty"] = kwargs.get("search_distance")
query_obj = self._client.query.get(self._index_name, self._query_attrs)
if kwargs.get("where_filter"):
query_obj = query_obj.with_where(kwargs.get("where_filter"))
if kwargs.get("tenant"):
query_obj = query_obj.with_tenant(kwargs.get("tenant"))
embedded_query = self._embedding.embed_query(query)
if not self._by_text:
vector = {"vector": embedded_query}
result = (
query_obj.with_near_vector(vector)
.with_limit(k)
.with_additional("vector")
.do()
)
else:
result = (
query_obj.with_near_text(content)
.with_limit(k)
.with_additional("vector")
.do()
)
if "errors" in result:
raise ValueError(f"Error during query: {result['errors']}")
docs_and_scores = []
for res in result["data"]["Get"][self._index_name]:
text = res.pop(self._text_key)
score = np.dot(res["_additional"]["vector"], embedded_query)
docs_and_scores.append((Document(page_content=text, metadata=res), score))
return docs_and_scores
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
*,
client: Optional[weaviate.Client] = None,
weaviate_url: Optional[str] = None,
weaviate_api_key: Optional[str] = None,
batch_size: Optional[int] = None,
index_name: Optional[str] = None,
text_key: str = "text",
by_text: bool = False,
relevance_score_fn: Optional[
Callable[[float], float]
] = _default_score_normalizer,
**kwargs: Any,
) -> Weaviate:
"""Construct Weaviate wrapper from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Creates a new index for the embeddings in the Weaviate instance.
3. Adds the documents to the newly created Weaviate index.
This is intended to be a quick way to get started.
Args:
texts: Texts to add to vector store.
embedding: Text embedding model to use.
metadatas: Metadata associated with each text.
client: weaviate.Client to use.
weaviate_url: The Weaviate URL. If using Weaviate Cloud Services get it
from the ``Details`` tab. Can be passed in as a named param or by
setting the environment variable ``WEAVIATE_URL``. Should not be
specified if client is provided.
weaviate_api_key: The Weaviate API key. If enabled and using Weaviate Cloud
Services, get it from ``Details`` tab. Can be passed in as a named param
or by setting the environment variable ``WEAVIATE_API_KEY``. Should
not be specified if client is provided.
batch_size: Size of batch operations.
index_name: Index name.
text_key: Key to use for uploading/retrieving text to/from vectorstore.
by_text: Whether to search by text or by embedding.
relevance_score_fn: Function for converting whatever distance function the
vector store uses to a relevance score, which is a normalized similarity
score (0 means dissimilar, 1 means similar).
**kwargs: Additional named parameters to pass to ``Weaviate.__init__()``.
Example:
.. code-block:: python
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import Weaviate
embeddings = OpenAIEmbeddings()
weaviate = Weaviate.from_texts(
texts,
embeddings,
weaviate_url="http://localhost:8080"
)
"""
try:
from weaviate.util import get_valid_uuid
except ImportError as e:
raise ImportError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`"
) from e
client = client or _create_weaviate_client(
url=weaviate_url,
api_key=weaviate_api_key,
)
if batch_size:
client.batch.configure(batch_size=batch_size)
index_name = index_name or f"LangChain_{uuid4().hex}"
schema = _default_schema(index_name)
# check whether the index already exists
if not client.schema.exists(index_name):
client.schema.create_class(schema)
embeddings = embedding.embed_documents(texts) if embedding else None
attributes = list(metadatas[0].keys()) if metadatas else None
# If the UUID of one of the objects already exists
# then the existing object will be replaced by the new object.
if "uuids" in kwargs:
uuids = kwargs.pop("uuids")
else:
uuids = [get_valid_uuid(uuid4()) for _ in range(len(texts))]
with client.batch as batch:
for i, text in enumerate(texts):
data_properties = {
text_key: text,
}
if metadatas is not None:
for key in metadatas[i].keys():
data_properties[key] = metadatas[i][key]
_id = uuids[i]
# if an embedding strategy is not provided, we let
# weaviate create the embedding. Note that this will only
# work if weaviate has been installed with a vectorizer module
# like text2vec-contextionary for example
params = {
"uuid": _id,
"data_object": data_properties,
"class_name": index_name,
}
if embeddings is not None:
params["vector"] = embeddings[i]
batch.add_data_object(**params)
batch.flush()
return cls(
client,
index_name,
text_key,
embedding=embedding,
attributes=attributes,
relevance_score_fn=relevance_score_fn,
by_text=by_text,
**kwargs,
)
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None:
"""Delete by vector IDs.
Args:
ids: List of ids to delete.
"""
if ids is None:
raise ValueError("No ids provided to delete.")
# TODO: Check if this can be done in bulk
for id in ids:
self._client.data_object.delete(uuid=id)
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~chains~history_aware_retriever.py | from __future__ import annotations
from libs.core.langchain_core.language_models import LanguageModelLike
from libs.core.langchain_core.output_parsers import StrOutputParser
from libs.core.langchain_core.prompts import BasePromptTemplate
from libs.core.langchain_core.retrievers import RetrieverLike, RetrieverOutputLike
from libs.core.langchain_core.runnables import RunnableBranch
def create_history_aware_retriever(
llm: LanguageModelLike,
retriever: RetrieverLike,
prompt: BasePromptTemplate,
) -> RetrieverOutputLike:
"""Create a chain that takes conversation history and returns documents.
If there is no `chat_history`, then the `input` is just passed directly to the
retriever. If there is `chat_history`, then the prompt and LLM will be used
to generate a search query. That search query is then passed to the retriever.
Args:
llm: Language model to use for generating a search term given chat history
retriever: RetrieverLike object that takes a string as input and outputs
a list of Documents.
prompt: The prompt used to generate the search query for the retriever.
Returns:
An LCEL Runnable. The runnable input must take in `input`, and if there
is chat history should take it in the form of `chat_history`.
The Runnable output is a list of Documents
Example:
.. code-block:: python
# pip install -U langchain langchain-community
from langchain_community.chat_models import ChatOpenAI
from langchain.chains import create_history_aware_retriever
from langchain import hub
rephrase_prompt = hub.pull("langchain-ai/chat-langchain-rephrase")
llm = ChatOpenAI()
retriever = ...
chat_retriever_chain = create_history_aware_retriever(
llm, retriever, rephrase_prompt
)
chain.invoke({"input": "...", "chat_history": })
"""
if "input" not in prompt.input_variables:
raise ValueError(
"Expected `input` to be a prompt variable, "
f"but got {prompt.input_variables}"
)
retrieve_documents: RetrieverOutputLike = RunnableBranch(
(
# Both empty string and empty list evaluate to False
lambda x: not x.get("chat_history", False),
# If no chat history, then we just pass input to retriever
(lambda x: x["input"]) | retriever,
),
# If chat history, then we pass inputs to LLM chain, then to retriever
prompt | llm | StrOutputParser() | retriever,
).with_config(run_name="chat_retriever_chain")
return retrieve_documents
| [] |
2024-01-10 | mth93/langchain | templates~research-assistant~research_assistant~writer.py | from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from libs.core.langchain_core.output_parsers import StrOutputParser
from libs.core.langchain_core.runnables import ConfigurableField
WRITER_SYSTEM_PROMPT = "You are an AI critical thinker research assistant. Your sole purpose is to write well written, critically acclaimed, objective and structured reports on given text." # noqa: E501
# Report prompts from https://github.com/assafelovic/gpt-researcher/blob/master/gpt_researcher/master/prompts.py
RESEARCH_REPORT_TEMPLATE = """Information:
--------
{research_summary}
--------
Using the above information, answer the following question or topic: "{question}" in a detailed report -- \
The report should focus on the answer to the question, should be well structured, informative, \
in depth, with facts and numbers if available and a minimum of 1,200 words.
You should strive to write the report as long as you can using all relevant and necessary information provided.
You must write the report with markdown syntax.
You MUST determine your own concrete and valid opinion based on the given information. Do NOT deter to general and meaningless conclusions.
Write all used source urls at the end of the report, and make sure to not add duplicated sources, but only one reference for each.
You must write the report in apa format.
Please do your best, this is very important to my career.""" # noqa: E501
RESOURCE_REPORT_TEMPLATE = """Information:
--------
{research_summary}
--------
Based on the above information, generate a bibliography recommendation report for the following question or topic: "{question}". \
The report should provide a detailed analysis of each recommended resource, explaining how each source can contribute to finding answers to the research question. \
Focus on the relevance, reliability, and significance of each source. \
Ensure that the report is well-structured, informative, in-depth, and follows Markdown syntax. \
Include relevant facts, figures, and numbers whenever available. \
The report should have a minimum length of 1,200 words.
Please do your best, this is very important to my career.""" # noqa: E501
OUTLINE_REPORT_TEMPLATE = """Information:
--------
{research_summary}
--------
Using the above information, generate an outline for a research report in Markdown syntax for the following question or topic: "{question}". \
The outline should provide a well-structured framework for the research report, including the main sections, subsections, and key points to be covered. \
The research report should be detailed, informative, in-depth, and a minimum of 1,200 words. \
Use appropriate Markdown syntax to format the outline and ensure readability.
Please do your best, this is very important to my career.""" # noqa: E501
model = ChatOpenAI(temperature=0)
prompt = ChatPromptTemplate.from_messages(
[
("system", WRITER_SYSTEM_PROMPT),
("user", RESEARCH_REPORT_TEMPLATE),
]
).configurable_alternatives(
ConfigurableField("report_type"),
default_key="research_report",
resource_report=ChatPromptTemplate.from_messages(
[
("system", WRITER_SYSTEM_PROMPT),
("user", RESOURCE_REPORT_TEMPLATE),
]
),
outline_report=ChatPromptTemplate.from_messages(
[
("system", WRITER_SYSTEM_PROMPT),
("user", OUTLINE_REPORT_TEMPLATE),
]
),
)
chain = prompt | model | StrOutputParser()
| [
"[('system', 'You are an AI critical thinker research assistant. Your sole purpose is to write well written, critically acclaimed, objective and structured reports on given text.'), ('user', 'Information: \\n--------\\n{research_summary}\\n--------\\n\\nUsing the above information, answer the following question or topic: \"{question}\" in a detailed report -- The report should focus on the answer to the question, should be well structured, informative, in depth, with facts and numbers if available and a minimum of 1,200 words.\\n\\nYou should strive to write the report as long as you can using all relevant and necessary information provided.\\nYou must write the report with markdown syntax.\\nYou MUST determine your own concrete and valid opinion based on the given information. Do NOT deter to general and meaningless conclusions.\\nWrite all used source urls at the end of the report, and make sure to not add duplicated sources, but only one reference for each.\\nYou must write the report in apa format.\\nPlease do your best, this is very important to my career.')]",
"Information: \n--------\n{research_summary}\n--------\n\nBased on the above information, generate a bibliography recommendation report for the following question or topic: \"{question}\". The report should provide a detailed analysis of each recommended resource, explaining how each source can contribute to finding answers to the research question. Focus on the relevance, reliability, and significance of each source. Ensure that the report is well-structured, informative, in-depth, and follows Markdown syntax. Include relevant facts, figures, and numbers whenever available. The report should have a minimum length of 1,200 words.\n\nPlease do your best, this is very important to my career.",
"Information: \n--------\n{research_summary}\n--------\n\nUsing the above information, generate an outline for a research report in Markdown syntax for the following question or topic: \"{question}\". The outline should provide a well-structured framework for the research report, including the main sections, subsections, and key points to be covered. The research report should be detailed, informative, in-depth, and a minimum of 1,200 words. Use appropriate Markdown syntax to format the outline and ensure readability.\n\nPlease do your best, this is very important to my career.",
"Information: \n--------\n{research_summary}\n--------\n\nUsing the above information, answer the following question or topic: \"{question}\" in a detailed report -- The report should focus on the answer to the question, should be well structured, informative, in depth, with facts and numbers if available and a minimum of 1,200 words.\n\nYou should strive to write the report as long as you can using all relevant and necessary information provided.\nYou must write the report with markdown syntax.\nYou MUST determine your own concrete and valid opinion based on the given information. Do NOT deter to general and meaningless conclusions.\nWrite all used source urls at the end of the report, and make sure to not add duplicated sources, but only one reference for each.\nYou must write the report in apa format.\nPlease do your best, this is very important to my career.",
"[('system', 'You are an AI critical thinker research assistant. Your sole purpose is to write well written, critically acclaimed, objective and structured reports on given text.'), ('user', 'Information: \\n--------\\n{research_summary}\\n--------\\n\\nUsing the above information, generate an outline for a research report in Markdown syntax for the following question or topic: \"{question}\". The outline should provide a well-structured framework for the research report, including the main sections, subsections, and key points to be covered. The research report should be detailed, informative, in-depth, and a minimum of 1,200 words. Use appropriate Markdown syntax to format the outline and ensure readability.\\n\\nPlease do your best, this is very important to my career.')]",
"research_report",
"report_type",
"You are an AI critical thinker research assistant. Your sole purpose is to write well written, critically acclaimed, objective and structured reports on given text.",
"{question}",
"[('system', 'You are an AI critical thinker research assistant. Your sole purpose is to write well written, critically acclaimed, objective and structured reports on given text.'), ('user', 'Information: \\n--------\\n{research_summary}\\n--------\\n\\nBased on the above information, generate a bibliography recommendation report for the following question or topic: \"{question}\". The report should provide a detailed analysis of each recommended resource, explaining how each source can contribute to finding answers to the research question. Focus on the relevance, reliability, and significance of each source. Ensure that the report is well-structured, informative, in-depth, and follows Markdown syntax. Include relevant facts, figures, and numbers whenever available. The report should have a minimum length of 1,200 words.\\n\\nPlease do your best, this is very important to my career.')]"
] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~chains~combine_documents~refine.py | """Combine documents by doing a first pass and then refining on more documents."""
from __future__ import annotations
from typing import Any, Dict, List, Tuple
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.prompts import BasePromptTemplate, format_document
from libs.core.langchain_core.prompts.prompt import PromptTemplate
from libs.core.langchain_core.pydantic_v1 import Extra, Field, root_validator
from langchain.callbacks.manager import Callbacks
from langchain.chains.combine_documents.base import (
BaseCombineDocumentsChain,
)
from langchain.chains.llm import LLMChain
def _get_default_document_prompt() -> PromptTemplate:
return PromptTemplate(input_variables=["page_content"], template="{page_content}")
class RefineDocumentsChain(BaseCombineDocumentsChain):
"""Combine documents by doing a first pass and then refining on more documents.
This algorithm first calls `initial_llm_chain` on the first document, passing
that first document in with the variable name `document_variable_name`, and
produces a new variable with the variable name `initial_response_name`.
Then, it loops over every remaining document. This is called the "refine" step.
It calls `refine_llm_chain`,
passing in that document with the variable name `document_variable_name`
as well as the previous response with the variable name `initial_response_name`.
Example:
.. code-block:: python
from langchain.chains import RefineDocumentsChain, LLMChain
from libs.core.langchain_core.prompts import PromptTemplate
from langchain.llms import OpenAI
# This controls how each document will be formatted. Specifically,
# it will be passed to `format_document` - see that function for more
# details.
document_prompt = PromptTemplate(
input_variables=["page_content"],
template="{page_content}"
)
document_variable_name = "context"
llm = OpenAI()
# The prompt here should take as an input variable the
# `document_variable_name`
prompt = PromptTemplate.from_template(
"Summarize this content: {context}"
)
initial_llm_chain = LLMChain(llm=llm, prompt=prompt)
initial_response_name = "prev_response"
# The prompt here should take as an input variable the
# `document_variable_name` as well as `initial_response_name`
prompt_refine = PromptTemplate.from_template(
"Here's your first summary: {prev_response}. "
"Now add to it based on the following context: {context}"
)
refine_llm_chain = LLMChain(llm=llm, prompt=prompt_refine)
chain = RefineDocumentsChain(
initial_llm_chain=initial_llm_chain,
refine_llm_chain=refine_llm_chain,
document_prompt=document_prompt,
document_variable_name=document_variable_name,
initial_response_name=initial_response_name,
)
"""
initial_llm_chain: LLMChain
"""LLM chain to use on initial document."""
refine_llm_chain: LLMChain
"""LLM chain to use when refining."""
document_variable_name: str
"""The variable name in the initial_llm_chain to put the documents in.
If only one variable in the initial_llm_chain, this need not be provided."""
initial_response_name: str
"""The variable name to format the initial response in when refining."""
document_prompt: BasePromptTemplate = Field(
default_factory=_get_default_document_prompt
)
"""Prompt to use to format each document, gets passed to `format_document`."""
return_intermediate_steps: bool = False
"""Return the results of the refine steps in the output."""
@property
def output_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
_output_keys = super().output_keys
if self.return_intermediate_steps:
_output_keys = _output_keys + ["intermediate_steps"]
return _output_keys
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def get_return_intermediate_steps(cls, values: Dict) -> Dict:
"""For backwards compatibility."""
if "return_refine_steps" in values:
values["return_intermediate_steps"] = values["return_refine_steps"]
del values["return_refine_steps"]
return values
@root_validator(pre=True)
def get_default_document_variable_name(cls, values: Dict) -> Dict:
"""Get default document variable name, if not provided."""
if "document_variable_name" not in values:
llm_chain_variables = values["initial_llm_chain"].prompt.input_variables
if len(llm_chain_variables) == 1:
values["document_variable_name"] = llm_chain_variables[0]
else:
raise ValueError(
"document_variable_name must be provided if there are "
"multiple llm_chain input_variables"
)
else:
llm_chain_variables = values["initial_llm_chain"].prompt.input_variables
if values["document_variable_name"] not in llm_chain_variables:
raise ValueError(
f"document_variable_name {values['document_variable_name']} was "
f"not found in llm_chain input_variables: {llm_chain_variables}"
)
return values
def combine_docs(
self, docs: List[Document], callbacks: Callbacks = None, **kwargs: Any
) -> Tuple[str, dict]:
"""Combine by mapping first chain over all, then stuffing into final chain.
Args:
docs: List of documents to combine
callbacks: Callbacks to be passed through
**kwargs: additional parameters to be passed to LLM calls (like other
input variables besides the documents)
Returns:
The first element returned is the single string output. The second
element returned is a dictionary of other keys to return.
"""
inputs = self._construct_initial_inputs(docs, **kwargs)
res = self.initial_llm_chain.predict(callbacks=callbacks, **inputs)
refine_steps = [res]
for doc in docs[1:]:
base_inputs = self._construct_refine_inputs(doc, res)
inputs = {**base_inputs, **kwargs}
res = self.refine_llm_chain.predict(callbacks=callbacks, **inputs)
refine_steps.append(res)
return self._construct_result(refine_steps, res)
async def acombine_docs(
self, docs: List[Document], callbacks: Callbacks = None, **kwargs: Any
) -> Tuple[str, dict]:
"""Async combine by mapping a first chain over all, then stuffing
into a final chain.
Args:
docs: List of documents to combine
callbacks: Callbacks to be passed through
**kwargs: additional parameters to be passed to LLM calls (like other
input variables besides the documents)
Returns:
The first element returned is the single string output. The second
element returned is a dictionary of other keys to return.
"""
inputs = self._construct_initial_inputs(docs, **kwargs)
res = await self.initial_llm_chain.apredict(callbacks=callbacks, **inputs)
refine_steps = [res]
for doc in docs[1:]:
base_inputs = self._construct_refine_inputs(doc, res)
inputs = {**base_inputs, **kwargs}
res = await self.refine_llm_chain.apredict(callbacks=callbacks, **inputs)
refine_steps.append(res)
return self._construct_result(refine_steps, res)
def _construct_result(self, refine_steps: List[str], res: str) -> Tuple[str, dict]:
if self.return_intermediate_steps:
extra_return_dict = {"intermediate_steps": refine_steps}
else:
extra_return_dict = {}
return res, extra_return_dict
def _construct_refine_inputs(self, doc: Document, res: str) -> Dict[str, Any]:
return {
self.document_variable_name: format_document(doc, self.document_prompt),
self.initial_response_name: res,
}
def _construct_initial_inputs(
self, docs: List[Document], **kwargs: Any
) -> Dict[str, Any]:
base_info = {"page_content": docs[0].page_content}
base_info.update(docs[0].metadata)
document_info = {k: base_info[k] for k in self.document_prompt.input_variables}
base_inputs: dict = {
self.document_variable_name: self.document_prompt.format(**document_info)
}
inputs = {**base_inputs, **kwargs}
return inputs
@property
def _chain_type(self) -> str:
return "refine_documents_chain"
| [
"{page_content}"
] |
2024-01-10 | mth93/langchain | libs~langchain~tests~integration_tests~cache~test_upstash_redis_cache.py | """Test Upstash Redis cache functionality."""
import uuid
import pytest
from libs.core.langchain_core.outputs import Generation, LLMResult
import langchain
from langchain.cache import UpstashRedisCache
from tests.unit_tests.llms.fake_chat_model import FakeChatModel
from tests.unit_tests.llms.fake_llm import FakeLLM
URL = "<UPSTASH_REDIS_REST_URL>"
TOKEN = "<UPSTASH_REDIS_REST_TOKEN>"
def random_string() -> str:
return str(uuid.uuid4())
@pytest.mark.requires("upstash_redis")
def test_redis_cache_ttl() -> None:
from upstash_redis import Redis
langchain.llm_cache = UpstashRedisCache(redis_=Redis(url=URL, token=TOKEN), ttl=1)
langchain.llm_cache.update("foo", "bar", [Generation(text="fizz")])
key = langchain.llm_cache._key("foo", "bar")
assert langchain.llm_cache.redis.pttl(key) > 0
@pytest.mark.requires("upstash_redis")
def test_redis_cache() -> None:
from upstash_redis import Redis
langchain.llm_cache = UpstashRedisCache(redis_=Redis(url=URL, token=TOKEN), ttl=1)
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
langchain.llm_cache.update("foo", llm_string, [Generation(text="fizz")])
output = llm.generate(["foo"])
expected_output = LLMResult(
generations=[[Generation(text="fizz")]],
llm_output={},
)
assert output == expected_output
lookup_output = langchain.llm_cache.lookup("foo", llm_string)
if lookup_output and len(lookup_output) > 0:
assert lookup_output == expected_output.generations[0]
langchain.llm_cache.clear()
output = llm.generate(["foo"])
assert output != expected_output
langchain.llm_cache.redis.flushall()
@pytest.mark.requires("upstash_redis")
def test_redis_cache_multi() -> None:
from upstash_redis import Redis
langchain.llm_cache = UpstashRedisCache(redis_=Redis(url=URL, token=TOKEN), ttl=1)
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
langchain.llm_cache.update(
"foo", llm_string, [Generation(text="fizz"), Generation(text="Buzz")]
)
    output = llm.generate(["foo"])  # should be served from the cache entry above
expected_output = LLMResult(
generations=[[Generation(text="fizz"), Generation(text="Buzz")]],
llm_output={},
)
assert output == expected_output
# clear the cache
langchain.llm_cache.clear()
@pytest.mark.requires("upstash_redis")
def test_redis_cache_chat() -> None:
from upstash_redis import Redis
langchain.llm_cache = UpstashRedisCache(redis_=Redis(url=URL, token=TOKEN), ttl=1)
llm = FakeChatModel()
params = llm.dict()
params["stop"] = None
with pytest.warns():
llm.predict("foo")
langchain.llm_cache.redis.flushall()
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~lakefs.py | import os
import tempfile
import urllib.parse
from typing import Any, List, Optional
from urllib.parse import urljoin
import requests
from libs.core.langchain_core.documents import Document
from requests.auth import HTTPBasicAuth
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.document_loaders.unstructured import UnstructuredBaseLoader
class LakeFSClient:
"""Client for lakeFS."""
def __init__(
self,
lakefs_access_key: str,
lakefs_secret_key: str,
lakefs_endpoint: str,
):
self.__endpoint = "/".join([lakefs_endpoint, "api", "v1/"])
self.__auth = HTTPBasicAuth(lakefs_access_key, lakefs_secret_key)
try:
health_check = requests.get(
urljoin(self.__endpoint, "healthcheck"), auth=self.__auth
)
health_check.raise_for_status()
except Exception:
raise ValueError(
"lakeFS server isn't accessible. Make sure lakeFS is running."
)
def ls_objects(
self, repo: str, ref: str, path: str, presign: Optional[bool]
) -> List:
qp = {"prefix": path, "presign": presign}
eqp = urllib.parse.urlencode(qp)
objects_ls_endpoint = urljoin(
self.__endpoint, f"repositories/{repo}/refs/{ref}/objects/ls?{eqp}"
)
olsr = requests.get(objects_ls_endpoint, auth=self.__auth)
olsr.raise_for_status()
olsr_json = olsr.json()
return list(
map(
lambda res: (res["path"], res["physical_address"]), olsr_json["results"]
)
)
def is_presign_supported(self) -> bool:
config_endpoint = self.__endpoint + "config"
response = requests.get(config_endpoint, auth=self.__auth)
response.raise_for_status()
config = response.json()
return config["storage_config"]["pre_sign_support"]
class LakeFSLoader(BaseLoader):
"""Load from `lakeFS`."""
repo: str
ref: str
path: str
def __init__(
self,
lakefs_access_key: str,
lakefs_secret_key: str,
lakefs_endpoint: str,
repo: Optional[str] = None,
ref: Optional[str] = "main",
path: Optional[str] = "",
):
"""
:param lakefs_access_key: [required] lakeFS server's access key
:param lakefs_secret_key: [required] lakeFS server's secret key
:param lakefs_endpoint: [required] lakeFS server's endpoint address,
ex: https://example.my-lakefs.com
:param repo: [optional, default = ''] target repository
:param ref: [optional, default = 'main'] target ref (branch name,
tag, or commit ID)
:param path: [optional, default = ''] target path
"""
self.__lakefs_client = LakeFSClient(
lakefs_access_key, lakefs_secret_key, lakefs_endpoint
)
self.repo = "" if repo is None or repo == "" else str(repo)
self.ref = "main" if ref is None or ref == "" else str(ref)
self.path = "" if path is None else str(path)
def set_path(self, path: str) -> None:
self.path = path
def set_ref(self, ref: str) -> None:
self.ref = ref
def set_repo(self, repo: str) -> None:
self.repo = repo
def load(self) -> List[Document]:
self.__validate_instance()
presigned = self.__lakefs_client.is_presign_supported()
docs: List[Document] = []
objs = self.__lakefs_client.ls_objects(
repo=self.repo, ref=self.ref, path=self.path, presign=presigned
)
for obj in objs:
lakefs_unstructured_loader = UnstructuredLakeFSLoader(
obj[1], self.repo, self.ref, obj[0], presigned
)
docs.extend(lakefs_unstructured_loader.load())
return docs
def __validate_instance(self) -> None:
if self.repo is None or self.repo == "":
raise ValueError(
"no repository was provided. use `set_repo` to specify a repository"
)
if self.ref is None or self.ref == "":
raise ValueError("no ref was provided. use `set_ref` to specify a ref")
if self.path is None:
raise ValueError("no path was provided. use `set_path` to specify a path")
class UnstructuredLakeFSLoader(UnstructuredBaseLoader):
"""Load from `lakeFS` as unstructured data."""
def __init__(
self,
url: str,
repo: str,
ref: str = "main",
path: str = "",
presign: bool = True,
**unstructured_kwargs: Any,
):
"""Initialize UnstructuredLakeFSLoader.
        Args:
            url: presigned or local URL of the object to load
            repo: target repository
            ref: target ref (branch name, tag, or commit ID)
            path: the object's path inside the repository
            presign: whether ``url`` is a pre-signed HTTP URL
"""
super().__init__(**unstructured_kwargs)
self.url = url
self.repo = repo
self.ref = ref
self.path = path
self.presign = presign
def _get_metadata(self) -> dict:
return {"repo": self.repo, "ref": self.ref, "path": self.path}
def _get_elements(self) -> List:
from unstructured.partition.auto import partition
local_prefix = "local://"
if self.presign:
with tempfile.TemporaryDirectory() as temp_dir:
file_path = f"{temp_dir}/{self.path.split('/')[-1]}"
os.makedirs(os.path.dirname(file_path), exist_ok=True)
response = requests.get(self.url)
response.raise_for_status()
with open(file_path, mode="wb") as file:
file.write(response.content)
return partition(filename=file_path)
elif not self.url.startswith(local_prefix):
raise ValueError(
"Non pre-signed URLs are supported only with 'local' blockstore"
)
else:
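            # A 'local://' address points at a file on the lakeFS server's
            # local blockstore, so it can be partitioned in place.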
local_path = self.url[len(local_prefix) :]
return partition(filename=local_path)
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~embeddings~cohere.py | from typing import Any, Dict, List, Optional
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra, root_validator
from libs.core.langchain_core.utils import get_from_dict_or_env
class CohereEmbeddings(BaseModel, Embeddings):
"""Cohere embedding models.
To use, you should have the ``cohere`` python package installed, and the
environment variable ``COHERE_API_KEY`` set with your API key or pass it
as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain_community.embeddings import CohereEmbeddings
cohere = CohereEmbeddings(
model="embed-english-light-v3.0",
cohere_api_key="my-api-key"
)
"""
client: Any #: :meta private:
"""Cohere client."""
async_client: Any #: :meta private:
"""Cohere async client."""
model: str = "embed-english-v2.0"
"""Model name to use."""
truncate: Optional[str] = None
"""Truncate embeddings that are too long from start or end ("NONE"|"START"|"END")"""
cohere_api_key: Optional[str] = None
max_retries: Optional[int] = None
"""Maximum number of retries to make when generating."""
request_timeout: Optional[float] = None
"""Timeout in seconds for the Cohere API request."""
user_agent: str = "langchain"
"""Identifier for the application making the request."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
cohere_api_key = get_from_dict_or_env(
values, "cohere_api_key", "COHERE_API_KEY"
)
max_retries = values.get("max_retries")
request_timeout = values.get("request_timeout")
try:
import cohere
client_name = values["user_agent"]
values["client"] = cohere.Client(
cohere_api_key,
max_retries=max_retries,
timeout=request_timeout,
client_name=client_name,
)
values["async_client"] = cohere.AsyncClient(
cohere_api_key,
max_retries=max_retries,
timeout=request_timeout,
client_name=client_name,
)
except ImportError:
raise ValueError(
"Could not import cohere python package. "
"Please install it with `pip install cohere`."
)
return values
def embed(
self, texts: List[str], *, input_type: Optional[str] = None
) -> List[List[float]]:
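        # input_type ("search_document" / "search_query") tells newer Cohere
        # embedding models (v3) how the text will be used.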
embeddings = self.client.embed(
model=self.model,
texts=texts,
input_type=input_type,
truncate=self.truncate,
).embeddings
return [list(map(float, e)) for e in embeddings]
async def aembed(
self, texts: List[str], *, input_type: Optional[str] = None
) -> List[List[float]]:
        response = await self.async_client.embed(
            model=self.model,
            texts=texts,
            input_type=input_type,
            truncate=self.truncate,
        )
        return [list(map(float, e)) for e in response.embeddings]
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed a list of document texts.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
return self.embed(texts, input_type="search_document")
async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
"""Async call out to Cohere's embedding endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
return await self.aembed(texts, input_type="search_document")
def embed_query(self, text: str) -> List[float]:
"""Call out to Cohere's embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self.embed([text], input_type="search_query")[0]
async def aembed_query(self, text: str) -> List[float]:
"""Async call out to Cohere's embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return (await self.aembed([text], input_type="search_query"))[0]
| [] |
2024-01-10 | mth93/langchain | libs~core~langchain_core~callbacks~manager.py | from __future__ import annotations
import asyncio
import functools
import logging
import uuid
from abc import ABC, abstractmethod
from concurrent.futures import ThreadPoolExecutor
from contextlib import asynccontextmanager, contextmanager
from contextvars import copy_context
from typing import (
TYPE_CHECKING,
Any,
AsyncGenerator,
Callable,
Coroutine,
Dict,
Generator,
List,
Optional,
Sequence,
Type,
TypeVar,
Union,
cast,
)
from uuid import UUID
from langsmith.run_helpers import get_run_tree_context
from tenacity import RetryCallState
from libs.core.langchain_core.callbacks.base import (
BaseCallbackHandler,
BaseCallbackManager,
Callbacks,
ChainManagerMixin,
LLMManagerMixin,
RetrieverManagerMixin,
RunManagerMixin,
ToolManagerMixin,
)
from libs.core.langchain_core.callbacks.stdout import StdOutCallbackHandler
from libs.core.langchain_core.messages import BaseMessage, get_buffer_string
from libs.core.langchain_core.utils.env import env_var_is_set
if TYPE_CHECKING:
from libs.core.langchain_core.agents import AgentAction, AgentFinish
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.outputs import ChatGenerationChunk, GenerationChunk, LLMResult
logger = logging.getLogger(__name__)
def _get_debug() -> bool:
from libs.core.langchain_core.globals import get_debug
return get_debug()
@contextmanager
def trace_as_chain_group(
group_name: str,
callback_manager: Optional[CallbackManager] = None,
*,
inputs: Optional[Dict[str, Any]] = None,
project_name: Optional[str] = None,
example_id: Optional[Union[str, UUID]] = None,
run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
) -> Generator[CallbackManagerForChainGroup, None, None]:
"""Get a callback manager for a chain group in a context manager.
Useful for grouping different calls together as a single run even if
they aren't composed in a single chain.
Args:
group_name (str): The name of the chain group.
callback_manager (CallbackManager, optional): The callback manager to use.
inputs (Dict[str, Any], optional): The inputs to the chain group.
project_name (str, optional): The name of the project.
Defaults to None.
example_id (str or UUID, optional): The ID of the example.
Defaults to None.
run_id (UUID, optional): The ID of the run.
tags (List[str], optional): The inheritable tags to apply to all runs.
Defaults to None.
Note: must have LANGCHAIN_TRACING_V2 env var set to true to see the trace in LangSmith.
Returns:
CallbackManagerForChainGroup: The callback manager for the chain group.
Example:
.. code-block:: python
llm_input = "Foo"
with trace_as_chain_group("group_name", inputs={"input": llm_input}) as manager:
# Use the callback manager for the chain group
res = llm.predict(llm_input, callbacks=manager)
manager.on_chain_end({"output": res})
""" # noqa: E501
from libs.core.langchain_core.tracers.context import _get_trace_callbacks
cb = _get_trace_callbacks(
project_name, example_id, callback_manager=callback_manager
)
cm = CallbackManager.configure(
inheritable_callbacks=cb,
inheritable_tags=tags,
)
run_manager = cm.on_chain_start({"name": group_name}, inputs or {}, run_id=run_id)
child_cm = run_manager.get_child()
group_cm = CallbackManagerForChainGroup(
child_cm.handlers,
child_cm.inheritable_handlers,
child_cm.parent_run_id,
parent_run_manager=run_manager,
tags=child_cm.tags,
inheritable_tags=child_cm.inheritable_tags,
metadata=child_cm.metadata,
inheritable_metadata=child_cm.inheritable_metadata,
)
try:
yield group_cm
except Exception as e:
if not group_cm.ended:
run_manager.on_chain_error(e)
raise e
else:
if not group_cm.ended:
run_manager.on_chain_end({})
@asynccontextmanager
async def atrace_as_chain_group(
group_name: str,
callback_manager: Optional[AsyncCallbackManager] = None,
*,
inputs: Optional[Dict[str, Any]] = None,
project_name: Optional[str] = None,
example_id: Optional[Union[str, UUID]] = None,
run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
) -> AsyncGenerator[AsyncCallbackManagerForChainGroup, None]:
"""Get an async callback manager for a chain group in a context manager.
Useful for grouping different async calls together as a single run even if
they aren't composed in a single chain.
Args:
group_name (str): The name of the chain group.
callback_manager (AsyncCallbackManager, optional): The async callback manager to use,
which manages tracing and other callback behavior.
project_name (str, optional): The name of the project.
Defaults to None.
example_id (str or UUID, optional): The ID of the example.
Defaults to None.
run_id (UUID, optional): The ID of the run.
tags (List[str], optional): The inheritable tags to apply to all runs.
Defaults to None.
Returns:
AsyncCallbackManager: The async callback manager for the chain group.
Note: must have LANGCHAIN_TRACING_V2 env var set to true to see the trace in LangSmith.
Example:
.. code-block:: python
llm_input = "Foo"
async with atrace_as_chain_group("group_name", inputs={"input": llm_input}) as manager:
# Use the async callback manager for the chain group
res = await llm.apredict(llm_input, callbacks=manager)
await manager.on_chain_end({"output": res})
""" # noqa: E501
from libs.core.langchain_core.tracers.context import _get_trace_callbacks
cb = _get_trace_callbacks(
project_name, example_id, callback_manager=callback_manager
)
cm = AsyncCallbackManager.configure(inheritable_callbacks=cb, inheritable_tags=tags)
run_manager = await cm.on_chain_start(
{"name": group_name}, inputs or {}, run_id=run_id
)
child_cm = run_manager.get_child()
group_cm = AsyncCallbackManagerForChainGroup(
child_cm.handlers,
child_cm.inheritable_handlers,
child_cm.parent_run_id,
parent_run_manager=run_manager,
tags=child_cm.tags,
inheritable_tags=child_cm.inheritable_tags,
metadata=child_cm.metadata,
inheritable_metadata=child_cm.inheritable_metadata,
)
try:
yield group_cm
except Exception as e:
if not group_cm.ended:
await run_manager.on_chain_error(e)
raise e
else:
if not group_cm.ended:
await run_manager.on_chain_end({})
def handle_event(
handlers: List[BaseCallbackHandler],
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
"""Generic event handler for CallbackManager.
Note: This function is used by langserve to handle events.
Args:
handlers: The list of handlers that will handle the event
event_name: The name of the event (e.g., "on_llm_start")
ignore_condition_name: Name of the attribute defined on handler
that if True will cause the handler to be skipped for the given event
*args: The arguments to pass to the event handler
**kwargs: The keyword arguments to pass to the event handler
"""
coros: List[Coroutine[Any, Any, Any]] = []
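    # Sync handler callbacks run inline; any coroutines returned by async
    # handlers are collected and executed together in the `finally` block.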
try:
message_strings: Optional[List[str]] = None
for handler in handlers:
try:
if ignore_condition_name is None or not getattr(
handler, ignore_condition_name
):
event = getattr(handler, event_name)(*args, **kwargs)
if asyncio.iscoroutine(event):
coros.append(event)
except NotImplementedError as e:
if event_name == "on_chat_model_start":
if message_strings is None:
message_strings = [get_buffer_string(m) for m in args[1]]
handle_event(
[handler],
"on_llm_start",
"ignore_llm",
args[0],
message_strings,
*args[2:],
**kwargs,
)
else:
handler_name = handler.__class__.__name__
logger.warning(
f"NotImplementedError in {handler_name}.{event_name}"
f" callback: {repr(e)}"
)
except Exception as e:
logger.warning(
f"Error in {handler.__class__.__name__}.{event_name} callback:"
f" {repr(e)}"
)
if handler.raise_error:
raise e
finally:
if coros:
try:
# Raises RuntimeError if there is no current event loop.
asyncio.get_running_loop()
loop_running = True
except RuntimeError:
loop_running = False
if loop_running:
# If we try to submit this coroutine to the running loop
# we end up in a deadlock, as we'd have gotten here from a
# running coroutine, which we cannot interrupt to run this one.
# The solution is to create a new loop in a new thread.
with ThreadPoolExecutor(1) as executor:
executor.submit(
cast(Callable, copy_context().run), _run_coros, coros
).result()
else:
_run_coros(coros)
def _run_coros(coros: List[Coroutine[Any, Any, Any]]) -> None:
if hasattr(asyncio, "Runner"):
# Python 3.11+
# Run the coroutines in a new event loop, taking care to
# - install signal handlers
# - run pending tasks scheduled by `coros`
# - close asyncgens and executors
# - close the loop
with asyncio.Runner() as runner:
# Run the coroutine, get the result
for coro in coros:
runner.run(coro)
# Run pending tasks scheduled by coros until they are all done
while pending := asyncio.all_tasks(runner.get_loop()):
runner.run(asyncio.wait(pending))
else:
# Before Python 3.11 we need to run each coroutine in a new event loop
# as the Runner api is not available.
for coro in coros:
asyncio.run(coro)
async def _ahandle_event_for_handler(
handler: BaseCallbackHandler,
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
try:
if ignore_condition_name is None or not getattr(handler, ignore_condition_name):
event = getattr(handler, event_name)
if asyncio.iscoroutinefunction(event):
await event(*args, **kwargs)
else:
if handler.run_inline:
event(*args, **kwargs)
else:
await asyncio.get_event_loop().run_in_executor(
None,
cast(
Callable,
functools.partial(
copy_context().run, event, *args, **kwargs
),
),
)
except NotImplementedError as e:
if event_name == "on_chat_model_start":
message_strings = [get_buffer_string(m) for m in args[1]]
await _ahandle_event_for_handler(
handler,
"on_llm_start",
"ignore_llm",
args[0],
message_strings,
*args[2:],
**kwargs,
)
else:
logger.warning(
f"NotImplementedError in {handler.__class__.__name__}.{event_name}"
f" callback: {repr(e)}"
)
except Exception as e:
logger.warning(
f"Error in {handler.__class__.__name__}.{event_name} callback:"
f" {repr(e)}"
)
if handler.raise_error:
raise e
async def ahandle_event(
handlers: List[BaseCallbackHandler],
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
"""Generic event handler for AsyncCallbackManager.
Note: This function is used by langserve to handle events.
Args:
handlers: The list of handlers that will handle the event
event_name: The name of the event (e.g., "on_llm_start")
ignore_condition_name: Name of the attribute defined on handler
that if True will cause the handler to be skipped for the given event
*args: The arguments to pass to the event handler
**kwargs: The keyword arguments to pass to the event handler
"""
for handler in [h for h in handlers if h.run_inline]:
await _ahandle_event_for_handler(
handler, event_name, ignore_condition_name, *args, **kwargs
)
await asyncio.gather(
*(
_ahandle_event_for_handler(
handler,
event_name,
ignore_condition_name,
*args,
**kwargs,
)
for handler in handlers
if not handler.run_inline
)
)
BRM = TypeVar("BRM", bound="BaseRunManager")
class BaseRunManager(RunManagerMixin):
"""Base class for run manager (a bound callback manager)."""
def __init__(
self,
*,
run_id: UUID,
handlers: List[BaseCallbackHandler],
inheritable_handlers: List[BaseCallbackHandler],
parent_run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
inheritable_tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
inheritable_metadata: Optional[Dict[str, Any]] = None,
) -> None:
"""Initialize the run manager.
Args:
run_id (UUID): The ID of the run.
handlers (List[BaseCallbackHandler]): The list of handlers.
inheritable_handlers (List[BaseCallbackHandler]):
The list of inheritable handlers.
parent_run_id (UUID, optional): The ID of the parent run.
Defaults to None.
tags (Optional[List[str]]): The list of tags.
inheritable_tags (Optional[List[str]]): The list of inheritable tags.
metadata (Optional[Dict[str, Any]]): The metadata.
inheritable_metadata (Optional[Dict[str, Any]]): The inheritable metadata.
"""
self.run_id = run_id
self.handlers = handlers
self.inheritable_handlers = inheritable_handlers
self.parent_run_id = parent_run_id
self.tags = tags or []
self.inheritable_tags = inheritable_tags or []
self.metadata = metadata or {}
self.inheritable_metadata = inheritable_metadata or {}
@classmethod
def get_noop_manager(cls: Type[BRM]) -> BRM:
"""Return a manager that doesn't perform any operations.
Returns:
BaseRunManager: The noop manager.
"""
return cls(
run_id=uuid.uuid4(),
handlers=[],
inheritable_handlers=[],
tags=[],
inheritable_tags=[],
metadata={},
inheritable_metadata={},
)
class RunManager(BaseRunManager):
"""Sync Run Manager."""
def on_text(
self,
text: str,
**kwargs: Any,
) -> Any:
"""Run when text is received.
Args:
text (str): The received text.
Returns:
Any: The result of the callback.
"""
handle_event(
self.handlers,
"on_text",
None,
text,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_retry(
self,
retry_state: RetryCallState,
**kwargs: Any,
) -> None:
handle_event(
self.handlers,
"on_retry",
"ignore_retry",
retry_state,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class ParentRunManager(RunManager):
"""Sync Parent Run Manager."""
def get_child(self, tag: Optional[str] = None) -> CallbackManager:
"""Get a child callback manager.
Args:
tag (str, optional): The tag for the child callback manager.
Defaults to None.
Returns:
CallbackManager: The child callback manager.
"""
manager = CallbackManager(handlers=[], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
manager.add_tags(self.inheritable_tags)
manager.add_metadata(self.inheritable_metadata)
if tag is not None:
manager.add_tags([tag], False)
return manager
class AsyncRunManager(BaseRunManager, ABC):
"""Async Run Manager."""
@abstractmethod
def get_sync(self) -> RunManager:
"""Get the equivalent sync RunManager.
Returns:
RunManager: The sync RunManager.
"""
async def on_text(
self,
text: str,
**kwargs: Any,
) -> Any:
"""Run when text is received.
Args:
text (str): The received text.
Returns:
Any: The result of the callback.
"""
await ahandle_event(
self.handlers,
"on_text",
None,
text,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_retry(
self,
retry_state: RetryCallState,
**kwargs: Any,
) -> None:
await ahandle_event(
self.handlers,
"on_retry",
"ignore_retry",
retry_state,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class AsyncParentRunManager(AsyncRunManager):
"""Async Parent Run Manager."""
def get_child(self, tag: Optional[str] = None) -> AsyncCallbackManager:
"""Get a child callback manager.
Args:
tag (str, optional): The tag for the child callback manager.
Defaults to None.
Returns:
AsyncCallbackManager: The child callback manager.
"""
manager = AsyncCallbackManager(handlers=[], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
manager.add_tags(self.inheritable_tags)
manager.add_metadata(self.inheritable_metadata)
if tag is not None:
manager.add_tags([tag], False)
return manager
class CallbackManagerForLLMRun(RunManager, LLMManagerMixin):
"""Callback manager for LLM run."""
def on_llm_new_token(
self,
token: str,
*,
chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
**kwargs: Any,
) -> None:
"""Run when LLM generates a new token.
Args:
token (str): The new token.
"""
handle_event(
self.handlers,
"on_llm_new_token",
"ignore_llm",
token=token,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
chunk=chunk,
**kwargs,
)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running.
Args:
response (LLMResult): The LLM result.
"""
handle_event(
self.handlers,
"on_llm_end",
"ignore_llm",
response,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_llm_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when LLM errors.
Args:
error (Exception or KeyboardInterrupt): The error.
kwargs (Any): Additional keyword arguments.
- response (LLMResult): The response which was generated before
the error occurred.
"""
handle_event(
self.handlers,
"on_llm_error",
"ignore_llm",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin):
"""Async callback manager for LLM run."""
def get_sync(self) -> CallbackManagerForLLMRun:
"""Get the equivalent sync RunManager.
Returns:
CallbackManagerForLLMRun: The sync RunManager.
"""
return CallbackManagerForLLMRun(
run_id=self.run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
async def on_llm_new_token(
self,
token: str,
*,
chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
**kwargs: Any,
) -> None:
"""Run when LLM generates a new token.
Args:
token (str): The new token.
"""
await ahandle_event(
self.handlers,
"on_llm_new_token",
"ignore_llm",
token,
chunk=chunk,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running.
Args:
response (LLMResult): The LLM result.
"""
await ahandle_event(
self.handlers,
"on_llm_end",
"ignore_llm",
response,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_llm_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when LLM errors.
Args:
error (Exception or KeyboardInterrupt): The error.
kwargs (Any): Additional keyword arguments.
- response (LLMResult): The response which was generated before
the error occurred.
"""
await ahandle_event(
self.handlers,
"on_llm_error",
"ignore_llm",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class CallbackManagerForChainRun(ParentRunManager, ChainManagerMixin):
"""Callback manager for chain run."""
def on_chain_end(self, outputs: Union[Dict[str, Any], Any], **kwargs: Any) -> None:
"""Run when chain ends running.
Args:
outputs (Union[Dict[str, Any], Any]): The outputs of the chain.
"""
handle_event(
self.handlers,
"on_chain_end",
"ignore_chain",
outputs,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_chain_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when chain errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
handle_event(
self.handlers,
"on_chain_error",
"ignore_chain",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run when agent action is received.
Args:
action (AgentAction): The agent action.
Returns:
Any: The result of the callback.
"""
handle_event(
self.handlers,
"on_agent_action",
"ignore_agent",
action,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run when agent finish is received.
Args:
finish (AgentFinish): The agent finish.
Returns:
Any: The result of the callback.
"""
handle_event(
self.handlers,
"on_agent_finish",
"ignore_agent",
finish,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class AsyncCallbackManagerForChainRun(AsyncParentRunManager, ChainManagerMixin):
"""Async callback manager for chain run."""
def get_sync(self) -> CallbackManagerForChainRun:
"""Get the equivalent sync RunManager.
Returns:
CallbackManagerForChainRun: The sync RunManager.
"""
return CallbackManagerForChainRun(
run_id=self.run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
async def on_chain_end(
self, outputs: Union[Dict[str, Any], Any], **kwargs: Any
) -> None:
"""Run when chain ends running.
Args:
outputs (Union[Dict[str, Any], Any]): The outputs of the chain.
"""
await ahandle_event(
self.handlers,
"on_chain_end",
"ignore_chain",
outputs,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_chain_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when chain errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
await ahandle_event(
self.handlers,
"on_chain_error",
"ignore_chain",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run when agent action is received.
Args:
action (AgentAction): The agent action.
Returns:
Any: The result of the callback.
"""
await ahandle_event(
self.handlers,
"on_agent_action",
"ignore_agent",
action,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run when agent finish is received.
Args:
finish (AgentFinish): The agent finish.
Returns:
Any: The result of the callback.
"""
await ahandle_event(
self.handlers,
"on_agent_finish",
"ignore_agent",
finish,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class CallbackManagerForToolRun(ParentRunManager, ToolManagerMixin):
"""Callback manager for tool run."""
def on_tool_end(
self,
output: str,
**kwargs: Any,
) -> None:
"""Run when tool ends running.
Args:
output (str): The output of the tool.
"""
handle_event(
self.handlers,
"on_tool_end",
"ignore_agent",
output,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_tool_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when tool errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
handle_event(
self.handlers,
"on_tool_error",
"ignore_agent",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class AsyncCallbackManagerForToolRun(AsyncParentRunManager, ToolManagerMixin):
"""Async callback manager for tool run."""
def get_sync(self) -> CallbackManagerForToolRun:
"""Get the equivalent sync RunManager.
Returns:
CallbackManagerForToolRun: The sync RunManager.
"""
return CallbackManagerForToolRun(
run_id=self.run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
async def on_tool_end(self, output: str, **kwargs: Any) -> None:
"""Run when tool ends running.
Args:
output (str): The output of the tool.
"""
await ahandle_event(
self.handlers,
"on_tool_end",
"ignore_agent",
output,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_tool_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when tool errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
await ahandle_event(
self.handlers,
"on_tool_error",
"ignore_agent",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class CallbackManagerForRetrieverRun(ParentRunManager, RetrieverManagerMixin):
"""Callback manager for retriever run."""
def on_retriever_end(
self,
documents: Sequence[Document],
**kwargs: Any,
) -> None:
"""Run when retriever ends running."""
handle_event(
self.handlers,
"on_retriever_end",
"ignore_retriever",
documents,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_retriever_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when retriever errors."""
handle_event(
self.handlers,
"on_retriever_error",
"ignore_retriever",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class AsyncCallbackManagerForRetrieverRun(
AsyncParentRunManager,
RetrieverManagerMixin,
):
"""Async callback manager for retriever run."""
def get_sync(self) -> CallbackManagerForRetrieverRun:
"""Get the equivalent sync RunManager.
Returns:
CallbackManagerForRetrieverRun: The sync RunManager.
"""
return CallbackManagerForRetrieverRun(
run_id=self.run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
async def on_retriever_end(
self, documents: Sequence[Document], **kwargs: Any
) -> None:
"""Run when retriever ends running."""
await ahandle_event(
self.handlers,
"on_retriever_end",
"ignore_retriever",
documents,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_retriever_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when retriever errors."""
await ahandle_event(
self.handlers,
"on_retriever_error",
"ignore_retriever",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class CallbackManager(BaseCallbackManager):
"""Callback manager that handles callbacks from LangChain."""
def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
**kwargs: Any,
) -> List[CallbackManagerForLLMRun]:
"""Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
prompts (List[str]): The list of prompts.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
List[CallbackManagerForLLMRun]: A callback manager for each
prompt as an LLM run.
"""
managers = []
for prompt in prompts:
run_id_ = uuid.uuid4()
handle_event(
self.handlers,
"on_llm_start",
"ignore_llm",
serialized,
[prompt],
run_id=run_id_,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
managers.append(
CallbackManagerForLLMRun(
run_id=run_id_,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
)
return managers
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
**kwargs: Any,
) -> List[CallbackManagerForLLMRun]:
"""Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
messages (List[List[BaseMessage]]): The list of messages.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
List[CallbackManagerForLLMRun]: A callback manager for each
list of messages as an LLM run.
"""
managers = []
for message_list in messages:
run_id_ = uuid.uuid4()
handle_event(
self.handlers,
"on_chat_model_start",
"ignore_chat_model",
serialized,
[message_list],
run_id=run_id_,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
managers.append(
CallbackManagerForLLMRun(
run_id=run_id_,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
)
return managers
def on_chain_start(
self,
serialized: Dict[str, Any],
inputs: Union[Dict[str, Any], Any],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForChainRun:
"""Run when chain starts running.
Args:
serialized (Dict[str, Any]): The serialized chain.
inputs (Union[Dict[str, Any], Any]): The inputs to the chain.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
CallbackManagerForChainRun: The callback manager for the chain run.
"""
if run_id is None:
run_id = uuid.uuid4()
handle_event(
self.handlers,
"on_chain_start",
"ignore_chain",
serialized,
inputs,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return CallbackManagerForChainRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForToolRun:
"""Run when tool starts running.
Args:
serialized (Dict[str, Any]): The serialized tool.
input_str (str): The input to the tool.
run_id (UUID, optional): The ID of the run. Defaults to None.
parent_run_id (UUID, optional): The ID of the parent run. Defaults to None.
Returns:
CallbackManagerForToolRun: The callback manager for the tool run.
"""
if run_id is None:
run_id = uuid.uuid4()
handle_event(
self.handlers,
"on_tool_start",
"ignore_agent",
serialized,
input_str,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return CallbackManagerForToolRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
def on_retriever_start(
self,
serialized: Dict[str, Any],
query: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForRetrieverRun:
"""Run when retriever starts running."""
if run_id is None:
run_id = uuid.uuid4()
handle_event(
self.handlers,
"on_retriever_start",
"ignore_retriever",
serialized,
query,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return CallbackManagerForRetrieverRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
@classmethod
def configure(
cls,
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
inheritable_tags: Optional[List[str]] = None,
local_tags: Optional[List[str]] = None,
inheritable_metadata: Optional[Dict[str, Any]] = None,
local_metadata: Optional[Dict[str, Any]] = None,
) -> CallbackManager:
"""Configure the callback manager.
Args:
inheritable_callbacks (Optional[Callbacks], optional): The inheritable
callbacks. Defaults to None.
local_callbacks (Optional[Callbacks], optional): The local callbacks.
Defaults to None.
verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
inheritable_tags (Optional[List[str]], optional): The inheritable tags.
Defaults to None.
local_tags (Optional[List[str]], optional): The local tags.
Defaults to None.
inheritable_metadata (Optional[Dict[str, Any]], optional): The inheritable
metadata. Defaults to None.
local_metadata (Optional[Dict[str, Any]], optional): The local metadata.
Defaults to None.
Returns:
CallbackManager: The configured callback manager.
"""
return _configure(
cls,
inheritable_callbacks,
local_callbacks,
verbose,
inheritable_tags,
local_tags,
inheritable_metadata,
local_metadata,
)
class CallbackManagerForChainGroup(CallbackManager):
"""Callback manager for the chain group."""
def __init__(
self,
handlers: List[BaseCallbackHandler],
inheritable_handlers: Optional[List[BaseCallbackHandler]] = None,
parent_run_id: Optional[UUID] = None,
*,
parent_run_manager: CallbackManagerForChainRun,
**kwargs: Any,
) -> None:
super().__init__(
handlers,
inheritable_handlers,
parent_run_id,
**kwargs,
)
self.parent_run_manager = parent_run_manager
self.ended = False
def copy(self) -> CallbackManagerForChainGroup:
return self.__class__(
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
parent_run_manager=self.parent_run_manager,
)
def on_chain_end(self, outputs: Union[Dict[str, Any], Any], **kwargs: Any) -> None:
"""Run when traced chain group ends.
Args:
outputs (Union[Dict[str, Any], Any]): The outputs of the chain.
"""
self.ended = True
return self.parent_run_manager.on_chain_end(outputs, **kwargs)
def on_chain_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when chain errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
self.ended = True
return self.parent_run_manager.on_chain_error(error, **kwargs)
class AsyncCallbackManager(BaseCallbackManager):
"""Async callback manager that handles callbacks from LangChain."""
@property
def is_async(self) -> bool:
"""Return whether the handler is async."""
return True
async def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
**kwargs: Any,
) -> List[AsyncCallbackManagerForLLMRun]:
"""Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
prompts (List[str]): The list of prompts.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
List[AsyncCallbackManagerForLLMRun]: The list of async
callback managers, one for each LLM Run corresponding
to each prompt.
"""
tasks = []
managers = []
for prompt in prompts:
run_id_ = uuid.uuid4()
tasks.append(
ahandle_event(
self.handlers,
"on_llm_start",
"ignore_llm",
serialized,
[prompt],
run_id=run_id_,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
)
managers.append(
AsyncCallbackManagerForLLMRun(
run_id=run_id_,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
)
await asyncio.gather(*tasks)
return managers
async def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
**kwargs: Any,
) -> List[AsyncCallbackManagerForLLMRun]:
"""Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
messages (List[List[BaseMessage]]): The list of messages.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
List[AsyncCallbackManagerForLLMRun]: The list of
async callback managers, one for each LLM Run
corresponding to each inner message list.
"""
tasks = []
managers = []
for message_list in messages:
run_id_ = uuid.uuid4()
tasks.append(
ahandle_event(
self.handlers,
"on_chat_model_start",
"ignore_chat_model",
serialized,
[message_list],
run_id=run_id_,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
)
managers.append(
AsyncCallbackManagerForLLMRun(
run_id=run_id_,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
)
await asyncio.gather(*tasks)
return managers
async def on_chain_start(
self,
serialized: Dict[str, Any],
inputs: Union[Dict[str, Any], Any],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForChainRun:
"""Run when chain starts running.
Args:
serialized (Dict[str, Any]): The serialized chain.
inputs (Union[Dict[str, Any], Any]): The inputs to the chain.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
AsyncCallbackManagerForChainRun: The async callback manager
for the chain run.
"""
if run_id is None:
run_id = uuid.uuid4()
await ahandle_event(
self.handlers,
"on_chain_start",
"ignore_chain",
serialized,
inputs,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return AsyncCallbackManagerForChainRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
async def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForToolRun:
"""Run when tool starts running.
Args:
serialized (Dict[str, Any]): The serialized tool.
input_str (str): The input to the tool.
run_id (UUID, optional): The ID of the run. Defaults to None.
parent_run_id (UUID, optional): The ID of the parent run.
Defaults to None.
Returns:
AsyncCallbackManagerForToolRun: The async callback manager
for the tool run.
"""
if run_id is None:
run_id = uuid.uuid4()
await ahandle_event(
self.handlers,
"on_tool_start",
"ignore_agent",
serialized,
input_str,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return AsyncCallbackManagerForToolRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
async def on_retriever_start(
self,
serialized: Dict[str, Any],
query: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForRetrieverRun:
"""Run when retriever starts running."""
if run_id is None:
run_id = uuid.uuid4()
await ahandle_event(
self.handlers,
"on_retriever_start",
"ignore_retriever",
serialized,
query,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return AsyncCallbackManagerForRetrieverRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
@classmethod
def configure(
cls,
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
inheritable_tags: Optional[List[str]] = None,
local_tags: Optional[List[str]] = None,
inheritable_metadata: Optional[Dict[str, Any]] = None,
local_metadata: Optional[Dict[str, Any]] = None,
) -> AsyncCallbackManager:
"""Configure the async callback manager.
Args:
inheritable_callbacks (Optional[Callbacks], optional): The inheritable
callbacks. Defaults to None.
local_callbacks (Optional[Callbacks], optional): The local callbacks.
Defaults to None.
verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
inheritable_tags (Optional[List[str]], optional): The inheritable tags.
Defaults to None.
local_tags (Optional[List[str]], optional): The local tags.
Defaults to None.
inheritable_metadata (Optional[Dict[str, Any]], optional): The inheritable
metadata. Defaults to None.
local_metadata (Optional[Dict[str, Any]], optional): The local metadata.
Defaults to None.
Returns:
AsyncCallbackManager: The configured async callback manager.
"""
return _configure(
cls,
inheritable_callbacks,
local_callbacks,
verbose,
inheritable_tags,
local_tags,
inheritable_metadata,
local_metadata,
)
class AsyncCallbackManagerForChainGroup(AsyncCallbackManager):
"""Async callback manager for the chain group."""
def __init__(
self,
handlers: List[BaseCallbackHandler],
inheritable_handlers: Optional[List[BaseCallbackHandler]] = None,
parent_run_id: Optional[UUID] = None,
*,
parent_run_manager: AsyncCallbackManagerForChainRun,
**kwargs: Any,
) -> None:
super().__init__(
handlers,
inheritable_handlers,
parent_run_id,
**kwargs,
)
self.parent_run_manager = parent_run_manager
self.ended = False
def copy(self) -> AsyncCallbackManagerForChainGroup:
return self.__class__(
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
parent_run_manager=self.parent_run_manager,
)
async def on_chain_end(
self, outputs: Union[Dict[str, Any], Any], **kwargs: Any
) -> None:
"""Run when traced chain group ends.
Args:
outputs (Union[Dict[str, Any], Any]): The outputs of the chain.
"""
self.ended = True
await self.parent_run_manager.on_chain_end(outputs, **kwargs)
async def on_chain_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when chain errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
self.ended = True
await self.parent_run_manager.on_chain_error(error, **kwargs)
T = TypeVar("T", CallbackManager, AsyncCallbackManager)
H = TypeVar("H", bound=BaseCallbackHandler, covariant=True)
def _configure(
callback_manager_cls: Type[T],
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
inheritable_tags: Optional[List[str]] = None,
local_tags: Optional[List[str]] = None,
inheritable_metadata: Optional[Dict[str, Any]] = None,
local_metadata: Optional[Dict[str, Any]] = None,
) -> T:
"""Configure the callback manager.
Args:
callback_manager_cls (Type[T]): The callback manager class.
inheritable_callbacks (Optional[Callbacks], optional): The inheritable
callbacks. Defaults to None.
local_callbacks (Optional[Callbacks], optional): The local callbacks.
Defaults to None.
verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
inheritable_tags (Optional[List[str]], optional): The inheritable tags.
Defaults to None.
local_tags (Optional[List[str]], optional): The local tags. Defaults to None.
inheritable_metadata (Optional[Dict[str, Any]], optional): The inheritable
metadata. Defaults to None.
local_metadata (Optional[Dict[str, Any]], optional): The local metadata.
Defaults to None.
Returns:
T: The configured callback manager.
"""
from libs.core.langchain_core.tracers.context import (
_configure_hooks,
_get_tracer_project,
_tracing_v2_is_enabled,
tracing_v2_callback_var,
)
run_tree = get_run_tree_context()
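    # If we are inside a LangSmith-traced context, parent any new runs under
    # that run so traces nest correctly.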
parent_run_id = None if run_tree is None else getattr(run_tree, "id")
callback_manager = callback_manager_cls(handlers=[], parent_run_id=parent_run_id)
if inheritable_callbacks or local_callbacks:
if isinstance(inheritable_callbacks, list) or inheritable_callbacks is None:
inheritable_callbacks_ = inheritable_callbacks or []
callback_manager = callback_manager_cls(
handlers=inheritable_callbacks_.copy(),
inheritable_handlers=inheritable_callbacks_.copy(),
parent_run_id=parent_run_id,
)
else:
callback_manager = callback_manager_cls(
handlers=inheritable_callbacks.handlers.copy(),
inheritable_handlers=inheritable_callbacks.inheritable_handlers.copy(),
parent_run_id=inheritable_callbacks.parent_run_id,
tags=inheritable_callbacks.tags.copy(),
inheritable_tags=inheritable_callbacks.inheritable_tags.copy(),
metadata=inheritable_callbacks.metadata.copy(),
inheritable_metadata=inheritable_callbacks.inheritable_metadata.copy(),
)
local_handlers_ = (
local_callbacks
if isinstance(local_callbacks, list)
else (local_callbacks.handlers if local_callbacks else [])
)
for handler in local_handlers_:
callback_manager.add_handler(handler, False)
if inheritable_tags or local_tags:
callback_manager.add_tags(inheritable_tags or [])
callback_manager.add_tags(local_tags or [], False)
if inheritable_metadata or local_metadata:
callback_manager.add_metadata(inheritable_metadata or {})
callback_manager.add_metadata(local_metadata or {}, False)
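    # Decide which extra handlers (stdout, console debug, LangChain tracer)
    # to attach based on verbose/debug flags and tracing configuration.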
tracer_v2 = tracing_v2_callback_var.get()
tracing_v2_enabled_ = _tracing_v2_is_enabled()
tracer_project = _get_tracer_project()
debug = _get_debug()
if verbose or debug or tracing_v2_enabled_:
from libs.core.langchain_core.tracers.langchain import LangChainTracer
from libs.core.langchain_core.tracers.stdout import ConsoleCallbackHandler
if verbose and not any(
isinstance(handler, StdOutCallbackHandler)
for handler in callback_manager.handlers
):
if debug:
# We will use ConsoleCallbackHandler instead of StdOutCallbackHandler
pass
else:
callback_manager.add_handler(StdOutCallbackHandler(), False)
if debug and not any(
isinstance(handler, ConsoleCallbackHandler)
for handler in callback_manager.handlers
):
callback_manager.add_handler(ConsoleCallbackHandler(), True)
if tracing_v2_enabled_ and not any(
isinstance(handler, LangChainTracer)
for handler in callback_manager.handlers
):
if tracer_v2:
callback_manager.add_handler(tracer_v2, True)
else:
try:
handler = LangChainTracer(project_name=tracer_project)
callback_manager.add_handler(handler, True)
except Exception as e:
logger.warning(
"Unable to load requested LangChainTracer."
" To disable this warning,"
" unset the LANGCHAIN_TRACING_V2 environment variables.",
e,
)
for var, inheritable, handler_class, env_var in _configure_hooks:
create_one = (
env_var is not None
and env_var_is_set(env_var)
and handler_class is not None
)
if var.get() is not None or create_one:
var_handler = var.get() or cast(Type[BaseCallbackHandler], handler_class)()
if handler_class is None:
if not any(
handler is var_handler # direct pointer comparison
for handler in callback_manager.handlers
):
callback_manager.add_handler(var_handler, inheritable)
else:
if not any(
isinstance(handler, handler_class)
for handler in callback_manager.handlers
):
callback_manager.add_handler(var_handler, inheritable)
return callback_manager
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~tools~azure_cognitive_services~image_analysis.py | from __future__ import annotations
import logging
from typing import Any, Dict, Optional
from libs.core.langchain_core.callbacks import CallbackManagerForToolRun
from libs.core.langchain_core.pydantic_v1 import root_validator
from libs.core.langchain_core.tools import BaseTool
from libs.core.langchain_core.utils import get_from_dict_or_env
from langchain_community.tools.azure_cognitive_services.utils import (
detect_file_src_type,
)
logger = logging.getLogger(__name__)
class AzureCogsImageAnalysisTool(BaseTool):
"""Tool that queries the Azure Cognitive Services Image Analysis API.
In order to set this up, follow instructions at:
https://learn.microsoft.com/en-us/azure/cognitive-services/computer-vision/quickstarts-sdk/image-analysis-client-library-40
"""
azure_cogs_key: str = "" #: :meta private:
azure_cogs_endpoint: str = "" #: :meta private:
vision_service: Any #: :meta private:
analysis_options: Any #: :meta private:
name: str = "azure_cognitive_services_image_analysis"
description: str = (
"A wrapper around Azure Cognitive Services Image Analysis. "
"Useful for when you need to analyze images. "
"Input should be a url to an image."
)
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and endpoint exists in environment."""
azure_cogs_key = get_from_dict_or_env(
values, "azure_cogs_key", "AZURE_COGS_KEY"
)
azure_cogs_endpoint = get_from_dict_or_env(
values, "azure_cogs_endpoint", "AZURE_COGS_ENDPOINT"
)
try:
import azure.ai.vision as sdk
values["vision_service"] = sdk.VisionServiceOptions(
endpoint=azure_cogs_endpoint, key=azure_cogs_key
)
values["analysis_options"] = sdk.ImageAnalysisOptions()
values["analysis_options"].features = (
sdk.ImageAnalysisFeature.CAPTION
| sdk.ImageAnalysisFeature.OBJECTS
| sdk.ImageAnalysisFeature.TAGS
| sdk.ImageAnalysisFeature.TEXT
)
except ImportError:
raise ImportError(
"azure-ai-vision is not installed. "
"Run `pip install azure-ai-vision` to install."
)
return values
def _image_analysis(self, image_path: str) -> Dict:
try:
import azure.ai.vision as sdk
        except ImportError:
            raise ImportError(
                "azure-ai-vision is not installed. "
                "Run `pip install azure-ai-vision` to install."
            )
image_src_type = detect_file_src_type(image_path)
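        # The query may be a local file path or a remote URL; anything else
        # is rejected.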
if image_src_type == "local":
vision_source = sdk.VisionSource(filename=image_path)
elif image_src_type == "remote":
vision_source = sdk.VisionSource(url=image_path)
else:
raise ValueError(f"Invalid image path: {image_path}")
image_analyzer = sdk.ImageAnalyzer(
self.vision_service, vision_source, self.analysis_options
)
result = image_analyzer.analyze()
res_dict = {}
if result.reason == sdk.ImageAnalysisResultReason.ANALYZED:
if result.caption is not None:
res_dict["caption"] = result.caption.content
if result.objects is not None:
res_dict["objects"] = [obj.name for obj in result.objects]
if result.tags is not None:
res_dict["tags"] = [tag.name for tag in result.tags]
if result.text is not None:
res_dict["text"] = [line.content for line in result.text.lines]
else:
error_details = sdk.ImageAnalysisErrorDetails.from_result(result)
raise RuntimeError(
f"Image analysis failed.\n"
f"Reason: {error_details.reason}\n"
f"Details: {error_details.message}"
)
return res_dict
def _format_image_analysis_result(self, image_analysis_result: Dict) -> str:
formatted_result = []
if "caption" in image_analysis_result:
formatted_result.append("Caption: " + image_analysis_result["caption"])
if (
"objects" in image_analysis_result
and len(image_analysis_result["objects"]) > 0
):
formatted_result.append(
"Objects: " + ", ".join(image_analysis_result["objects"])
)
if "tags" in image_analysis_result and len(image_analysis_result["tags"]) > 0:
formatted_result.append("Tags: " + ", ".join(image_analysis_result["tags"]))
if "text" in image_analysis_result and len(image_analysis_result["text"]) > 0:
formatted_result.append("Text: " + ", ".join(image_analysis_result["text"]))
return "\n".join(formatted_result)
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
try:
image_analysis_result = self._image_analysis(query)
if not image_analysis_result:
return "No good image analysis result was found"
return self._format_image_analysis_result(image_analysis_result)
except Exception as e:
raise RuntimeError(f"Error while running AzureCogsImageAnalysisTool: {e}")
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~graphs~networkx_graph.py | from langchain_community.graphs.networkx_graph import (
KG_TRIPLE_DELIMITER,
KnowledgeTriple,
NetworkxEntityGraph,
get_entities,
parse_triples,
)
__all__ = [
"KG_TRIPLE_DELIMITER",
"KnowledgeTriple",
"parse_triples",
"get_entities",
"NetworkxEntityGraph",
]
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~retrievers~ensemble.py | """
Ensemble retriever that ensembles the results of
multiple retrievers using weighted Reciprocal Rank Fusion.
"""
from typing import Any, Dict, List
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.pydantic_v1 import root_validator
from libs.core.langchain_core.retrievers import BaseRetriever
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
class EnsembleRetriever(BaseRetriever):
"""Retriever that ensembles the multiple retrievers.
It uses a rank fusion.
Args:
retrievers: A list of retrievers to ensemble.
weights: A list of weights corresponding to the retrievers. Defaults to equal
weighting for all retrievers.
c: A constant added to the rank, controlling the balance between the importance
of high-ranked items and the consideration given to lower-ranked items.
Default is 60.
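    Example:
        .. code-block:: python
            # Illustrative sketch only: `bm25_retriever` and `faiss_retriever`
            # stand in for any two existing BaseRetriever instances.
            from langchain.retrievers import EnsembleRetriever
            ensemble = EnsembleRetriever(
                retrievers=[bm25_retriever, faiss_retriever], weights=[0.5, 0.5]
            )
            docs = ensemble.get_relevant_documents("my query")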
"""
retrievers: List[BaseRetriever]
weights: List[float]
c: int = 60
@root_validator(pre=True)
def set_weights(cls, values: Dict[str, Any]) -> Dict[str, Any]:
if not values.get("weights"):
n_retrievers = len(values["retrievers"])
values["weights"] = [1 / n_retrievers] * n_retrievers
return values
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
) -> List[Document]:
"""
Get the relevant documents for a given query.
Args:
query: The query to search for.
Returns:
A list of reranked documents.
"""
# Get fused result of the retrievers.
fused_documents = self.rank_fusion(query, run_manager)
return fused_documents
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
) -> List[Document]:
"""
Asynchronously get the relevant documents for a given query.
Args:
query: The query to search for.
Returns:
A list of reranked documents.
"""
# Get fused result of the retrievers.
fused_documents = await self.arank_fusion(query, run_manager)
return fused_documents
def rank_fusion(
self, query: str, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
"""
Retrieve the results of the retrievers and use rank_fusion_func to get
the final result.
Args:
query: The query to search for.
Returns:
A list of reranked documents.
"""
# Get the results of all retrievers.
retriever_docs = [
retriever.get_relevant_documents(
query, callbacks=run_manager.get_child(tag=f"retriever_{i+1}")
)
for i, retriever in enumerate(self.retrievers)
]
# Enforce that retrieved docs are Documents for each list in retriever_docs
for i in range(len(retriever_docs)):
retriever_docs[i] = [
Document(page_content=doc) if not isinstance(doc, Document) else doc
for doc in retriever_docs[i]
]
# apply rank fusion
fused_documents = self.weighted_reciprocal_rank(retriever_docs)
return fused_documents
async def arank_fusion(
self, query: str, run_manager: AsyncCallbackManagerForRetrieverRun
) -> List[Document]:
"""
Asynchronously retrieve the results of the retrievers
and use rank_fusion_func to get the final result.
Args:
query: The query to search for.
Returns:
A list of reranked documents.
"""
# Get the results of all retrievers.
retriever_docs = [
await retriever.aget_relevant_documents(
query, callbacks=run_manager.get_child(tag=f"retriever_{i+1}")
)
for i, retriever in enumerate(self.retrievers)
]
# Enforce that retrieved docs are Documents for each list in retriever_docs
for i in range(len(retriever_docs)):
retriever_docs[i] = [
Document(page_content=doc) if not isinstance(doc, Document) else doc
for doc in retriever_docs[i]
]
# apply rank fusion
fused_documents = self.weighted_reciprocal_rank(retriever_docs)
return fused_documents
def weighted_reciprocal_rank(
self, doc_lists: List[List[Document]]
) -> List[Document]:
"""
Perform weighted Reciprocal Rank Fusion on multiple rank lists.
You can find more details about RRF here:
https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf
Args:
doc_lists: A list of rank lists, where each rank list contains unique items.
Returns:
list: The final aggregated list of items sorted by their weighted RRF
scores in descending order.
"""
if len(doc_lists) != len(self.weights):
raise ValueError(
"Number of rank lists must be equal to the number of weights."
)
# Create a union of all unique documents in the input doc_lists
all_documents = set()
for doc_list in doc_lists:
for doc in doc_list:
all_documents.add(doc.page_content)
# Initialize the RRF score dictionary for each document
rrf_score_dic = {doc: 0.0 for doc in all_documents}
# Calculate RRF scores for each document
for doc_list, weight in zip(doc_lists, self.weights):
for rank, doc in enumerate(doc_list, start=1):
rrf_score = weight * (1 / (rank + self.c))
rrf_score_dic[doc.page_content] += rrf_score
# Sort documents by their RRF scores in descending order
sorted_documents = sorted(
rrf_score_dic.keys(), key=lambda x: rrf_score_dic[x], reverse=True
)
# Map the sorted page_content back to the original document objects
page_content_to_doc_map = {
doc.page_content: doc for doc_list in doc_lists for doc in doc_list
}
sorted_docs = [
page_content_to_doc_map[page_content] for page_content in sorted_documents
]
return sorted_docs
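# Usage sketch (not part of the original module): a minimal, self-contained demo
# of the weighted RRF combination. The ``_ToyRetriever`` class and the sample
# documents below are assumptions made purely for illustration.
if __name__ == "__main__":
    class _ToyRetriever(BaseRetriever):
        docs: List[Document]
        def _get_relevant_documents(
            self, query: str, *, run_manager: CallbackManagerForRetrieverRun
        ) -> List[Document]:
            return self.docs
    apples = Document(page_content="apples")
    bananas = Document(page_content="bananas")
    ensemble = EnsembleRetriever(
        retrievers=[
            _ToyRetriever(docs=[apples, bananas]),
            _ToyRetriever(docs=[bananas, apples]),
        ],
        weights=[0.7, 0.3],
    )
    # The apple-first ranking dominates because its retriever has more weight.
    print(ensemble.get_relevant_documents("fruit"))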
| [] |
2024-01-10 | mth93/langchain | libs~langchain~tests~integration_tests~memory~test_redis.py | import json
from libs.core.langchain_core.messages import message_to_dict
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import RedisChatMessageHistory
def test_memory_with_message_store() -> None:
"""Test the memory with a message store."""
# setup Redis as a message store
message_history = RedisChatMessageHistory(
url="redis://localhost:6379/0", ttl=10, session_id="my-test-session"
)
memory = ConversationBufferMemory(
memory_key="baz", chat_memory=message_history, return_messages=True
)
# add some messages
memory.chat_memory.add_ai_message("This is me, the AI")
memory.chat_memory.add_user_message("This is me, the human")
# get the message history from the memory store and turn it into a json
messages = memory.chat_memory.messages
messages_json = json.dumps([message_to_dict(msg) for msg in messages])
assert "This is me, the AI" in messages_json
assert "This is me, the human" in messages_json
# remove the record from Redis, so the next test run won't pick it up
memory.chat_memory.clear()
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~vectorstores~azure_cosmos_db.py | from __future__ import annotations
import logging
from enum import Enum
from typing import (
TYPE_CHECKING,
Any,
Dict,
Generator,
Iterable,
List,
Optional,
Tuple,
TypeVar,
Union,
)
import numpy as np
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.vectorstores import VectorStore
from langchain_community.vectorstores.utils import maximal_marginal_relevance
if TYPE_CHECKING:
from libs.core.langchain_core.embeddings import Embeddings
from pymongo.collection import Collection
# Before Python 3.11 native StrEnum is not available
class CosmosDBSimilarityType(str, Enum):
"""Cosmos DB Similarity Type as enumerator."""
COS = "COS"
"""CosineSimilarity"""
IP = "IP"
"""inner - product"""
L2 = "L2"
"""Euclidean distance"""
CosmosDBDocumentType = TypeVar("CosmosDBDocumentType", bound=Dict[str, Any])
logger = logging.getLogger(__name__)
DEFAULT_INSERT_BATCH_SIZE = 128
class AzureCosmosDBVectorSearch(VectorStore):
"""`Azure Cosmos DB for MongoDB vCore` vector store.
To use, you should have both:
- the ``pymongo`` python package installed
- a connection string associated with a MongoDB VCore Cluster
Example:
        .. code-block:: python
from langchain_community.vectorstores import AzureCosmosDBVectorSearch
from langchain_community.embeddings.openai import OpenAIEmbeddings
from pymongo import MongoClient
mongo_client = MongoClient("<YOUR-CONNECTION-STRING>")
collection = mongo_client["<db_name>"]["<collection_name>"]
embeddings = OpenAIEmbeddings()
vectorstore = AzureCosmosDBVectorSearch(collection, embeddings)
"""
def __init__(
self,
collection: Collection[CosmosDBDocumentType],
embedding: Embeddings,
*,
index_name: str = "vectorSearchIndex",
text_key: str = "textContent",
embedding_key: str = "vectorContent",
):
"""Constructor for AzureCosmosDBVectorSearch
Args:
collection: MongoDB collection to add the texts to.
embedding: Text embedding model to use.
            index_name: Name of the vector search index.
text_key: MongoDB field that will contain the text
for each document.
embedding_key: MongoDB field that will contain the embedding
for each document.
"""
self._collection = collection
self._embedding = embedding
self._index_name = index_name
self._text_key = text_key
self._embedding_key = embedding_key
@property
def embeddings(self) -> Embeddings:
return self._embedding
def get_index_name(self) -> str:
"""Returns the index name
Returns:
Returns the index name
"""
return self._index_name
@classmethod
def from_connection_string(
cls,
connection_string: str,
namespace: str,
embedding: Embeddings,
**kwargs: Any,
) -> AzureCosmosDBVectorSearch:
"""Creates an Instance of AzureCosmosDBVectorSearch from a Connection String
Args:
connection_string: The MongoDB vCore instance connection string
namespace: The namespace (database.collection)
embedding: The embedding utility
**kwargs: Dynamic keyword arguments
Returns:
an instance of the vector store
"""
try:
from pymongo import MongoClient
except ImportError:
raise ImportError(
"Could not import pymongo, please install it with "
"`pip install pymongo`."
)
client: MongoClient = MongoClient(connection_string)
db_name, collection_name = namespace.split(".")
collection = client[db_name][collection_name]
return cls(collection, embedding, **kwargs)
def index_exists(self) -> bool:
"""Verifies if the specified index name during instance
construction exists on the collection
Returns:
Returns True on success and False if no such index exists
on the collection
"""
cursor = self._collection.list_indexes()
index_name = self._index_name
for res in cursor:
current_index_name = res.pop("name")
if current_index_name == index_name:
return True
return False
def delete_index(self) -> None:
"""Deletes the index specified during instance construction if it exists"""
if self.index_exists():
self._collection.drop_index(self._index_name)
# Raises OperationFailure on an error (e.g. trying to drop
# an index that does not exist)
def create_index(
self,
num_lists: int = 100,
dimensions: int = 1536,
similarity: CosmosDBSimilarityType = CosmosDBSimilarityType.COS,
) -> dict[str, Any]:
"""Creates an index using the index name specified at
instance construction
Setting the numLists parameter correctly is important for achieving
good accuracy and performance.
Since the vector store uses IVF as the indexing strategy,
you should create the index only after you
        have loaded a large enough sample of documents to ensure that the
        centroids for the respective buckets are
        fairly distributed.
We recommend that numLists is set to documentCount/1000 for up
to 1 million documents
and to sqrt(documentCount) for more than 1 million documents.
As the number of items in your database grows, you should
tune numLists to be larger
in order to achieve good latency performance for vector search.
If you're experimenting with a new scenario or creating a
small demo, you can start with numLists
set to 1 to perform a brute-force search across all vectors.
This should provide you with the most
accurate results from the vector search, however be aware that
the search speed and latency will be slow.
After your initial setup, you should go ahead and tune
the numLists parameter using the above guidance.
Args:
num_lists: This integer is the number of clusters that the
inverted file (IVF) index uses to group the vector data.
We recommend that numLists is set to documentCount/1000
for up to 1 million documents and to sqrt(documentCount)
for more than 1 million documents.
Using a numLists value of 1 is akin to performing
brute-force search, which has limited performance
dimensions: Number of dimensions for vector similarity.
The maximum number of supported dimensions is 2000
similarity: Similarity metric to use with the IVF index.
Possible options are:
- CosmosDBSimilarityType.COS (cosine distance),
- CosmosDBSimilarityType.L2 (Euclidean distance), and
- CosmosDBSimilarityType.IP (inner product).
Returns:
An object describing the created index
"""
# prepare the command
create_index_commands = {
"createIndexes": self._collection.name,
"indexes": [
{
"name": self._index_name,
"key": {self._embedding_key: "cosmosSearch"},
"cosmosSearchOptions": {
"kind": "vector-ivf",
"numLists": num_lists,
"similarity": similarity,
"dimensions": dimensions,
},
}
],
}
# retrieve the database object
current_database = self._collection.database
# invoke the command from the database object
create_index_responses: dict[str, Any] = current_database.command(
create_index_commands
)
return create_index_responses
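    # Sketch (not part of the original class): one way a caller might pick
    # ``num_lists`` following the guidance above, assuming a hypothetical
    # ``document_count`` is known:
    #
    #     import math
    #     num_lists = (
    #         max(1, document_count // 1000)
    #         if document_count <= 1_000_000
    #         else int(math.sqrt(document_count))
    #     )
    #     vectorstore.create_index(num_lists=num_lists, dimensions=1536)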
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[Dict[str, Any]]] = None,
**kwargs: Any,
) -> List:
batch_size = kwargs.get("batch_size", DEFAULT_INSERT_BATCH_SIZE)
_metadatas: Union[List, Generator] = metadatas or ({} for _ in texts)
texts_batch = []
metadatas_batch = []
result_ids = []
for i, (text, metadata) in enumerate(zip(texts, _metadatas)):
texts_batch.append(text)
metadatas_batch.append(metadata)
if (i + 1) % batch_size == 0:
result_ids.extend(self._insert_texts(texts_batch, metadatas_batch))
texts_batch = []
metadatas_batch = []
if texts_batch:
result_ids.extend(self._insert_texts(texts_batch, metadatas_batch))
return result_ids
def _insert_texts(self, texts: List[str], metadatas: List[Dict[str, Any]]) -> List:
"""Used to Load Documents into the collection
Args:
texts: The list of documents strings to load
metadatas: The list of metadata objects associated with each document
Returns:
"""
# If the text is empty, then exit early
if not texts:
return []
# Embed and create the documents
embeddings = self._embedding.embed_documents(texts)
to_insert = [
{self._text_key: t, self._embedding_key: embedding, **m}
for t, m, embedding in zip(texts, metadatas, embeddings)
]
# insert the documents in Cosmos DB
insert_result = self._collection.insert_many(to_insert) # type: ignore
return insert_result.inserted_ids
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
collection: Optional[Collection[CosmosDBDocumentType]] = None,
**kwargs: Any,
) -> AzureCosmosDBVectorSearch:
if collection is None:
raise ValueError("Must provide 'collection' named parameter.")
vectorstore = cls(collection, embedding, **kwargs)
vectorstore.add_texts(texts, metadatas=metadatas)
return vectorstore
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]:
if ids is None:
raise ValueError("No document ids provided to delete.")
for document_id in ids:
self.delete_document_by_id(document_id)
return True
def delete_document_by_id(self, document_id: Optional[str] = None) -> None:
"""Removes a Specific Document by Id
Args:
document_id: The document identifier
"""
try:
from bson.objectid import ObjectId
except ImportError as e:
raise ImportError(
"Unable to import bson, please install with `pip install bson`."
) from e
if document_id is None:
raise ValueError("No document id provided to delete.")
self._collection.delete_one({"_id": ObjectId(document_id)})
def _similarity_search_with_score(
self, embeddings: List[float], k: int = 4
) -> List[Tuple[Document, float]]:
"""Returns a list of documents with their scores
Args:
embeddings: The query vector
k: the number of documents to return
Returns:
A list of documents closest to the query vector
"""
pipeline: List[dict[str, Any]] = [
{
"$search": {
"cosmosSearch": {
"vector": embeddings,
"path": self._embedding_key,
"k": k,
},
"returnStoredSource": True,
}
},
{
"$project": {
"similarityScore": {"$meta": "searchScore"},
"document": "$$ROOT",
}
},
]
cursor = self._collection.aggregate(pipeline)
docs = []
for res in cursor:
score = res.pop("similarityScore")
document_object_field = res.pop("document")
text = document_object_field.pop(self._text_key)
docs.append(
(Document(page_content=text, metadata=document_object_field), score)
)
return docs
def similarity_search_with_score(
self, query: str, k: int = 4
) -> List[Tuple[Document, float]]:
embeddings = self._embedding.embed_query(query)
docs = self._similarity_search_with_score(embeddings=embeddings, k=k)
return docs
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
docs_and_scores = self.similarity_search_with_score(query, k=k)
return [doc for doc, _ in docs_and_scores]
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
# Retrieves the docs with similarity scores
# sorted by similarity scores in DESC order
docs = self._similarity_search_with_score(embedding, k=fetch_k)
# Re-ranks the docs using MMR
mmr_doc_indexes = maximal_marginal_relevance(
np.array(embedding),
[doc.metadata[self._embedding_key] for doc, _ in docs],
k=k,
lambda_mult=lambda_mult,
)
mmr_docs = [docs[i][0] for i in mmr_doc_indexes]
return mmr_docs
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
# compute the embeddings vector from the query string
embeddings = self._embedding.embed_query(query)
docs = self.max_marginal_relevance_search_by_vector(
embeddings, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult
)
return docs
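# Usage sketch (not part of the original module). The connection string,
# namespace and OpenAI embedding model are placeholders; any Embeddings
# implementation and any reachable vCore cluster would work.
if __name__ == "__main__":
    from langchain_community.embeddings.openai import OpenAIEmbeddings
    vectorstore = AzureCosmosDBVectorSearch.from_connection_string(
        connection_string="<YOUR-CONNECTION-STRING>",
        namespace="<db_name>.<collection_name>",
        embedding=OpenAIEmbeddings(),
        index_name="vectorSearchIndex",
    )
    if not vectorstore.index_exists():
        # num_lists=1 is brute-force search; fine for a demo, slow at scale.
        vectorstore.create_index(num_lists=1, dimensions=1536)
    vectorstore.add_texts(["foo", "bar"], metadatas=[{"k": 1}, {"k": 2}])
    print(vectorstore.similarity_search("foo", k=1))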
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~blackboard.py | import contextlib
import re
from pathlib import Path
from typing import Any, List, Optional, Tuple
from urllib.parse import unquote
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.directory import DirectoryLoader
from langchain_community.document_loaders.pdf import PyPDFLoader
from langchain_community.document_loaders.web_base import WebBaseLoader
class BlackboardLoader(WebBaseLoader):
"""Load a `Blackboard` course.
This loader is not compatible with all Blackboard courses. It is only
compatible with courses that use the new Blackboard interface.
To use this loader, you must have the BbRouter cookie. You can get this
cookie by logging into the course and then copying the value of the
BbRouter cookie from the browser's developer tools.
Example:
.. code-block:: python
from langchain_community.document_loaders import BlackboardLoader
loader = BlackboardLoader(
blackboard_course_url="https://blackboard.example.com/webapps/blackboard/execute/announcement?method=search&context=course_entry&course_id=_123456_1",
bbrouter="expires:12345...",
)
documents = loader.load()
""" # noqa: E501
def __init__(
self,
blackboard_course_url: str,
bbrouter: str,
load_all_recursively: bool = True,
basic_auth: Optional[Tuple[str, str]] = None,
cookies: Optional[dict] = None,
continue_on_failure: bool = False,
):
"""Initialize with blackboard course url.
The BbRouter cookie is required for most blackboard courses.
Args:
blackboard_course_url: Blackboard course url.
bbrouter: BbRouter cookie.
load_all_recursively: If True, load all documents recursively.
basic_auth: Basic auth credentials.
cookies: Cookies.
continue_on_failure: whether to continue loading the sitemap if an error
occurs loading a url, emitting a warning instead of raising an
exception. Setting this to True makes the loader more robust, but also
may result in missing data. Default: False
Raises:
ValueError: If blackboard course url is invalid.
"""
super().__init__(
            web_paths=(blackboard_course_url,), continue_on_failure=continue_on_failure
)
# Get base url
try:
self.base_url = blackboard_course_url.split("/webapps/blackboard")[0]
except IndexError:
raise IndexError(
"Invalid blackboard course url. "
"Please provide a url that starts with "
"https://<blackboard_url>/webapps/blackboard"
)
if basic_auth is not None:
self.session.auth = basic_auth
# Combine cookies
if cookies is None:
cookies = {}
cookies.update({"BbRouter": bbrouter})
self.session.cookies.update(cookies)
self.load_all_recursively = load_all_recursively
self.check_bs4()
def check_bs4(self) -> None:
"""Check if BeautifulSoup4 is installed.
Raises:
ImportError: If BeautifulSoup4 is not installed.
"""
try:
import bs4 # noqa: F401
except ImportError:
raise ImportError(
"BeautifulSoup4 is required for BlackboardLoader. "
"Please install it with `pip install beautifulsoup4`."
)
def load(self) -> List[Document]:
"""Load data into Document objects.
Returns:
List of Documents.
"""
if self.load_all_recursively:
soup_info = self.scrape()
self.folder_path = self._get_folder_path(soup_info)
relative_paths = self._get_paths(soup_info)
documents = []
for path in relative_paths:
url = self.base_url + path
print(f"Fetching documents from {url}")
soup_info = self._scrape(url)
with contextlib.suppress(ValueError):
documents.extend(self._get_documents(soup_info))
return documents
else:
print(f"Fetching documents from {self.web_path}")
soup_info = self.scrape()
self.folder_path = self._get_folder_path(soup_info)
return self._get_documents(soup_info)
def _get_folder_path(self, soup: Any) -> str:
"""Get the folder path to save the Documents in.
Args:
soup: BeautifulSoup4 soup object.
Returns:
Folder path.
"""
# Get the course name
course_name = soup.find("span", {"id": "crumb_1"})
if course_name is None:
raise ValueError("No course name found.")
course_name = course_name.text.strip()
# Prepare the folder path
course_name_clean = (
unquote(course_name)
.replace(" ", "_")
.replace("/", "_")
.replace(":", "_")
.replace(",", "_")
.replace("?", "_")
.replace("'", "_")
.replace("!", "_")
.replace('"', "_")
)
# Get the folder path
folder_path = Path(".") / course_name_clean
return str(folder_path)
def _get_documents(self, soup: Any) -> List[Document]:
"""Fetch content from page and return Documents.
Args:
soup: BeautifulSoup4 soup object.
Returns:
List of documents.
"""
attachments = self._get_attachments(soup)
self._download_attachments(attachments)
documents = self._load_documents()
return documents
def _get_attachments(self, soup: Any) -> List[str]:
"""Get all attachments from a page.
Args:
soup: BeautifulSoup4 soup object.
Returns:
List of attachments.
"""
from bs4 import BeautifulSoup, Tag
# Get content list
content_list = soup.find("ul", {"class": "contentList"})
if content_list is None:
raise ValueError("No content list found.")
content_list: BeautifulSoup # type: ignore
# Get all attachments
attachments = []
for attachment in content_list.find_all("ul", {"class": "attachments"}):
attachment: Tag # type: ignore
for link in attachment.find_all("a"):
link: Tag # type: ignore
href = link.get("href")
# Only add if href is not None and does not start with #
if href is not None and not href.startswith("#"):
attachments.append(href)
return attachments
def _download_attachments(self, attachments: List[str]) -> None:
"""Download all attachments.
Args:
attachments: List of attachments.
"""
# Make sure the folder exists
Path(self.folder_path).mkdir(parents=True, exist_ok=True)
# Download all attachments
for attachment in attachments:
self.download(attachment)
def _load_documents(self) -> List[Document]:
"""Load all documents in the folder.
Returns:
List of documents.
"""
# Create the document loader
loader = DirectoryLoader(
path=self.folder_path,
glob="*.pdf",
loader_cls=PyPDFLoader, # type: ignore
)
# Load the documents
documents = loader.load()
# Return all documents
return documents
def _get_paths(self, soup: Any) -> List[str]:
"""Get all relative paths in the navbar."""
relative_paths = []
course_menu = soup.find("ul", {"class": "courseMenu"})
if course_menu is None:
raise ValueError("No course menu found.")
for link in course_menu.find_all("a"):
href = link.get("href")
if href is not None and href.startswith("/"):
relative_paths.append(href)
return relative_paths
def download(self, path: str) -> None:
"""Download a file from an url.
Args:
path: Path to the file.
"""
# Get the file content
response = self.session.get(self.base_url + path, allow_redirects=True)
# Get the filename
filename = self.parse_filename(response.url)
# Write the file to disk
with open(Path(self.folder_path) / filename, "wb") as f:
f.write(response.content)
def parse_filename(self, url: str) -> str:
"""Parse the filename from an url.
Args:
url: Url to parse the filename from.
Returns:
The filename.
"""
if (url_path := Path(url)) and url_path.suffix == ".pdf":
return url_path.name
else:
return self._parse_filename_from_url(url)
def _parse_filename_from_url(self, url: str) -> str:
"""Parse the filename from an url.
Args:
url: Url to parse the filename from.
Returns:
The filename.
Raises:
ValueError: If the filename could not be parsed.
"""
filename_matches = re.search(r"filename%2A%3DUTF-8%27%27(.+)", url)
if filename_matches:
filename = filename_matches.group(1)
else:
raise ValueError(f"Could not parse filename from {url}")
if ".pdf" not in filename:
raise ValueError(f"Incorrect file type: {filename}")
filename = filename.split(".pdf")[0] + ".pdf"
filename = unquote(filename)
filename = filename.replace("%20", " ")
return filename
if __name__ == "__main__":
loader = BlackboardLoader(
"https://<YOUR BLACKBOARD URL"
" HERE>/webapps/blackboard/content/listContent.jsp?course_id=_<YOUR COURSE ID"
" HERE>_1&content_id=_<YOUR CONTENT ID HERE>_1&mode=reset",
"<YOUR BBROUTER COOKIE HERE>",
load_all_recursively=True,
)
documents = loader.load()
print(f"Loaded {len(documents)} pages of PDFs from {loader.web_path}")
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~document_loaders~test_quip.py | from typing import Dict
from unittest.mock import MagicMock, patch
import pytest
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.quip import QuipLoader
try:
from quip_api.quip import QuipClient # noqa: F401
quip_installed = True
except ImportError:
quip_installed = False
@pytest.fixture
def mock_quip(): # type: ignore
# mock quip_client
with patch("quip_api.quip.QuipClient") as mock_quip:
yield mock_quip
@pytest.mark.requires("quip_api")
class TestQuipLoader:
API_URL = "https://example-api.quip.com"
DOC_URL_PREFIX = ("https://example.quip.com",)
ACCESS_TOKEN = "api_token"
MOCK_FOLDER_IDS = ["ABC"]
MOCK_THREAD_IDS = ["ABC", "DEF"]
def test_quip_loader_initialization(self, mock_quip: MagicMock) -> None:
QuipLoader(self.API_URL, access_token=self.ACCESS_TOKEN, request_timeout=60)
mock_quip.assert_called_once_with(
access_token=self.ACCESS_TOKEN, base_url=self.API_URL, request_timeout=60
)
def test_quip_loader_load_date_invalid_args(self) -> None:
quip_loader = QuipLoader(
self.API_URL, access_token=self.ACCESS_TOKEN, request_timeout=60
)
with pytest.raises(
ValueError,
match="Must specify at least one among `folder_ids`, `thread_ids` or "
"set `include_all`_folders as True",
):
quip_loader.load()
def test_quip_loader_load_data_by_folder_id(self, mock_quip: MagicMock) -> None:
mock_quip.get_folder.side_effect = [
self._get_mock_folder(self.MOCK_FOLDER_IDS[0])
]
mock_quip.get_thread.side_effect = [
self._get_mock_thread(self.MOCK_THREAD_IDS[0]),
self._get_mock_thread(self.MOCK_THREAD_IDS[1]),
]
quip_loader = self._get_mock_quip_loader(mock_quip)
documents = quip_loader.load(folder_ids=[self.MOCK_FOLDER_IDS[0]])
assert mock_quip.get_folder.call_count == 1
assert mock_quip.get_thread.call_count == 2
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert (
documents[0].metadata.get("source")
== f"https://example.quip.com/{self.MOCK_THREAD_IDS[0]}"
)
assert (
documents[1].metadata.get("source")
== f"https://example.quip.com/{self.MOCK_THREAD_IDS[1]}"
)
def test_quip_loader_load_data_all_folder(self, mock_quip: MagicMock) -> None:
mock_quip.get_authenticated_user.side_effect = [
self._get_mock_authenticated_user()
]
mock_quip.get_folder.side_effect = [
self._get_mock_folder(self.MOCK_FOLDER_IDS[0]),
]
mock_quip.get_thread.side_effect = [
self._get_mock_thread(self.MOCK_THREAD_IDS[0]),
self._get_mock_thread(self.MOCK_THREAD_IDS[1]),
]
quip_loader = self._get_mock_quip_loader(mock_quip)
documents = quip_loader.load(include_all_folders=True)
assert mock_quip.get_folder.call_count == 1
assert mock_quip.get_thread.call_count == 2
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert (
documents[0].metadata.get("source")
== f"https://example.quip.com/{self.MOCK_THREAD_IDS[0]}"
)
assert (
documents[1].metadata.get("source")
== f"https://example.quip.com/{self.MOCK_THREAD_IDS[1]}"
)
def test_quip_loader_load_data_by_thread_id(self, mock_quip: MagicMock) -> None:
mock_quip.get_thread.side_effect = [
self._get_mock_thread(self.MOCK_THREAD_IDS[0]),
self._get_mock_thread(self.MOCK_THREAD_IDS[1]),
]
quip_loader = self._get_mock_quip_loader(mock_quip)
documents = quip_loader.load(thread_ids=self.MOCK_THREAD_IDS)
assert mock_quip.get_folder.call_count == 0
assert mock_quip.get_thread.call_count == 2
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert (
documents[0].metadata.get("source")
== f"https://example.quip.com/{self.MOCK_THREAD_IDS[0]}"
)
assert (
documents[1].metadata.get("source")
== f"https://example.quip.com/{self.MOCK_THREAD_IDS[1]}"
)
def _get_mock_quip_loader(self, mock_quip: MagicMock) -> QuipLoader:
quip_loader = QuipLoader(
self.API_URL, access_token=self.ACCESS_TOKEN, request_timeout=60
)
quip_loader.quip_client = mock_quip
return quip_loader
def _get_mock_folder(self, folder_id: str) -> Dict:
return {
"folder": {
"title": "runbook",
"creator_id": "testing",
"folder_type": "shared",
"parent_id": "ABCD",
"inherit_mode": "inherit",
"color": "manila",
"id": f"{folder_id}",
"created_usec": 1668405728528904,
"updated_usec": 1697356632672453,
"link": "https://example.quip.com/YPH9OAR2Eu5",
},
"member_ids": [],
"children": [
{"thread_id": "ABC"},
{"thread_id": "DEF"},
],
}
def _get_mock_thread(self, thread_id: str) -> Dict:
return {
"thread": {
"author_id": "testing",
"thread_class": "document",
"owning_company_id": "ABC",
"id": f"{thread_id}",
"created_usec": 1690873126670055,
"updated_usec": 1690874891638991,
"title": f"Unit Test Doc {thread_id}",
"link": f"https://example.quip.com/{thread_id}",
"document_id": "ABC",
"type": "document",
"is_template": False,
"is_deleted": False,
},
"user_ids": [],
"shared_folder_ids": ["ABC"],
"expanded_user_ids": ["ABCDEFG"],
"invited_user_emails": [],
"access_levels": {"ABCD": {"access_level": "OWN"}},
"html": "<h1 id='temp:C:ABCD'>How to write Python Test </h1>",
}
def _get_mock_authenticated_user(self) -> Dict:
return {"shared_folder_ids": self.MOCK_FOLDER_IDS, "id": "Test"}
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~embeddings~xinference.py | """Wrapper around Xinference embedding models."""
from typing import Any, List, Optional
from libs.core.langchain_core.embeddings import Embeddings
class XinferenceEmbeddings(Embeddings):
"""Xinference embedding models.
To use, you should have the xinference library installed:
.. code-block:: bash
pip install xinference
Check out: https://github.com/xorbitsai/inference
To run, you need to start a Xinference supervisor on one server and Xinference workers on the other servers.
Example:
To start a local instance of Xinference, run
.. code-block:: bash
$ xinference
You can also deploy Xinference in a distributed cluster. Here are the steps:
Starting the supervisor:
.. code-block:: bash
$ xinference-supervisor
Starting the worker:
.. code-block:: bash
$ xinference-worker
Then, launch a model using command line interface (CLI).
Example:
.. code-block:: bash
$ xinference launch -n orca -s 3 -q q4_0
It will return a model UID. Then you can use Xinference Embedding with LangChain.
Example:
.. code-block:: python
from langchain_community.embeddings import XinferenceEmbeddings
xinference = XinferenceEmbeddings(
server_url="http://0.0.0.0:9997",
                model_uid = {model_uid} # replace model_uid with the model UID returned from launching the model
)
""" # noqa: E501
client: Any
server_url: Optional[str]
"""URL of the xinference server"""
model_uid: Optional[str]
"""UID of the launched model"""
def __init__(
self, server_url: Optional[str] = None, model_uid: Optional[str] = None
):
try:
from xinference.client import RESTfulClient
except ImportError as e:
raise ImportError(
"Could not import RESTfulClient from xinference. Please install it"
" with `pip install xinference`."
) from e
super().__init__()
if server_url is None:
raise ValueError("Please provide server URL")
if model_uid is None:
raise ValueError("Please provide the model UID")
self.server_url = server_url
self.model_uid = model_uid
self.client = RESTfulClient(server_url)
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed a list of documents using Xinference.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
model = self.client.get_model(self.model_uid)
embeddings = [
model.create_embedding(text)["data"][0]["embedding"] for text in texts
]
return [list(map(float, e)) for e in embeddings]
def embed_query(self, text: str) -> List[float]:
"""Embed a query of documents using Xinference.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
model = self.client.get_model(self.model_uid)
embedding_res = model.create_embedding(text)
embedding = embedding_res["data"][0]["embedding"]
return list(map(float, embedding))
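# Usage sketch (not part of the original module). The server URL and model UID
# are placeholders for a running Xinference endpoint and a launched model.
if __name__ == "__main__":
    xinference = XinferenceEmbeddings(
        server_url="http://0.0.0.0:9997",
        model_uid="<MODEL-UID-RETURNED-BY-XINFERENCE-LAUNCH>",
    )
    query_vector = xinference.embed_query("What is Xinference?")
    doc_vectors = xinference.embed_documents(["alpha", "beta"])
    print(len(query_vector), len(doc_vectors))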
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~directory.py | import concurrent
import logging
import random
from pathlib import Path
from typing import Any, List, Optional, Type, Union
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.document_loaders.html_bs import BSHTMLLoader
from langchain_community.document_loaders.text import TextLoader
from langchain_community.document_loaders.unstructured import UnstructuredFileLoader
FILE_LOADER_TYPE = Union[
Type[UnstructuredFileLoader], Type[TextLoader], Type[BSHTMLLoader]
]
logger = logging.getLogger(__name__)
def _is_visible(p: Path) -> bool:
parts = p.parts
for _p in parts:
if _p.startswith("."):
return False
return True
class DirectoryLoader(BaseLoader):
"""Load from a directory."""
def __init__(
self,
path: str,
glob: str = "**/[!.]*",
silent_errors: bool = False,
load_hidden: bool = False,
loader_cls: FILE_LOADER_TYPE = UnstructuredFileLoader,
loader_kwargs: Union[dict, None] = None,
recursive: bool = False,
show_progress: bool = False,
use_multithreading: bool = False,
max_concurrency: int = 4,
*,
sample_size: int = 0,
randomize_sample: bool = False,
sample_seed: Union[int, None] = None,
):
"""Initialize with a path to directory and how to glob over it.
Args:
path: Path to directory.
glob: Glob pattern to use to find files. Defaults to "**/[!.]*"
(all files except hidden).
silent_errors: Whether to silently ignore errors. Defaults to False.
load_hidden: Whether to load hidden files. Defaults to False.
loader_cls: Loader class to use for loading files.
Defaults to UnstructuredFileLoader.
loader_kwargs: Keyword arguments to pass to loader_cls. Defaults to None.
recursive: Whether to recursively search for files. Defaults to False.
show_progress: Whether to show a progress bar. Defaults to False.
use_multithreading: Whether to use multithreading. Defaults to False.
max_concurrency: The maximum number of threads to use. Defaults to 4.
sample_size: The maximum number of files you would like to load from the
directory.
randomize_sample: Shuffle the files to get a random sample.
sample_seed: set the seed of the random shuffle for reproducibility.
"""
if loader_kwargs is None:
loader_kwargs = {}
self.path = path
self.glob = glob
self.load_hidden = load_hidden
self.loader_cls = loader_cls
self.loader_kwargs = loader_kwargs
self.silent_errors = silent_errors
self.recursive = recursive
self.show_progress = show_progress
self.use_multithreading = use_multithreading
self.max_concurrency = max_concurrency
self.sample_size = sample_size
self.randomize_sample = randomize_sample
self.sample_seed = sample_seed
def load_file(
self, item: Path, path: Path, docs: List[Document], pbar: Optional[Any]
) -> None:
"""Load a file.
Args:
item: File path.
path: Directory path.
docs: List of documents to append to.
pbar: Progress bar. Defaults to None.
"""
if item.is_file():
if _is_visible(item.relative_to(path)) or self.load_hidden:
try:
logger.debug(f"Processing file: {str(item)}")
sub_docs = self.loader_cls(str(item), **self.loader_kwargs).load()
docs.extend(sub_docs)
except Exception as e:
if self.silent_errors:
logger.warning(f"Error loading file {str(item)}: {e}")
else:
raise e
finally:
if pbar:
pbar.update(1)
def load(self) -> List[Document]:
"""Load documents."""
p = Path(self.path)
if not p.exists():
raise FileNotFoundError(f"Directory not found: '{self.path}'")
if not p.is_dir():
raise ValueError(f"Expected directory, got file: '{self.path}'")
docs: List[Document] = []
items = list(p.rglob(self.glob) if self.recursive else p.glob(self.glob))
if self.sample_size > 0:
if self.randomize_sample:
randomizer = (
random.Random(self.sample_seed) if self.sample_seed else random
)
randomizer.shuffle(items) # type: ignore
items = items[: min(len(items), self.sample_size)]
pbar = None
if self.show_progress:
try:
from tqdm import tqdm
pbar = tqdm(total=len(items))
except ImportError as e:
logger.warning(
"To log the progress of DirectoryLoader you need to install tqdm, "
"`pip install tqdm`"
)
if self.silent_errors:
logger.warning(e)
else:
raise ImportError(
"To log the progress of DirectoryLoader "
"you need to install tqdm, "
"`pip install tqdm`"
)
if self.use_multithreading:
with concurrent.futures.ThreadPoolExecutor(
max_workers=self.max_concurrency
) as executor:
executor.map(lambda i: self.load_file(i, p, docs, pbar), items)
else:
for i in items:
self.load_file(i, p, docs, pbar)
if pbar:
pbar.close()
return docs
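# Usage sketch (not part of the original module). The directory path is a
# placeholder; TextLoader is used instead of the default UnstructuredFileLoader
# so the example needs no extra dependencies.
if __name__ == "__main__":
    loader = DirectoryLoader(
        path="<PATH-TO-A-DIRECTORY>",
        glob="**/*.txt",
        loader_cls=TextLoader,
        recursive=True,
        sample_size=10,
        randomize_sample=True,
        sample_seed=42,
    )
    print(f"Loaded {len(loader.load())} documents")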
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~larksuite.py | import json
import urllib.request
from typing import Any, Iterator, List
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class LarkSuiteDocLoader(BaseLoader):
"""Load from `LarkSuite` (`FeiShu`)."""
def __init__(self, domain: str, access_token: str, document_id: str):
"""Initialize with domain, access_token (tenant / user), and document_id.
Args:
domain: The domain to load the LarkSuite.
access_token: The access_token to use.
document_id: The document_id to load.
"""
self.domain = domain
self.access_token = access_token
self.document_id = document_id
def _get_larksuite_api_json_data(self, api_url: str) -> Any:
"""Get LarkSuite (FeiShu) API response json data."""
headers = {"Authorization": f"Bearer {self.access_token}"}
request = urllib.request.Request(api_url, headers=headers)
with urllib.request.urlopen(request) as response:
json_data = json.loads(response.read().decode())
return json_data
def lazy_load(self) -> Iterator[Document]:
"""Lazy load LarkSuite (FeiShu) document."""
api_url_prefix = f"{self.domain}/open-apis/docx/v1/documents"
metadata_json = self._get_larksuite_api_json_data(
f"{api_url_prefix}/{self.document_id}"
)
raw_content_json = self._get_larksuite_api_json_data(
f"{api_url_prefix}/{self.document_id}/raw_content"
)
text = raw_content_json["data"]["content"]
metadata = {
"document_id": self.document_id,
"revision_id": metadata_json["data"]["document"]["revision_id"],
"title": metadata_json["data"]["document"]["title"],
}
yield Document(page_content=text, metadata=metadata)
def load(self) -> List[Document]:
"""Load LarkSuite (FeiShu) document."""
return list(self.lazy_load())
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~confluence.py | import logging
from enum import Enum
from io import BytesIO
from typing import Any, Callable, Dict, List, Optional, Union
import requests
from libs.core.langchain_core.documents import Document
from tenacity import (
before_sleep_log,
retry,
stop_after_attempt,
wait_exponential,
)
from langchain_community.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class ContentFormat(str, Enum):
"""Enumerator of the content formats of Confluence page."""
EDITOR = "body.editor"
EXPORT_VIEW = "body.export_view"
ANONYMOUS_EXPORT_VIEW = "body.anonymous_export_view"
STORAGE = "body.storage"
VIEW = "body.view"
def get_content(self, page: dict) -> str:
return page["body"][self.name.lower()]["value"]
class ConfluenceLoader(BaseLoader):
"""Load `Confluence` pages.
Port of https://llamahub.ai/l/confluence
This currently supports username/api_key, Oauth2 login or personal access token
authentication.
Specify a list page_ids and/or space_key to load in the corresponding pages into
Document objects, if both are specified the union of both sets will be returned.
You can also specify a boolean `include_attachments` to include attachments, this
is set to False by default, if set to True all attachments will be downloaded and
ConfluenceReader will extract the text from the attachments and add it to the
Document object. Currently supported attachment types are: PDF, PNG, JPEG/JPG,
SVG, Word and Excel.
    Confluence API supports different formats of page content. The storage format is the
    raw XML representation for storage. The view format is the HTML representation for
    viewing, with macros rendered as they appear to users. You can pass
a enum `content_format` argument to `load()` to specify the content format, this is
set to `ContentFormat.STORAGE` by default, the supported values are:
`ContentFormat.EDITOR`, `ContentFormat.EXPORT_VIEW`,
`ContentFormat.ANONYMOUS_EXPORT_VIEW`, `ContentFormat.STORAGE`,
and `ContentFormat.VIEW`.
Hint: space_key and page_id can both be found in the URL of a page in Confluence
- https://yoursite.atlassian.com/wiki/spaces/<space_key>/pages/<page_id>
Example:
.. code-block:: python
from langchain_community.document_loaders import ConfluenceLoader
loader = ConfluenceLoader(
url="https://yoursite.atlassian.com/wiki",
username="me",
api_key="12345"
)
documents = loader.load(space_key="SPACE",limit=50)
# Server on perm
loader = ConfluenceLoader(
url="https://confluence.yoursite.com/",
username="me",
api_key="your_password",
cloud=False
)
documents = loader.load(space_key="SPACE",limit=50)
    :param url: Base URL of the Confluence instance, e.g. "https://yoursite.atlassian.com/wiki"
    :type url: str
    :param api_key: Confluence API key, used together with `username`, defaults to None
    :type api_key: str, optional
    :param username: Confluence username, used together with `api_key`, defaults to None
    :type username: str, optional
    :param oauth2: OAuth2 credentials with keys `access_token`, `access_token_secret`, `consumer_key` and `key_cert`, defaults to {}
    :type oauth2: dict, optional
    :param token: Personal access token, defaults to None
    :type token: str, optional
    :param cloud: Whether the Confluence instance is a cloud deployment, defaults to True
    :type cloud: bool, optional
:param number_of_retries: How many times to retry, defaults to 3
:type number_of_retries: Optional[int], optional
    :param min_retry_seconds: Minimum number of seconds to wait between retries, defaults to 2
    :type min_retry_seconds: Optional[int], optional
    :param max_retry_seconds: Maximum number of seconds to wait between retries, defaults to 10
    :type max_retry_seconds: Optional[int], optional
:param confluence_kwargs: additional kwargs to initialize confluence with
:type confluence_kwargs: dict, optional
:raises ValueError: Errors while validating input
:raises ImportError: Required dependencies not installed.
"""
def __init__(
self,
url: str,
api_key: Optional[str] = None,
username: Optional[str] = None,
session: Optional[requests.Session] = None,
oauth2: Optional[dict] = None,
token: Optional[str] = None,
cloud: Optional[bool] = True,
number_of_retries: Optional[int] = 3,
min_retry_seconds: Optional[int] = 2,
max_retry_seconds: Optional[int] = 10,
confluence_kwargs: Optional[dict] = None,
):
confluence_kwargs = confluence_kwargs or {}
errors = ConfluenceLoader.validate_init_args(
url=url,
api_key=api_key,
username=username,
session=session,
oauth2=oauth2,
token=token,
)
if errors:
raise ValueError(f"Error(s) while validating input: {errors}")
try:
from atlassian import Confluence # noqa: F401
except ImportError:
raise ImportError(
"`atlassian` package not found, please run "
"`pip install atlassian-python-api`"
)
self.base_url = url
self.number_of_retries = number_of_retries
self.min_retry_seconds = min_retry_seconds
self.max_retry_seconds = max_retry_seconds
if session:
self.confluence = Confluence(url=url, session=session, **confluence_kwargs)
elif oauth2:
self.confluence = Confluence(
url=url, oauth2=oauth2, cloud=cloud, **confluence_kwargs
)
elif token:
self.confluence = Confluence(
url=url, token=token, cloud=cloud, **confluence_kwargs
)
else:
self.confluence = Confluence(
url=url,
username=username,
password=api_key,
cloud=cloud,
**confluence_kwargs,
)
@staticmethod
def validate_init_args(
url: Optional[str] = None,
api_key: Optional[str] = None,
username: Optional[str] = None,
session: Optional[requests.Session] = None,
oauth2: Optional[dict] = None,
token: Optional[str] = None,
) -> Union[List, None]:
"""Validates proper combinations of init arguments"""
errors = []
if url is None:
errors.append("Must provide `base_url`")
if (api_key and not username) or (username and not api_key):
errors.append(
"If one of `api_key` or `username` is provided, "
"the other must be as well."
)
non_null_creds = list(
x is not None for x in ((api_key or username), session, oauth2, token)
)
if sum(non_null_creds) > 1:
all_names = ("(api_key, username)", "session", "oath2", "token")
provided = tuple(n for x, n in zip(non_null_creds, all_names) if x)
errors.append(
f"Cannot provide a value for more than one of: {all_names}. Received "
f"values for: {provided}"
)
if oauth2 and set(oauth2.keys()) != {
"access_token",
"access_token_secret",
"consumer_key",
"key_cert",
}:
errors.append(
"You have either omitted require keys or added extra "
"keys to the oauth2 dictionary. key values should be "
"`['access_token', 'access_token_secret', 'consumer_key', 'key_cert']`"
)
return errors or None
def load(
self,
space_key: Optional[str] = None,
page_ids: Optional[List[str]] = None,
label: Optional[str] = None,
cql: Optional[str] = None,
include_restricted_content: bool = False,
include_archived_content: bool = False,
include_attachments: bool = False,
include_comments: bool = False,
content_format: ContentFormat = ContentFormat.STORAGE,
limit: Optional[int] = 50,
max_pages: Optional[int] = 1000,
ocr_languages: Optional[str] = None,
keep_markdown_format: bool = False,
keep_newlines: bool = False,
) -> List[Document]:
"""
:param space_key: Space key retrieved from a confluence URL, defaults to None
:type space_key: Optional[str], optional
:param page_ids: List of specific page IDs to load, defaults to None
:type page_ids: Optional[List[str]], optional
:param label: Get all pages with this label, defaults to None
:type label: Optional[str], optional
:param cql: CQL Expression, defaults to None
:type cql: Optional[str], optional
:param include_restricted_content: defaults to False
:type include_restricted_content: bool, optional
:param include_archived_content: Whether to include archived content,
defaults to False
:type include_archived_content: bool, optional
:param include_attachments: defaults to False
:type include_attachments: bool, optional
:param include_comments: defaults to False
:type include_comments: bool, optional
:param content_format: Specify content format, defaults to
ContentFormat.STORAGE, the supported values are:
`ContentFormat.EDITOR`, `ContentFormat.EXPORT_VIEW`,
`ContentFormat.ANONYMOUS_EXPORT_VIEW`,
`ContentFormat.STORAGE`, and `ContentFormat.VIEW`.
:type content_format: ContentFormat
:param limit: Maximum number of pages to retrieve per request, defaults to 50
:type limit: int, optional
:param max_pages: Maximum number of pages to retrieve in total, defaults 1000
:type max_pages: int, optional
:param ocr_languages: The languages to use for the Tesseract agent. To use a
language, you'll first need to install the appropriate
Tesseract language pack.
:type ocr_languages: str, optional
:param keep_markdown_format: Whether to keep the markdown format, defaults to
False
:type keep_markdown_format: bool
:param keep_newlines: Whether to keep the newlines format, defaults to
False
:type keep_newlines: bool
:raises ValueError: _description_
:raises ImportError: _description_
:return: _description_
:rtype: List[Document]
"""
if not space_key and not page_ids and not label and not cql:
raise ValueError(
"Must specify at least one among `space_key`, `page_ids`, "
"`label`, `cql` parameters."
)
docs = []
if space_key:
pages = self.paginate_request(
self.confluence.get_all_pages_from_space,
space=space_key,
limit=limit,
max_pages=max_pages,
status="any" if include_archived_content else "current",
expand=content_format.value,
)
docs += self.process_pages(
pages,
include_restricted_content,
include_attachments,
include_comments,
content_format,
ocr_languages=ocr_languages,
keep_markdown_format=keep_markdown_format,
keep_newlines=keep_newlines,
)
if label:
pages = self.paginate_request(
self.confluence.get_all_pages_by_label,
label=label,
limit=limit,
max_pages=max_pages,
)
ids_by_label = [page["id"] for page in pages]
if page_ids:
page_ids = list(set(page_ids + ids_by_label))
else:
page_ids = list(set(ids_by_label))
if cql:
pages = self.paginate_request(
self._search_content_by_cql,
cql=cql,
limit=limit,
max_pages=max_pages,
include_archived_spaces=include_archived_content,
expand=content_format.value,
)
docs += self.process_pages(
pages,
include_restricted_content,
include_attachments,
include_comments,
content_format,
ocr_languages,
keep_markdown_format,
)
if page_ids:
for page_id in page_ids:
get_page = retry(
reraise=True,
stop=stop_after_attempt(
self.number_of_retries # type: ignore[arg-type]
),
wait=wait_exponential(
multiplier=1, # type: ignore[arg-type]
min=self.min_retry_seconds, # type: ignore[arg-type]
max=self.max_retry_seconds, # type: ignore[arg-type]
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)(self.confluence.get_page_by_id)
page = get_page(
page_id=page_id, expand=f"{content_format.value},version"
)
if not include_restricted_content and not self.is_public_page(page):
continue
doc = self.process_page(
page,
include_attachments,
include_comments,
content_format,
ocr_languages,
keep_markdown_format,
)
docs.append(doc)
return docs
def _search_content_by_cql(
self, cql: str, include_archived_spaces: Optional[bool] = None, **kwargs: Any
) -> List[dict]:
url = "rest/api/content/search"
params: Dict[str, Any] = {"cql": cql}
params.update(kwargs)
if include_archived_spaces is not None:
params["includeArchivedSpaces"] = include_archived_spaces
response = self.confluence.get(url, params=params)
return response.get("results", [])
def paginate_request(self, retrieval_method: Callable, **kwargs: Any) -> List:
"""Paginate the various methods to retrieve groups of pages.
Unfortunately, due to page size, sometimes the Confluence API
doesn't match the limit value. If `limit` is >100 confluence
seems to cap the response to 100. Also, due to the Atlassian Python
package, we don't get the "next" values from the "_links" key because
they only return the value from the result key. So here, the pagination
starts from 0 and goes until the max_pages, getting the `limit` number
of pages with each request. We have to manually check if there
are more docs based on the length of the returned list of pages, rather than
just checking for the presence of a `next` key in the response like this page
would have you do:
https://developer.atlassian.com/server/confluence/pagination-in-the-rest-api/
:param retrieval_method: Function used to retrieve docs
:type retrieval_method: callable
:return: List of documents
:rtype: List
"""
max_pages = kwargs.pop("max_pages")
docs: List[dict] = []
while len(docs) < max_pages:
get_pages = retry(
reraise=True,
stop=stop_after_attempt(
self.number_of_retries # type: ignore[arg-type]
),
wait=wait_exponential(
multiplier=1,
min=self.min_retry_seconds, # type: ignore[arg-type]
max=self.max_retry_seconds, # type: ignore[arg-type]
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)(retrieval_method)
batch = get_pages(**kwargs, start=len(docs))
if not batch:
break
docs.extend(batch)
return docs[:max_pages]
def is_public_page(self, page: dict) -> bool:
"""Check if a page is publicly accessible."""
restrictions = self.confluence.get_all_restrictions_for_content(page["id"])
return (
page["status"] == "current"
and not restrictions["read"]["restrictions"]["user"]["results"]
and not restrictions["read"]["restrictions"]["group"]["results"]
)
def process_pages(
self,
pages: List[dict],
include_restricted_content: bool,
include_attachments: bool,
include_comments: bool,
content_format: ContentFormat,
ocr_languages: Optional[str] = None,
keep_markdown_format: Optional[bool] = False,
keep_newlines: bool = False,
) -> List[Document]:
"""Process a list of pages into a list of documents."""
docs = []
for page in pages:
if not include_restricted_content and not self.is_public_page(page):
continue
doc = self.process_page(
page,
include_attachments,
include_comments,
content_format,
ocr_languages=ocr_languages,
keep_markdown_format=keep_markdown_format,
keep_newlines=keep_newlines,
)
docs.append(doc)
return docs
def process_page(
self,
page: dict,
include_attachments: bool,
include_comments: bool,
content_format: ContentFormat,
ocr_languages: Optional[str] = None,
keep_markdown_format: Optional[bool] = False,
keep_newlines: bool = False,
) -> Document:
if keep_markdown_format:
try:
from markdownify import markdownify
except ImportError:
raise ImportError(
"`markdownify` package not found, please run "
"`pip install markdownify`"
)
if include_comments or not keep_markdown_format:
try:
from bs4 import BeautifulSoup # type: ignore
except ImportError:
raise ImportError(
"`beautifulsoup4` package not found, please run "
"`pip install beautifulsoup4`"
)
if include_attachments:
attachment_texts = self.process_attachment(page["id"], ocr_languages)
else:
attachment_texts = []
content = content_format.get_content(page)
if keep_markdown_format:
# Use markdownify to keep the page Markdown style
text = markdownify(content, heading_style="ATX") + "".join(attachment_texts)
else:
if keep_newlines:
text = BeautifulSoup(
content.replace("</p>", "\n</p>").replace("<br />", "\n"), "lxml"
).get_text(" ") + "".join(attachment_texts)
else:
text = BeautifulSoup(content, "lxml").get_text(
" ", strip=True
) + "".join(attachment_texts)
if include_comments:
comments = self.confluence.get_page_comments(
page["id"], expand="body.view.value", depth="all"
)["results"]
comment_texts = [
BeautifulSoup(comment["body"]["view"]["value"], "lxml").get_text(
" ", strip=True
)
for comment in comments
]
text = text + "".join(comment_texts)
metadata = {
"title": page["title"],
"id": page["id"],
"source": self.base_url.strip("/") + page["_links"]["webui"],
}
if "version" in page and "when" in page["version"]:
metadata["when"] = page["version"]["when"]
return Document(
page_content=text,
metadata=metadata,
)
def process_attachment(
self,
page_id: str,
ocr_languages: Optional[str] = None,
) -> List[str]:
try:
from PIL import Image # noqa: F401
except ImportError:
raise ImportError(
"`Pillow` package not found, " "please run `pip install Pillow`"
)
# depending on setup you may also need to set the correct path for
# poppler and tesseract
attachments = self.confluence.get_attachments_from_content(page_id)["results"]
texts = []
for attachment in attachments:
media_type = attachment["metadata"]["mediaType"]
absolute_url = self.base_url + attachment["_links"]["download"]
title = attachment["title"]
try:
if media_type == "application/pdf":
text = title + self.process_pdf(absolute_url, ocr_languages)
elif (
media_type == "image/png"
or media_type == "image/jpg"
or media_type == "image/jpeg"
):
text = title + self.process_image(absolute_url, ocr_languages)
elif (
media_type == "application/vnd.openxmlformats-officedocument"
".wordprocessingml.document"
):
text = title + self.process_doc(absolute_url)
elif media_type == "application/vnd.ms-excel":
text = title + self.process_xls(absolute_url)
elif media_type == "image/svg+xml":
text = title + self.process_svg(absolute_url, ocr_languages)
else:
continue
texts.append(text)
except requests.HTTPError as e:
if e.response.status_code == 404:
print(f"Attachment not found at {absolute_url}")
continue
else:
raise
return texts
def process_pdf(
self,
link: str,
ocr_languages: Optional[str] = None,
) -> str:
try:
import pytesseract # noqa: F401
from pdf2image import convert_from_bytes # noqa: F401
except ImportError:
raise ImportError(
"`pytesseract` or `pdf2image` package not found, "
"please run `pip install pytesseract pdf2image`"
)
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
try:
images = convert_from_bytes(response.content)
except ValueError:
return text
for i, image in enumerate(images):
image_text = pytesseract.image_to_string(image, lang=ocr_languages)
text += f"Page {i + 1}:\n{image_text}\n\n"
return text
def process_image(
self,
link: str,
ocr_languages: Optional[str] = None,
) -> str:
try:
import pytesseract # noqa: F401
from PIL import Image # noqa: F401
except ImportError:
raise ImportError(
"`pytesseract` or `Pillow` package not found, "
"please run `pip install pytesseract Pillow`"
)
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
try:
image = Image.open(BytesIO(response.content))
except OSError:
return text
return pytesseract.image_to_string(image, lang=ocr_languages)
def process_doc(self, link: str) -> str:
try:
import docx2txt # noqa: F401
except ImportError:
raise ImportError(
"`docx2txt` package not found, please run `pip install docx2txt`"
)
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
file_data = BytesIO(response.content)
return docx2txt.process(file_data)
def process_xls(self, link: str) -> str:
import io
import os
try:
import xlrd # noqa: F401
except ImportError:
raise ImportError("`xlrd` package not found, please run `pip install xlrd`")
try:
import pandas as pd
except ImportError:
raise ImportError(
"`pandas` package not found, please run `pip install pandas`"
)
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
filename = os.path.basename(link)
# Getting the whole content of the url after filename,
# Example: ".csv?version=2&modificationDate=1631800010678&cacheVersion=1&api=v2"
file_extension = os.path.splitext(filename)[1]
if file_extension.startswith(
".csv"
): # if the extension found in the url is ".csv"
content_string = response.content.decode("utf-8")
df = pd.read_csv(io.StringIO(content_string))
text += df.to_string(index=False, header=False) + "\n\n"
else:
workbook = xlrd.open_workbook(file_contents=response.content)
for sheet in workbook.sheets():
text += f"{sheet.name}:\n"
for row in range(sheet.nrows):
for col in range(sheet.ncols):
text += f"{sheet.cell_value(row, col)}\t"
text += "\n"
text += "\n"
return text
def process_svg(
self,
link: str,
ocr_languages: Optional[str] = None,
) -> str:
try:
import pytesseract # noqa: F401
from PIL import Image # noqa: F401
from reportlab.graphics import renderPM # noqa: F401
from svglib.svglib import svg2rlg # noqa: F401
except ImportError:
raise ImportError(
"`pytesseract`, `Pillow`, `reportlab` or `svglib` package not found, "
"please run `pip install pytesseract Pillow reportlab svglib`"
)
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
drawing = svg2rlg(BytesIO(response.content))
img_data = BytesIO()
renderPM.drawToFile(drawing, img_data, fmt="PNG")
img_data.seek(0)
image = Image.open(img_data)
return pytesseract.image_to_string(image, lang=ocr_languages)
| [] |
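The attachment helpers above all follow one OCR pattern: download the bytes, open them as an image, and hand the image to Tesseract. Below is a minimal standalone sketch of that pattern, assuming `pytesseract` and `Pillow` are installed; `ocr_bytes_to_text` is a hypothetical helper name, not part of the loader's API.

# Minimal sketch of the OCR step shared by process_image/process_pdf above.
# Assumes `pip install pytesseract Pillow`; ocr_bytes_to_text is illustrative.
from io import BytesIO
from typing import Optional

import pytesseract
from PIL import Image


def ocr_bytes_to_text(content: bytes, ocr_languages: Optional[str] = None) -> str:
    """Return OCR text for raw image bytes, or "" if they are not an image."""
    try:
        image = Image.open(BytesIO(content))
    except OSError:
        return ""
    return pytesseract.image_to_string(image, lang=ocr_languages)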
2024-01-10 | mth93/langchain | libs~community~tests~unit_tests~llms~test_minimax.py | """Test Minimax llm"""
from typing import cast
from libs.core.langchain_core.pydantic_v1 import SecretStr
from pytest import CaptureFixture, MonkeyPatch
from langchain_community.llms.minimax import Minimax
def test_api_key_is_secret_string() -> None:
llm = Minimax(minimax_api_key="secret-api-key", minimax_group_id="group_id")
assert isinstance(llm.minimax_api_key, SecretStr)
def test_api_key_masked_when_passed_from_env(
monkeypatch: MonkeyPatch, capsys: CaptureFixture
) -> None:
"""Test initialization with an API key provided via an env variable"""
monkeypatch.setenv("MINIMAX_API_KEY", "secret-api-key")
monkeypatch.setenv("MINIMAX_GROUP_ID", "group_id")
llm = Minimax()
print(llm.minimax_api_key, end="")
captured = capsys.readouterr()
assert captured.out == "**********"
def test_api_key_masked_when_passed_via_constructor(
capsys: CaptureFixture,
) -> None:
"""Test initialization with an API key provided via the initializer"""
llm = Minimax(minimax_api_key="secret-api-key", minimax_group_id="group_id")
print(llm.minimax_api_key, end="")
captured = capsys.readouterr()
assert captured.out == "**********"
def test_uses_actual_secret_value_from_secretstr() -> None:
"""Test that actual secret is retrieved using `.get_secret_value()`."""
llm = Minimax(minimax_api_key="secret-api-key", minimax_group_id="group_id")
assert cast(SecretStr, llm.minimax_api_key).get_secret_value() == "secret-api-key"
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~nlpcloud.py | from typing import Any, Dict, List, Mapping, Optional
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.pydantic_v1 import Extra, SecretStr, root_validator
from libs.core.langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
class NLPCloud(LLM):
"""NLPCloud large language models.
To use, you should have the ``nlpcloud`` python package installed, and the
environment variable ``NLPCLOUD_API_KEY`` set with your API key.
Example:
.. code-block:: python
from langchain_community.llms import NLPCloud
nlpcloud = NLPCloud(model="finetuned-gpt-neox-20b")
"""
client: Any #: :meta private:
model_name: str = "finetuned-gpt-neox-20b"
"""Model name to use."""
gpu: bool = True
"""Whether to use a GPU or not"""
lang: str = "en"
"""Language to use (multilingual addon)"""
temperature: float = 0.7
"""What sampling temperature to use."""
max_length: int = 256
"""The maximum number of tokens to generate in the completion."""
length_no_input: bool = True
"""Whether min_length and max_length should include the length of the input."""
remove_input: bool = True
"""Remove input text from API response"""
remove_end_sequence: bool = True
"""Whether or not to remove the end sequence token."""
bad_words: List[str] = []
"""List of tokens not allowed to be generated."""
top_p: int = 1
"""Total probability mass of tokens to consider at each step."""
top_k: int = 50
"""The number of highest probability tokens to keep for top-k filtering."""
repetition_penalty: float = 1.0
"""Penalizes repeated tokens. 1.0 means no penalty."""
num_beams: int = 1
"""Number of beams for beam search."""
num_return_sequences: int = 1
"""How many completions to generate for each prompt."""
nlpcloud_api_key: Optional[SecretStr] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["nlpcloud_api_key"] = convert_to_secret_str(
get_from_dict_or_env(values, "nlpcloud_api_key", "NLPCLOUD_API_KEY")
)
try:
import nlpcloud
values["client"] = nlpcloud.Client(
values["model_name"],
values["nlpcloud_api_key"].get_secret_value(),
gpu=values["gpu"],
lang=values["lang"],
)
except ImportError:
raise ImportError(
"Could not import nlpcloud python package. "
"Please install it with `pip install nlpcloud`."
)
return values
@property
def _default_params(self) -> Mapping[str, Any]:
"""Get the default parameters for calling NLPCloud API."""
return {
"temperature": self.temperature,
"max_length": self.max_length,
"length_no_input": self.length_no_input,
"remove_input": self.remove_input,
"remove_end_sequence": self.remove_end_sequence,
"bad_words": self.bad_words,
"top_p": self.top_p,
"top_k": self.top_k,
"repetition_penalty": self.repetition_penalty,
"num_beams": self.num_beams,
"num_return_sequences": self.num_return_sequences,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_name": self.model_name},
**{"gpu": self.gpu},
**{"lang": self.lang},
**self._default_params,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "nlpcloud"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to NLPCloud's create endpoint.
Args:
prompt: The prompt to pass into the model.
            stop: At most one stop sequence; longer lists raise a ValueError.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = nlpcloud("Tell me a joke.")
"""
if stop and len(stop) > 1:
raise ValueError(
"NLPCloud only supports a single stop sequence per generation."
"Pass in a list of length 1."
)
elif stop and len(stop) == 1:
end_sequence = stop[0]
else:
end_sequence = None
params = {**self._default_params, **kwargs}
response = self.client.generation(prompt, end_sequence=end_sequence, **params)
return response["generated_text"]
| [] |
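A hedged usage sketch for the single-stop-sequence rule enforced in `_call` above; it assumes the `nlpcloud` package is installed and `NLPCLOUD_API_KEY` is set, and the model name and prompt are illustrative.

from langchain_community.llms import NLPCloud

llm = NLPCloud(model_name="finetuned-gpt-neox-20b", max_length=64)
# Only a single stop sequence is accepted; a longer list raises ValueError.
print(llm("Q: Name one prime number.\nA:", stop=["\n"]))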
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~doc_intelligence.py | from typing import Iterator, List, Optional
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.document_loaders.blob_loaders import Blob
from langchain_community.document_loaders.parsers import (
AzureAIDocumentIntelligenceParser,
)
class AzureAIDocumentIntelligenceLoader(BaseLoader):
"""Loads a PDF with Azure Document Intelligence"""
def __init__(
self,
api_endpoint: str,
api_key: str,
file_path: Optional[str] = None,
url_path: Optional[str] = None,
api_version: Optional[str] = None,
api_model: str = "prebuilt-layout",
mode: str = "markdown",
) -> None:
"""
Initialize the object for file processing with Azure Document Intelligence
(formerly Form Recognizer).
This constructor initializes a AzureAIDocumentIntelligenceParser object to be
used for parsing files using the Azure Document Intelligence API. The load
method generates Documents whose content representations are determined by the
mode parameter.
Parameters:
-----------
api_endpoint: str
The API endpoint to use for DocumentIntelligenceClient construction.
api_key: str
The API key to use for DocumentIntelligenceClient construction.
file_path : Optional[str]
The path to the file that needs to be loaded.
Either file_path or url_path must be specified.
url_path : Optional[str]
The URL to the file that needs to be loaded.
Either file_path or url_path must be specified.
api_version: Optional[str]
The API version for DocumentIntelligenceClient. Setting None to use
the default value from SDK.
api_model: str
The model name or ID to be used for form recognition in Azure.
Examples:
---------
>>> obj = AzureAIDocumentIntelligenceLoader(
... file_path="path/to/file",
... api_endpoint="https://endpoint.azure.com",
... api_key="APIKEY",
... api_version="2023-10-31-preview",
        ...     api_model="prebuilt-document"
... )
"""
assert (
file_path is not None or url_path is not None
), "file_path or url_path must be provided"
self.file_path = file_path
self.url_path = url_path
self.parser = AzureAIDocumentIntelligenceParser(
api_endpoint=api_endpoint,
api_key=api_key,
api_version=api_version,
api_model=api_model,
mode=mode,
)
def load(self) -> List[Document]:
"""Load given path as pages."""
return list(self.lazy_load())
def lazy_load(
self,
) -> Iterator[Document]:
"""Lazy load given path as pages."""
if self.file_path is not None:
blob = Blob.from_path(self.file_path)
yield from self.parser.parse(blob)
else:
yield from self.parser.parse_url(self.url_path)
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~vectorstores~test_zilliz.py | """Test Zilliz functionality."""
from typing import List, Optional
from libs.core.langchain_core.documents import Document
from langchain_community.vectorstores import Zilliz
from tests.integration_tests.vectorstores.fake_embeddings import (
FakeEmbeddings,
fake_texts,
)
def _zilliz_from_texts(
metadatas: Optional[List[dict]] = None, drop: bool = True
) -> Zilliz:
return Zilliz.from_texts(
fake_texts,
FakeEmbeddings(),
metadatas=metadatas,
connection_args={
"uri": "",
"user": "",
"password": "",
"secure": True,
},
drop_old=drop,
)
def test_zilliz() -> None:
"""Test end to end construction and search."""
docsearch = _zilliz_from_texts()
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_zilliz_with_score() -> None:
"""Test end to end construction and search with scores and IDs."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = _zilliz_from_texts(metadatas=metadatas)
output = docsearch.similarity_search_with_score("foo", k=3)
docs = [o[0] for o in output]
scores = [o[1] for o in output]
assert docs == [
Document(page_content="foo", metadata={"page": 0}),
Document(page_content="bar", metadata={"page": 1}),
Document(page_content="baz", metadata={"page": 2}),
]
assert scores[0] < scores[1] < scores[2]
def test_zilliz_max_marginal_relevance_search() -> None:
"""Test end to end construction and MRR search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = _zilliz_from_texts(metadatas=metadatas)
output = docsearch.max_marginal_relevance_search("foo", k=2, fetch_k=3)
assert output == [
Document(page_content="foo", metadata={"page": 0}),
Document(page_content="baz", metadata={"page": 2}),
]
def test_zilliz_add_extra() -> None:
"""Test end to end construction and MRR search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = _zilliz_from_texts(metadatas=metadatas)
docsearch.add_texts(texts, metadatas)
output = docsearch.similarity_search("foo", k=10)
assert len(output) == 6
def test_zilliz_no_drop() -> None:
"""Test end to end construction and MRR search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = _zilliz_from_texts(metadatas=metadatas)
del docsearch
docsearch = _zilliz_from_texts(metadatas=metadatas, drop=False)
output = docsearch.similarity_search("foo", k=10)
assert len(output) == 6
# if __name__ == "__main__":
# test_zilliz()
# test_zilliz_with_score()
# test_zilliz_max_marginal_relevance_search()
# test_zilliz_add_extra()
# test_zilliz_no_drop()
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~parsers~language~cobol.py | import re
from typing import Callable, List
from langchain_community.document_loaders.parsers.language.code_segmenter import (
CodeSegmenter,
)
class CobolSegmenter(CodeSegmenter):
"""Code segmenter for `COBOL`."""
PARAGRAPH_PATTERN = re.compile(r"^[A-Z0-9\-]+(\s+.*)?\.$", re.IGNORECASE)
DIVISION_PATTERN = re.compile(
r"^\s*(IDENTIFICATION|DATA|PROCEDURE|ENVIRONMENT)\s+DIVISION.*$", re.IGNORECASE
)
    SECTION_PATTERN = re.compile(r"^\s*[A-Z0-9\-]+\s+SECTION\.$", re.IGNORECASE)
def __init__(self, code: str):
super().__init__(code)
self.source_lines: List[str] = self.code.splitlines()
def is_valid(self) -> bool:
# Identify presence of any division to validate COBOL code
return any(self.DIVISION_PATTERN.match(line) for line in self.source_lines)
def _extract_code(self, start_idx: int, end_idx: int) -> str:
return "\n".join(self.source_lines[start_idx:end_idx]).rstrip("\n")
def _is_relevant_code(self, line: str) -> bool:
"""Check if a line is part of the procedure division or a relevant section."""
if "PROCEDURE DIVISION" in line.upper():
return True
# Add additional conditions for relevant sections if needed
return False
def _process_lines(self, func: Callable) -> List[str]:
"""A generic function to process COBOL lines based on provided func."""
elements: List[str] = []
start_idx = None
inside_relevant_section = False
for i, line in enumerate(self.source_lines):
if self._is_relevant_code(line):
inside_relevant_section = True
if inside_relevant_section and (
self.PARAGRAPH_PATTERN.match(line.strip().split(" ")[0])
or self.SECTION_PATTERN.match(line.strip())
):
if start_idx is not None:
func(elements, start_idx, i)
start_idx = i
# Handle the last element if exists
if start_idx is not None:
func(elements, start_idx, len(self.source_lines))
return elements
def extract_functions_classes(self) -> List[str]:
def extract_func(elements: List[str], start_idx: int, end_idx: int) -> None:
elements.append(self._extract_code(start_idx, end_idx))
return self._process_lines(extract_func)
def simplify_code(self) -> str:
simplified_lines: List[str] = []
inside_relevant_section = False
omitted_code_added = (
False # To track if "* OMITTED CODE *" has been added after the last header
)
for line in self.source_lines:
is_header = (
"PROCEDURE DIVISION" in line
or "DATA DIVISION" in line
or "IDENTIFICATION DIVISION" in line
or self.PARAGRAPH_PATTERN.match(line.strip().split(" ")[0])
or self.SECTION_PATTERN.match(line.strip())
)
if is_header:
inside_relevant_section = True
# Reset the flag since we're entering a new section/division or
# paragraph
omitted_code_added = False
if inside_relevant_section:
if is_header:
# Add header and reset the omitted code added flag
simplified_lines.append(line)
elif not omitted_code_added:
# Add omitted code comment only if it hasn't been added directly
# after the last header
simplified_lines.append("* OMITTED CODE *")
omitted_code_added = True
return "\n".join(simplified_lines)
| [] |
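A small sketch of driving the segmenter above on an inline COBOL snippet; the sample program is illustrative and not taken from the test suite.

from langchain_community.document_loaders.parsers.language.cobol import (
    CobolSegmenter,
)

SAMPLE = """\
IDENTIFICATION DIVISION.
PROGRAM-ID. HELLO.
PROCEDURE DIVISION.
MAIN-PARA.
    DISPLAY 'HELLO'.
    STOP RUN.
"""

segmenter = CobolSegmenter(SAMPLE)
print(segmenter.is_valid())                   # True: a DIVISION header is present
print(segmenter.extract_functions_classes())  # paragraphs under PROCEDURE DIVISION
print(segmenter.simplify_code())              # non-header lines become "* OMITTED CODE *"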
2024-01-10 | mth93/langchain | libs~langchain~langchain~memory~motorhead_memory.py | from typing import Any, Dict, List, Optional
import requests
from libs.core.langchain_core.messages import get_buffer_string
from langchain.memory.chat_memory import BaseChatMemory
MANAGED_URL = "https://api.getmetal.io/v1/motorhead"
# LOCAL_URL = "http://localhost:8080"
class MotorheadMemory(BaseChatMemory):
"""Chat message memory backed by Motorhead service."""
url: str = MANAGED_URL
timeout: int = 3000
memory_key: str = "history"
session_id: str
context: Optional[str] = None
# Managed Params
api_key: Optional[str] = None
client_id: Optional[str] = None
def __get_headers(self) -> Dict[str, str]:
is_managed = self.url == MANAGED_URL
headers = {
"Content-Type": "application/json",
}
if is_managed and not (self.api_key and self.client_id):
raise ValueError(
"""
You must provide an API key or a client ID to use the managed
version of Motorhead. Visit https://getmetal.io for more information.
"""
)
if is_managed and self.api_key and self.client_id:
headers["x-metal-api-key"] = self.api_key
headers["x-metal-client-id"] = self.client_id
return headers
async def init(self) -> None:
res = requests.get(
f"{self.url}/sessions/{self.session_id}/memory",
timeout=self.timeout,
headers=self.__get_headers(),
)
res_data = res.json()
res_data = res_data.get("data", res_data) # Handle Managed Version
messages = res_data.get("messages", [])
context = res_data.get("context", "NONE")
for message in reversed(messages):
if message["role"] == "AI":
self.chat_memory.add_ai_message(message["content"])
else:
self.chat_memory.add_user_message(message["content"])
if context and context != "NONE":
self.context = context
def load_memory_variables(self, values: Dict[str, Any]) -> Dict[str, Any]:
if self.return_messages:
return {self.memory_key: self.chat_memory.messages}
else:
return {self.memory_key: get_buffer_string(self.chat_memory.messages)}
@property
def memory_variables(self) -> List[str]:
return [self.memory_key]
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
input_str, output_str = self._get_input_output(inputs, outputs)
requests.post(
f"{self.url}/sessions/{self.session_id}/memory",
timeout=self.timeout,
json={
"messages": [
{"role": "Human", "content": f"{input_str}"},
{"role": "AI", "content": f"{output_str}"},
]
},
headers=self.__get_headers(),
)
super().save_context(inputs, outputs)
def delete_session(self) -> None:
"""Delete a session"""
requests.delete(f"{self.url}/sessions/{self.session_id}/memory")
| [
"PLACEHOLDER"
] |
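A hedged sketch of wiring up the managed Motorhead service defined above; the session id and credentials are placeholders, and `init()` must be awaited before the memory is used.

import asyncio

from langchain.memory.motorhead_memory import MotorheadMemory

memory = MotorheadMemory(
    session_id="demo-session",
    api_key="YOUR_METAL_API_KEY",
    client_id="YOUR_METAL_CLIENT_ID",
)
asyncio.run(memory.init())  # pulls prior messages/context from the service
memory.save_context({"input": "Hi there"}, {"output": "Hello!"})
print(memory.load_memory_variables({}))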
2024-01-10 | mth93/langchain | libs~langchain~tests~unit_tests~agents~format_scratchpad~test_xml.py | from libs.core.langchain_core.agents import AgentAction
from langchain.agents.format_scratchpad.xml import format_xml
def test_single_agent_action_observation() -> None:
# Arrange
agent_action = AgentAction(tool="Tool1", tool_input="Input1", log="Log1")
observation = "Observation1"
intermediate_steps = [(agent_action, observation)]
# Act
result = format_xml(intermediate_steps)
expected_result = """<tool>Tool1</tool><tool_input>Input1\
</tool_input><observation>Observation1</observation>"""
# Assert
assert result == expected_result
def test_multiple_agent_actions_observations() -> None:
# Arrange
agent_action1 = AgentAction(tool="Tool1", tool_input="Input1", log="Log1")
agent_action2 = AgentAction(tool="Tool2", tool_input="Input2", log="Log2")
observation1 = "Observation1"
observation2 = "Observation2"
intermediate_steps = [(agent_action1, observation1), (agent_action2, observation2)]
# Act
result = format_xml(intermediate_steps)
# Assert
expected_result = """<tool>Tool1</tool><tool_input>Input1\
</tool_input><observation>Observation1</observation><tool>\
Tool2</tool><tool_input>Input2</tool_input><observation>\
Observation2</observation>"""
assert result == expected_result
def test_empty_list_agent_actions() -> None:
result = format_xml([])
assert result == ""
| [] |
2024-01-10 | mth93/langchain | libs~langchain~tests~unit_tests~retrievers~test_multi_vector.py | from typing import Any, List
from libs.core.langchain_core.documents import Document
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryStore
from tests.unit_tests.indexes.test_indexing import InMemoryVectorStore
class InMemoryVectorstoreWithSearch(InMemoryVectorStore):
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
res = self.store.get(query)
if res is None:
return []
return [res]
def test_multi_vector_retriever_initialization() -> None:
vectorstore = InMemoryVectorstoreWithSearch()
retriever = MultiVectorRetriever(
vectorstore=vectorstore, docstore=InMemoryStore(), doc_id="doc_id"
)
documents = [Document(page_content="test document", metadata={"doc_id": "1"})]
retriever.vectorstore.add_documents(documents, ids=["1"])
retriever.docstore.mset(list(zip(["1"], documents)))
results = retriever.invoke("1")
assert len(results) > 0
assert results[0].page_content == "test document"
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~embeddings~voyageai.py | from __future__ import annotations
import json
import logging
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Tuple,
Union,
cast,
)
import requests
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra, SecretStr, root_validator
from libs.core.langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from tenacity import (
before_sleep_log,
retry,
stop_after_attempt,
wait_exponential,
)
logger = logging.getLogger(__name__)
def _create_retry_decorator(embeddings: VoyageEmbeddings) -> Callable[[Any], Any]:
min_seconds = 4
max_seconds = 10
# Wait 2^x * 1 second between each retry starting with
# 4 seconds, then up to 10 seconds, then 10 seconds afterwards
return retry(
reraise=True,
stop=stop_after_attempt(embeddings.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def _check_response(response: dict) -> dict:
if "data" not in response:
raise RuntimeError(f"Voyage API Error. Message: {json.dumps(response)}")
return response
def embed_with_retry(embeddings: VoyageEmbeddings, **kwargs: Any) -> Any:
"""Use tenacity to retry the embedding call."""
retry_decorator = _create_retry_decorator(embeddings)
@retry_decorator
def _embed_with_retry(**kwargs: Any) -> Any:
response = requests.post(**kwargs)
return _check_response(response.json())
return _embed_with_retry(**kwargs)
class VoyageEmbeddings(BaseModel, Embeddings):
"""Voyage embedding models.
To use, you should have the environment variable ``VOYAGE_API_KEY`` set with
your API key or pass it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain_community.embeddings import VoyageEmbeddings
voyage = VoyageEmbeddings(voyage_api_key="your-api-key")
text = "This is a test query."
query_result = voyage.embed_query(text)
"""
model: str = "voyage-01"
voyage_api_base: str = "https://api.voyageai.com/v1/embeddings"
voyage_api_key: Optional[SecretStr] = None
batch_size: int = 8
"""Maximum number of texts to embed in each API request."""
max_retries: int = 6
"""Maximum number of retries to make when generating."""
request_timeout: Optional[Union[float, Tuple[float, float]]] = None
"""Timeout in seconds for the API request."""
show_progress_bar: bool = False
"""Whether to show a progress bar when embedding. Must have tqdm installed if set
to True."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["voyage_api_key"] = convert_to_secret_str(
get_from_dict_or_env(values, "voyage_api_key", "VOYAGE_API_KEY")
)
return values
def _invocation_params(
self, input: List[str], input_type: Optional[str] = None
) -> Dict:
api_key = cast(SecretStr, self.voyage_api_key).get_secret_value()
params = {
"url": self.voyage_api_base,
"headers": {"Authorization": f"Bearer {api_key}"},
"json": {"model": self.model, "input": input, "input_type": input_type},
"timeout": self.request_timeout,
}
return params
def _get_embeddings(
self,
texts: List[str],
batch_size: Optional[int] = None,
input_type: Optional[str] = None,
) -> List[List[float]]:
embeddings: List[List[float]] = []
if batch_size is None:
batch_size = self.batch_size
if self.show_progress_bar:
try:
from tqdm.auto import tqdm
except ImportError as e:
raise ImportError(
"Must have tqdm installed if `show_progress_bar` is set to True. "
"Please install with `pip install tqdm`."
) from e
_iter = tqdm(range(0, len(texts), batch_size))
else:
_iter = range(0, len(texts), batch_size)
if input_type and input_type not in ["query", "document"]:
raise ValueError(
f"input_type {input_type} is invalid. Options: None, 'query', "
"'document'."
)
for i in _iter:
response = embed_with_retry(
self,
**self._invocation_params(
input=texts[i : i + batch_size], input_type=input_type
),
)
embeddings.extend(r["embedding"] for r in response["data"])
return embeddings
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to Voyage Embedding endpoint for embedding search docs.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
return self._get_embeddings(
texts, batch_size=self.batch_size, input_type="document"
)
def embed_query(self, text: str) -> List[float]:
"""Call out to Voyage Embedding endpoint for embedding query text.
Args:
text: The text to embed.
Returns:
Embedding for the text.
"""
return self._get_embeddings([text], input_type="query")[0]
def embed_general_texts(
self, texts: List[str], *, input_type: Optional[str] = None
) -> List[List[float]]:
"""Call out to Voyage Embedding endpoint for embedding general text.
Args:
texts: The list of texts to embed.
input_type: Type of the input text. Default to None, meaning the type is
unspecified. Other options: query, document.
Returns:
Embedding for the text.
"""
return self._get_embeddings(
texts, batch_size=self.batch_size, input_type=input_type
)
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~vectorstores~test_xata.py | """Test Xata vector store functionality.
Before running this test, please create a Xata database by following
the instructions from:
https://python.langchain.com/docs/integrations/vectorstores/xata
"""
import os
from libs.core.langchain_core.documents import Document
from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.vectorstores.xata import XataVectorStore
class TestXata:
@classmethod
def setup_class(cls) -> None:
assert os.getenv("XATA_API_KEY"), "XATA_API_KEY environment variable is not set"
assert os.getenv("XATA_DB_URL"), "XATA_DB_URL environment variable is not set"
def test_similarity_search_without_metadata(
self, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end constructions and search without metadata."""
texts = ["foo", "bar", "baz"]
docsearch = XataVectorStore.from_texts(
api_key=os.getenv("XATA_API_KEY"),
db_url=os.getenv("XATA_DB_URL"),
texts=texts,
embedding=embedding_openai,
)
docsearch.wait_for_indexing(ndocs=3)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
docsearch.delete(delete_all=True)
def test_similarity_search_with_metadata(
self, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end construction and search with a metadata filter.
This test requires a column named "a" of type integer to be present
in the Xata table."""
texts = ["foo", "foo", "foo"]
metadatas = [{"a": i} for i in range(len(texts))]
docsearch = XataVectorStore.from_texts(
api_key=os.getenv("XATA_API_KEY"),
db_url=os.getenv("XATA_DB_URL"),
texts=texts,
embedding=embedding_openai,
metadatas=metadatas,
)
docsearch.wait_for_indexing(ndocs=3)
output = docsearch.similarity_search("foo", k=1, filter={"a": 1})
assert output == [Document(page_content="foo", metadata={"a": 1})]
docsearch.delete(delete_all=True)
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~couchbase.py | import logging
from typing import Iterator, List, Optional
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class CouchbaseLoader(BaseLoader):
"""Load documents from `Couchbase`.
Each document represents one row of the result. The `page_content_fields` are
    written into the `page_content` of the document. The `metadata_fields` are written
into the `metadata` of the document. By default, all columns are written into
the `page_content` and none into the `metadata`.
"""
def __init__(
self,
connection_string: str,
db_username: str,
db_password: str,
query: str,
*,
page_content_fields: Optional[List[str]] = None,
metadata_fields: Optional[List[str]] = None,
) -> None:
"""Initialize Couchbase document loader.
Args:
connection_string (str): The connection string to the Couchbase cluster.
db_username (str): The username to connect to the Couchbase cluster.
db_password (str): The password to connect to the Couchbase cluster.
query (str): The SQL++ query to execute.
page_content_fields (Optional[List[str]]): The columns to write into the
`page_content` field of the document. By default, all columns are
written.
metadata_fields (Optional[List[str]]): The columns to write into the
`metadata` field of the document. By default, no columns are written.
"""
try:
from couchbase.auth import PasswordAuthenticator
from couchbase.cluster import Cluster
from couchbase.options import ClusterOptions
except ImportError as e:
raise ImportError(
"Could not import couchbase package."
"Please install couchbase SDK with `pip install couchbase`."
) from e
if not connection_string:
raise ValueError("connection_string must be provided.")
if not db_username:
raise ValueError("db_username must be provided.")
if not db_password:
raise ValueError("db_password must be provided.")
auth = PasswordAuthenticator(
db_username,
db_password,
)
self.cluster: Cluster = Cluster(connection_string, ClusterOptions(auth))
self.query = query
self.page_content_fields = page_content_fields
self.metadata_fields = metadata_fields
def load(self) -> List[Document]:
"""Load Couchbase data into Document objects."""
return list(self.lazy_load())
def lazy_load(self) -> Iterator[Document]:
"""Load Couchbase data into Document objects lazily."""
from datetime import timedelta
# Ensure connection to Couchbase cluster
self.cluster.wait_until_ready(timedelta(seconds=5))
# Run SQL++ Query
result = self.cluster.query(self.query)
for row in result:
metadata_fields = self.metadata_fields
page_content_fields = self.page_content_fields
if not page_content_fields:
page_content_fields = list(row.keys())
if not metadata_fields:
metadata_fields = []
metadata = {field: row[field] for field in metadata_fields}
document = "\n".join(
f"{k}: {v}" for k, v in row.items() if k in page_content_fields
)
yield (Document(page_content=document, metadata=metadata))
| [] |
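A hedged usage sketch for the loader above; the connection string, credentials, and SQL++ query are placeholders for a real Couchbase cluster.

from langchain_community.document_loaders.couchbase import CouchbaseLoader

loader = CouchbaseLoader(
    connection_string="couchbase://localhost",
    db_username="Administrator",
    db_password="password",
    query="SELECT h.name, h.city FROM `travel-sample`.inventory.hotel h LIMIT 5",
    page_content_fields=["name"],
    metadata_fields=["city"],
)
for doc in loader.lazy_load():
    print(doc.page_content, doc.metadata)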
2024-01-10 | mth93/langchain | libs~community~langchain_community~tools~office365~events_search.py | """Util that Searches calendar events in Office 365.
Free, but setup is required. See link below.
https://learn.microsoft.com/en-us/graph/auth/
"""
from datetime import datetime as dt
from typing import Any, Dict, List, Optional, Type
from libs.core.langchain_core.callbacks import CallbackManagerForToolRun
from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra, Field
from langchain_community.tools.office365.base import O365BaseTool
from langchain_community.tools.office365.utils import UTC_FORMAT, clean_body
class SearchEventsInput(BaseModel):
"""Input for SearchEmails Tool.
From https://learn.microsoft.com/en-us/graph/search-query-parameter"""
start_datetime: str = Field(
description=(
" The start datetime for the search query in the following format: "
' YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date and time '
" components, and the time zone offset is specified as ±hh:mm. "
' For example: "2023-06-09T10:30:00+03:00" represents June 9th, '
" 2023, at 10:30 AM in a time zone with a positive offset of 3 "
" hours from Coordinated Universal Time (UTC)."
)
)
end_datetime: str = Field(
description=(
" The end datetime for the search query in the following format: "
' YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date and time '
" components, and the time zone offset is specified as ±hh:mm. "
' For example: "2023-06-09T10:30:00+03:00" represents June 9th, '
" 2023, at 10:30 AM in a time zone with a positive offset of 3 "
" hours from Coordinated Universal Time (UTC)."
)
)
max_results: int = Field(
default=10,
description="The maximum number of results to return.",
)
truncate: bool = Field(
default=True,
description=(
"Whether the event's body is truncated to meet token number limits. Set to "
"False for searches that will retrieve small events, otherwise, set to "
"True."
),
)
class O365SearchEvents(O365BaseTool):
"""Class for searching calendar events in Office 365
Free, but setup is required
"""
name: str = "events_search"
args_schema: Type[BaseModel] = SearchEventsInput
description: str = (
" Use this tool to search for the user's calendar events."
" The input must be the start and end datetimes for the search query."
" The output is a JSON list of all the events in the user's calendar"
" between the start and end times. You can assume that the user can "
" not schedule any meeting over existing meetings, and that the user "
"is busy during meetings. Any times without events are free for the user. "
)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _run(
self,
start_datetime: str,
end_datetime: str,
max_results: int = 10,
truncate: bool = True,
run_manager: Optional[CallbackManagerForToolRun] = None,
truncate_limit: int = 150,
) -> List[Dict[str, Any]]:
# Get calendar object
schedule = self.account.schedule()
calendar = schedule.get_default_calendar()
# Process the date range parameters
start_datetime_query = dt.strptime(start_datetime, UTC_FORMAT)
end_datetime_query = dt.strptime(end_datetime, UTC_FORMAT)
# Run the query
q = calendar.new_query("start").greater_equal(start_datetime_query)
q.chain("and").on_attribute("end").less_equal(end_datetime_query)
events = calendar.get_events(query=q, include_recurring=True, limit=max_results)
# Generate output dict
output_events = []
for event in events:
output_event = {}
output_event["organizer"] = event.organizer
output_event["subject"] = event.subject
if truncate:
output_event["body"] = clean_body(event.body)[:truncate_limit]
else:
output_event["body"] = clean_body(event.body)
# Get the time zone from the search parameters
time_zone = start_datetime_query.tzinfo
# Assign the datetimes in the search time zone
output_event["start_datetime"] = event.start.astimezone(time_zone).strftime(
UTC_FORMAT
)
output_event["end_datetime"] = event.end.astimezone(time_zone).strftime(
UTC_FORMAT
)
output_event["modified_date"] = event.modified.astimezone(
time_zone
).strftime(UTC_FORMAT)
output_events.append(output_event)
return output_events
| [] |
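A hedged sketch of calling the tool above; it assumes an O365 account already authenticated via the client credentials described in the Microsoft Graph auth docs, and the datetimes are illustrative.

from langchain_community.tools.office365.events_search import O365SearchEvents

tool = O365SearchEvents()
events = tool.run(
    {
        "start_datetime": "2023-06-09T00:00:00+00:00",
        "end_datetime": "2023-06-10T00:00:00+00:00",
        "max_results": 5,
    }
)
print(events)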
2024-01-10 | mth93/langchain | libs~community~langchain_community~tools~file_management~file_search.py | import fnmatch
import os
from typing import Optional, Type
from libs.core.langchain_core.callbacks import CallbackManagerForToolRun
from libs.core.langchain_core.pydantic_v1 import BaseModel, Field
from libs.core.langchain_core.tools import BaseTool
from langchain_community.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileToolMixin,
FileValidationError,
)
class FileSearchInput(BaseModel):
"""Input for FileSearchTool."""
dir_path: str = Field(
default=".",
description="Subdirectory to search in.",
)
pattern: str = Field(
...,
description="Unix shell regex, where * matches everything.",
)
class FileSearchTool(BaseFileToolMixin, BaseTool):
"""Tool that searches for files in a subdirectory that match a regex pattern."""
name: str = "file_search"
args_schema: Type[BaseModel] = FileSearchInput
description: str = (
"Recursively search for files in a subdirectory that match the regex pattern"
)
def _run(
self,
pattern: str,
dir_path: str = ".",
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
try:
dir_path_ = self.get_relative_path(dir_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(arg_name="dir_path", value=dir_path)
matches = []
try:
for root, _, filenames in os.walk(dir_path_):
for filename in fnmatch.filter(filenames, pattern):
absolute_path = os.path.join(root, filename)
relative_path = os.path.relpath(absolute_path, dir_path_)
matches.append(relative_path)
if matches:
return "\n".join(matches)
else:
return f"No files found for pattern {pattern} in directory {dir_path}"
except Exception as e:
return "Error: " + str(e)
# TODO: Add aiofiles method
| [] |
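A hedged usage sketch for the search tool above; `root_dir` and the glob pattern are illustrative.

from langchain_community.tools.file_management.file_search import FileSearchTool

tool = FileSearchTool(root_dir="/tmp/demo")
print(tool.run({"dir_path": ".", "pattern": "*.txt"}))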
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~chat_models~test_google_palm.py | """Test Google PaLM Chat API wrapper.
Note: This test must be run with the GOOGLE_API_KEY environment variable set to a
valid API key.
"""
from libs.core.langchain_core.messages import BaseMessage, HumanMessage, SystemMessage
from libs.core.langchain_core.outputs import ChatGeneration, ChatResult, LLMResult
from langchain_community.chat_models import ChatGooglePalm
def test_chat_google_palm() -> None:
"""Test Google PaLM Chat API wrapper."""
chat = ChatGooglePalm()
message = HumanMessage(content="Hello")
response = chat([message])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_chat_google_palm_system_message() -> None:
"""Test Google PaLM Chat API wrapper with system message."""
chat = ChatGooglePalm()
system_message = SystemMessage(content="You are to chat with the user.")
human_message = HumanMessage(content="Hello")
response = chat([system_message, human_message])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_chat_google_palm_generate() -> None:
"""Test Google PaLM Chat API wrapper with generate."""
chat = ChatGooglePalm(n=2, temperature=1.0)
message = HumanMessage(content="Hello")
response = chat.generate([[message], [message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 2
for generations in response.generations:
assert len(generations) == 2
for generation in generations:
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.text, str)
assert generation.text == generation.message.content
def test_chat_google_palm_multiple_completions() -> None:
"""Test Google PaLM Chat API wrapper with multiple completions."""
# The API de-dupes duplicate responses, so set temperature higher. This
# could be a flakey test though...
chat = ChatGooglePalm(n=5, temperature=1.0)
message = HumanMessage(content="Hello")
response = chat._generate([message])
assert isinstance(response, ChatResult)
assert len(response.generations) == 5
for generation in response.generations:
assert isinstance(generation.message, BaseMessage)
assert isinstance(generation.message.content, str)
async def test_async_chat_google_palm() -> None:
"""Test async generation."""
chat = ChatGooglePalm(n=2, temperature=1.0)
message = HumanMessage(content="Hello")
response = await chat.agenerate([[message], [message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 2
for generations in response.generations:
assert len(generations) == 2
for generation in generations:
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.text, str)
assert generation.text == generation.message.content
| [
"Hello",
"You are to chat with the user."
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~utilities~steam.py | """Util that calls Steam-WebAPI."""
from typing import Any, List
from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra, root_validator
class SteamWebAPIWrapper(BaseModel):
"""Wrapper for Steam API."""
steam: Any # for python-steam-api
from langchain_community.tools.steam.prompt import (
STEAM_GET_GAMES_DETAILS,
STEAM_GET_RECOMMENDED_GAMES,
)
# operations: a list of dictionaries, each representing a specific operation that
# can be performed with the API
operations: List[dict] = [
{
"mode": "get_game_details",
"name": "Get Game Details",
"description": STEAM_GET_GAMES_DETAILS,
},
{
"mode": "get_recommended_games",
"name": "Get Recommended Games",
"description": STEAM_GET_RECOMMENDED_GAMES,
},
]
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def get_operations(self) -> List[dict]:
"""Return a list of operations."""
return self.operations
@root_validator
def validate_environment(cls, values: dict) -> dict:
"""Validate api key and python package has been configured."""
# check if the python package is installed
try:
from steam import Steam
except ImportError:
raise ImportError("python-steam-api library is not installed. ")
try:
from decouple import config
except ImportError:
raise ImportError("decouple library is not installed. ")
# initialize the steam attribute for python-steam-api usage
KEY = config("STEAM_KEY")
steam = Steam(KEY)
values["steam"] = steam
return values
def parse_to_str(self, details: dict) -> str: # For later parsing
"""Parse the details result."""
result = ""
for key, value in details.items():
result += "The " + str(key) + " is: " + str(value) + "\n"
return result
def get_id_link_price(self, games: dict) -> dict:
"""The response may contain more than one game, so we need to choose the right
one and return the id."""
game_info = {}
for app in games["apps"]:
game_info["id"] = app["id"]
game_info["link"] = app["link"]
game_info["price"] = app["price"]
break
return game_info
def remove_html_tags(self, html_string: str) -> str:
from bs4 import BeautifulSoup
soup = BeautifulSoup(html_string, "html.parser")
return soup.get_text()
def details_of_games(self, name: str) -> str:
games = self.steam.apps.search_games(name)
info_partOne_dict = self.get_id_link_price(games)
info_partOne = self.parse_to_str(info_partOne_dict)
id = str(info_partOne_dict.get("id"))
info_dict = self.steam.apps.get_app_details(id)
data = info_dict.get(id).get("data")
detailed_description = data.get("detailed_description")
# detailed_description contains <li> <br> some other html tags, so we need to
# remove them
detailed_description = self.remove_html_tags(detailed_description)
supported_languages = info_dict.get(id).get("data").get("supported_languages")
info_partTwo = (
"The summary of the game is: "
+ detailed_description
+ "\n"
+ "The supported languages of the game are: "
+ supported_languages
+ "\n"
)
info = info_partOne + info_partTwo
return info
def get_steam_id(self, name: str) -> str:
user = self.steam.users.search_user(name)
steam_id = user["player"]["steamid"]
return steam_id
    def get_users_games(self, steam_id: str) -> dict:
return self.steam.users.get_owned_games(steam_id, False, False)
def recommended_games(self, steam_id: str) -> str:
try:
import steamspypi
except ImportError:
raise ImportError("steamspypi library is not installed.")
users_games = self.get_users_games(steam_id)
result = {} # type: ignore
most_popular_genre = ""
most_popular_genre_count = 0
for game in users_games["games"]: # type: ignore
appid = game["appid"]
data_request = {"request": "appdetails", "appid": appid}
genreStore = steamspypi.download(data_request)
genreList = genreStore.get("genre", "").split(", ")
for genre in genreList:
if genre in result:
result[genre] += 1
else:
result[genre] = 1
if result[genre] > most_popular_genre_count:
most_popular_genre_count = result[genre]
most_popular_genre = genre
data_request = dict()
data_request["request"] = "genre"
data_request["genre"] = most_popular_genre
data = steamspypi.download(data_request)
sorted_data = sorted(
data.values(), key=lambda x: x.get("average_forever", 0), reverse=True
)
owned_games = [game["appid"] for game in users_games["games"]] # type: ignore
remaining_games = [
game for game in sorted_data if game["appid"] not in owned_games
]
top_5_popular_not_owned = [game["name"] for game in remaining_games[:5]]
return str(top_5_popular_not_owned)
def run(self, mode: str, game: str) -> str:
if mode == "get_games_details":
return self.details_of_games(game)
elif mode == "get_recommended_games":
return self.recommended_games(game)
else:
raise ValueError(f"Invalid mode {mode} for Steam API.")
| [] |
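A hedged sketch of the wrapper above; it needs `python-steam-api`, `python-decouple`, `steamspypi`, and a `STEAM_KEY` readable by decouple, and the game title and Steam username are illustrative.

from langchain_community.utilities.steam import SteamWebAPIWrapper

steam = SteamWebAPIWrapper()
print(steam.details_of_games("Terraria"))

steam_id = steam.get_steam_id("some_steam_username")
print(steam.recommended_games(steam_id))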
2024-01-10 | mth93/langchain | libs~community~langchain_community~tools~playwright~extract_text.py | from __future__ import annotations
from typing import Optional, Type
from libs.core.langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from libs.core.langchain_core.pydantic_v1 import BaseModel, root_validator
from langchain_community.tools.playwright.base import BaseBrowserTool
from langchain_community.tools.playwright.utils import (
aget_current_page,
get_current_page,
)
class ExtractTextTool(BaseBrowserTool):
"""Tool for extracting all the text on the current webpage."""
name: str = "extract_text"
description: str = "Extract all the text on the current webpage"
args_schema: Type[BaseModel] = BaseModel
@root_validator
    def check_bs_import(cls, values: dict) -> dict:
"""Check that the arguments are valid."""
try:
from bs4 import BeautifulSoup # noqa: F401
except ImportError:
raise ImportError(
"The 'beautifulsoup4' package is required to use this tool."
" Please install it with 'pip install beautifulsoup4'."
)
return values
def _run(self, run_manager: Optional[CallbackManagerForToolRun] = None) -> str:
"""Use the tool."""
# Use Beautiful Soup since it's faster than looping through the elements
from bs4 import BeautifulSoup
if self.sync_browser is None:
raise ValueError(f"Synchronous browser not provided to {self.name}")
page = get_current_page(self.sync_browser)
html_content = page.content()
# Parse the HTML content with BeautifulSoup
soup = BeautifulSoup(html_content, "lxml")
return " ".join(text for text in soup.stripped_strings)
async def _arun(
self, run_manager: Optional[AsyncCallbackManagerForToolRun] = None
) -> str:
"""Use the tool."""
if self.async_browser is None:
raise ValueError(f"Asynchronous browser not provided to {self.name}")
# Use Beautiful Soup since it's faster than looping through the elements
from bs4 import BeautifulSoup
page = await aget_current_page(self.async_browser)
html_content = await page.content()
# Parse the HTML content with BeautifulSoup
soup = BeautifulSoup(html_content, "lxml")
return " ".join(text for text in soup.stripped_strings)
| [
"Extract all the text on the current webpage"
] |
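A hedged sketch of the tool above combined with the navigate tool from the same package; it assumes `playwright`, `beautifulsoup4`, and `lxml` are installed, a browser binary is available via `playwright install`, and the URL is illustrative.

from langchain_community.tools.playwright.extract_text import ExtractTextTool
from langchain_community.tools.playwright.navigate import NavigateTool
from langchain_community.tools.playwright.utils import create_sync_playwright_browser

browser = create_sync_playwright_browser()
NavigateTool.from_browser(sync_browser=browser).run({"url": "https://example.com"})
print(ExtractTextTool.from_browser(sync_browser=browser).run({}))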
2024-01-10 | mth93/langchain | libs~community~langchain_community~chat_models~konko.py | """KonkoAI chat wrapper."""
from __future__ import annotations
import logging
import os
from typing import (
Any,
Dict,
Iterator,
List,
Mapping,
Optional,
Set,
Tuple,
Union,
)
import requests
from libs.core.langchain_core.callbacks import (
CallbackManagerForLLMRun,
)
from libs.core.langchain_core.language_models.chat_models import (
BaseChatModel,
generate_from_stream,
)
from libs.core.langchain_core.messages import AIMessageChunk, BaseMessage
from libs.core.langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from libs.core.langchain_core.pydantic_v1 import Field, root_validator
from libs.core.langchain_core.utils import get_from_dict_or_env
from langchain_community.adapters.openai import (
convert_dict_to_message,
convert_message_to_dict,
)
from langchain_community.chat_models.openai import _convert_delta_to_message_chunk
DEFAULT_API_BASE = "https://api.konko.ai/v1"
DEFAULT_MODEL = "meta-llama/Llama-2-13b-chat-hf"
logger = logging.getLogger(__name__)
class ChatKonko(BaseChatModel):
"""`ChatKonko` Chat large language models API.
To use, you should have the ``konko`` python package installed, and the
    environment variables ``KONKO_API_KEY`` and ``OPENAI_API_KEY`` set with your API keys.
Any parameters that are valid to be passed to the konko.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain_community.chat_models import ChatKonko
llm = ChatKonko(model="meta-llama/Llama-2-13b-chat-hf")
"""
@property
def lc_secrets(self) -> Dict[str, str]:
return {"konko_api_key": "KONKO_API_KEY", "openai_api_key": "OPENAI_API_KEY"}
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this model can be serialized by Langchain."""
return False
client: Any = None #: :meta private:
model: str = Field(default=DEFAULT_MODEL, alias="model")
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
openai_api_key: Optional[str] = None
konko_api_key: Optional[str] = None
request_timeout: Optional[Union[float, Tuple[float, float]]] = None
"""Timeout for requests to Konko completion API."""
max_retries: int = 6
"""Maximum number of retries to make when generating."""
streaming: bool = False
"""Whether to stream the results or not."""
n: int = 1
"""Number of chat completions to generate for each prompt."""
max_tokens: int = 20
"""Maximum number of tokens to generate."""
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["konko_api_key"] = get_from_dict_or_env(
values, "konko_api_key", "KONKO_API_KEY"
)
try:
import konko
except ImportError:
raise ValueError(
"Could not import konko python package. "
"Please install it with `pip install konko`."
)
try:
values["client"] = konko.ChatCompletion
except AttributeError:
raise ValueError(
"`konko` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the konko package. Try upgrading it "
"with `pip install --upgrade konko`."
)
if values["n"] < 1:
raise ValueError("n must be at least 1.")
if values["n"] > 1 and values["streaming"]:
raise ValueError("n must be 1 when streaming.")
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Konko API."""
return {
"model": self.model,
"request_timeout": self.request_timeout,
"max_tokens": self.max_tokens,
"stream": self.streaming,
"n": self.n,
"temperature": self.temperature,
**self.model_kwargs,
}
@staticmethod
def get_available_models(
konko_api_key: Optional[str] = None,
openai_api_key: Optional[str] = None,
konko_api_base: str = DEFAULT_API_BASE,
) -> Set[str]:
"""Get available models from Konko API."""
# Try to retrieve the OpenAI API key if it's not passed as an argument
if not openai_api_key:
try:
openai_api_key = os.environ["OPENAI_API_KEY"]
except KeyError:
pass # It's okay if it's not set, we just won't use it
# Try to retrieve the Konko API key if it's not passed as an argument
if not konko_api_key:
try:
konko_api_key = os.environ["KONKO_API_KEY"]
except KeyError:
raise ValueError(
"Konko API key must be passed as keyword argument or "
"set in environment variable KONKO_API_KEY."
)
models_url = f"{konko_api_base}/models"
headers = {
"Authorization": f"Bearer {konko_api_key}",
}
if openai_api_key:
headers["X-OpenAI-Api-Key"] = openai_api_key
models_response = requests.get(models_url, headers=headers)
if models_response.status_code != 200:
raise ValueError(
f"Error getting models from {models_url}: "
f"{models_response.status_code}"
)
return {model["id"] for model in models_response.json()["data"]}
def completion_with_retry(
self, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any
) -> Any:
def _completion_with_retry(**kwargs: Any) -> Any:
return self.client.create(**kwargs)
return _completion_with_retry(**kwargs)
def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
overall_token_usage: dict = {}
for output in llm_outputs:
if output is None:
# Happens in streaming
continue
token_usage = output["token_usage"]
for k, v in token_usage.items():
if k in overall_token_usage:
overall_token_usage[k] += v
else:
overall_token_usage[k] = v
return {"token_usage": overall_token_usage, "model_name": self.model}
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
default_chunk_class = AIMessageChunk
for chunk in self.completion_with_retry(
messages=message_dicts, run_manager=run_manager, **params
):
if len(chunk["choices"]) == 0:
continue
choice = chunk["choices"][0]
chunk = _convert_delta_to_message_chunk(
choice["delta"], default_chunk_class
)
finish_reason = choice.get("finish_reason")
generation_info = (
dict(finish_reason=finish_reason) if finish_reason is not None else None
)
default_chunk_class = chunk.__class__
chunk = ChatGenerationChunk(message=chunk, generation_info=generation_info)
yield chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = self.completion_with_retry(
messages=message_dicts, run_manager=run_manager, **params
)
return self._create_chat_result(response)
def _create_message_dicts(
self, messages: List[BaseMessage], stop: Optional[List[str]]
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
params = self._client_params
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
message_dicts = [convert_message_to_dict(m) for m in messages]
return message_dicts, params
def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
generations = []
for res in response["choices"]:
message = convert_dict_to_message(res["message"])
gen = ChatGeneration(
message=message,
generation_info=dict(finish_reason=res.get("finish_reason")),
)
generations.append(gen)
token_usage = response.get("usage", {})
llm_output = {"token_usage": token_usage, "model_name": self.model}
return ChatResult(generations=generations, llm_output=llm_output)
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model}, **self._default_params}
@property
def _client_params(self) -> Dict[str, Any]:
"""Get the parameters used for the konko client."""
return {**self._default_params}
def _get_invocation_params(
self, stop: Optional[List[str]] = None, **kwargs: Any
) -> Dict[str, Any]:
"""Get the parameters used to invoke the model."""
return {
"model": self.model,
**super()._get_invocation_params(stop=stop),
**self._default_params,
**kwargs,
}
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "konko-chat"
| [] |
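A hedged sketch for the model-listing helper above; it assumes `KONKO_API_KEY` (and optionally `OPENAI_API_KEY`) are set in the environment.

from langchain_community.chat_models import ChatKonko

available = ChatKonko.get_available_models()
print(sorted(available))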
2024-01-10 | mth93/langchain | libs~langchain~langchain~chains~graph_qa~cypher.py | """Question answering over a graph."""
from __future__ import annotations
import re
from typing import Any, Dict, List, Optional
from libs.core.langchain_core.language_models import BaseLanguageModel
from libs.core.langchain_core.prompts import BasePromptTemplate
from libs.core.langchain_core.pydantic_v1 import Field
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.graph_qa.cypher_utils import CypherQueryCorrector, Schema
from langchain.chains.graph_qa.prompts import CYPHER_GENERATION_PROMPT, CYPHER_QA_PROMPT
from langchain.chains.llm import LLMChain
from langchain.graphs.graph_store import GraphStore
INTERMEDIATE_STEPS_KEY = "intermediate_steps"
def extract_cypher(text: str) -> str:
"""Extract Cypher code from a text.
Args:
text: Text to extract Cypher code from.
Returns:
Cypher code extracted from the text.
"""
# The pattern to find Cypher code enclosed in triple backticks
pattern = r"```(.*?)```"
# Find all matches in the input text
matches = re.findall(pattern, text, re.DOTALL)
return matches[0] if matches else text
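# Illustrative behavior (not part of the module): with the pattern above,
#   extract_cypher("Query:\n```cypher\nMATCH (n) RETURN n LIMIT 1\n```")
# returns "cypher\nMATCH (n) RETURN n LIMIT 1\n", i.e. everything between the
# first pair of triple backticks, language tag included; text without
# backticks is returned unchanged.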
def construct_schema(
structured_schema: Dict[str, Any],
include_types: List[str],
exclude_types: List[str],
) -> str:
"""Filter the schema based on included or excluded types"""
def filter_func(x: str) -> bool:
return x in include_types if include_types else x not in exclude_types
filtered_schema: Dict[str, Any] = {
"node_props": {
k: v
for k, v in structured_schema.get("node_props", {}).items()
if filter_func(k)
},
"rel_props": {
k: v
for k, v in structured_schema.get("rel_props", {}).items()
if filter_func(k)
},
"relationships": [
r
for r in structured_schema.get("relationships", [])
if all(filter_func(r[t]) for t in ["start", "end", "type"])
],
}
# Format node properties
formatted_node_props = []
for label, properties in filtered_schema["node_props"].items():
props_str = ", ".join(
[f"{prop['property']}: {prop['type']}" for prop in properties]
)
formatted_node_props.append(f"{label} {{{props_str}}}")
# Format relationship properties
formatted_rel_props = []
for rel_type, properties in filtered_schema["rel_props"].items():
props_str = ", ".join(
[f"{prop['property']}: {prop['type']}" for prop in properties]
)
formatted_rel_props.append(f"{rel_type} {{{props_str}}}")
# Format relationships
formatted_rels = [
f"(:{el['start']})-[:{el['type']}]->(:{el['end']})"
for el in filtered_schema["relationships"]
]
return "\n".join(
[
"Node properties are the following:",
",".join(formatted_node_props),
"Relationship properties are the following:",
",".join(formatted_rel_props),
"The relationships are the following:",
",".join(formatted_rels),
]
)
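# An illustrative sketch (added for clarity, not part of the original module) of the
# structured-schema mapping construct_schema consumes. The labels and properties are
# hypothetical, but the key layout mirrors what the function reads above.
def _construct_schema_example() -> str:
    structured_schema = {
        "node_props": {
            "Person": [{"property": "name", "type": "STRING"}],
            "Movie": [{"property": "title", "type": "STRING"}],
        },
        "rel_props": {"ACTED_IN": [{"property": "role", "type": "STRING"}]},
        "relationships": [{"start": "Person", "type": "ACTED_IN", "end": "Movie"}],
    }
    # With no include/exclude filters every element is kept.
    schema_str = construct_schema(structured_schema, [], [])
    assert "Person {name: STRING}" in schema_str
    assert "(:Person)-[:ACTED_IN]->(:Movie)" in schema_str
    # Excluding "Movie" drops its node properties and any relationship touching it.
    filtered = construct_schema(structured_schema, [], ["Movie"])
    assert "Movie {title: STRING}" not in filtered
    assert "(:Person)-[:ACTED_IN]->(:Movie)" not in filtered
    return schema_str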
class GraphCypherQAChain(Chain):
"""Chain for question-answering against a graph by generating Cypher statements.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion or mutation
of data if appropriately prompted, or in reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
graph: GraphStore = Field(exclude=True)
cypher_generation_chain: LLMChain
qa_chain: LLMChain
graph_schema: str
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
top_k: int = 10
"""Number of results to return from the query"""
return_intermediate_steps: bool = False
"""Whether or not to return the intermediate steps along with the final answer."""
return_direct: bool = False
"""Whether or not to return the result of querying the graph directly."""
cypher_query_corrector: Optional[CypherQueryCorrector] = None
"""Optional cypher validation tool"""
@property
def input_keys(self) -> List[str]:
"""Return the input keys.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return the output keys.
:meta private:
"""
_output_keys = [self.output_key]
return _output_keys
@property
def _chain_type(self) -> str:
return "graph_cypher_chain"
@classmethod
def from_llm(
cls,
llm: Optional[BaseLanguageModel] = None,
*,
qa_prompt: Optional[BasePromptTemplate] = None,
cypher_prompt: Optional[BasePromptTemplate] = None,
cypher_llm: Optional[BaseLanguageModel] = None,
qa_llm: Optional[BaseLanguageModel] = None,
exclude_types: List[str] = [],
include_types: List[str] = [],
validate_cypher: bool = False,
qa_llm_kwargs: Optional[Dict[str, Any]] = None,
cypher_llm_kwargs: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> GraphCypherQAChain:
"""Initialize from LLM."""
if not cypher_llm and not llm:
raise ValueError("Either `llm` or `cypher_llm` parameters must be provided")
if not qa_llm and not llm:
raise ValueError("Either `llm` or `qa_llm` parameters must be provided")
if cypher_llm and qa_llm and llm:
raise ValueError(
"You can specify up to two of 'cypher_llm', 'qa_llm'"
", and 'llm', but not all three simultaneously."
)
if cypher_prompt and cypher_llm_kwargs:
raise ValueError(
"Specifying cypher_prompt and cypher_llm_kwargs together is"
" not allowed. Please pass prompt via cypher_llm_kwargs."
)
if qa_prompt and qa_llm_kwargs:
raise ValueError(
"Specifying qa_prompt and qa_llm_kwargs together is"
" not allowed. Please pass prompt via qa_llm_kwargs."
)
use_qa_llm_kwargs = qa_llm_kwargs if qa_llm_kwargs is not None else {}
use_cypher_llm_kwargs = (
cypher_llm_kwargs if cypher_llm_kwargs is not None else {}
)
if "prompt" not in use_qa_llm_kwargs:
use_qa_llm_kwargs["prompt"] = (
qa_prompt if qa_prompt is not None else CYPHER_QA_PROMPT
)
if "prompt" not in use_cypher_llm_kwargs:
use_cypher_llm_kwargs["prompt"] = (
cypher_prompt if cypher_prompt is not None else CYPHER_GENERATION_PROMPT
)
qa_chain = LLMChain(llm=qa_llm or llm, **use_qa_llm_kwargs)
cypher_generation_chain = LLMChain(
llm=cypher_llm or llm, **use_cypher_llm_kwargs
)
if exclude_types and include_types:
raise ValueError(
"Either `exclude_types` or `include_types` "
"can be provided, but not both"
)
graph_schema = construct_schema(
kwargs["graph"].get_structured_schema, include_types, exclude_types
)
cypher_query_corrector = None
if validate_cypher:
corrector_schema = [
Schema(el["start"], el["type"], el["end"])
for el in kwargs["graph"].structured_schema.get("relationships")
]
cypher_query_corrector = CypherQueryCorrector(corrector_schema)
return cls(
graph_schema=graph_schema,
qa_chain=qa_chain,
cypher_generation_chain=cypher_generation_chain,
cypher_query_corrector=cypher_query_corrector,
**kwargs,
)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Generate Cypher statement, use it to look up in db and answer question."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
question = inputs[self.input_key]
intermediate_steps: List = []
generated_cypher = self.cypher_generation_chain.run(
{"question": question, "schema": self.graph_schema}, callbacks=callbacks
)
# Extract Cypher code if it is wrapped in backticks
generated_cypher = extract_cypher(generated_cypher)
# Correct Cypher query if enabled
if self.cypher_query_corrector:
generated_cypher = self.cypher_query_corrector(generated_cypher)
_run_manager.on_text("Generated Cypher:", end="\n", verbose=self.verbose)
_run_manager.on_text(
generated_cypher, color="green", end="\n", verbose=self.verbose
)
intermediate_steps.append({"query": generated_cypher})
# Retrieve and limit the number of results
# The generated Cypher can be empty if the query corrector identifies an invalid schema
if generated_cypher:
context = self.graph.query(generated_cypher)[: self.top_k]
else:
context = []
if self.return_direct:
final_result = context
else:
_run_manager.on_text("Full Context:", end="\n", verbose=self.verbose)
_run_manager.on_text(
str(context), color="green", end="\n", verbose=self.verbose
)
intermediate_steps.append({"context": context})
result = self.qa_chain(
{"question": question, "context": context},
callbacks=callbacks,
)
final_result = result[self.qa_chain.output_key]
chain_result: Dict[str, Any] = {self.output_key: final_result}
if self.return_intermediate_steps:
chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps
return chain_result
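# A hedged usage sketch (added for illustration, not part of the original module).
# It assumes a running Neo4j instance reachable via langchain.graphs.Neo4jGraph and
# uses ChatOpenAI as a placeholder LLM; the connection details, model, and question
# are hypothetical.
def _graph_cypher_qa_example() -> None:
    from langchain.chat_models import ChatOpenAI
    from langchain.graphs import Neo4jGraph

    graph = Neo4jGraph(
        url="bolt://localhost:7687", username="neo4j", password="password"
    )
    chain = GraphCypherQAChain.from_llm(
        llm=ChatOpenAI(temperature=0),
        graph=graph,
        validate_cypher=True,  # wires in the CypherQueryCorrector relationship checks
        return_intermediate_steps=True,
    )
    result = chain.invoke({"query": "Who acted in Casino?"})
    # "result" holds the answer; the intermediate steps expose the generated Cypher.
    print(result["result"])
    print(result[INTERMEDIATE_STEPS_KEY])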
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~unit_tests~document_loaders~test_json_loader.py | import io
from typing import Any, Dict
import pytest
from libs.core.langchain_core.documents import Document
from pytest import raises
from pytest_mock import MockerFixture
from langchain_community.document_loaders.json_loader import JSONLoader
pytestmark = pytest.mark.requires("jq")
def test_load_valid_string_content(mocker: MockerFixture) -> None:
file_path = "/workspaces/langchain/test.json"
expected_docs = [
Document(
page_content="value1",
metadata={"source": file_path, "seq_num": 1},
),
Document(
page_content="value2",
metadata={"source": file_path, "seq_num": 2},
),
]
mocker.patch("builtins.open", mocker.mock_open())
mocker.patch(
"pathlib.Path.read_text",
return_value='[{"text": "value1"}, {"text": "value2"}]',
)
loader = JSONLoader(file_path=file_path, jq_schema=".[].text", text_content=True)
result = loader.load()
assert result == expected_docs
def test_load_valid_dict_content(mocker: MockerFixture) -> None:
file_path = "/workspaces/langchain/test.json"
expected_docs = [
Document(
page_content='{"text": "value1"}',
metadata={"source": file_path, "seq_num": 1},
),
Document(
page_content='{"text": "value2"}',
metadata={"source": file_path, "seq_num": 2},
),
]
mocker.patch("builtins.open", mocker.mock_open())
mocker.patch(
"pathlib.Path.read_text",
return_value="""
[{"text": "value1"}, {"text": "value2"}]
""",
)
loader = JSONLoader(file_path=file_path, jq_schema=".[]", text_content=False)
result = loader.load()
assert result == expected_docs
def test_load_valid_bool_content(mocker: MockerFixture) -> None:
file_path = "/workspaces/langchain/test.json"
expected_docs = [
Document(
page_content="False",
metadata={"source": file_path, "seq_num": 1},
),
Document(
page_content="True",
metadata={"source": file_path, "seq_num": 2},
),
]
mocker.patch("builtins.open", mocker.mock_open())
mocker.patch(
"pathlib.Path.read_text",
return_value="""
[
{"flag": false}, {"flag": true}
]
""",
)
loader = JSONLoader(file_path=file_path, jq_schema=".[].flag", text_content=False)
result = loader.load()
assert result == expected_docs
def test_load_valid_numeric_content(mocker: MockerFixture) -> None:
file_path = "/workspaces/langchain/test.json"
expected_docs = [
Document(
page_content="99",
metadata={"source": file_path, "seq_num": 1},
),
Document(
page_content="99.5",
metadata={"source": file_path, "seq_num": 2},
),
]
mocker.patch("builtins.open", mocker.mock_open())
mocker.patch(
"pathlib.Path.read_text",
return_value="""
[
{"num": 99}, {"num": 99.5}
]
""",
)
loader = JSONLoader(file_path=file_path, jq_schema=".[].num", text_content=False)
result = loader.load()
assert result == expected_docs
def test_load_invalid_text_content(mocker: MockerFixture) -> None:
file_path = "/workspaces/langchain/test.json"
mocker.patch("builtins.open", mocker.mock_open())
mocker.patch(
"pathlib.Path.read_text",
return_value="""
[{"text": "value1"}, {"text": "value2"}]
""",
)
loader = JSONLoader(file_path=file_path, jq_schema=".[]", text_content=True)
with raises(ValueError):
loader.load()
def test_load_jsonlines(mocker: MockerFixture) -> None:
file_path = "/workspaces/langchain/test.json"
expected_docs = [
Document(
page_content="value1",
metadata={"source": file_path, "seq_num": 1},
),
Document(
page_content="value2",
metadata={"source": file_path, "seq_num": 2},
),
]
mocker.patch(
"pathlib.Path.open",
return_value=io.StringIO(
"""
{"text": "value1"}
{"text": "value2"}
"""
),
)
loader = JSONLoader(
file_path=file_path, jq_schema=".", content_key="text", json_lines=True
)
result = loader.load()
assert result == expected_docs
@pytest.mark.parametrize(
"params",
(
{"jq_schema": ".[].text"},
{"jq_schema": ".[]", "content_key": "text"},
),
)
def test_load_jsonlines_list(params: Dict, mocker: MockerFixture) -> None:
file_path = "/workspaces/langchain/test.json"
expected_docs = [
Document(
page_content="value1",
metadata={"source": file_path, "seq_num": 1},
),
Document(
page_content="value2",
metadata={"source": file_path, "seq_num": 2},
),
Document(
page_content="value3",
metadata={"source": file_path, "seq_num": 3},
),
Document(
page_content="value4",
metadata={"source": file_path, "seq_num": 4},
),
]
mocker.patch(
"pathlib.Path.open",
return_value=io.StringIO(
"""
[{"text": "value1"}, {"text": "value2"}]
[{"text": "value3"}, {"text": "value4"}]
"""
),
)
loader = JSONLoader(file_path=file_path, json_lines=True, **params)
result = loader.load()
assert result == expected_docs
def test_load_empty_jsonlines(mocker: MockerFixture) -> None:
mocker.patch("pathlib.Path.open", return_value=io.StringIO(""))
loader = JSONLoader(file_path="file_path", jq_schema=".[].text", json_lines=True)
result = loader.load()
assert result == []
@pytest.mark.parametrize(
"patch_func,patch_func_value,kwargs",
(
# JSON content.
(
"pathlib.Path.read_text",
'[{"text": "value1"}, {"text": "value2"}]',
{"jq_schema": ".[]", "content_key": "text"},
),
# JSON Lines content.
(
"pathlib.Path.open",
io.StringIO(
"""
{"text": "value1"}
{"text": "value2"}
"""
),
{"jq_schema": ".", "content_key": "text", "json_lines": True},
),
),
)
def test_json_meta_01(
patch_func: str, patch_func_value: Any, kwargs: Dict, mocker: MockerFixture
) -> None:
mocker.patch("builtins.open", mocker.mock_open())
mocker.patch(patch_func, return_value=patch_func_value)
file_path = "/workspaces/langchain/test.json"
expected_docs = [
Document(
page_content="value1",
metadata={"source": file_path, "seq_num": 1, "x": "value1-meta"},
),
Document(
page_content="value2",
metadata={"source": file_path, "seq_num": 2, "x": "value2-meta"},
),
]
def metadata_func(record: Dict, metadata: Dict) -> Dict:
metadata["x"] = f"{record['text']}-meta"
return metadata
loader = JSONLoader(file_path=file_path, metadata_func=metadata_func, **kwargs)
result = loader.load()
assert result == expected_docs
@pytest.mark.parametrize(
"patch_func,patch_func_value,kwargs",
(
# JSON content.
(
"pathlib.Path.read_text",
'[{"text": "value1"}, {"text": "value2"}]',
{"jq_schema": ".[]", "content_key": "text"},
),
# JSON Lines content.
(
"pathlib.Path.open",
io.StringIO(
"""
{"text": "value1"}
{"text": "value2"}
"""
),
{"jq_schema": ".", "content_key": "text", "json_lines": True},
),
),
)
def test_json_meta_02(
patch_func: str, patch_func_value: Any, kwargs: Dict, mocker: MockerFixture
) -> None:
mocker.patch("builtins.open", mocker.mock_open())
mocker.patch(patch_func, return_value=patch_func_value)
file_path = "/workspaces/langchain/test.json"
expected_docs = [
Document(
page_content="value1",
metadata={"source": file_path, "seq_num": 1, "x": "value1-meta"},
),
Document(
page_content="value2",
metadata={"source": file_path, "seq_num": 2, "x": "value2-meta"},
),
]
def metadata_func(record: Dict, metadata: Dict) -> Dict:
return {**metadata, "x": f"{record['text']}-meta"}
loader = JSONLoader(file_path=file_path, metadata_func=metadata_func, **kwargs)
result = loader.load()
assert result == expected_docs
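# A hedged usage sketch (added for illustration, not exercised by the mocked tests
# above): pointing JSONLoader at a real file. The path, jq schema, and field names
# are hypothetical; metadata_func follows the same pattern as test_json_meta_01.
def _json_loader_usage_sketch() -> None:
    def metadata_func(record: Dict, metadata: Dict) -> Dict:
        # Copy a field from each jq record into the document metadata.
        metadata["author"] = record.get("author", "unknown")
        return metadata

    loader = JSONLoader(
        file_path="./data/posts.json",  # hypothetical file
        jq_schema=".posts[]",  # iterate over a top-level "posts" array
        content_key="body",  # each record's "body" field becomes page_content
        metadata_func=metadata_func,
    )
    docs = loader.load()
    assert all(isinstance(doc, Document) for doc in docs)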
| ["text"] |