Source code for langchain.vectorstores.typesense
"""Wrapper around Typesense vector search"""
from __future__ import annotations
import uuid
from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple, Union
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_env
from langchain.vectorstores.base import VectorStore
if TYPE_CHECKING:
from typesense.client import Client
from typesense.collection import Collection
class Typesense(VectorStore):
"""Wrapper around Typesense vector search.
To use, you should have the ``typesense`` python package installed.
Example:
.. code-block:: python
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Typesense
import typesense
node = {
"host": "localhost", # For Typesense Cloud use xxx.a1.typesense.net
"port": "8108", # For Typesense Cloud use 443
"protocol": "http" # For Typesense Cloud use https
}
typesense_client = typesense.Client(
{
"nodes": [node],
"api_key": "<API_KEY>",
"connection_timeout_seconds": 2
}
)
typesense_collection_name = "langchain-memory"
embedding = OpenAIEmbeddings()
vectorstore = Typesense(
typesense_client,
typesense_collection_name,
embedding.embed_query,
"text",
)
"""
def __init__(
self,
typesense_client: Client,
embedding: Embeddings,
*,
typesense_collection_name: Optional[str] = None,
text_key: str = "text",
):
"""Initialize with Typesense client."""
try:
from typesense import Client
except ImportError:
raise ValueError(
"Could not import typesense python package. "
"Please install it with `pip install typesense`."
)
if not isinstance(typesense_client, Client):
raise ValueError(
f"typesense_client should be an instance of typesense.Client, "
f"got {type(typesense_client)}"
)
self._typesense_client = typesense_client
self._embedding = embedding
self._typesense_collection_name = (
typesense_collection_name or f"langchain-{str(uuid.uuid4())}"
)
self._text_key = text_key
@property
def _collection(self) -> Collection:
return self._typesense_client.collections[self._typesense_collection_name]
def _prep_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]],
ids: Optional[List[str]],
) -> List[dict]:
"""Embed and create the documents"""
_ids = ids or (str(uuid.uuid4()) for _ in texts)
_metadatas: Iterable[dict] = metadatas or ({} for _ in texts)
embedded_texts = self._embedding.embed_documents(list(texts))
return [
{"id": _id, "vec": vec, f"{self._text_key}": text, "metadata": metadata}
for _id, vec, text, metadata in zip(_ids, embedded_texts, texts, _metadatas)
]
def _create_collection(self, num_dim: int) -> None:
fields = [
{"name": "vec", "type": "float[]", "num_dim": num_dim},
{"name": f"{self._text_key}", "type": "string"},
{"name": ".*", "type": "auto"},
]
self._typesense_client.collections.create(
{"name": self._typesense_collection_name, "fields": fields}
)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embedding and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids to associate with the texts.
Returns:
List of ids from adding the texts into the vectorstore.
"""
from typesense.exceptions import ObjectNotFound
docs = self._prep_texts(texts, metadatas, ids)
try:
self._collection.documents.import_(docs, {"action": "upsert"})
except ObjectNotFound:
# Create the collection if it doesn't already exist
self._create_collection(len(docs[0]["vec"]))
self._collection.documents.import_(docs, {"action": "upsert"})
return [doc["id"] for doc in docs]
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[str] = "",
) -> List[Tuple[Document, float]]:
"""Return typesense documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: typesense filter_by expression to filter documents on
Returns:
List of Documents most similar to the query and score for each
"""
embedded_query = [str(x) for x in self._embedding.embed_query(query)]
query_obj = {
"q": "*",
"vector_query": f'vec:([{",".join(embedded_query)}], k:{k})',
"filter_by": filter,
"collection": self._typesense_collection_name,
}
docs = []
response = self._typesense_client.multi_search.perform(
{"searches": [query_obj]}, {}
)
for hit in response["results"][0]["hits"]:
document = hit["document"]
metadata = document["metadata"]
text = document[self._text_key]
score = hit["vector_distance"]
docs.append((Document(page_content=text, metadata=metadata), score))
return docs
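# Note: the `filter` argument above is forwarded verbatim as a Typesense
# `filter_by` expression, e.g. filter="num_views:>100" (field name
# illustrative; see the Typesense docs for the full filter_by syntax).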
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[str] = "",
**kwargs: Any,
) -> List[Document]:
"""Return typesense documents most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: typesense filter_by expression to filter documents on
Returns:
List of Documents most similar to the query and score for each
"""
docs_and_score = self.similarity_search_with_score(query, k=k, filter=filter)
return [doc for doc, _ in docs_and_score]
@classmethod
def from_client_params(
cls,
embedding: Embeddings,
*,
host: str = "localhost",
port: Union[str, int] = "8108",
protocol: str = "http",
typesense_api_key: Optional[str] = None,
connection_timeout_seconds: int = 2,
**kwargs: Any,
) -> Typesense:
"""Initialize Typesense directly from client parameters.
Example:
.. code-block:: python
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Typesense
# Pass in typesense_api_key as kwarg or set env var "TYPESENSE_API_KEY".
vectorstore = Typesense.from_client_params(
OpenAIEmbeddings(),
host="localhost",
port="8108",
protocol="http",
typesense_collection_name="langchain-memory",
)
"""
try:
from typesense import Client
except ImportError:
raise ValueError(
"Could not import typesense python package. "
"Please install it with `pip install typesense`."
)
node = {
"host": host,
"port": str(port),
"protocol": protocol,
}
typesense_api_key = typesense_api_key or get_from_env(
"typesense_api_key", "TYPESENSE_API_KEY"
)
client_config = {
"nodes": [node],
"api_key": typesense_api_key,
"connection_timeout_seconds": connection_timeout_seconds,
}
return cls(Client(client_config), embedding, **kwargs)
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
typesense_client: Optional[Client] = None,
typesense_client_params: Optional[dict] = None,
typesense_collection_name: Optional[str] = None,
text_key: str = "text",
**kwargs: Any,
) -> Typesense:
"""Construct Typesense wrapper from raw text."""
if typesense_client:
vectorstore = cls(typesense_client, embedding, **kwargs)
elif typesense_client_params:
vectorstore = cls.from_client_params(
embedding, **typesense_client_params, **kwargs
)
else:
raise ValueError(
"Must specify one of typesense_client or typesense_client_params."
)
vectorstore.add_texts(texts, metadatas=metadatas, ids=ids)
return vectorstore
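A minimal end-to-end sketch of the wrapper above (assumptions: a Typesense server on localhost:8108, an OpenAI API key in the environment, and an illustrative collection name):
.. code-block:: python
    from langchain.embeddings.openai import OpenAIEmbeddings
    from langchain.vectorstores import Typesense

    # Build the client from parameters and index two tiny documents.
    docsearch = Typesense.from_texts(
        ["hello world", "goodbye world"],
        OpenAIEmbeddings(),
        typesense_client_params={
            "host": "localhost",
            "port": "8108",
            "protocol": "http",
            "typesense_api_key": "<API_KEY>",
            "typesense_collection_name": "langchain-demo",
        },
    )
    docs = docsearch.similarity_search("hello", k=1)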
Source code for langchain.vectorstores.singlestoredb
"""Wrapper around SingleStore DB."""
from __future__ import annotations
import json
from typing import (
Any,
ClassVar,
Collection,
Iterable,
List,
Optional,
Tuple,
Type,
)
from sqlalchemy.pool import QueuePool
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore, VectorStoreRetriever
class SingleStoreDB(VectorStore):
"""
This class serves as a Pythonic interface to the SingleStore DB database.
The prerequisite for using this class is the installation of the ``singlestoredb``
Python package.
The SingleStoreDB vectorstore can be created by providing an embedding function and
the relevant parameters for the database connection, connection pool, and
optionally, the names of the table and the fields to use.
"""
def _get_connection(self: SingleStoreDB) -> Any:
try:
import singlestoredb as s2
except ImportError:
raise ImportError(
"Could not import singlestoredb python package. "
"Please install it with `pip install singlestoredb`."
)
return s2.connect(**self.connection_kwargs)
def __init__(
self,
embedding: Embeddings,
*,
table_name: str = "embeddings",
content_field: str = "content",
metadata_field: str = "metadata",
vector_field: str = "vector",
pool_size: int = 5,
max_overflow: int = 10,
timeout: float = 30,
**kwargs: Any,
):
"""Initialize with necessary components.
Args:
embedding (Embeddings): A text embedding model.
table_name (str, optional): Specifies the name of the table in use.
Defaults to "embeddings".
content_field (str, optional): Specifies the field to store the content.
Defaults to "content".
metadata_field (str, optional): Specifies the field to store metadata.
Defaults to "metadata".
vector_field (str, optional): Specifies the field to store the vector.
Defaults to "vector".
Following arguments pertain to the connection pool:
pool_size (int, optional): Determines the number of active connections in
the pool. Defaults to 5.
max_overflow (int, optional): Determines the maximum number of connections
allowed beyond the pool_size. Defaults to 10.
timeout (float, optional): Specifies the maximum wait time in seconds for
establishing a connection. Defaults to 30.
Following arguments pertain to the database connection:
host (str, optional): Specifies the hostname, IP address, or URL for the
database connection. The default scheme is "mysql".
user (str, optional): Database username.
password (str, optional): Database password.
port (int, optional): Database port. Defaults to 3306 for non-HTTP
connections, 80 for HTTP connections, and 443 for HTTPS connections.
database (str, optional): Database name.
Additional optional arguments provide further customization over the
database connection:
pure_python (bool, optional): Toggles the connector mode. If True,
operates in pure Python mode.
local_infile (bool, optional): Allows local file uploads.
charset (str, optional): Specifies the character set for string values.
ssl_key (str, optional): Specifies the path of the file containing the SSL
key.
ssl_cert (str, optional): Specifies the path of the file containing the SSL
certificate.
ssl_ca (str, optional): Specifies the path of the file containing the SSL
certificate authority.
ssl_cipher (str, optional): Sets the SSL cipher list.
ssl_disabled (bool, optional): Disables SSL usage.
ssl_verify_cert (bool, optional): Verifies the server's certificate.
Automatically enabled if ``ssl_ca`` is specified.
ssl_verify_identity (bool, optional): Verifies the server's identity.
conv (dict[int, Callable], optional): A dictionary of data conversion
functions.
credential_type (str, optional): Specifies the type of authentication to
use: auth.PASSWORD, auth.JWT, or auth.BROWSER_SSO.
autocommit (bool, optional): Enables autocommits.
results_type (str, optional): Determines the structure of the query results:
tuples, namedtuples, dicts.
results_format (str, optional): Deprecated. This option has been renamed to
results_type.
Examples:
Basic Usage:
.. code-block:: python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import SingleStoreDB
vectorstore = SingleStoreDB(
OpenAIEmbeddings(),
host="https://user:[email protected]:3306/database"
)
Advanced Usage:
.. code-block:: python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import SingleStoreDB
vectorstore = SingleStoreDB(
OpenAIEmbeddings(),
host="127.0.0.1",
port=3306,
user="user",
password="password",
database="db",
table_name="my_custom_table",
pool_size=10,
timeout=60,
)
Using environment variables:
.. code-block:: python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import SingleStoreDB
os.environ['SINGLESTOREDB_URL'] = 'me:p455w0rd@s2-host.com/my_db'
vectorstore = SingleStoreDB(OpenAIEmbeddings())
"""
self.embedding = embedding
self.table_name = table_name
self.content_field = content_field
self.metadata_field = metadata_field
self.vector_field = vector_field
"""Pass the rest of the kwargs to the connection."""
self.connection_kwargs = kwargs
"""Create connection pool."""
self.connection_pool = QueuePool(
self._get_connection,
max_overflow=max_overflow,
pool_size=pool_size,
timeout=timeout,
)
self._create_table()
def _create_table(self: SingleStoreDB) -> None:
"""Create table if it doesn't exist."""
conn = self.connection_pool.connect()
try:
cur = conn.cursor()
try:
cur.execute(
"""CREATE TABLE IF NOT EXISTS {}
({} TEXT CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci,
{} BLOB, {} JSON);""".format(
self.table_name,
self.content_field,
self.vector_field,
self.metadata_field,
),
)
finally:
cur.close()
finally:
conn.close()
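# With the default field names, the statement above renders to (sketch):
#   CREATE TABLE IF NOT EXISTS embeddings
#   (content TEXT CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci,
#   vector BLOB, metadata JSON);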
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
embeddings: Optional[List[List[float]]] = None,
**kwargs: Any,
) -> List[str]:
"""Add more texts to the vectorstore.
Args:
texts (Iterable[str]): Iterable of strings/text to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
Defaults to None.
embeddings (Optional[List[List[float]]], optional): Optional pre-generated
embeddings. Defaults to None.
Returns:
List[str]: empty list
"""
conn = self.connection_pool.connect()
try:
cur = conn.cursor()
try:
# Write data to singlestore db
for i, text in enumerate(texts):
# Use provided values by default or fallback
metadata = metadatas[i] if metadatas else {}
embedding = (
embeddings[i]
if embeddings
else self.embedding.embed_documents([text])[0]
)
cur.execute(
"INSERT INTO {} VALUES (%s, JSON_ARRAY_PACK(%s), %s)".format(
self.table_name
),
(
text,
"[{}]".format(",".join(map(str, embedding))),
json.dumps(metadata),
),
)
finally:
cur.close()
finally:
conn.close()
return []
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Returns the most similar indexed documents to the query text.
Uses cosine similarity.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
Returns:
List[Document]: A list of documents that are most similar to the query text.
"""
docs_and_scores = self.similarity_search_with_score(query, k=k)
return [doc for doc, _ in docs_and_scores]
def similarity_search_with_score(
self, query: str, k: int = 4
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query. Uses cosine similarity.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query and score for each
"""
# Creates embedding vector from user query
embedding = self.embedding.embed_query(query)
conn = self.connection_pool.connect()
result = []
try:
cur = conn.cursor()
try:
cur.execute(
"""SELECT {}, {}, DOT_PRODUCT({}, JSON_ARRAY_PACK(%s)) as __score
FROM {} ORDER BY __score DESC LIMIT %s""".format(
self.content_field,
self.metadata_field,
self.vector_field,
self.table_name,
),
(
"[{}]".format(",".join(map(str, embedding))),
k,
),
)
for row in cur.fetchall():
doc = Document(page_content=row[0], metadata=row[1])
result.append((doc, float(row[2])))
finally:
cur.close()
finally:
conn.close()
return result
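# With the default field names, the query above renders to (sketch):
#   SELECT content, metadata, DOT_PRODUCT(vector, JSON_ARRAY_PACK(%s)) as __score
#   FROM embeddings ORDER BY __score DESC LIMIT %s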
@classmethod
def from_texts(
cls: Type[SingleStoreDB],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
table_name: str = "embeddings",
content_field: str = "content",
metadata_field: str = "metadata",
vector_field: str = "vector",
pool_size: int = 5,
max_overflow: int = 10,
timeout: float = 30,
**kwargs: Any,
) -> SingleStoreDB:
"""Create a SingleStoreDB vectorstore from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Creates a new table for the embeddings in SingleStoreDB.
3. Adds the documents to the newly created table.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain.vectorstores import SingleStoreDB
from langchain.embeddings import OpenAIEmbeddings
s2 = SingleStoreDB.from_texts(
texts,
OpenAIEmbeddings(),
host="username:password@localhost:3306/database"
)
"""
instance = cls(
embedding,
table_name=table_name,
content_field=content_field,
metadata_field=metadata_field,
vector_field=vector_field,
pool_size=pool_size,
max_overflow=max_overflow,
timeout=timeout,
**kwargs,
)
instance.add_texts(texts, metadatas, embedding.embed_documents(texts), **kwargs)
return instance
def as_retriever(self, **kwargs: Any) -> SingleStoreDBRetriever:
return SingleStoreDBRetriever(vectorstore=self, **kwargs)
class SingleStoreDBRetriever(VectorStoreRetriever):
vectorstore: SingleStoreDB
k: int = 4
allowed_search_types: ClassVar[Collection[str]] = ("similarity",)
def get_relevant_documents(self, query: str) -> List[Document]:
if self.search_type == "similarity":
docs = self.vectorstore.similarity_search(query, k=self.k)
else:
raise ValueError(f"search_type of {self.search_type} not allowed.")
return docs
async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError(
"SingleStoreDBVectorStoreRetriever does not support async"
)
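A quick usage sketch for the class above (assumptions: a reachable SingleStoreDB instance, the ``singlestoredb`` package, and an OpenAI API key; the connection URL is illustrative):
.. code-block:: python
    import os

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import SingleStoreDB

    # Connection parameters can come from the environment instead of kwargs.
    os.environ["SINGLESTOREDB_URL"] = "user:password@localhost:3306/db"
    s2 = SingleStoreDB.from_texts(["foo", "bar"], OpenAIEmbeddings())
    docs = s2.similarity_search("foo", k=1)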
Source code for langchain.vectorstores.vectara
"""Wrapper around Vectara vector database."""
from __future__ import annotations
import json
import logging
import os
from hashlib import md5
from typing import Any, Iterable, List, Optional, Tuple, Type
import requests
from pydantic import Field
from langchain.embeddings.base import Embeddings
from langchain.schema import Document
from langchain.vectorstores.base import VectorStore, VectorStoreRetriever
class Vectara(VectorStore):
"""Implementation of Vector Store using Vectara (https://vectara.com).
Example:
.. code-block:: python
from langchain.vectorstores import Vectara
vectorstore = Vectara(
vectara_customer_id=vectara_customer_id,
vectara_corpus_id=vectara_corpus_id,
vectara_api_key=vectara_api_key
)
"""
def __init__(
self,
vectara_customer_id: Optional[str] = None,
vectara_corpus_id: Optional[str] = None,
vectara_api_key: Optional[str] = None,
):
"""Initialize with Vectara API."""
self._vectara_customer_id = vectara_customer_id or os.environ.get(
"VECTARA_CUSTOMER_ID"
)
self._vectara_corpus_id = vectara_corpus_id or os.environ.get(
"VECTARA_CORPUS_ID"
)
self._vectara_api_key = vectara_api_key or os.environ.get("VECTARA_API_KEY")
if (
self._vectara_customer_id is None
or self._vectara_corpus_id is None
or self._vectara_api_key is None
):
logging.warning(
"Cant find Vectara credentials, customer_id or corpus_id in "
"environment."
)
else:
logging.debug(f"Using corpus id {self._vectara_corpus_id}")
self._session = requests.Session() # to reuse connections
adapter = requests.adapters.HTTPAdapter(max_retries=3)
self._session.mount("http://", adapter)
def _get_post_headers(self) -> dict:
"""Returns headers that should be attached to each post request."""
return {
"x-api-key": self._vectara_api_key,
"customer-id": self._vectara_customer_id,
"Content-Type": "application/json",
}
def _delete_doc(self, doc_id: str) -> bool:
"""
Delete a document from the Vectara corpus.
Args:
url (str): URL of the page to delete.
doc_id (str): ID of the document to delete.
Returns:
bool: True if deletion was successful, False otherwise.
"""
body = {
"customer_id": self._vectara_customer_id,
"corpus_id": self._vectara_corpus_id,
"document_id": doc_id,
}
response = self._session.post(
"https://api.vectara.io/v1/delete-doc",
data=json.dumps(body),
verify=True,
headers=self._get_post_headers(),
)
if response.status_code != 200:
logging.error(
f"Delete request failed for doc_id = {doc_id} with status code "
f"{response.status_code}, reason {response.reason}, text " | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/vectara.html |
bc839bff3176-2 | f"{response.status_code}, reason {response.reason}, text "
f"{response.text}"
)
return False
return True
def _index_doc(self, doc: dict) -> bool:
request: dict[str, Any] = {}
request["customer_id"] = self._vectara_customer_id
request["corpus_id"] = self._vectara_corpus_id
request["document"] = doc
response = self._session.post(
headers=self._get_post_headers(),
url="https://api.vectara.io/v1/core/index",
data=json.dumps(request),
timeout=30,
verify=True,
)
status_code = response.status_code
result = response.json()
status_str = result["status"]["code"] if "status" in result else None
if status_code == 409 or (status_str and status_str == "ALREADY_EXISTS"):
return False
else:
return True
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
Returns:
List of ids from adding the texts into the vectorstore.
"""
doc_hash = md5()
for t in texts:
doc_hash.update(t.encode())
doc_id = doc_hash.hexdigest()
if metadatas is None:
metadatas = [{} for _ in texts]
doc = {
"document_id": doc_id,
"metadataJson": json.dumps({"source": "langchain"}),
"parts": [
{"text": text, "metadataJson": json.dumps(md)}
for text, md in zip(texts, metadatas)
],
}
succeeded = self._index_doc(doc)
if not succeeded:
self._delete_doc(doc_id)
self._index_doc(doc)
return [doc_id]
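# Note: the document id above is the md5 digest of the concatenated texts, so
# re-adding the same batch produces the same id; when indexing reports it
# already exists, the code deletes and re-indexes it (effectively an upsert).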
def similarity_search_with_score(
self,
query: str,
k: int = 5,
lambda_val: float = 0.025,
filter: Optional[str] = None,
n_sentence_context: int = 0,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return Vectara documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 5.
lambda_val: lexical match parameter for hybrid search.
filter: Vectara metadata filter expression to apply. For example a
filter can be "doc.rating > 3.0 and part.lang = 'deu'"; see
https://docs.vectara.com/docs/search-apis/sql/filter-overview
for more details.
n_sentence_context: number of sentences before/after the matching segment
to add
Returns:
List of Documents most similar to the query and score for each.
"""
data = json.dumps(
{
"query": [
{
"query": query, | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/vectara.html |
bc839bff3176-4 | {
"query": [
{
"query": query,
"start": 0,
"num_results": k,
"context_config": {
"sentences_before": n_sentence_context,
"sentences_after": n_sentence_context,
},
"corpus_key": [
{
"customer_id": self._vectara_customer_id,
"corpus_id": self._vectara_corpus_id,
"metadataFilter": filter,
"lexical_interpolation_config": {"lambda": lambda_val},
}
],
}
]
}
)
response = self._session.post(
headers=self._get_post_headers(),
url="https://api.vectara.io/v1/query",
data=data,
timeout=10,
)
if response.status_code != 200:
logging.error(
"Query failed %s",
f"(code {response.status_code}, reason {response.reason}, details "
f"{response.text})",
)
return []
result = response.json()
responses = result["responseSet"][0]["response"]
vectara_default_metadata = ["lang", "len", "offset"]
docs = [
(
Document(
page_content=x["text"],
metadata={
m["name"]: m["value"]
for m in x["metadata"]
if m["name"] not in vectara_default_metadata
},
),
x["score"],
)
for x in responses
]
return docs
def similarity_search(
self,
query: str,
k: int = 5,
lambda_val: float = 0.025,
filter: Optional[str] = None,
n_sentence_context: int = 0,
**kwargs: Any,
) -> List[Document]:
"""Return Vectara documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 5.
filter: Vectara metadata filter expression to apply. For example a
filter can be "doc.rating > 3.0 and part.lang = 'deu'"; see
https://docs.vectara.com/docs/search-apis/sql/filter-overview for more
details.
n_sentence_context: number of sentences before/after the matching segment
to add
Returns:
List of Documents most similar to the query
"""
docs_and_scores = self.similarity_search_with_score(
query,
k=k,
lambda_val=lambda_val,
filter=filter,
n_sentence_context=n_sentence_context,
**kwargs,
)
return [doc for doc, _ in docs_and_scores]
@classmethod
def from_texts(
cls: Type[Vectara],
texts: List[str],
embedding: Optional[Embeddings] = None,
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> Vectara:
"""Construct Vectara wrapper from raw documents.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain import Vectara
vectara = Vectara.from_texts(
texts,
vectara_customer_id=customer_id,
vectara_corpus_id=corpus_id,
vectara_api_key=api_key,
)
"""
# Note: Vectara generates its own embeddings, so we ignore the provided
# embeddings (required by interface)
vectara = cls(**kwargs)
vectara.add_texts(texts, metadatas)
return vectara
def as_retriever(self, **kwargs: Any) -> VectaraRetriever:
return VectaraRetriever(vectorstore=self, **kwargs)
class VectaraRetriever(VectorStoreRetriever):
vectorstore: Vectara
search_kwargs: dict = Field(
default_factory=lambda: {
"lambda_val": 0.025,
"k": 5,
"filter": "",
"n_sentence_context": "0",
}
)
"""Search params.
k: Number of Documents to return. Defaults to 5.
lambda_val: lexical match parameter for hybrid search.
filter: Vectara metadata filter expression to apply. For example a
filter can be "doc.rating > 3.0 and part.lang = 'deu'"; see
https://docs.vectara.com/docs/search-apis/sql/filter-overview
for more details.
n_sentence_context: number of sentences before/after the matching segment to add
"""
def add_texts(
self, texts: List[str], metadatas: Optional[List[dict]] = None
) -> None:
"""Add text to the Vectara vectorstore. | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/vectara.html |
bc839bff3176-7 | ) -> None:
"""Add text to the Vectara vectorstore.
Args:
texts (List[str]): The text
metadatas (List[dict]): Metadata dicts, must line up with existing store
"""
self.vectorstore.add_texts(texts, metadatas)
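A quick usage sketch (assumptions: VECTARA_CUSTOMER_ID, VECTARA_CORPUS_ID and VECTARA_API_KEY are set in the environment; Vectara computes embeddings server-side, so no embedding model is passed; the filter value is illustrative):
.. code-block:: python
    from langchain.vectorstores import Vectara

    vectara = Vectara.from_texts(["to be or not to be"])
    docs_and_scores = vectara.similarity_search_with_score(
        "famous quotes", k=5, filter="part.lang = 'eng'"
    )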
Source code for langchain.vectorstores.analyticdb
"""VectorStore wrapper around a Postgres/PGVector database."""
from __future__ import annotations
import logging
import uuid
from typing import Any, Dict, Iterable, List, Optional, Tuple
import sqlalchemy
from sqlalchemy import REAL, Index
from sqlalchemy.dialects.postgresql import ARRAY, JSON, UUID
try:
from sqlalchemy.orm import declarative_base
except ImportError:
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session, relationship
from sqlalchemy.sql.expression import func
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.base import VectorStore
Base = declarative_base() # type: Any
ADA_TOKEN_COUNT = 1536
_LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain"
class BaseModel(Base):
__abstract__ = True
uuid = sqlalchemy.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
class CollectionStore(BaseModel):
__tablename__ = "langchain_pg_collection"
name = sqlalchemy.Column(sqlalchemy.String)
cmetadata = sqlalchemy.Column(JSON)
embeddings = relationship(
"EmbeddingStore",
back_populates="collection",
passive_deletes=True,
)
@classmethod
def get_by_name(cls, session: Session, name: str) -> Optional["CollectionStore"]:
return session.query(cls).filter(cls.name == name).first() # type: ignore
@classmethod
def get_or_create(
cls,
session: Session,
name: str,
cmetadata: Optional[dict] = None,
) -> Tuple["CollectionStore", bool]:
"""
Get or create a collection.
Returns [Collection, bool] where the bool is True if the collection was created.
"""
created = False
collection = cls.get_by_name(session, name)
if collection:
return collection, created
collection = cls(name=name, cmetadata=cmetadata)
session.add(collection)
session.commit()
created = True
return collection, created
class EmbeddingStore(BaseModel):
__tablename__ = "langchain_pg_embedding"
collection_id = sqlalchemy.Column(
UUID(as_uuid=True),
sqlalchemy.ForeignKey(
f"{CollectionStore.__tablename__}.uuid",
ondelete="CASCADE",
),
)
collection = relationship(CollectionStore, back_populates="embeddings")
embedding: sqlalchemy.Column = sqlalchemy.Column(ARRAY(REAL))
document = sqlalchemy.Column(sqlalchemy.String, nullable=True)
cmetadata = sqlalchemy.Column(JSON, nullable=True)
# custom_id : any user defined id
custom_id = sqlalchemy.Column(sqlalchemy.String, nullable=True)
# The following line creates an index named 'langchain_pg_embedding_vector_idx'
langchain_pg_embedding_vector_idx = Index(
"langchain_pg_embedding_vector_idx",
embedding,
postgresql_using="ann",
postgresql_with={
"distancemeasure": "L2",
"dim": 1536,
"pq_segments": 64,
"hnsw_m": 100,
"pq_centers": 2048,
},
)
class QueryResult:
EmbeddingStore: EmbeddingStore
distance: float
class AnalyticDB(VectorStore):
"""
VectorStore implementation using AnalyticDB.
AnalyticDB is a distributed, cloud-native database with full PostgreSQL syntax support.
- `connection_string` is a postgres connection string.
- `embedding_function` any embedding function implementing
`langchain.embeddings.base.Embeddings` interface.
- `collection_name` is the name of the collection to use. (default: langchain)
- NOTE: This is not the name of the table, but the name of the collection.
The tables will be created when initializing the store (if not exists)
So, make sure the user has the right permissions to create tables.
- `pre_delete_collection` if True, will delete the collection if it exists.
(default: False)
- Useful for testing.
"""
def __init__(
self,
connection_string: str,
embedding_function: Embeddings,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
collection_metadata: Optional[dict] = None,
pre_delete_collection: bool = False,
logger: Optional[logging.Logger] = None,
) -> None:
self.connection_string = connection_string
self.embedding_function = embedding_function
self.collection_name = collection_name
self.collection_metadata = collection_metadata
self.pre_delete_collection = pre_delete_collection
self.logger = logger or logging.getLogger(__name__)
self.__post_init__()
def __post_init__(
self,
) -> None:
"""
Initialize the store.
"""
self._conn = self.connect()
self.create_tables_if_not_exists()
self.create_collection()
def connect(self) -> sqlalchemy.engine.Connection:
engine = sqlalchemy.create_engine(self.connection_string)
conn = engine.connect()
return conn
def create_tables_if_not_exists(self) -> None:
Base.metadata.create_all(self._conn)
def drop_tables(self) -> None:
Base.metadata.drop_all(self._conn)
def create_collection(self) -> None:
if self.pre_delete_collection:
self.delete_collection()
with Session(self._conn) as session:
CollectionStore.get_or_create(
session, self.collection_name, cmetadata=self.collection_metadata
)
def delete_collection(self) -> None:
self.logger.debug("Trying to delete collection")
with Session(self._conn) as session:
collection = self.get_collection(session)
if not collection:
self.logger.error("Collection not found")
return
session.delete(collection)
session.commit()
def get_collection(self, session: Session) -> Optional["CollectionStore"]:
return CollectionStore.get_by_name(session, self.collection_name)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
embeddings = self.embedding_function.embed_documents(list(texts))
if not metadatas:
metadatas = [{} for _ in texts]
with Session(self._conn) as session:
collection = self.get_collection(session)
if not collection:
raise ValueError("Collection not found")
for text, metadata, embedding, id in zip(texts, metadatas, embeddings, ids):
embedding_store = EmbeddingStore(
embedding=embedding,
document=text,
cmetadata=metadata,
custom_id=id,
)
collection.embeddings.append(embedding_store)
session.add(embedding_store)
session.commit()
return ids
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Run similarity search with AnalyticDB with distance.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query.
"""
embedding = self.embedding_function.embed_query(text=query)
return self.similarity_search_by_vector(
embedding=embedding,
k=k,
filter=filter,
)
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[dict] = None,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query and score for each
"""
embedding = self.embedding_function.embed_query(query)
docs = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, filter=filter
)
return docs
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[dict] = None,
) -> List[Tuple[Document, float]]:
with Session(self._conn) as session:
collection = self.get_collection(session)
if not collection:
raise ValueError("Collection not found")
filter_by = EmbeddingStore.collection_id == collection.uuid
if filter is not None:
filter_clauses = []
for key, value in filter.items():
filter_by_metadata = EmbeddingStore.cmetadata[key].astext == str(value)
filter_clauses.append(filter_by_metadata)
filter_by = sqlalchemy.and_(filter_by, *filter_clauses)
results: List[QueryResult] = (
session.query(
EmbeddingStore,
func.l2_distance(EmbeddingStore.embedding, embedding).label("distance"),
)
.filter(filter_by)
.order_by(EmbeddingStore.embedding.op("<->")(embedding))
.join(
CollectionStore,
EmbeddingStore.collection_id == CollectionStore.uuid,
)
.limit(k)
.all()
)
docs = [
(
Document(
page_content=result.EmbeddingStore.document,
metadata=result.EmbeddingStore.cmetadata,
),
result.distance if self.embedding_function is not None else None,
)
for result in results
]
return docs
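# Filter sketch: a dict such as {"source": "notion"} is translated via
# EmbeddingStore.cmetadata[key].astext into a JSON-text equality test
# (roughly cmetadata ->> 'source' = 'notion'), ANDed with the collection id.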
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query vector.
"""
docs_and_scores = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, filter=filter
)
return [doc for doc, _ in docs_and_scores]
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> AnalyticDB:
"""
Return VectorStore initialized from texts and embeddings.
Postgres connection string is required
Either pass it as a parameter
or set the PGVECTOR_CONNECTION_STRING environment variable.
"""
connection_string = cls.get_connection_string(kwargs)
store = cls(
connection_string=connection_string,
collection_name=collection_name,
embedding_function=embedding,
pre_delete_collection=pre_delete_collection,
)
store.add_texts(texts=texts, metadatas=metadatas, ids=ids, **kwargs)
return store
@classmethod
def get_connection_string(cls, kwargs: Dict[str, Any]) -> str:
connection_string: str = get_from_dict_or_env(
data=kwargs,
key="connection_string",
env_key="PGVECTOR_CONNECTION_STRING",
)
if not connection_string:
raise ValueError(
"Postgres connection string is required"
"Either pass it as a parameter"
"or set the PGVECTOR_CONNECTION_STRING environment variable."
)
return connection_string
@classmethod
def from_documents(
cls,
documents: List[Document],
embedding: Embeddings,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> AnalyticDB:
"""
Return VectorStore initialized from documents and embeddings.
Postgres connection string is required
Either pass it as a parameter
or set the PGVECTOR_CONNECTION_STRING environment variable.
""" | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/analyticdb.html |
e4b7ea968c42-8 | or set the PGVECTOR_CONNECTION_STRING environment variable.
"""
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
connection_string = cls.get_connection_string(kwargs)
kwargs["connection_string"] = connection_string
return cls.from_texts(
texts=texts,
pre_delete_collection=pre_delete_collection,
embedding=embedding,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
**kwargs,
)
@classmethod
def connection_string_from_db_params(
cls,
driver: str,
host: str,
port: int,
database: str,
user: str,
password: str,
) -> str:
"""Return connection string from database parameters."""
return f"postgresql+{driver}://{user}:{password}@{host}:{port}/{database}"
Source code for langchain.vectorstores.faiss
"""Wrapper around FAISS vector database."""
from __future__ import annotations
import math
import os
import pickle
import uuid
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
import numpy as np
from langchain.docstore.base import AddableMixin, Docstore
from langchain.docstore.document import Document
from langchain.docstore.in_memory import InMemoryDocstore
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
def dependable_faiss_import(no_avx2: Optional[bool] = None) -> Any:
"""
Import faiss if available, otherwise raise error.
If FAISS_NO_AVX2 environment variable is set, it will be considered
to load FAISS with no AVX2 optimization.
Args:
no_avx2: Load FAISS strictly with no AVX2 optimization
so that the vectorstore is portable and compatible with other devices.
"""
if no_avx2 is None and "FAISS_NO_AVX2" in os.environ:
no_avx2 = bool(os.getenv("FAISS_NO_AVX2"))
try:
if no_avx2:
from faiss import swigfaiss as faiss
else:
import faiss
except ImportError:
raise ValueError(
"Could not import faiss python package. "
"Please install it with `pip install faiss` "
"or `pip install faiss-cpu` (depending on Python version)."
)
return faiss
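# Example (shell, illustrative): launch with FAISS_NO_AVX2=1 in the
# environment to load the AVX2-free build so saved indexes stay portable.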
def _default_relevance_score_fn(score: float) -> float:
"""Return a similarity score on a scale [0, 1]."""
# The 'correct' relevance function
# may differ depending on a few things, including:
# - the distance / similarity metric used by the VectorStore
# - the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
# - embedding dimensionality
# - etc.
# This function converts the euclidean norm of normalized embeddings
# (0 is most similar, sqrt(2) most dissimilar)
# to a similarity function (0 to 1)
return 1.0 - score / math.sqrt(2)
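# Worked check, assuming unit-normed embeddings: identical vectors have L2
# distance 0 -> relevance 1 - 0/sqrt(2) = 1.0; orthogonal unit vectors have
# distance sqrt(2) -> relevance 0.0.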
class FAISS(VectorStore):
"""Wrapper around FAISS vector database.
To use, you should have the ``faiss`` python package installed.
Example:
.. code-block:: python
from langchain import FAISS
faiss = FAISS(embedding_function, index, docstore, index_to_docstore_id)
"""
def __init__(
self,
embedding_function: Callable,
index: Any,
docstore: Docstore,
index_to_docstore_id: Dict[int, str],
relevance_score_fn: Optional[
Callable[[float], float]
] = _default_relevance_score_fn,
normalize_L2: bool = False,
):
"""Initialize with necessary components."""
self.embedding_function = embedding_function
self.index = index
self.docstore = docstore
self.index_to_docstore_id = index_to_docstore_id
self.relevance_score_fn = relevance_score_fn
self._normalize_L2 = normalize_L2
def __add(
self,
texts: Iterable[str],
embeddings: Iterable[List[float]],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
if not isinstance(self.docstore, AddableMixin):
raise ValueError(
"If trying to add texts, the underlying docstore should support "
f"adding items, which {self.docstore} does not"
)
documents = []
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
documents.append(Document(page_content=text, metadata=metadata))
if ids is None:
ids = [str(uuid.uuid4()) for _ in texts]
# Add to the index, the index_to_id mapping, and the docstore.
starting_len = len(self.index_to_docstore_id)
faiss = dependable_faiss_import()
vector = np.array(embeddings, dtype=np.float32)
if self._normalize_L2:
faiss.normalize_L2(vector)
self.index.add(vector)
# Get list of index, id, and docs.
full_info = [(starting_len + i, ids[i], doc) for i, doc in enumerate(documents)]
# Add information to docstore and index.
self.docstore.add({_id: doc for _, _id, doc in full_info})
index_to_id = {index: _id for index, _id, _ in full_info}
self.index_to_docstore_id.update(index_to_id)
return [_id for _, _id, _ in full_info]
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of unique IDs.
Returns:
List of ids from adding the texts into the vectorstore.
"""
if not isinstance(self.docstore, AddableMixin):
raise ValueError(
"If trying to add texts, the underlying docstore should support "
f"adding items, which {self.docstore} does not"
)
# Embed and create the documents.
embeddings = [self.embedding_function(text) for text in texts]
return self.__add(texts, embeddings, metadatas=metadatas, ids=ids, **kwargs)
def add_embeddings(
self,
text_embeddings: Iterable[Tuple[str, List[float]]],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
text_embeddings: Iterable pairs of string and embedding to
add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of unique IDs.
Returns:
List of ids from adding the texts into the vectorstore.
"""
if not isinstance(self.docstore, AddableMixin):
raise ValueError(
"If trying to add texts, the underlying docstore should support "
f"adding items, which {self.docstore} does not"
)
# Embed and create the documents.
texts, embeddings = zip(*text_embeddings)
return self.__add(texts, embeddings, metadatas=metadatas, ids=ids, **kwargs)
def similarity_search_with_score_by_vector(
self, embedding: List[float], k: int = 4
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of documents most similar to the query text and L2 distance
in float for each. Lower score represents more similarity.
"""
faiss = dependable_faiss_import()
vector = np.array([embedding], dtype=np.float32)
if self._normalize_L2:
faiss.normalize_L2(vector)
scores, indices = self.index.search(vector, k)
docs = []
for j, i in enumerate(indices[0]):
if i == -1:
# This happens when not enough docs are returned.
continue
_id = self.index_to_docstore_id[i]
doc = self.docstore.search(_id)
if not isinstance(doc, Document):
raise ValueError(f"Could not find document for id {_id}, got {doc}") | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/faiss.html |
40fade8f9055-5 | raise ValueError(f"Could not find document for id {_id}, got {doc}")
docs.append((doc, scores[0][j]))
return docs
def similarity_search_with_score(
self, query: str, k: int = 4
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of documents most similar to the query text with
L2 distance in float. Lower score represents more similarity.
"""
embedding = self.embedding_function(query)
docs = self.similarity_search_with_score_by_vector(embedding, k)
return docs
def similarity_search_by_vector(
self, embedding: List[float], k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the embedding.
"""
docs_and_scores = self.similarity_search_with_score_by_vector(embedding, k)
return [doc for doc, _ in docs_and_scores]
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
docs_and_scores = self.similarity_search_with_score(query, k)
return [doc for doc, _ in docs_and_scores]
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
_, indices = self.index.search(np.array([embedding], dtype=np.float32), fetch_k)
# -1 happens when not enough docs are returned.
embeddings = [self.index.reconstruct(int(i)) for i in indices[0] if i != -1]
mmr_selected = maximal_marginal_relevance(
np.array([embedding], dtype=np.float32),
embeddings,
k=k,
lambda_mult=lambda_mult,
)
selected_indices = [indices[0][i] for i in mmr_selected]
docs = []
for i in selected_indices:
if i == -1:
# This happens when not enough docs are returned.
continue
_id = self.index_to_docstore_id[i]
doc = self.docstore.search(_id)
if not isinstance(doc, Document):
raise ValueError(f"Could not find document for id {_id}, got {doc}")
docs.append(doc)
return docs
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
embedding = self.embedding_function(query)
docs = self.max_marginal_relevance_search_by_vector(
embedding, k, fetch_k, lambda_mult=lambda_mult
)
return docs
def merge_from(self, target: FAISS) -> None:
"""Merge another FAISS object with the current one.
Add the target FAISS to the current one.
Args:
target: FAISS object you wish to merge into the current one
Returns:
None.
"""
if not isinstance(self.docstore, AddableMixin):
raise ValueError("Cannot merge with this type of docstore")
# Numerical index for target docs are incremental on existing ones
starting_len = len(self.index_to_docstore_id)
# Merge two IndexFlatL2
self.index.merge_from(target.index)
# Get id and docs from target FAISS object
full_info = []
for i, target_id in target.index_to_docstore_id.items():
doc = target.docstore.search(target_id)
if not isinstance(doc, Document):
raise ValueError("Document should be returned")
full_info.append((starting_len + i, target_id, doc))
# Add information to docstore and index_to_docstore_id.
self.docstore.add({_id: doc for _, _id, doc in full_info})
index_to_id = {index: _id for index, _id, _ in full_info}
self.index_to_docstore_id.update(index_to_id)
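# Usage sketch (both stores must share the same embedding function and
# index type):
#   db1 = FAISS.from_texts(["foo"], embeddings)
#   db2 = FAISS.from_texts(["bar"], embeddings)
#   db1.merge_from(db2)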
@classmethod
def __from(
cls,
texts: List[str],
embeddings: List[List[float]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
normalize_L2: bool = False,
**kwargs: Any,
) -> FAISS:
faiss = dependable_faiss_import()
index = faiss.IndexFlatL2(len(embeddings[0]))
vector = np.array(embeddings, dtype=np.float32)
if normalize_L2:
faiss.normalize_L2(vector)
index.add(vector)
documents = []
if ids is None:
ids = [str(uuid.uuid4()) for _ in texts]
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
documents.append(Document(page_content=text, metadata=metadata))
index_to_id = dict(enumerate(ids))
docstore = InMemoryDocstore(dict(zip(index_to_id.values(), documents)))
return cls(
embedding.embed_query,
index,
docstore,
index_to_id,
normalize_L2=normalize_L2,
**kwargs,
)
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> FAISS:
"""Construct FAISS wrapper from raw documents.
This is a user friendly interface that:
1. Embeds documents.
2. Creates an in memory docstore
3. Initializes the FAISS database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain import FAISS
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
faiss = FAISS.from_texts(texts, embeddings)
"""
embeddings = embedding.embed_documents(texts)
return cls.__from(
texts,
embeddings,
embedding,
metadatas=metadatas,
ids=ids,
**kwargs,
)
@classmethod
def from_embeddings(
cls,
text_embeddings: List[Tuple[str, List[float]]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> FAISS:
"""Construct FAISS wrapper from raw documents.
This is a user friendly interface that:
1. Embeds documents.
2. Creates an in memory docstore
3. Initializes the FAISS database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain import FAISS
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
text_embeddings = embeddings.embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
faiss = FAISS.from_embeddings(text_embedding_pairs, embeddings)
"""
texts = [t[0] for t in text_embeddings]
embeddings = [t[1] for t in text_embeddings]
return cls.__from(
texts,
embeddings,
embedding,
metadatas=metadatas,
ids=ids,
**kwargs,
) | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/faiss.html |
[docs] def save_local(self, folder_path: str, index_name: str = "index") -> None:
"""Save FAISS index, docstore, and index_to_docstore_id to disk.
Args:
folder_path: folder path to save index, docstore,
and index_to_docstore_id to.
index_name: for saving with a specific index file name
"""
path = Path(folder_path)
path.mkdir(exist_ok=True, parents=True)
# save index separately since it is not picklable
faiss = dependable_faiss_import()
faiss.write_index(
self.index, str(path / "{index_name}.faiss".format(index_name=index_name))
)
# save docstore and index_to_docstore_id
with open(path / "{index_name}.pkl".format(index_name=index_name), "wb") as f:
pickle.dump((self.docstore, self.index_to_docstore_id), f)
[docs] @classmethod
def load_local(
cls, folder_path: str, embeddings: Embeddings, index_name: str = "index"
) -> FAISS:
"""Load FAISS index, docstore, and index_to_docstore_id from disk.
Args:
folder_path: folder path to load index, docstore,
and index_to_docstore_id from.
embeddings: Embeddings to use when generating queries
index_name: for saving with a specific index file name
"""
path = Path(folder_path)
# load index separately since it is not picklable
faiss = dependable_faiss_import()
index = faiss.read_index( | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/faiss.html |
str(path / "{index_name}.faiss".format(index_name=index_name))
)
# load docstore and index_to_docstore_id
with open(path / "{index_name}.pkl".format(index_name=index_name), "rb") as f:
docstore, index_to_docstore_id = pickle.load(f)
return cls(embeddings.embed_query, index, docstore, index_to_docstore_id)
def _similarity_search_with_relevance_scores(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs and their similarity scores on a scale from 0 to 1."""
if self.relevance_score_fn is None:
raise ValueError(
"normalize_score_fn must be provided to"
" FAISS constructor to normalize scores"
)
docs_and_scores = self.similarity_search_with_score(query, k=k)
return [(doc, self.relevance_score_fn(score)) for doc, score in docs_and_scores]
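The persistence and merge APIs above compose naturally. A minimal usage sketch (not part of the original module; it assumes ``faiss-cpu`` and an OpenAI API key are available, and ``texts_a`` / ``texts_b`` are placeholder corpora):
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

embeddings = OpenAIEmbeddings()
texts_a = ["alpha", "beta"]    # placeholder corpus A
texts_b = ["gamma", "delta"]   # placeholder corpus B

# Build two independent indexes.
db_a = FAISS.from_texts(texts_a, embeddings)
db_b = FAISS.from_texts(texts_b, embeddings)

# merge_from folds db_b's vectors and docstore entries into db_a.
db_a.merge_from(db_b)

# save_local writes "index.faiss" and "index.pkl" into the folder;
# load_local restores them with the same embedding function.
db_a.save_local("faiss_index")
restored = FAISS.load_local("faiss_index", embeddings)
docs = restored.similarity_search("alpha", k=2)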
Source code for langchain.vectorstores.supabase
from __future__ import annotations
from itertools import repeat
from typing import (
TYPE_CHECKING,
Any,
Iterable,
List,
Optional,
Tuple,
Type,
Union,
)
import numpy as np
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
if TYPE_CHECKING:
import supabase
[docs]class SupabaseVectorStore(VectorStore):
"""VectorStore for a Supabase postgres database. Assumes you have the `pgvector`
extension installed and a `match_documents` (or similar) function. For more details:
https://js.langchain.com/docs/modules/indexes/vector_stores/integrations/supabase
You can implement your own `match_documents` function in order to limit the search
space to a subset of documents based on your own authorization or business logic.
Note that the Supabase Python client does not yet support async operations.
If you'd like to use `max_marginal_relevance_search`, please review the instructions
below on modifying the `match_documents` function to return matched embeddings.
"""
_client: supabase.client.Client
# This is the embedding function. Don't confuse with the embedding vectors.
# We should perhaps rename the underlying Embedding base class to EmbeddingFunction
# or something
_embedding: Embeddings
table_name: str
query_name: str
def __init__(
self,
client: supabase.client.Client,
embedding: Embeddings,
table_name: str, | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/supabase.html |
query_name: Union[str, None] = None,
) -> None:
"""Initialize with supabase client."""
try:
import supabase # noqa: F401
except ImportError:
raise ValueError(
"Could not import supabase python package. "
"Please install it with `pip install supabase`."
)
self._client = client
self._embedding: Embeddings = embedding
self.table_name = table_name or "documents"
self.query_name = query_name or "match_documents"
[docs] def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict[Any, Any]]] = None,
**kwargs: Any,
) -> List[str]:
docs = self._texts_to_documents(texts, metadatas)
vectors = self._embedding.embed_documents(list(texts))
return self.add_vectors(vectors, docs)
[docs] @classmethod
def from_texts(
cls: Type["SupabaseVectorStore"],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
client: Optional[supabase.client.Client] = None,
table_name: Optional[str] = "documents",
query_name: Union[str, None] = "match_documents",
**kwargs: Any,
) -> "SupabaseVectorStore":
"""Return VectorStore initialized from texts and embeddings."""
if not client:
raise ValueError("Supabase client is required.")
if not table_name:
raise ValueError("Supabase document table_name is required.") | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/supabase.html |
embeddings = embedding.embed_documents(texts)
docs = cls._texts_to_documents(texts, metadatas)
_ids = cls._add_vectors(client, table_name, embeddings, docs)
return cls(
client=client,
embedding=embedding,
table_name=table_name,
query_name=query_name,
)
[docs] def add_vectors(
self, vectors: List[List[float]], documents: List[Document]
) -> List[str]:
return self._add_vectors(self._client, self.table_name, vectors, documents)
[docs] def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
vectors = self._embedding.embed_documents([query])
return self.similarity_search_by_vector(vectors[0], k)
[docs] def similarity_search_by_vector(
self, embedding: List[float], k: int = 4, **kwargs: Any
) -> List[Document]:
result = self.similarity_search_by_vector_with_relevance_scores(embedding, k)
documents = [doc for doc, _ in result]
return documents
[docs] def similarity_search_with_relevance_scores(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Tuple[Document, float]]:
vectors = self._embedding.embed_documents([query])
return self.similarity_search_by_vector_with_relevance_scores(vectors[0], k)
[docs] def similarity_search_by_vector_with_relevance_scores(
self, query: List[float], k: int
) -> List[Tuple[Document, float]]: | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/supabase.html |
match_documents_params = dict(query_embedding=query, match_count=k)
res = self._client.rpc(self.query_name, match_documents_params).execute()
match_result = [
(
Document(
metadata=search.get("metadata", {}), # type: ignore
page_content=search.get("content", ""),
),
search.get("similarity", 0.0),
)
for search in res.data
if search.get("content")
]
return match_result
[docs] def similarity_search_by_vector_returning_embeddings(
self, query: List[float], k: int
) -> List[Tuple[Document, float, np.ndarray[np.float32, Any]]]:
match_documents_params = dict(query_embedding=query, match_count=k)
res = self._client.rpc(self.query_name, match_documents_params).execute()
match_result = [
(
Document(
metadata=search.get("metadata", {}), # type: ignore
page_content=search.get("content", ""),
),
search.get("similarity", 0.0),
# Supabase returns a vector type as its string representation (!).
# This is a hack to convert the string to numpy array.
np.fromstring(
search.get("embedding", "").strip("[]"), np.float32, sep=","
),
)
for search in res.data
if search.get("content")
]
return match_result
@staticmethod
def _texts_to_documents(
texts: Iterable[str],
metadatas: Optional[Iterable[dict[Any, Any]]] = None,
) -> List[Document]: | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/supabase.html |
"""Return list of Documents from list of texts and metadatas."""
if metadatas is None:
metadatas = repeat({})
docs = [
Document(page_content=text, metadata=metadata)
for text, metadata in zip(texts, metadatas)
]
return docs
@staticmethod
def _add_vectors(
client: supabase.client.Client,
table_name: str,
vectors: List[List[float]],
documents: List[Document],
) -> List[str]:
"""Add vectors to Supabase table."""
rows: List[dict[str, Any]] = [
{
"content": documents[idx].page_content,
"embedding": embedding,
"metadata": documents[idx].metadata, # type: ignore
}
for idx, embedding in enumerate(vectors)
]
# According to the SupabaseVectorStore JS implementation, the best chunk size
# is 500
chunk_size = 500
id_list: List[str] = []
for i in range(0, len(rows), chunk_size):
chunk = rows[i : i + chunk_size]
result = client.from_(table_name).insert(chunk).execute() # type: ignore
if len(result.data) == 0:
raise Exception("Error inserting: No rows added")
# VectorStore.add_vectors returns ids as strings
ids = [str(i.get("id")) for i in result.data if i.get("id")]
id_list.extend(ids)
return id_list
[docs] def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4, | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/supabase.html |
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
result = self.similarity_search_by_vector_returning_embeddings(
embedding, fetch_k
)
matched_documents = [doc_tuple[0] for doc_tuple in result]
matched_embeddings = [doc_tuple[2] for doc_tuple in result]
mmr_selected = maximal_marginal_relevance(
np.array([embedding], dtype=np.float32),
matched_embeddings,
k=k,
lambda_mult=lambda_mult,
)
filtered_documents = [matched_documents[i] for i in mmr_selected]
return filtered_documents
[docs] def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]: | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/supabase.html |
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
`max_marginal_relevance_search` requires that `query_name` returns matched
embeddings alongside the matched documents. The following function
demonstrates how to do this:
```sql
CREATE FUNCTION match_documents_embeddings(query_embedding vector(1536),
match_count int)
RETURNS TABLE(
id bigint,
content text,
metadata jsonb,
embedding vector(1536),
similarity float)
LANGUAGE plpgsql
AS $$
# variable_conflict use_column
BEGIN
RETURN query
SELECT
id,
content,
metadata,
embedding,
1 - (docstore.embedding <=> query_embedding) AS similarity
FROM
docstore
ORDER BY
docstore.embedding <=> query_embedding
LIMIT match_count;
END;
$$;
```
"""
embedding = self._embedding.embed_documents([query])
docs = self.max_marginal_relevance_search_by_vector(
embedding[0], k, fetch_k, lambda_mult=lambda_mult
)
return docs | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/supabase.html |
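A short, hedged sketch of wiring the class above to a live project. It assumes a Supabase instance with the ``pgvector`` extension, a ``documents`` table, and a ``match_documents`` function as described in the linked docs; the environment variable names are placeholders:
import os

from supabase import create_client
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import SupabaseVectorStore

client = create_client(os.environ["SUPABASE_URL"], os.environ["SUPABASE_KEY"])
embeddings = OpenAIEmbeddings()

# from_texts embeds the texts and inserts them in chunks of 500 rows.
vectorstore = SupabaseVectorStore.from_texts(
    ["hello world", "goodbye world"],
    embeddings,
    client=client,
    table_name="documents",
    query_name="match_documents",
)
docs = vectorstore.similarity_search("hello", k=1)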
Source code for langchain.vectorstores.lancedb
"""Wrapper around LanceDB vector database"""
from __future__ import annotations
import uuid
from typing import Any, Iterable, List, Optional
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
[docs]class LanceDB(VectorStore):
"""Wrapper around LanceDB vector database.
To use, you should have ``lancedb`` python package installed.
Example:
.. code-block:: python
db = lancedb.connect('./lancedb')
table = db.open_table('my_table')
vectorstore = LanceDB(table, embedding_function)
vectorstore.add_texts(['text1', 'text2'])
result = vectorstore.similarity_search('text1')
"""
def __init__(
self,
connection: Any,
embedding: Embeddings,
vector_key: Optional[str] = "vector",
id_key: Optional[str] = "id",
text_key: Optional[str] = "text",
):
"""Initialize with Lance DB connection"""
try:
import lancedb
except ImportError:
raise ValueError(
"Could not import lancedb python package. "
"Please install it with `pip install lancedb`."
)
if not isinstance(connection, lancedb.db.LanceTable):
raise ValueError(
"connection should be an instance of lancedb.db.LanceTable, "
f"got {type(connection)}"
)
self._connection = connection
self._embedding = embedding
self._vector_key = vector_key
self._id_key = id_key
self._text_key = text_key | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/lancedb.html |
[docs] def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Turn texts into embedding and add it to the database
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids to associate with the texts.
Returns:
List of ids of the added texts.
"""
# Embed texts and create documents
docs = []
ids = ids or [str(uuid.uuid4()) for _ in texts]
embeddings = self._embedding.embed_documents(list(texts))
for idx, text in enumerate(texts):
embedding = embeddings[idx]
metadata = metadatas[idx] if metadatas else {}
docs.append(
{
self._vector_key: embedding,
self._id_key: ids[idx],
self._text_key: text,
**metadata,
}
)
self._connection.add(docs)
return ids
[docs] def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return documents most similar to the query
Args:
query: String to query the vectorstore with.
k: Number of documents to return.
Returns:
List of documents most similar to the query.
"""
embedding = self._embedding.embed_query(query) | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/lancedb.html |
docs = self._connection.search(embedding).limit(k).to_df()
return [
Document(
page_content=row[self._text_key],
metadata=row[docs.columns != self._text_key],
)
for _, row in docs.iterrows()
]
[docs] @classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
connection: Any = None,
vector_key: Optional[str] = "vector",
id_key: Optional[str] = "id",
text_key: Optional[str] = "text",
**kwargs: Any,
) -> LanceDB:
instance = LanceDB(
connection,
embedding,
vector_key,
id_key,
text_key,
)
instance.add_texts(texts, metadatas=metadatas, **kwargs)
return instance
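A hedged construction sketch for the wrapper above. ``from_texts`` expects an existing ``LanceTable``, and LanceDB infers the schema from data, so this example seeds the table with one embedded row first (the table and column names follow the defaults used by this class):
import lancedb

from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import LanceDB

embeddings = OpenAIEmbeddings()
db = lancedb.connect("./lancedb")

# The seed row defines the schema: vector, id, and text columns.
seed = "hello world"
table = db.create_table(
    "my_table",
    data=[{"vector": embeddings.embed_query(seed), "id": "0", "text": seed}],
)

vectorstore = LanceDB.from_texts(["goodbye world"], embeddings, connection=table)
docs = vectorstore.similarity_search("hello", k=2)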
Source code for langchain.vectorstores.qdrant
"""Wrapper around Qdrant vector database."""
from __future__ import annotations
import uuid
import warnings
from itertools import islice
from operator import itemgetter
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Sequence,
Tuple,
Type,
Union,
)
import numpy as np
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
if TYPE_CHECKING:
from qdrant_client.conversions import common_types
from qdrant_client.http import models as rest
DictFilter = Dict[str, Union[str, int, bool, dict, list]]
MetadataFilter = Union[DictFilter, common_types.Filter]
[docs]class Qdrant(VectorStore):
"""Wrapper around Qdrant vector database.
To use you should have the ``qdrant-client`` package installed.
Example:
.. code-block:: python
from qdrant_client import QdrantClient
from langchain import Qdrant
client = QdrantClient()
collection_name = "MyCollection"
qdrant = Qdrant(client, collection_name, embedding_function)
"""
CONTENT_KEY = "page_content"
METADATA_KEY = "metadata"
def __init__(
self,
client: Any,
collection_name: str,
embeddings: Optional[Embeddings] = None,
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY, | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/qdrant.html |
embedding_function: Optional[Callable] = None, # deprecated
):
"""Initialize with necessary components."""
try:
import qdrant_client
except ImportError:
raise ValueError(
"Could not import qdrant-client python package. "
"Please install it with `pip install qdrant-client`."
)
if not isinstance(client, qdrant_client.QdrantClient):
raise ValueError(
f"client should be an instance of qdrant_client.QdrantClient, "
f"got {type(client)}"
)
if embeddings is None and embedding_function is None:
raise ValueError(
"`embeddings` value can't be None. Pass `Embeddings` instance."
)
if embeddings is not None and embedding_function is not None:
raise ValueError(
"Both `embeddings` and `embedding_function` are passed. "
"Use `embeddings` only."
)
self.embeddings = embeddings
self._embeddings_function = embedding_function
self.client: qdrant_client.QdrantClient = client
self.collection_name = collection_name
self.content_payload_key = content_payload_key or self.CONTENT_KEY
self.metadata_payload_key = metadata_payload_key or self.METADATA_KEY
if embedding_function is not None:
warnings.warn(
"Using `embedding_function` is deprecated. "
"Pass `Embeddings` instance to `embeddings` instead."
)
if not isinstance(embeddings, Embeddings):
warnings.warn(
"`embeddings` should be an instance of `Embeddings`."
"Using `embeddings` as `embedding_function` which is deprecated" | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/qdrant.html |
)
self._embeddings_function = embeddings
self.embeddings = None
[docs] def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[Sequence[str]] = None,
batch_size: int = 64,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids:
Optional list of ids to associate with the texts. Ids have to be
uuid-like strings.
batch_size:
How many vectors upload per-request.
Default: 64
Returns:
List of ids from adding the texts into the vectorstore.
"""
from qdrant_client.http import models as rest
added_ids = []
texts_iterator = iter(texts)
metadatas_iterator = iter(metadatas or [])
ids_iterator = iter(ids or [uuid.uuid4().hex for _ in iter(texts)])
while batch_texts := list(islice(texts_iterator, batch_size)):
# Take the corresponding metadata and id for each text in a batch
batch_metadatas = list(islice(metadatas_iterator, batch_size)) or None
batch_ids = list(islice(ids_iterator, batch_size))
self.client.upsert(
collection_name=self.collection_name,
points=rest.Batch.construct(
ids=batch_ids,
vectors=self._embed_texts(batch_texts), | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/qdrant.html |
payloads=self._build_payloads(
batch_texts,
batch_metadatas,
self.content_payload_key,
self.metadata_payload_key,
),
),
)
added_ids.extend(batch_ids)
return added_ids
[docs] def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
Returns:
List of Documents most similar to the query.
"""
results = self.similarity_search_with_score(
query,
k,
filter=filter,
search_params=search_params,
offset=offset,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return list(map(itemgetter(0), results))
[docs] def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result. | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/qdrant.html |
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
Returns:
List of documents most similar to the query text and cosine
distance in float for each.
Lower score represents more similarity.
"""
if filter is not None and isinstance(filter, dict):
warnings.warn(
"Using dict as a `filter` is deprecated. Please use qdrant-client "
"filters directly: "
"https://qdrant.tech/documentation/concepts/filtering/",
DeprecationWarning,
)
qdrant_filter = self._qdrant_filter_from_dict(filter)
else:
qdrant_filter = filter
results = self.client.search(
collection_name=self.collection_name,
query_vector=self._embed_query(query),
query_filter=qdrant_filter,
search_params=search_params,
limit=k,
offset=offset,
with_payload=True,
with_vectors=False, # Langchain does not expect vectors to be returned | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/qdrant.html |
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return [
(
self._document_from_scored_point(
result, self.content_payload_key, self.metadata_payload_key
),
result.score,
)
for result in results
]
def _similarity_search_with_relevance_scores(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs and relevance scores in the range [0, 1].
0 is dissimilar, 1 is most similar.
Args:
query: input text
k: Number of Documents to return. Defaults to 4.
**kwargs: kwargs to be passed to similarity search. Should include:
score_threshold: Optional, a floating point value between 0 to 1 to
filter the resulting set of retrieved docs
Returns:
List of Tuples of (doc, similarity_score)
"""
return self.similarity_search_with_score(query, k, **kwargs)
[docs] def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to. | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/qdrant.html |
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
embedding = self._embed_query(query)
results = self.client.search(
collection_name=self.collection_name,
query_vector=embedding,
with_payload=True,
with_vectors=True,
limit=fetch_k,
)
embeddings = [result.vector for result in results]
mmr_selected = maximal_marginal_relevance(
np.array(embedding), embeddings, k=k, lambda_mult=lambda_mult
)
return [
self._document_from_scored_point(
results[i], self.content_payload_key, self.metadata_payload_key
)
for i in mmr_selected
]
[docs] @classmethod
def from_texts(
cls: Type[Qdrant],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[Sequence[str]] = None,
location: Optional[str] = None,
url: Optional[str] = None,
port: Optional[int] = 6333,
grpc_port: int = 6334,
prefer_grpc: bool = False,
https: Optional[bool] = None,
api_key: Optional[str] = None, | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/qdrant.html |
prefix: Optional[str] = None,
timeout: Optional[float] = None,
host: Optional[str] = None,
path: Optional[str] = None,
collection_name: Optional[str] = None,
distance_func: str = "Cosine",
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
batch_size: int = 64,
shard_number: Optional[int] = None,
replication_factor: Optional[int] = None,
write_consistency_factor: Optional[int] = None,
on_disk_payload: Optional[bool] = None,
hnsw_config: Optional[common_types.HnswConfigDiff] = None,
optimizers_config: Optional[common_types.OptimizersConfigDiff] = None,
wal_config: Optional[common_types.WalConfigDiff] = None,
quantization_config: Optional[common_types.QuantizationConfig] = None,
init_from: Optional[common_types.InitFrom] = None,
**kwargs: Any,
) -> Qdrant:
"""Construct Qdrant wrapper from a list of texts.
Args:
texts: A list of texts to be indexed in Qdrant.
embedding: A subclass of `Embeddings`, responsible for text vectorization.
metadatas:
An optional list of metadata. If provided it has to be of the same
length as a list of texts.
ids:
Optional list of ids to associate with the texts. Ids have to be
uuid-like strings.
location:
If `:memory:` - use in-memory Qdrant instance. | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/qdrant.html |
If `str` - use it as a `url` parameter.
If `None` - fallback to relying on `host` and `port` parameters.
url: either host or str of "Optional[scheme], host, Optional[port],
Optional[prefix]". Default: `None`
port: Port of the REST API interface. Default: 6333
grpc_port: Port of the gRPC interface. Default: 6334
prefer_grpc:
If true - use gRPC interface whenever possible in custom methods.
Default: False
https: If true - use HTTPS(SSL) protocol. Default: None
api_key: API key for authentication in Qdrant Cloud. Default: None
prefix:
If not None - add prefix to the REST URL path.
Example: service/v1 will result in
http://localhost:6333/service/v1/{qdrant-endpoint} for REST API.
Default: None
timeout:
Timeout for REST and gRPC API requests.
Default: 5.0 seconds for REST and unlimited for gRPC
host:
Host name of Qdrant service. If url and host are None, set to
'localhost'. Default: None
path:
Path in which the vectors will be stored while using local mode.
Default: None
collection_name:
Name of the Qdrant collection to be used. If not provided,
it will be created randomly. Default: None
distance_func:
Distance function. One of: "Cosine" / "Euclid" / "Dot".
Default: "Cosine"
content_payload_key: | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/qdrant.html |
A payload key used to store the content of the document.
Default: "page_content"
metadata_payload_key:
A payload key used to store the metadata of the document.
Default: "metadata"
batch_size:
How many vectors upload per-request.
Default: 64
shard_number: Number of shards in collection. Default is 1, minimum is 1.
replication_factor:
Replication factor for collection. Default is 1, minimum is 1.
Defines how many copies of each shard will be created.
Has effect only in distributed mode.
write_consistency_factor:
Write consistency factor for collection. Default is 1, minimum is 1.
Defines how many replicas should apply the operation for us to consider
it successful. Increasing this number will make the collection more
resilient to inconsistencies, but will also make it fail if not enough
replicas are available.
Does not have any performance impact.
Has effect only in distributed mode.
on_disk_payload:
If true - the point's payload will not be stored in memory.
It will be read from the disk every time it is requested.
This setting saves RAM by (slightly) increasing the response time.
Note: those payload values that are involved in filtering and are
indexed - remain in RAM.
hnsw_config: Params for HNSW index
optimizers_config: Params for optimizer
wal_config: Params for Write-Ahead-Log
quantization_config:
Params for quantization, if None - quantization will be disabled
init_from:
Use data stored in another collection to initialize this collection
**kwargs:
Additional arguments passed directly into REST client initialization | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/qdrant.html |
This is a user-friendly interface that:
1. Creates embeddings, one for each text
2. Initializes the Qdrant database as an in-memory docstore by default
(and overridable to a remote docstore)
3. Adds the text embeddings to the Qdrant database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain import Qdrant
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
qdrant = Qdrant.from_texts(texts, embeddings, "localhost")
"""
try:
import qdrant_client
except ImportError:
raise ValueError(
"Could not import qdrant-client python package. "
"Please install it with `pip install qdrant-client`."
)
from qdrant_client.http import models as rest
# Just do a single quick embedding to get vector size
partial_embeddings = embedding.embed_documents(texts[:1])
vector_size = len(partial_embeddings[0])
collection_name = collection_name or uuid.uuid4().hex
distance_func = distance_func.upper()
client = qdrant_client.QdrantClient(
location=location,
url=url,
port=port,
grpc_port=grpc_port,
prefer_grpc=prefer_grpc,
https=https,
api_key=api_key,
prefix=prefix,
timeout=timeout,
host=host,
path=path,
**kwargs,
)
client.recreate_collection(
collection_name=collection_name, | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/qdrant.html |
vectors_config=rest.VectorParams(
size=vector_size,
distance=rest.Distance[distance_func],
),
shard_number=shard_number,
replication_factor=replication_factor,
write_consistency_factor=write_consistency_factor,
on_disk_payload=on_disk_payload,
hnsw_config=hnsw_config,
optimizers_config=optimizers_config,
wal_config=wal_config,
quantization_config=quantization_config,
init_from=init_from,
timeout=timeout, # type: ignore[arg-type]
)
texts_iterator = iter(texts)
metadatas_iterator = iter(metadatas or [])
ids_iterator = iter(ids or [uuid.uuid4().hex for _ in iter(texts)])
while batch_texts := list(islice(texts_iterator, batch_size)):
# Take the corresponding metadata and id for each text in a batch
batch_metadatas = list(islice(metadatas_iterator, batch_size)) or None
batch_ids = list(islice(ids_iterator, batch_size))
# Generate the embeddings for all the texts in a batch
batch_embeddings = embedding.embed_documents(batch_texts)
client.upsert(
collection_name=collection_name,
points=rest.Batch.construct(
ids=batch_ids,
vectors=batch_embeddings,
payloads=cls._build_payloads(
batch_texts,
batch_metadatas,
content_payload_key,
metadata_payload_key,
),
),
)
return cls(
client=client,
collection_name=collection_name,
embeddings=embedding,
content_payload_key=content_payload_key, | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/qdrant.html |
metadata_payload_key=metadata_payload_key,
)
@classmethod
def _build_payloads(
cls,
texts: Iterable[str],
metadatas: Optional[List[dict]],
content_payload_key: str,
metadata_payload_key: str,
) -> List[dict]:
payloads = []
for i, text in enumerate(texts):
if text is None:
raise ValueError(
"At least one of the texts is None. Please remove it before "
"calling .from_texts or .add_texts on Qdrant instance."
)
metadata = metadatas[i] if metadatas is not None else None
payloads.append(
{
content_payload_key: text,
metadata_payload_key: metadata,
}
)
return payloads
@classmethod
def _document_from_scored_point(
cls,
scored_point: Any,
content_payload_key: str,
metadata_payload_key: str,
) -> Document:
return Document(
page_content=scored_point.payload.get(content_payload_key),
metadata=scored_point.payload.get(metadata_payload_key) or {},
)
def _build_condition(self, key: str, value: Any) -> List[rest.FieldCondition]:
from qdrant_client.http import models as rest
out = []
if isinstance(value, dict):
for _key, value in value.items():
out.extend(self._build_condition(f"{key}.{_key}", value))
elif isinstance(value, list):
for _value in value:
if isinstance(_value, dict): | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/qdrant.html |
out.extend(self._build_condition(f"{key}[]", _value))
else:
out.extend(self._build_condition(f"{key}", _value))
else:
out.append(
rest.FieldCondition(
key=f"{self.metadata_payload_key}.{key}",
match=rest.MatchValue(value=value),
)
)
return out
def _qdrant_filter_from_dict(
self, filter: Optional[DictFilter]
) -> Optional[rest.Filter]:
from qdrant_client.http import models as rest
if not filter:
return None
return rest.Filter(
must=[
condition
for key, value in filter.items()
for condition in self._build_condition(key, value)
]
)
def _embed_query(self, query: str) -> List[float]:
"""Embed query text.
Used to provide backward compatibility with `embedding_function` argument.
Args:
query: Query text.
Returns:
List of floats representing the query embedding.
"""
if self.embeddings is not None:
embedding = self.embeddings.embed_query(query)
else:
if self._embeddings_function is not None:
embedding = self._embeddings_function(query)
else:
raise ValueError("Neither of embeddings or embedding_function is set")
return embedding.tolist() if hasattr(embedding, "tolist") else embedding
def _embed_texts(self, texts: Iterable[str]) -> List[List[float]]:
"""Embed search texts.
Used to provide backward compatibility with `embedding_function` argument.
Args:
texts: Iterable of texts to embed.
Returns: | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/qdrant.html |
List of floats representing the texts embedding.
"""
if self.embeddings is not None:
embeddings = self.embeddings.embed_documents(list(texts))
if hasattr(embeddings, "tolist"):
embeddings = embeddings.tolist()
elif self._embeddings_function is not None:
embeddings = []
for text in texts:
embedding = self._embeddings_function(text)
if hasattr(embedding, "tolist"):  # check the single vector, not the list
embedding = embedding.tolist()
embeddings.append(embedding)
else:
raise ValueError("Neither of embeddings or embedding_function is set")
return embeddings
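A brief sketch of the filter path described above, using qdrant-client's native filter models rather than the deprecated dict form (the in-memory location keeps the example self-contained; the collection name is arbitrary):
from qdrant_client.http import models as rest

from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Qdrant

embeddings = OpenAIEmbeddings()
qdrant = Qdrant.from_texts(
    ["foo", "bar"],
    embeddings,
    metadatas=[{"source": "a"}, {"source": "b"}],
    location=":memory:",
    collection_name="demo",
)

# With a native Filter, the "metadata." payload prefix must be written out
# explicitly; _build_condition only adds it for dict filters.
flt = rest.Filter(
    must=[
        rest.FieldCondition(key="metadata.source", match=rest.MatchValue(value="a"))
    ]
)
docs = qdrant.similarity_search("foo", k=1, filter=flt)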
Source code for langchain.vectorstores.base
"""Interface for vector stores."""
from __future__ import annotations
import asyncio
import warnings
from abc import ABC, abstractmethod
from functools import partial
from typing import (
Any,
ClassVar,
Collection,
Dict,
Iterable,
List,
Optional,
Tuple,
Type,
TypeVar,
)
from pydantic import BaseModel, Field, root_validator
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever
VST = TypeVar("VST", bound="VectorStore")
[docs]class VectorStore(ABC):
"""Interface for vector stores."""
[docs] @abstractmethod
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
[docs] async def aadd_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore."""
raise NotImplementedError
[docs] def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]: | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/base.html |
"""Run more documents through the embeddings and add to the vectorstore.
Args:
documents (List[Document]): Documents to add to the vectorstore.
Returns:
List[str]: List of IDs of the added texts.
"""
# TODO: Handle the case where the user doesn't provide ids on the Collection
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return self.add_texts(texts, metadatas, **kwargs)
[docs] async def aadd_documents(
self, documents: List[Document], **kwargs: Any
) -> List[str]:
"""Run more documents through the embeddings and add to the vectorstore.
Args:
documents (List[Document]): Documents to add to the vectorstore.
Returns:
List[str]: List of IDs of the added texts.
"""
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return await self.aadd_texts(texts, metadatas, **kwargs)
[docs] def search(self, query: str, search_type: str, **kwargs: Any) -> List[Document]:
"""Return docs most similar to query using specified search type."""
if search_type == "similarity":
return self.similarity_search(query, **kwargs)
elif search_type == "mmr":
return self.max_marginal_relevance_search(query, **kwargs)
else:
raise ValueError(
f"search_type of {search_type} not allowed. Expected "
"search_type to be 'similarity' or 'mmr'."
)
[docs] async def asearch( | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/base.html |
self, query: str, search_type: str, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query using specified search type."""
if search_type == "similarity":
return await self.asimilarity_search(query, **kwargs)
elif search_type == "mmr":
return await self.amax_marginal_relevance_search(query, **kwargs)
else:
raise ValueError(
f"search_type of {search_type} not allowed. Expected "
"search_type to be 'similarity' or 'mmr'."
)
[docs] @abstractmethod
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query."""
[docs] def similarity_search_with_relevance_scores(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs and relevance scores in the range [0, 1].
0 is dissimilar, 1 is most similar.
Args:
query: input text
k: Number of Documents to return. Defaults to 4.
**kwargs: kwargs to be passed to similarity search. Should include:
score_threshold: Optional, a floating point value between 0 to 1 to
filter the resulting set of retrieved docs
Returns:
List of Tuples of (doc, similarity_score)
"""
docs_and_similarities = self._similarity_search_with_relevance_scores(
query, k=k, **kwargs
) | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/base.html |
if any(
similarity < 0.0 or similarity > 1.0
for _, similarity in docs_and_similarities
):
warnings.warn(
"Relevance scores must be between"
f" 0 and 1, got {docs_and_similarities}"
)
score_threshold = kwargs.get("score_threshold")
if score_threshold is not None:
docs_and_similarities = [
(doc, similarity)
for doc, similarity in docs_and_similarities
if similarity >= score_threshold
]
if len(docs_and_similarities) == 0:
warnings.warn(
f"No relevant docs were retrieved using the relevance score\
threshold {score_threshold}"
)
return docs_and_similarities
def _similarity_search_with_relevance_scores(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs and relevance scores, normalized on a scale from 0 to 1.
0 is dissimilar, 1 is most similar.
"""
raise NotImplementedError
[docs] async def asimilarity_search_with_relevance_scores(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query."""
# This is a temporary workaround to make the similarity search
# asynchronous. The proper solution is to make the similarity search
# asynchronous in the vector store implementations.
func = partial(self.similarity_search_with_relevance_scores, query, k, **kwargs) | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/base.html |
return await asyncio.get_event_loop().run_in_executor(None, func)
[docs] async def asimilarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query."""
# This is a temporary workaround to make the similarity search
# asynchronous. The proper solution is to make the similarity search
# asynchronous in the vector store implementations.
func = partial(self.similarity_search, query, k, **kwargs)
return await asyncio.get_event_loop().run_in_executor(None, func)
[docs] def similarity_search_by_vector(
self, embedding: List[float], k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query vector.
"""
raise NotImplementedError
[docs] async def asimilarity_search_by_vector(
self, embedding: List[float], k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to embedding vector."""
# This is a temporary workaround to make the similarity search
# asynchronous. The proper solution is to make the similarity search
# asynchronous in the vector store implementations.
func = partial(self.similarity_search_by_vector, embedding, k, **kwargs)
return await asyncio.get_event_loop().run_in_executor(None, func)
[docs] def max_marginal_relevance_search(
self,
query: str,
k: int = 4, | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/base.html |
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
raise NotImplementedError
[docs] async def amax_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance."""
# This is a temporary workaround to make the similarity search
# asynchronous. The proper solution is to make the similarity search
# asynchronous in the vector store implementations.
func = partial(
self.max_marginal_relevance_search, query, k, fetch_k, lambda_mult, **kwargs
)
return await asyncio.get_event_loop().run_in_executor(None, func)
[docs] def max_marginal_relevance_search_by_vector(
self, | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/base.html |
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
raise NotImplementedError
[docs] async def amax_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance."""
raise NotImplementedError
[docs] @classmethod
def from_documents(
cls: Type[VST],
documents: List[Document],
embedding: Embeddings,
**kwargs: Any,
) -> VST:
"""Return VectorStore initialized from documents and embeddings."""
texts = [d.page_content for d in documents] | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/base.html |
metadatas = [d.metadata for d in documents]
return cls.from_texts(texts, embedding, metadatas=metadatas, **kwargs)
[docs] @classmethod
async def afrom_documents(
cls: Type[VST],
documents: List[Document],
embedding: Embeddings,
**kwargs: Any,
) -> VST:
"""Return VectorStore initialized from documents and embeddings."""
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
return await cls.afrom_texts(texts, embedding, metadatas=metadatas, **kwargs)
[docs] @classmethod
@abstractmethod
def from_texts(
cls: Type[VST],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> VST:
"""Return VectorStore initialized from texts and embeddings."""
[docs] @classmethod
async def afrom_texts(
cls: Type[VST],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> VST:
"""Return VectorStore initialized from texts and embeddings."""
raise NotImplementedError
[docs] def as_retriever(self, **kwargs: Any) -> VectorStoreRetriever:
return VectorStoreRetriever(vectorstore=self, **kwargs)
class VectorStoreRetriever(BaseRetriever, BaseModel):
vectorstore: VectorStore
search_type: str = "similarity" | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/base.html |
search_kwargs: dict = Field(default_factory=dict)
allowed_search_types: ClassVar[Collection[str]] = (
"similarity",
"similarity_score_threshold",
"mmr",
)
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
@root_validator()
def validate_search_type(cls, values: Dict) -> Dict:
"""Validate search type."""
search_type = values["search_type"]
if search_type not in cls.allowed_search_types:
raise ValueError(
f"search_type of {search_type} not allowed. Valid values are: "
f"{cls.allowed_search_types}"
)
if search_type == "similarity_score_threshold":
score_threshold = values["search_kwargs"].get("score_threshold")
if (score_threshold is None) or (not isinstance(score_threshold, float)):
raise ValueError(
"`score_threshold` is not specified with a float value(0~1) "
"in `search_kwargs`."
)
return values
def get_relevant_documents(self, query: str) -> List[Document]:
if self.search_type == "similarity":
docs = self.vectorstore.similarity_search(query, **self.search_kwargs)
elif self.search_type == "similarity_score_threshold":
docs_and_similarities = (
self.vectorstore.similarity_search_with_relevance_scores(
query, **self.search_kwargs
)
)
docs = [doc for doc, _ in docs_and_similarities]
elif self.search_type == "mmr":
docs = self.vectorstore.max_marginal_relevance_search( | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/base.html |
query, **self.search_kwargs
)
else:
raise ValueError(f"search_type of {self.search_type} not allowed.")
return docs
async def aget_relevant_documents(self, query: str) -> List[Document]:
if self.search_type == "similarity":
docs = await self.vectorstore.asimilarity_search(
query, **self.search_kwargs
)
elif self.search_type == "similarity_score_threshold":
docs_and_similarities = (
await self.vectorstore.asimilarity_search_with_relevance_scores(
query, **self.search_kwargs
)
)
docs = [doc for doc, _ in docs_and_similarities]
elif self.search_type == "mmr":
docs = await self.vectorstore.amax_marginal_relevance_search(
query, **self.search_kwargs
)
else:
raise ValueError(f"search_type of {self.search_type} not allowed.")
return docs
def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]:
"""Add documents to vectorstore."""
return self.vectorstore.add_documents(documents, **kwargs)
async def aadd_documents(
self, documents: List[Document], **kwargs: Any
) -> List[str]:
"""Add documents to vectorstore."""
return await self.vectorstore.aadd_documents(documents, **kwargs)
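A hedged sketch of driving the retriever interface above from a concrete store (FAISS is only an example backend; any VectorStore works, provided it implements relevance scores when the score-threshold search type is used):
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

vectorstore = FAISS.from_texts(["foo", "bar"], OpenAIEmbeddings())

# "similarity_score_threshold" requires a float score_threshold in
# search_kwargs, as enforced by validate_search_type above.
retriever = vectorstore.as_retriever(
    search_type="similarity_score_threshold",
    search_kwargs={"score_threshold": 0.5, "k": 4},
)
docs = retriever.get_relevant_documents("foo")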
Source code for langchain.vectorstores.atlas
"""Wrapper around Atlas by Nomic."""
from __future__ import annotations
import logging
import uuid
from typing import Any, Iterable, List, Optional, Type
import numpy as np
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
logger = logging.getLogger(__name__)
[docs]class AtlasDB(VectorStore):
"""Wrapper around Atlas: Nomic's neural database and rhizomatic instrument.
To use, you should have the ``nomic`` python package installed.
Example:
.. code-block:: python
from langchain.vectorstores import AtlasDB
from langchain.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vectorstore = AtlasDB("my_project", embeddings)
"""
_ATLAS_DEFAULT_ID_FIELD = "atlas_id"
def __init__(
self,
name: str,
embedding_function: Optional[Embeddings] = None,
api_key: Optional[str] = None,
description: str = "A description for your project",
is_public: bool = True,
reset_project_if_exists: bool = False,
) -> None:
"""
Initialize the Atlas Client
Args:
name (str): The name of your project. If the project already exists,
it will be loaded.
embedding_function (Optional[Callable]): An optional function used for
embedding your data. If None, data will be embedded with
Nomic's embed model.
api_key (str): Your nomic API key
description (str): A description for your project.
is_public (bool): Whether your project is publicly accessible. | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/atlas.html |
True by default.
reset_project_if_exists (bool): Whether to reset this project if it
already exists. Default False.
Generally useful during development and testing.
"""
try:
import nomic
from nomic import AtlasProject
except ImportError:
raise ValueError(
"Could not import nomic python package. "
"Please install it with `pip install nomic`."
)
if api_key is None:
raise ValueError("No API key provided. Sign up at atlas.nomic.ai!")
nomic.login(api_key)
self._embedding_function = embedding_function
modality = "text"
if self._embedding_function is not None:
modality = "embedding"
# Check if the project exists, create it if not
self.project = AtlasProject(
name=name,
description=description,
modality=modality,
is_public=is_public,
reset_project_if_exists=reset_project_if_exists,
unique_id_field=AtlasDB._ATLAS_DEFAULT_ID_FIELD,
)
self.project._latest_project_state()
[docs] def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
refresh: bool = True,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts (Iterable[str]): Texts to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas. | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/atlas.html |
ids (Optional[List[str]]): An optional list of ids.
refresh (bool): Whether or not to refresh indices with the updated data.
Default True.
Returns:
List[str]: List of IDs of the added texts.
"""
if (
metadatas is not None
and len(metadatas) > 0
and "text" in metadatas[0].keys()
):
raise ValueError("Cannot accept key text in metadata!")
texts = list(texts)
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
# Embedding upload case
if self._embedding_function is not None:
_embeddings = self._embedding_function.embed_documents(texts)
embeddings = np.stack(_embeddings)
if metadatas is None:
data = [
{AtlasDB._ATLAS_DEFAULT_ID_FIELD: ids[i], "text": texts[i]}
for i, _ in enumerate(texts)
]
else:
for i in range(len(metadatas)):
metadatas[i][AtlasDB._ATLAS_DEFAULT_ID_FIELD] = ids[i]
metadatas[i]["text"] = texts[i]
data = metadatas
self.project._validate_map_data_inputs(
[], id_field=AtlasDB._ATLAS_DEFAULT_ID_FIELD, data=data
)
with self.project.wait_for_project_lock():
self.project.add_embeddings(embeddings=embeddings, data=data)
# Text upload case
else:
if metadatas is None:
data = [
{"text": text, AtlasDB._ATLAS_DEFAULT_ID_FIELD: ids[i]}
for i, text in enumerate(texts)
]
else:
for i, text in enumerate(texts):
metadatas[i]["text"] = text
metadatas[i][AtlasDB._ATLAS_DEFAULT_ID_FIELD] = ids[i]
data = metadatas
self.project._validate_map_data_inputs(
[], id_field=AtlasDB._ATLAS_DEFAULT_ID_FIELD, data=data
)
with self.project.wait_for_project_lock():
self.project.add_text(data)
if refresh:
if len(self.project.indices) > 0:
with self.project.wait_for_project_lock():
self.project.rebuild_maps()
return ids
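A minimal usage sketch for ``add_texts``; the ``db`` instance is an assumed, already-constructed AtlasDB, and note that metadata dicts must not contain a ``text`` key.

.. code-block:: python

    texts = ["first note", "second note"]
    metadatas = [{"topic": "a"}, {"topic": "b"}]  # no "text" key allowed
    ids = db.add_texts(texts, metadatas=metadatas, refresh=False)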
[docs] def create_index(self, **kwargs: Any) -> Any:
"""Creates an index in your project.
See
https://docs.nomic.ai/atlas_api.html#nomic.project.AtlasProject.create_index
for full detail.
"""
with self.project.wait_for_project_lock():
return self.project.create_index(**kwargs)
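As ``from_texts`` below does internally, a typical call names the index and the field to index; any other keyword arguments are forwarded to Nomic's ``AtlasProject.create_index``.

.. code-block:: python

    db.create_index(name="my_project_index", indexed_field="text")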
[docs] def similarity_search(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Document]:
"""Run similarity search with AtlasDB
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
Returns:
List[Document]: List of documents most similar to the query text.
"""
if self._embedding_function is None:
raise NotImplementedError(
"AtlasDB requires an embedding_function for text similarity search!"
)
_embedding = self._embedding_function.embed_documents([query])[0]
embedding = np.array(_embedding).reshape(1, -1)
with self.project.wait_for_project_lock():
neighbors, _ = self.project.projections[0].vector_search(
queries=embedding, k=k
)
datas = self.project.get_data(ids=neighbors[0])
docs = [
Document(page_content=datas[i]["text"], metadata=datas[i])
for i in range(len(datas))
]
return docs
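A search sketch, assuming the store was constructed with an embedding function (required here) and already holds indexed data:

.. code-block:: python

    results = db.similarity_search("What does Atlas store?", k=2)
    for doc in results:
        print(doc.page_content)  # metadata carries the full Atlas record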
[docs] @classmethod
def from_texts(
cls: Type[AtlasDB],
texts: List[str],
embedding: Optional[Embeddings] = None,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
name: Optional[str] = None,
api_key: Optional[str] = None,
description: str = "A description for your project",
is_public: bool = True,
reset_project_if_exists: bool = False,
index_kwargs: Optional[dict] = None,
**kwargs: Any,
) -> AtlasDB:
"""Create an AtlasDB vectorstore from raw texts.
Args:
texts (List[str]): The list of texts to ingest.
name (str): Name of the project to create.
api_key (str): Your nomic API key,
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
ids (Optional[List[str]]): Optional list of document IDs. If None,
ids will be auto created
description (str): A description for your project.
is_public (bool): Whether your project is publicly accessible.
True by default.
reset_project_if_exists (bool): Whether to reset this project if it
already exists. Default False.
Generally useful during development and testing.
index_kwargs (Optional[dict]): Dict of kwargs for index creation.
See https://docs.nomic.ai/atlas_api.html
Returns:
AtlasDB: Nomic's neural database and finest rhizomatic instrument
"""
if name is None or api_key is None:
raise ValueError("`name` and `api_key` cannot be None.")
# Inject relevant kwargs
all_index_kwargs = {"name": name + "_index", "indexed_field": "text"}
if index_kwargs is not None:
for k, v in index_kwargs.items():
all_index_kwargs[k] = v
# Build project
atlasDB = cls(
name,
embedding_function=embedding,
api_key=api_key,
description=description,
is_public=is_public,
reset_project_if_exists=reset_project_if_exists,
)
with atlasDB.project.wait_for_project_lock():
atlasDB.add_texts(texts=texts, metadatas=metadatas, ids=ids)
atlasDB.create_index(**all_index_kwargs)
return atlasDB
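An end-to-end construction sketch; the project name and API key are placeholders:

.. code-block:: python

    from langchain.embeddings.openai import OpenAIEmbeddings
    from langchain.vectorstores import AtlasDB

    db = AtlasDB.from_texts(
        texts=["alpha", "beta"],
        embedding=OpenAIEmbeddings(),
        name="my_project",        # placeholder project name
        api_key="NOMIC_API_KEY",  # placeholder key
    )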
[docs] @classmethod
def from_documents(
cls: Type[AtlasDB],
documents: List[Document],
embedding: Optional[Embeddings] = None,
ids: Optional[List[str]] = None,
name: Optional[str] = None,
api_key: Optional[str] = None,
persist_directory: Optional[str] = None,
description: str = "A description for your project",
is_public: bool = True,
reset_project_if_exists: bool = False,
index_kwargs: Optional[dict] = None,
**kwargs: Any,
) -> AtlasDB:
"""Create an AtlasDB vectorstore from a list of documents.
Args:
name (str): Name of the project to create.
api_key (str): Your nomic API key,
documents (List[Document]): List of documents to add to the vectorstore.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
ids (Optional[List[str]]): Optional list of document IDs. If None,
ids will be auto created
description (str): A description for your project.
is_public (bool): Whether your project is publicly accessible.
True by default.
reset_project_if_exists (bool): Whether to reset this project if
it already exists. Default False.
Generally useful during development and testing.
index_kwargs (Optional[dict]): Dict of kwargs for index creation.
See https://docs.nomic.ai/atlas_api.html
Returns:
AtlasDB: Nomic's neural database and finest rhizomatic instrument
"""
if name is None or api_key is None:
raise ValueError("`name` and `api_key` cannot be None.")
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return cls.from_texts(
name=name,
api_key=api_key,
texts=texts,
embedding=embedding,
metadatas=metadatas,
ids=ids,
description=description,
is_public=is_public,
reset_project_if_exists=reset_project_if_exists,
index_kwargs=index_kwargs,
)
Source code for langchain.vectorstores.myscale
"""Wrapper around MyScale vector database."""
from __future__ import annotations
import json
import logging
from hashlib import sha1
from threading import Thread
from typing import Any, Dict, Iterable, List, Optional, Tuple
from pydantic import BaseSettings
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
logger = logging.getLogger()
def has_mul_sub_str(s: str, *args: Any) -> bool:
"""Return True only if every substring in *args occurs in s."""
for a in args:
if a not in s:
return False
return True
[docs]class MyScaleSettings(BaseSettings):
"""MyScale Client Configuration
Attribute:
myscale_host (str) : A hostname or URL for the MyScale backend.
Defaults to 'localhost'.
myscale_port (int) : URL port to connect with HTTP. Defaults to 8443.
username (str) : Username to login. Defaults to None.
password (str) : Password to login. Defaults to None.
index_type (str): index type string.
index_param (dict): index build parameter.
database (str) : Database name to find the table. Defaults to 'default'.
table (str) : Table name to operate on.
Defaults to 'vector_table'.
metric (str) : Metric to compute distance,
supported are ('l2', 'cosine', 'ip'). Defaults to 'cosine'.
column_map (Dict) : Column type map to project column name onto langchain
semantics. Must have keys: `text`, `id`, `vector`,
and must match the number of columns. For example:
.. code-block:: python
{
'id': 'text_id',
'vector': 'text_embedding',
'text': 'text_plain',
'metadata': 'metadata_dictionary_in_json',
}
Defaults to identity map.
"""
host: str = "localhost"
port: int = 8443
username: Optional[str] = None
password: Optional[str] = None
index_type: str = "IVFFLAT"
index_param: Optional[Dict[str, str]] = None
column_map: Dict[str, str] = {
"id": "id",
"text": "text",
"vector": "vector",
"metadata": "metadata",
}
database: str = "default"
table: str = "langchain"
metric: str = "cosine"
def __getitem__(self, item: str) -> Any:
return getattr(self, item)
class Config:
env_file = ".env"
env_prefix = "myscale_"
env_file_encoding = "utf-8"
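Since ``MyScaleSettings`` is a pydantic ``BaseSettings``, values may come from keyword arguments, a ``.env`` file, or ``MYSCALE_``-prefixed environment variables; a sketch with placeholder values:

.. code-block:: python

    import os

    # explicit keyword arguments
    config = MyScaleSettings(host="127.0.0.1", port=8443, metric="cosine")

    # or via environment variables, matched through env_prefix="myscale_"
    os.environ["MYSCALE_HOST"] = "127.0.0.1"
    config = MyScaleSettings()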
[docs]class MyScale(VectorStore):
"""Wrapper around MyScale vector database
You need a `clickhouse-connect` python package, and a valid account
to connect to MyScale.
MyScale not only searches with simple vector indexes;
it also supports complex queries with multiple conditions,
constraints, and even sub-queries.
For more information, please visit
[myscale official site](https://docs.myscale.com/en/overview/)
"""
def __init__(
self,
embedding: Embeddings,
config: Optional[MyScaleSettings] = None,
**kwargs: Any,
) -> None:
"""MyScale wrapper for LangChain
Args:
embedding (Embeddings): Embedding function used for queries.
config (MyScaleSettings): Configuration for the MyScale client.
Other keyword arguments are passed to
[clickhouse-connect](https://docs.myscale.com/)
"""
try:
from clickhouse_connect import get_client
except ImportError:
raise ValueError(
"Could not import clickhouse connect python package. "
"Please install it with `pip install clickhouse-connect`."
)
try:
from tqdm import tqdm
self.pgbar = tqdm
except ImportError:
# Fall back to a pass-through when tqdm is not installed; accept the
# same keyword arguments (desc, total) that tqdm would receive
self.pgbar = lambda x, **kwargs: x
super().__init__()
if config is not None:
self.config = config
else:
self.config = MyScaleSettings()
assert self.config
assert self.config.host and self.config.port
assert (
self.config.column_map
and self.config.database
and self.config.table
and self.config.metric
)
for k in ["id", "vector", "text", "metadata"]:
assert k in self.config.column_map
assert self.config.metric in ["ip", "cosine", "l2"]
# initialize the schema
dim = len(embedding.embed_query("try this out"))
index_params = (
", " + ",".join([f"'{k}={v}'" for k, v in self.config.index_param.items()])
if self.config.index_param
else ""
)
schema_ = f"""
CREATE TABLE IF NOT EXISTS {self.config.database}.{self.config.table}(
{self.config.column_map['id']} String,
{self.config.column_map['text']} String,
{self.config.column_map['vector']} Array(Float32),
{self.config.column_map['metadata']} JSON,
CONSTRAINT cons_vec_len CHECK length(\
{self.config.column_map['vector']}) = {dim},
VECTOR INDEX vidx {self.config.column_map['vector']} \
TYPE {self.config.index_type}(\
'metric_type={self.config.metric}'{index_params})
) ENGINE = MergeTree ORDER BY {self.config.column_map['id']}
"""
self.dim = dim
self.BS = "\\"
self.must_escape = ("\\", "'")
self.embedding_function = embedding.embed_query
self.dist_order = "ASC" if self.config.metric in ["cosine", "l2"] else "DESC"
# Create a connection to myscale
self.client = get_client(
host=self.config.host,
port=self.config.port,
username=self.config.username,
password=self.config.password,
**kwargs,
)
self.client.command("SET allow_experimental_object_type=1")
self.client.command(schema_)
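A construction sketch; it assumes a reachable MyScale backend, since ``__init__`` immediately connects and issues the CREATE TABLE statement (host and credentials are placeholders):

.. code-block:: python

    from langchain.embeddings.openai import OpenAIEmbeddings

    config = MyScaleSettings(host="your-host", port=8443,
                             username="user", password="pass")
    store = MyScale(embedding=OpenAIEmbeddings(), config=config)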
[docs] def escape_str(self, value: str) -> str:
return "".join(f"{self.BS}{c}" if c in self.must_escape else c for c in value)
def _build_istr(self, transac: Iterable, column_names: Iterable[str]) -> str:
ks = ",".join(column_names)
_data = []
for n in transac:
n = ",".join([f"'{self.escape_str(str(_n))}'" for _n in n])
_data.append(f"({n})")
i_str = f"""
INSERT INTO TABLE
{self.config.database}.{self.config.table}({ks})
VALUES
{','.join(_data)}
"""
return i_str
def _insert(self, transac: Iterable, column_names: Iterable[str]) -> None:
_i_str = self._build_istr(transac, column_names)
self.client.command(_i_str)
[docs] def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
batch_size: int = 32,
ids: Optional[Iterable[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
ids: Optional list of ids to associate with the texts.
batch_size: Batch size of insertion
metadatas: Optional list of metadata dicts for the texts
Returns:
List of ids from adding the texts into the vectorstore.
"""
# Embed and create the documents
texts = list(texts)  # materialize so the iterable can be consumed more than once
ids = ids or [sha1(t.encode("utf-8")).hexdigest() for t in texts]
colmap_ = self.config.column_map
transac = []
column_names = {
colmap_["id"]: ids,
colmap_["text"]: texts,
colmap_["vector"]: map(self.embedding_function, texts),
}
metadatas = metadatas or [{} for _ in texts]
column_names[colmap_["metadata"]] = map(json.dumps, metadatas)
assert len(set(colmap_) - set(column_names)) >= 0
keys, values = zip(*column_names.items())
try:
t = None
for v in self.pgbar(
zip(*values), desc="Inserting data...", total=len(metadatas)
):
assert len(v[keys.index(self.config.column_map["vector"])]) == self.dim
transac.append(v)
if len(transac) == batch_size:
if t:
t.join()
t = Thread(target=self._insert, args=[transac, keys])
t.start()
transac = []
if len(transac) > 0:
if t:
t.join()
self._insert(transac, keys)
return [i for i in ids]
except Exception as e:
logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
return []
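Batches are handed off to background threads as they fill; a usage sketch with illustrative data:

.. code-block:: python

    ids = store.add_texts(
        ["doc one", "doc two"],
        metadatas=[{"source": "a"}, {"source": "b"}],
        batch_size=32,
    )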
[docs] @classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[Dict[Any, Any]]] = None,
config: Optional[MyScaleSettings] = None,
text_ids: Optional[Iterable[str]] = None,
batch_size: int = 32,
**kwargs: Any,
) -> MyScale:
"""Create Myscale wrapper with existing texts
Args:
embedding (Embeddings): Function to extract text embedding
texts (Iterable[str]): List or tuple of strings to be added
config (MyScaleSettings, Optional): Myscale configuration
text_ids (Optional[Iterable], optional): IDs for the texts.
Defaults to None.
batch_size (int, optional): Batch size when transmitting data to MyScale.
Defaults to 32.
metadatas (List[dict], optional): Metadata dicts for the texts. Defaults to None.
Other keyword arguments will pass into
[clickhouse-connect](https://clickhouse.com/docs/en/integrations/python#clickhouse-connect-driver-api)
Returns:
MyScale Index
"""
ctx = cls(embedding, config, **kwargs)
ctx.add_texts(texts, ids=text_ids, batch_size=batch_size, metadatas=metadatas)
return ctx
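The equivalent one-shot construction; passing ``config=None`` falls back to defaults and environment variables:

.. code-block:: python

    from langchain.embeddings.openai import OpenAIEmbeddings

    store = MyScale.from_texts(
        texts=["doc one", "doc two"],
        embedding=OpenAIEmbeddings(),
        config=MyScaleSettings(),  # or None
    )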
def __repr__(self) -> str:
"""Text representation for MyScale: prints backend, username, and schema.
Easy to use with `str(MyScale())`
Returns:
repr: string to show connection info and data schema
"""
_repr = f"\033[92m\033[1m{self.config.database}.{self.config.table} @ "
_repr += f"{self.config.host}:{self.config.port}\033[0m\n\n"
_repr += f"\033[1musername: {self.config.username}\033[0m\n\nTable Schema:\n"
_repr += "-" * 51 + "\n"
for r in self.client.query(
f"DESC {self.config.database}.{self.config.table}"
).named_results():
_repr += (
f"|\033[94m{r['name']:24s}\033[0m|\033[96m{r['type']:24s}\033[0m|\n"
)
_repr += "-" * 51 + "\n"
return _repr
def _build_qstr(
self, q_emb: List[float], topk: int, where_str: Optional[str] = None
) -> str:
q_emb_str = ",".join(map(str, q_emb))
if where_str:
where_str = f"PREWHERE {where_str}"
else:
where_str = ""
q_str = f"""
SELECT {self.config.column_map['text']},
{self.config.column_map['metadata']}, dist
FROM {self.config.database}.{self.config.table}
{where_str}
ORDER BY distance({self.config.column_map['vector']}, [{q_emb_str}])
AS dist {self.dist_order}
LIMIT {topk}
"""
return q_str
[docs] def similarity_search(
self, query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any
) -> List[Document]:
"""Perform a similarity search with MyScale
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
NOTE: Never let end-users fill this in directly; always be aware
of SQL injection. When dealing with metadata, remember to
use `{self.metadata_column}.attribute` instead of `attribute`
alone. The default name for it is `metadata`.
Returns:
List[Document]: List of Documents
"""
return self.similarity_search_by_vector(
self.embedding_function(query), k, where_str, **kwargs
)
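Because ``where_str`` is spliced directly into the generated SQL, build it only from trusted values; a filtered-search sketch using the metadata column (assumes the default ``column_map`` and a ``source`` attribute stored in metadata):

.. code-block:: python

    docs = store.similarity_search(
        "neural databases",
        k=4,
        where_str=f"{store.metadata_column}.source = 'a'",
    )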
[docs] def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
where_str: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a similarity search with MyScale by vectors
Args:
embedding (List[float]): query vector to search for
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
NOTE: Never let end-users fill this in directly; always be aware
of SQL injection. When dealing with metadata, remember to
use `{self.metadata_column}.attribute` instead of `attribute`
alone. The default name for it is `metadata`.
Returns:
List[Document]: List of documents most similar to the query vector
"""
q_str = self._build_qstr(embedding, k, where_str)
try:
return [
Document(
page_content=r[self.config.column_map["text"]],
metadata=r[self.config.column_map["metadata"]],
)
for r in self.client.query(q_str).named_results()
]
except Exception as e:
logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
return []
[docs] def similarity_search_with_relevance_scores(
self, query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""Perform a similarity search with MyScale
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
NOTE: Never let end-users fill this in directly; always be aware
of SQL injection. When dealing with metadata, remember to
use `{self.metadata_column}.attribute` instead of `attribute`
alone. The default name for it is `metadata`.
Returns:
List[Tuple[Document, float]]: List of documents most similar to the query text
and cosine distance in float for each.
Lower score represents more similarity.
"""
q_str = self._build_qstr(self.embedding_function(query), k, where_str)
try:
return [
(
Document(
page_content=r[self.config.column_map["text"]],
metadata=r[self.config.column_map["metadata"]],
),
r["dist"],
)
for r in self.client.query(q_str).named_results()
]
except Exception as e:
logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
return []
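A sketch that surfaces the raw distance alongside each document; with the 'cosine' metric, lower means more similar:

.. code-block:: python

    hits = store.similarity_search_with_relevance_scores("neural databases", k=4)
    for doc, dist in hits:
        print(round(dist, 4), doc.page_content[:60])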
[docs] def drop(self) -> None:
"""
Helper function: Drop data
"""
self.client.command(
f"DROP TABLE IF EXISTS {self.config.database}.{self.config.table}"
)
@property
def metadata_column(self) -> str:
return self.config.column_map["metadata"]