date_collected | repo_name | file_name | file_contents | prompts
(stringclasses, 1 value) | (stringlengths, 6-116) | (stringlengths, 2-220) | (stringlengths, 13-357k) | (sequence)
---|---|---|---|---|
2024-01-10 | mth93/langchain | libs~community~langchain_community~embeddings~dashscope.py | from __future__ import annotations
import logging
from typing import (
Any,
Callable,
Dict,
List,
Optional,
)
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra, root_validator
from libs.core.langchain_core.utils import get_from_dict_or_env
from requests.exceptions import HTTPError
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
logger = logging.getLogger(__name__)
def _create_retry_decorator(embeddings: DashScopeEmbeddings) -> Callable[[Any], Any]:
multiplier = 1
min_seconds = 1
max_seconds = 4
    # Wait 2^x * 1 second between retries, starting at 1 second and
    # capping at 4 seconds thereafter
return retry(
reraise=True,
stop=stop_after_attempt(embeddings.max_retries),
wait=wait_exponential(multiplier, min=min_seconds, max=max_seconds),
retry=(retry_if_exception_type(HTTPError)),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def embed_with_retry(embeddings: DashScopeEmbeddings, **kwargs: Any) -> Any:
"""Use tenacity to retry the embedding call."""
retry_decorator = _create_retry_decorator(embeddings)
@retry_decorator
def _embed_with_retry(**kwargs: Any) -> Any:
resp = embeddings.client.call(**kwargs)
if resp.status_code == 200:
return resp.output["embeddings"]
elif resp.status_code in [400, 401]:
raise ValueError(
f"status_code: {resp.status_code} \n "
f"code: {resp.code} \n message: {resp.message}"
)
else:
raise HTTPError(
f"HTTP error occurred: status_code: {resp.status_code} \n "
f"code: {resp.code} \n message: {resp.message}",
response=resp,
)
return _embed_with_retry(**kwargs)
class DashScopeEmbeddings(BaseModel, Embeddings):
"""DashScope embedding models.
To use, you should have the ``dashscope`` python package installed, and the
environment variable ``DASHSCOPE_API_KEY`` set with your API key or pass it
as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain_community.embeddings import DashScopeEmbeddings
embeddings = DashScopeEmbeddings(dashscope_api_key="my-api-key")
Example:
.. code-block:: python
import os
os.environ["DASHSCOPE_API_KEY"] = "your DashScope API KEY"
from langchain_community.embeddings.dashscope import DashScopeEmbeddings
embeddings = DashScopeEmbeddings(
model="text-embedding-v1",
)
text = "This is a test query."
query_result = embeddings.embed_query(text)
"""
client: Any #: :meta private:
"""The DashScope client."""
model: str = "text-embedding-v1"
dashscope_api_key: Optional[str] = None
max_retries: int = 5
"""Maximum number of retries to make when generating."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exist in environment."""
        values["dashscope_api_key"] = get_from_dict_or_env(
            values, "dashscope_api_key", "DASHSCOPE_API_KEY"
        )
        try:
            import dashscope
        except ImportError:
            raise ImportError(
                "Could not import dashscope python package. "
                "Please install it with `pip install dashscope`."
            )
        dashscope.api_key = values["dashscope_api_key"]
        values["client"] = dashscope.TextEmbedding
        return values
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to DashScope's embedding endpoint for embedding search docs.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = embed_with_retry(
self, input=texts, text_type="document", model=self.model
)
embedding_list = [item["embedding"] for item in embeddings]
return embedding_list
def embed_query(self, text: str) -> List[float]:
"""Call out to DashScope's embedding endpoint for embedding query text.
Args:
text: The text to embed.
Returns:
Embedding for the text.
"""
embedding = embed_with_retry(
self, input=text, text_type="query", model=self.model
)[0]["embedding"]
return embedding
| [] |
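The `_create_retry_decorator` helper above retries only on `HTTPError`, with an exponential backoff clamped to the 1-4 second range. Below is a minimal standalone sketch of that same tenacity configuration; the flaky function and attempt counter are illustrative additions, not part of the module.

import logging

from requests.exceptions import HTTPError
from tenacity import (
    before_sleep_log,
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("backoff_demo")
attempts = {"count": 0}


# Same wait policy as _create_retry_decorator: 2^x seconds, clamped to [1, 4],
# stopping after 5 attempts (the class's default max_retries).
@retry(
    reraise=True,
    stop=stop_after_attempt(5),
    wait=wait_exponential(multiplier=1, min=1, max=4),
    retry=retry_if_exception_type(HTTPError),
    before_sleep=before_sleep_log(logger, logging.WARNING),
)
def flaky_call() -> str:
    attempts["count"] += 1
    if attempts["count"] < 3:  # fail twice, then succeed
        raise HTTPError("transient error from the embeddings endpoint")
    return "ok"


print(flaky_call(), "after", attempts["count"], "attempts")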
2024-01-10 | mth93/langchain | libs~langchain~tests~integration_tests~memory~test_xata.py | """Test Xata chat memory store functionality.
Before running this test, please create a Xata database.
"""
import json
import os
from libs.core.langchain_core.messages import message_to_dict
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import XataChatMessageHistory
class TestXata:
@classmethod
def setup_class(cls) -> None:
assert os.getenv("XATA_API_KEY"), "XATA_API_KEY environment variable is not set"
assert os.getenv("XATA_DB_URL"), "XATA_DB_URL environment variable is not set"
def test_xata_chat_memory(self) -> None:
message_history = XataChatMessageHistory(
api_key=os.getenv("XATA_API_KEY", ""),
db_url=os.getenv("XATA_DB_URL", ""),
session_id="integration-test-session",
)
memory = ConversationBufferMemory(
memory_key="baz", chat_memory=message_history, return_messages=True
)
# add some messages
memory.chat_memory.add_ai_message("This is me, the AI")
memory.chat_memory.add_user_message("This is me, the human")
# get the message history from the memory store and turn it into a json
messages = memory.chat_memory.messages
messages_json = json.dumps([message_to_dict(msg) for msg in messages])
assert "This is me, the AI" in messages_json
assert "This is me, the human" in messages_json
        # remove the records from Xata, so the next test run won't pick them up
memory.chat_memory.clear()
| [] |
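The test above drives the history through `ConversationBufferMemory`. A minimal sketch of using `XataChatMessageHistory` directly, assuming the same `XATA_API_KEY`/`XATA_DB_URL` environment variables point at a provisioned Xata database (the session id is a placeholder):

import os

from langchain.memory.chat_message_histories import XataChatMessageHistory

history = XataChatMessageHistory(
    api_key=os.environ["XATA_API_KEY"],
    db_url=os.environ["XATA_DB_URL"],
    session_id="demo-session",  # placeholder session id
)
history.add_user_message("This is me, the human")
history.add_ai_message("This is me, the AI")
print([message.content for message in history.messages])
history.clear()  # remove the demo records so they don't accumulate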
2024-01-10 | mth93/langchain | libs~community~langchain_community~vectorstores~mongodb_atlas.py | from __future__ import annotations
import logging
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Generator,
Iterable,
List,
Optional,
Tuple,
TypeVar,
Union,
)
import numpy as np
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.vectorstores import VectorStore
from langchain_community.vectorstores.utils import maximal_marginal_relevance
if TYPE_CHECKING:
from pymongo.collection import Collection
MongoDBDocumentType = TypeVar("MongoDBDocumentType", bound=Dict[str, Any])
logger = logging.getLogger(__name__)
DEFAULT_INSERT_BATCH_SIZE = 100
class MongoDBAtlasVectorSearch(VectorStore):
"""`MongoDB Atlas Vector Search` vector store.
To use, you should have both:
- the ``pymongo`` python package installed
- a connection string associated with a MongoDB Atlas Cluster having deployed an
Atlas Search index
Example:
.. code-block:: python
from langchain_community.vectorstores import MongoDBAtlasVectorSearch
from langchain_community.embeddings.openai import OpenAIEmbeddings
from pymongo import MongoClient
mongo_client = MongoClient("<YOUR-CONNECTION-STRING>")
collection = mongo_client["<db_name>"]["<collection_name>"]
embeddings = OpenAIEmbeddings()
vectorstore = MongoDBAtlasVectorSearch(collection, embeddings)
"""
def __init__(
self,
collection: Collection[MongoDBDocumentType],
embedding: Embeddings,
*,
index_name: str = "default",
text_key: str = "text",
embedding_key: str = "embedding",
relevance_score_fn: str = "cosine",
):
"""
Args:
collection: MongoDB collection to add the texts to.
embedding: Text embedding model to use.
text_key: MongoDB field that will contain the text for each
document.
embedding_key: MongoDB field that will contain the embedding for
each document.
index_name: Name of the Atlas Search index.
relevance_score_fn: The similarity score used for the index.
Currently supported: Euclidean, cosine, and dot product.
"""
self._collection = collection
self._embedding = embedding
self._index_name = index_name
self._text_key = text_key
self._embedding_key = embedding_key
self._relevance_score_fn = relevance_score_fn
@property
def embeddings(self) -> Embeddings:
return self._embedding
def _select_relevance_score_fn(self) -> Callable[[float], float]:
if self._relevance_score_fn == "euclidean":
return self._euclidean_relevance_score_fn
elif self._relevance_score_fn == "dotProduct":
return self._max_inner_product_relevance_score_fn
elif self._relevance_score_fn == "cosine":
return self._cosine_relevance_score_fn
else:
raise NotImplementedError(
f"No relevance score function for ${self._relevance_score_fn}"
)
@classmethod
def from_connection_string(
cls,
connection_string: str,
namespace: str,
embedding: Embeddings,
**kwargs: Any,
) -> MongoDBAtlasVectorSearch:
"""Construct a `MongoDB Atlas Vector Search` vector store
from a MongoDB connection URI.
Args:
connection_string: A valid MongoDB connection URI.
namespace: A valid MongoDB namespace (database and collection).
embedding: The text embedding model to use for the vector store.
Returns:
A new MongoDBAtlasVectorSearch instance.
"""
try:
from importlib.metadata import version
from pymongo import MongoClient
from pymongo.driver_info import DriverInfo
except ImportError:
raise ImportError(
"Could not import pymongo, please install it with "
"`pip install pymongo`."
)
client: MongoClient = MongoClient(
connection_string,
driver=DriverInfo(name="Langchain", version=version("langchain")),
)
db_name, collection_name = namespace.split(".")
collection = client[db_name][collection_name]
return cls(collection, embedding, **kwargs)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[Dict[str, Any]]] = None,
**kwargs: Any,
) -> List:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
Returns:
List of ids from adding the texts into the vectorstore.
"""
batch_size = kwargs.get("batch_size", DEFAULT_INSERT_BATCH_SIZE)
_metadatas: Union[List, Generator] = metadatas or ({} for _ in texts)
texts_batch = []
metadatas_batch = []
result_ids = []
for i, (text, metadata) in enumerate(zip(texts, _metadatas)):
texts_batch.append(text)
metadatas_batch.append(metadata)
if (i + 1) % batch_size == 0:
result_ids.extend(self._insert_texts(texts_batch, metadatas_batch))
texts_batch = []
metadatas_batch = []
if texts_batch:
result_ids.extend(self._insert_texts(texts_batch, metadatas_batch))
return result_ids
def _insert_texts(self, texts: List[str], metadatas: List[Dict[str, Any]]) -> List:
if not texts:
return []
# Embed and create the documents
embeddings = self._embedding.embed_documents(texts)
to_insert = [
{self._text_key: t, self._embedding_key: embedding, **m}
for t, m, embedding in zip(texts, metadatas, embeddings)
]
# insert the documents in MongoDB Atlas
insert_result = self._collection.insert_many(to_insert) # type: ignore
return insert_result.inserted_ids
def _similarity_search_with_score(
self,
embedding: List[float],
k: int = 4,
pre_filter: Optional[Dict] = None,
post_filter_pipeline: Optional[List[Dict]] = None,
) -> List[Tuple[Document, float]]:
params = {
"queryVector": embedding,
"path": self._embedding_key,
"numCandidates": k * 10,
"limit": k,
"index": self._index_name,
}
if pre_filter:
params["filter"] = pre_filter
query = {"$vectorSearch": params}
pipeline = [
query,
{"$set": {"score": {"$meta": "vectorSearchScore"}}},
]
if post_filter_pipeline is not None:
pipeline.extend(post_filter_pipeline)
cursor = self._collection.aggregate(pipeline) # type: ignore[arg-type]
docs = []
for res in cursor:
text = res.pop(self._text_key)
score = res.pop("score")
docs.append((Document(page_content=text, metadata=res), score))
return docs
def similarity_search_with_score(
self,
query: str,
k: int = 4,
pre_filter: Optional[Dict] = None,
post_filter_pipeline: Optional[List[Dict]] = None,
) -> List[Tuple[Document, float]]:
"""Return MongoDB documents most similar to the given query and their scores.
        Uses the $vectorSearch aggregation stage available in MongoDB Atlas
        Vector Search.
        For more: https://www.mongodb.com/docs/atlas/atlas-vector-search/
Args:
query: Text to look up documents similar to.
k: (Optional) number of documents to return. Defaults to 4.
pre_filter: (Optional) dictionary of argument(s) to prefilter document
fields on.
            post_filter_pipeline: (Optional) Pipeline of MongoDB aggregation stages
                following the $vectorSearch stage.
Returns:
List of documents most similar to the query and their scores.
"""
embedding = self._embedding.embed_query(query)
docs = self._similarity_search_with_score(
embedding,
k=k,
pre_filter=pre_filter,
post_filter_pipeline=post_filter_pipeline,
)
return docs
def similarity_search(
self,
query: str,
k: int = 4,
pre_filter: Optional[Dict] = None,
post_filter_pipeline: Optional[List[Dict]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return MongoDB documents most similar to the given query.
        Uses the $vectorSearch aggregation stage available in MongoDB Atlas
        Vector Search.
        For more: https://www.mongodb.com/docs/atlas/atlas-vector-search/
Args:
query: Text to look up documents similar to.
k: (Optional) number of documents to return. Defaults to 4.
pre_filter: (Optional) dictionary of argument(s) to prefilter document
fields on.
            post_filter_pipeline: (Optional) Pipeline of MongoDB aggregation stages
                following the $vectorSearch stage.
Returns:
            List of documents most similar to the query.
"""
docs_and_scores = self.similarity_search_with_score(
query,
k=k,
pre_filter=pre_filter,
post_filter_pipeline=post_filter_pipeline,
)
return [doc for doc, _ in docs_and_scores]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
pre_filter: Optional[Dict] = None,
post_filter_pipeline: Optional[List[Dict]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return documents selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: (Optional) number of documents to return. Defaults to 4.
fetch_k: (Optional) number of documents to fetch before passing to MMR
algorithm. Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
pre_filter: (Optional) dictionary of argument(s) to prefilter on document
fields.
            post_filter_pipeline: (Optional) pipeline of MongoDB aggregation stages
                following the $vectorSearch stage.
Returns:
List of documents selected by maximal marginal relevance.
"""
query_embedding = self._embedding.embed_query(query)
docs = self._similarity_search_with_score(
query_embedding,
k=fetch_k,
pre_filter=pre_filter,
post_filter_pipeline=post_filter_pipeline,
)
mmr_doc_indexes = maximal_marginal_relevance(
np.array(query_embedding),
[doc.metadata[self._embedding_key] for doc, _ in docs],
k=k,
lambda_mult=lambda_mult,
)
mmr_docs = [docs[i][0] for i in mmr_doc_indexes]
return mmr_docs
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[Dict]] = None,
collection: Optional[Collection[MongoDBDocumentType]] = None,
**kwargs: Any,
) -> MongoDBAtlasVectorSearch:
"""Construct a `MongoDB Atlas Vector Search` vector store from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Adds the documents to a provided MongoDB Atlas Vector Search index
(Lucene)
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from pymongo import MongoClient
from langchain_community.vectorstores import MongoDBAtlasVectorSearch
from langchain_community.embeddings import OpenAIEmbeddings
mongo_client = MongoClient("<YOUR-CONNECTION-STRING>")
collection = mongo_client["<db_name>"]["<collection_name>"]
embeddings = OpenAIEmbeddings()
vectorstore = MongoDBAtlasVectorSearch.from_texts(
texts,
embeddings,
metadatas=metadatas,
collection=collection
)
"""
if collection is None:
raise ValueError("Must provide 'collection' named parameter.")
vectorstore = cls(collection, embedding, **kwargs)
vectorstore.add_texts(texts, metadatas=metadatas)
return vectorstore
| [] |
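As a usage sketch of the filtering hooks above, the snippet below runs a scored search with a `pre_filter` (applied inside the `$vectorSearch` stage, so the field must be declared as a filter field in the Atlas index definition) and a `post_filter_pipeline` stage. The connection string, namespace, and field names are placeholders.

from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.vectorstores import MongoDBAtlasVectorSearch

vectorstore = MongoDBAtlasVectorSearch.from_connection_string(
    "<YOUR-CONNECTION-STRING>",     # placeholder Atlas URI
    "<db_name>.<collection_name>",  # placeholder namespace
    OpenAIEmbeddings(),
    index_name="default",
)

docs_and_scores = vectorstore.similarity_search_with_score(
    "How does vector search work?",
    k=4,
    pre_filter={"source": {"$eq": "handbook"}},              # illustrative filter field
    post_filter_pipeline=[{"$project": {"embedding": 0}}],   # drop vectors from results
)
for doc, score in docs_and_scores:
    print(round(score, 3), doc.page_content[:60])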
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~baseten.py | import logging
import os
from typing import Any, Dict, List, Mapping, Optional
import requests
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.pydantic_v1 import Field
logger = logging.getLogger(__name__)
class Baseten(LLM):
"""Baseten model
This module allows using LLMs hosted on Baseten.
The LLM deployed on Baseten must have the following properties:
* Must accept input as a dictionary with the key "prompt"
* May accept other input in the dictionary passed through with kwargs
* Must return a string with the model output
To use this module, you must:
* Export your Baseten API key as the environment variable `BASETEN_API_KEY`
* Get the model ID for your model from your Baseten dashboard
* Identify the model deployment ("production" for all model library models)
These code samples use
[Mistral 7B Instruct](https://app.baseten.co/explore/mistral_7b_instruct)
from Baseten's model library.
Examples:
.. code-block:: python
from langchain_community.llms import Baseten
# Production deployment
mistral = Baseten(model="MODEL_ID", deployment="production")
mistral("What is the Mistral wind?")
.. code-block:: python
from langchain_community.llms import Baseten
# Development deployment
mistral = Baseten(model="MODEL_ID", deployment="development")
mistral("What is the Mistral wind?")
.. code-block:: python
from langchain_community.llms import Baseten
# Other published deployment
mistral = Baseten(model="MODEL_ID", deployment="DEPLOYMENT_ID")
mistral("What is the Mistral wind?")
"""
model: str
deployment: str
input: Dict[str, Any] = Field(default_factory=dict)
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of model."""
return "baseten"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
baseten_api_key = os.environ["BASETEN_API_KEY"]
model_id = self.model
if self.deployment == "production":
model_url = f"https://model-{model_id}.api.baseten.co/production/predict"
elif self.deployment == "development":
model_url = f"https://model-{model_id}.api.baseten.co/development/predict"
else: # try specific deployment ID
model_url = f"https://model-{model_id}.api.baseten.co/deployment/{self.deployment}/predict"
response = requests.post(
model_url,
headers={"Authorization": f"Api-Key {baseten_api_key}"},
json={"prompt": prompt, **kwargs},
)
return response.json()
| [] |
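Because `_call` forwards its keyword arguments straight into the request body next to "prompt", generation parameters can be passed per call. A minimal sketch, assuming the deployed model accepts the (illustrative) `max_new_tokens` and `temperature` fields and that `BASETEN_API_KEY` is set in the environment:

from langchain_community.llms import Baseten

# "MODEL_ID" is a placeholder from the Baseten dashboard.
mistral = Baseten(model="MODEL_ID", deployment="production")

completion = mistral.invoke(
    "What is the Mistral wind?",
    max_new_tokens=128,  # forwarded verbatim in the JSON body
    temperature=0.7,     # forwarded verbatim in the JSON body
)
print(completion)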
2024-01-10 | mth93/langchain | libs~community~langchain_community~vectorstores~databricks_vector_search.py | from __future__ import annotations
import json
import logging
import uuid
from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple, Type
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.vectorstores import VST, VectorStore
if TYPE_CHECKING:
from databricks.vector_search.client import VectorSearchIndex
logger = logging.getLogger(__name__)
class DatabricksVectorSearch(VectorStore):
"""`Databricks Vector Search` vector store.
To use, you should have the ``databricks-vectorsearch`` python package installed.
Example:
.. code-block:: python
from langchain_community.vectorstores import DatabricksVectorSearch
from databricks.vector_search.client import VectorSearchClient
vs_client = VectorSearchClient()
vs_index = vs_client.get_index(
endpoint_name="vs_endpoint",
index_name="ml.llm.index"
)
vectorstore = DatabricksVectorSearch(vs_index)
Args:
index: A Databricks Vector Search index object.
embedding: The embedding model.
Required for direct-access index or delta-sync index
with self-managed embeddings.
text_column: The name of the text column to use for the embeddings.
Required for direct-access index or delta-sync index
with self-managed embeddings.
Make sure the text column specified is in the index.
columns: The list of column names to get when doing the search.
Defaults to ``[primary_key, text_column]``.
Delta-sync index with Databricks-managed embeddings manages the ingestion, deletion,
and embedding for you.
    Manual ingestion/deletion of the documents/texts is not supported for delta-sync
    index.
If you want to use a delta-sync index with self-managed embeddings, you need to
provide the embedding model and text column name to use for the embeddings.
Example:
.. code-block:: python
from langchain_community.vectorstores import DatabricksVectorSearch
from databricks.vector_search.client import VectorSearchClient
from langchain_community.embeddings.openai import OpenAIEmbeddings
vs_client = VectorSearchClient()
vs_index = vs_client.get_index(
endpoint_name="vs_endpoint",
index_name="ml.llm.index"
)
vectorstore = DatabricksVectorSearch(
index=vs_index,
embedding=OpenAIEmbeddings(),
text_column="document_content"
)
If you want to manage the documents ingestion/deletion yourself, you can use a
direct-access index.
Example:
.. code-block:: python
from langchain_community.vectorstores import DatabricksVectorSearch
from databricks.vector_search.client import VectorSearchClient
from langchain_community.embeddings.openai import OpenAIEmbeddings
vs_client = VectorSearchClient()
vs_index = vs_client.get_index(
endpoint_name="vs_endpoint",
index_name="ml.llm.index"
)
vectorstore = DatabricksVectorSearch(
index=vs_index,
embedding=OpenAIEmbeddings(),
text_column="document_content"
)
vectorstore.add_texts(
texts=["text1", "text2"]
)
    For more information on Databricks Vector Search, see the `Databricks Vector Search
    documentation <https://docs.databricks.com/en/generative-ai/vector-search.html>`_.
"""
def __init__(
self,
index: VectorSearchIndex,
*,
embedding: Optional[Embeddings] = None,
text_column: Optional[str] = None,
columns: Optional[List[str]] = None,
):
try:
from databricks.vector_search.client import VectorSearchIndex
except ImportError as e:
raise ImportError(
"Could not import databricks-vectorsearch python package. "
"Please install it with `pip install databricks-vectorsearch`."
) from e
# index
self.index = index
if not isinstance(index, VectorSearchIndex):
raise TypeError("index must be of type VectorSearchIndex.")
# index_details
index_details = self.index.describe()
self.primary_key = index_details["primary_key"]
self.index_type = index_details.get("index_type")
self._delta_sync_index_spec = index_details.get("delta_sync_index_spec", dict())
self._direct_access_index_spec = index_details.get(
"direct_access_index_spec", dict()
)
# text_column
if self._is_databricks_managed_embeddings():
index_source_column = self._embedding_source_column_name()
# check if input text column matches the source column of the index
if text_column is not None and text_column != index_source_column:
raise ValueError(
f"text_column '{text_column}' does not match with the "
f"source column of the index: '{index_source_column}'."
)
self.text_column = index_source_column
else:
self._require_arg(text_column, "text_column")
self.text_column = text_column
# columns
self.columns = columns or []
# add primary key column and source column if not in columns
if self.primary_key not in self.columns:
self.columns.append(self.primary_key)
if self.text_column and self.text_column not in self.columns:
self.columns.append(self.text_column)
# Validate specified columns are in the index
if self._is_direct_access_index():
index_schema = self._index_schema()
if index_schema:
for col in self.columns:
if col not in index_schema:
raise ValueError(
f"column '{col}' is not in the index's schema."
)
# embedding model
if not self._is_databricks_managed_embeddings():
# embedding model is required for direct-access index
# or delta-sync index with self-managed embedding
self._require_arg(embedding, "embedding")
self._embedding = embedding
# validate dimension matches
index_embedding_dimension = self._embedding_vector_column_dimension()
if index_embedding_dimension is not None:
inferred_embedding_dimension = self._infer_embedding_dimension()
if inferred_embedding_dimension != index_embedding_dimension:
raise ValueError(
f"embedding model's dimension '{inferred_embedding_dimension}' "
f"does not match with the index's dimension "
f"'{index_embedding_dimension}'."
)
else:
if embedding is not None:
logger.warning(
"embedding model is not used in delta-sync index with "
"Databricks-managed embeddings."
)
self._embedding = None
@classmethod
def from_texts(
cls: Type[VST],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> VST:
raise NotImplementedError(
"`from_texts` is not supported. "
"Use `add_texts` to add to existing direct-access index."
)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[Any]] = None,
**kwargs: Any,
) -> List[str]:
"""Add texts to the index.
Only support direct-access index.
Args:
texts: List of texts to add.
metadatas: List of metadata for each text. Defaults to None.
ids: List of ids for each text. Defaults to None.
If not provided, a random uuid will be generated for each text.
Returns:
List of ids from adding the texts into the index.
"""
self._op_require_direct_access_index("add_texts")
assert self.embeddings is not None, "embedding model is required."
# Wrap to list if input texts is a single string
if isinstance(texts, str):
texts = [texts]
texts = list(texts)
vectors = self.embeddings.embed_documents(texts)
ids = ids or [str(uuid.uuid4()) for _ in texts]
metadatas = metadatas or [{} for _ in texts]
updates = [
{
self.primary_key: id_,
self.text_column: text,
self._embedding_vector_column_name(): vector,
**metadata,
}
for text, vector, id_, metadata in zip(texts, vectors, ids, metadatas)
]
upsert_resp = self.index.upsert(updates)
if upsert_resp.get("status") in ("PARTIAL_SUCCESS", "FAILURE"):
failed_ids = upsert_resp.get("result", dict()).get(
"failed_primary_keys", []
)
if upsert_resp.get("status") == "FAILURE":
logger.error("Failed to add texts to the index.")
else:
logger.warning("Some texts failed to be added to the index.")
return [id_ for id_ in ids if id_ not in failed_ids]
return ids
@property
def embeddings(self) -> Optional[Embeddings]:
"""Access the query embedding object if available."""
return self._embedding
def delete(self, ids: Optional[List[Any]] = None, **kwargs: Any) -> Optional[bool]:
"""Delete documents from the index.
Only support direct-access index.
Args:
ids: List of ids of documents to delete.
Returns:
True if successful.
"""
self._op_require_direct_access_index("delete")
if ids is None:
raise ValueError("ids must be provided.")
self.index.delete(ids)
return True
def similarity_search(
self, query: str, k: int = 4, filters: Optional[Any] = None, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filters: Filters to apply to the query. Defaults to None.
Returns:
List of Documents most similar to the embedding.
"""
docs_with_score = self.similarity_search_with_score(
query=query, k=k, filters=filters, **kwargs
)
return [doc for doc, _ in docs_with_score]
def similarity_search_with_score(
self, query: str, k: int = 4, filters: Optional[Any] = None, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filters: Filters to apply to the query. Defaults to None.
Returns:
List of Documents most similar to the embedding and score for each.
"""
if self._is_databricks_managed_embeddings():
query_text = query
query_vector = None
else:
assert self.embeddings is not None, "embedding model is required."
query_text = None
query_vector = self.embeddings.embed_query(query)
search_resp = self.index.similarity_search(
columns=self.columns,
query_text=query_text,
query_vector=query_vector,
filters=filters,
num_results=k,
)
return self._parse_search_response(search_resp)
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filters: Optional[Any] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filters: Filters to apply to the query. Defaults to None.
Returns:
List of Documents most similar to the embedding.
"""
docs_with_score = self.similarity_search_by_vector_with_score(
embedding=embedding, k=k, filters=filters, **kwargs
)
return [doc for doc, _ in docs_with_score]
def similarity_search_by_vector_with_score(
self,
embedding: List[float],
k: int = 4,
filters: Optional[Any] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to embedding vector, along with scores.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filters: Filters to apply to the query. Defaults to None.
Returns:
List of Documents most similar to the embedding and score for each.
"""
if self._is_databricks_managed_embeddings():
raise ValueError(
"`similarity_search_by_vector` is not supported for index with "
"Databricks-managed embeddings."
)
search_resp = self.index.similarity_search(
columns=self.columns,
query_vector=embedding,
filters=filters,
num_results=k,
)
return self._parse_search_response(search_resp)
def _parse_search_response(self, search_resp: dict) -> List[Tuple[Document, float]]:
"""Parse the search response into a list of Documents with score."""
columns = [
col["name"]
for col in search_resp.get("manifest", dict()).get("columns", [])
]
docs_with_score = []
for result in search_resp.get("result", dict()).get("data_array", []):
doc_id = result[columns.index(self.primary_key)]
text_content = result[columns.index(self.text_column)]
metadata = {
col: value
for col, value in zip(columns[:-1], result[:-1])
if col not in [self.primary_key, self.text_column]
}
metadata[self.primary_key] = doc_id
score = result[-1]
doc = Document(page_content=text_content, metadata=metadata)
docs_with_score.append((doc, score))
return docs_with_score
def _index_schema(self) -> Optional[dict]:
"""Return the index schema as a dictionary.
Return None if no schema found.
"""
if self._is_direct_access_index():
schema_json = self._direct_access_index_spec.get("schema_json")
if schema_json is not None:
return json.loads(schema_json)
return None
def _embedding_vector_column_name(self) -> Optional[str]:
"""Return the name of the embedding vector column.
None if the index is not a self-managed embedding index.
"""
return self._embedding_vector_column().get("name")
def _embedding_vector_column_dimension(self) -> Optional[int]:
"""Return the dimension of the embedding vector column.
None if the index is not a self-managed embedding index.
"""
return self._embedding_vector_column().get("embedding_dimension")
def _embedding_vector_column(self) -> dict:
"""Return the embedding vector column configs as a dictionary.
Empty if the index is not a self-managed embedding index.
"""
index_spec = (
self._delta_sync_index_spec
if self._is_delta_sync_index()
else self._direct_access_index_spec
)
return next(iter(index_spec.get("embedding_vector_columns") or list()), dict())
def _embedding_source_column_name(self) -> Optional[str]:
"""Return the name of the embedding source column.
None if the index is not a Databricks-managed embedding index.
"""
return self._embedding_source_column().get("name")
def _embedding_source_column(self) -> dict:
"""Return the embedding source column configs as a dictionary.
Empty if the index is not a Databricks-managed embedding index.
"""
index_spec = self._delta_sync_index_spec
return next(iter(index_spec.get("embedding_source_columns") or list()), dict())
def _is_delta_sync_index(self) -> bool:
"""Return True if the index is a delta-sync index."""
return self.index_type == "DELTA_SYNC"
def _is_direct_access_index(self) -> bool:
"""Return True if the index is a direct-access index."""
return self.index_type == "DIRECT_ACCESS"
def _is_databricks_managed_embeddings(self) -> bool:
"""Return True if the embeddings are managed by Databricks Vector Search."""
return (
self._is_delta_sync_index()
and self._embedding_source_column_name() is not None
)
def _infer_embedding_dimension(self) -> int:
"""Infer the embedding dimension from the embedding function."""
assert self.embeddings is not None, "embedding model is required."
return len(self.embeddings.embed_query("test"))
def _op_require_direct_access_index(self, op_name: str) -> None:
"""
Raise ValueError if the operation is not supported for direct-access index."""
if not self._is_direct_access_index():
raise ValueError(f"`{op_name}` is only supported for direct-access index.")
@staticmethod
def _require_arg(arg: Any, arg_name: str) -> None:
"""Raise ValueError if the required arg with name `arg_name` is None."""
if not arg:
raise ValueError(f"`{arg_name}` is required for this index.")
| [] |
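A usage sketch for the direct-access path described above: explicit ids and metadata go through `add_texts`, and `filters` narrows a query. The endpoint, index, and column names are placeholders that must match the actual index schema.

from databricks.vector_search.client import VectorSearchClient
from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.vectorstores import DatabricksVectorSearch

vs_client = VectorSearchClient()
vs_index = vs_client.get_index(
    endpoint_name="vs_endpoint",       # placeholder endpoint
    index_name="ml.llm.direct_index",  # placeholder direct-access index
)
vectorstore = DatabricksVectorSearch(
    index=vs_index,
    embedding=OpenAIEmbeddings(),
    text_column="document_content",
    columns=["source"],                # extra column surfaced as metadata
)

ids = vectorstore.add_texts(
    texts=["text1", "text2"],
    metadatas=[{"source": "a"}, {"source": "b"}],
    ids=["doc-1", "doc-2"],
)
docs = vectorstore.similarity_search(
    "text1", k=1, filters={"source": "a"}  # filter syntax follows the Vector Search API
)
print(ids, docs[0].page_content)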
2024-01-10 | mth93/langchain | libs~langchain~langchain~smith~evaluation~string_run_evaluator.py | """Run evaluator wrapper for string evaluators."""
from __future__ import annotations
from abc import abstractmethod
from typing import Any, Dict, List, Optional
from libs.core.langchain_core.load.dump import dumpd
from libs.core.langchain_core.load.load import load
from libs.core.langchain_core.load.serializable import Serializable
from libs.core.langchain_core.messages import BaseMessage, get_buffer_string, messages_from_dict
from langsmith import EvaluationResult, RunEvaluator
from langsmith.schemas import DataType, Example, Run
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain.chains.base import Chain
from langchain.evaluation.schema import StringEvaluator
from langchain.schema import RUN_KEY
def _get_messages_from_run_dict(messages: List[dict]) -> List[BaseMessage]:
if not messages:
return []
first_message = messages[0]
if "lc" in first_message:
return [load(dumpd(message)) for message in messages]
else:
return messages_from_dict(messages)
class StringRunMapper(Serializable):
"""Extract items to evaluate from the run object."""
@property
def output_keys(self) -> List[str]:
"""The keys to extract from the run."""
return ["prediction", "input"]
@abstractmethod
def map(self, run: Run) -> Dict[str, str]:
"""Maps the Run to a dictionary."""
def __call__(self, run: Run) -> Dict[str, str]:
"""Maps the Run to a dictionary."""
if not run.outputs:
raise ValueError(f"Run {run.id} has no outputs to evaluate.")
return self.map(run)
class LLMStringRunMapper(StringRunMapper):
"""Extract items to evaluate from the run object."""
def serialize_chat_messages(self, messages: List[Dict]) -> str:
"""Extract the input messages from the run."""
if isinstance(messages, list) and messages:
if isinstance(messages[0], dict):
chat_messages = _get_messages_from_run_dict(messages)
elif isinstance(messages[0], list):
# Runs from Tracer have messages as a list of lists of dicts
chat_messages = _get_messages_from_run_dict(messages[0])
else:
raise ValueError(f"Could not extract messages to evaluate {messages}")
return get_buffer_string(chat_messages)
raise ValueError(f"Could not extract messages to evaluate {messages}")
def serialize_inputs(self, inputs: Dict) -> str:
if "prompts" in inputs: # Should we even accept this?
input_ = "\n\n".join(inputs["prompts"])
elif "prompt" in inputs:
input_ = inputs["prompt"]
elif "messages" in inputs:
input_ = self.serialize_chat_messages(inputs["messages"])
else:
raise ValueError("LLM Run must have either messages or prompts as inputs.")
return input_
def serialize_outputs(self, outputs: Dict) -> str:
if not outputs.get("generations"):
raise ValueError("Cannot evaluate LLM Run without generations.")
generations: List[Dict] = outputs["generations"]
if not generations:
raise ValueError("Cannot evaluate LLM run with empty generations.")
first_generation: Dict = generations[0]
if isinstance(first_generation, list):
# Runs from Tracer have generations as a list of lists of dicts
# Whereas Runs from the API have a list of dicts
first_generation = first_generation[0]
if "message" in first_generation:
output_ = self.serialize_chat_messages([first_generation["message"]])
else:
output_ = first_generation["text"]
return output_
def map(self, run: Run) -> Dict[str, str]:
"""Maps the Run to a dictionary."""
if run.run_type != "llm":
raise ValueError("LLM RunMapper only supports LLM runs.")
elif not run.outputs:
if run.error:
raise ValueError(
f"Cannot evaluate errored LLM run {run.id}: {run.error}"
)
else:
raise ValueError(
f"Run {run.id} has no outputs. Cannot evaluate this run."
)
else:
try:
inputs = self.serialize_inputs(run.inputs)
except Exception as e:
raise ValueError(
f"Could not parse LM input from run inputs {run.inputs}"
) from e
try:
output_ = self.serialize_outputs(run.outputs)
except Exception as e:
raise ValueError(
f"Could not parse LM prediction from run outputs {run.outputs}"
) from e
return {"input": inputs, "prediction": output_}
class ChainStringRunMapper(StringRunMapper):
"""Extract items to evaluate from the run object from a chain."""
input_key: Optional[str] = None
"""The key from the model Run's inputs to use as the eval input.
If not provided, will use the only input key or raise an
error if there are multiple."""
prediction_key: Optional[str] = None
"""The key from the model Run's outputs to use as the eval prediction.
If not provided, will use the only output key or raise an error
if there are multiple."""
def _get_key(self, source: Dict, key: Optional[str], which: str) -> str:
if key is not None:
return source[key]
elif len(source) == 1:
return next(iter(source.values()))
else:
raise ValueError(
f"Could not map run {which} with multiple keys: "
f"{source}\nPlease manually specify a {which}_key"
)
def map(self, run: Run) -> Dict[str, str]:
"""Maps the Run to a dictionary."""
if not run.outputs:
raise ValueError(
f"Run with ID {run.id} lacks outputs required for evaluation."
" Ensure the Run has valid outputs."
)
if self.input_key is not None and self.input_key not in run.inputs:
raise ValueError(
f"Run with ID {run.id} is missing the expected input key"
f" '{self.input_key}'.\nAvailable input keys in this Run"
f" are: {run.inputs.keys()}.\nAdjust the evaluator's"
f" input_key or ensure your input data includes key"
f" '{self.input_key}'."
)
elif self.prediction_key is not None and self.prediction_key not in run.outputs:
available_keys = ", ".join(run.outputs.keys())
raise ValueError(
f"Run with ID {run.id} doesn't have the expected prediction key"
f" '{self.prediction_key}'. Available prediction keys in this Run are:"
f" {available_keys}. Adjust the evaluator's prediction_key or"
" ensure the Run object's outputs the expected key."
)
else:
input_ = self._get_key(run.inputs, self.input_key, "input")
prediction = self._get_key(run.outputs, self.prediction_key, "prediction")
return {
"input": input_,
"prediction": prediction,
}
class ToolStringRunMapper(StringRunMapper):
"""Map an input to the tool."""
def map(self, run: Run) -> Dict[str, str]:
if not run.outputs:
raise ValueError(f"Run {run.id} has no outputs to evaluate.")
return {"input": run.inputs["input"], "prediction": run.outputs["output"]}
class StringExampleMapper(Serializable):
"""Map an example, or row in the dataset, to the inputs of an evaluation."""
reference_key: Optional[str] = None
@property
def output_keys(self) -> List[str]:
"""The keys to extract from the run."""
return ["reference"]
def serialize_chat_messages(self, messages: List[Dict]) -> str:
"""Extract the input messages from the run."""
chat_messages = _get_messages_from_run_dict(messages)
return get_buffer_string(chat_messages)
def map(self, example: Example) -> Dict[str, str]:
"""Maps the Example, or dataset row to a dictionary."""
if not example.outputs:
raise ValueError(
f"Example {example.id} has no outputs to use as a reference."
)
if self.reference_key is None:
if len(example.outputs) > 1:
raise ValueError(
f"Example {example.id} has multiple outputs, so you must"
" specify a reference_key."
)
else:
output = list(example.outputs.values())[0]
elif self.reference_key not in example.outputs:
raise ValueError(
f"Example {example.id} does not have reference key"
f" {self.reference_key}."
)
else:
output = example.outputs[self.reference_key]
return {
"reference": self.serialize_chat_messages([output])
if isinstance(output, dict) and output.get("type") and output.get("data")
else output
}
def __call__(self, example: Example) -> Dict[str, str]:
"""Maps the Run and Example to a dictionary."""
if not example.outputs:
raise ValueError(
f"Example {example.id} has no outputs to use as areference label."
)
return self.map(example)
class StringRunEvaluatorChain(Chain, RunEvaluator):
"""Evaluate Run and optional examples."""
run_mapper: StringRunMapper
"""Maps the Run to a dictionary with 'input' and 'prediction' strings."""
example_mapper: Optional[StringExampleMapper] = None
"""Maps the Example (dataset row) to a dictionary
with a 'reference' string."""
name: str
"""The name of the evaluation metric."""
string_evaluator: StringEvaluator
"""The evaluation chain."""
@property
def input_keys(self) -> List[str]:
return ["run", "example"]
@property
def output_keys(self) -> List[str]:
return ["feedback"]
def _prepare_input(self, inputs: Dict[str, Any]) -> Dict[str, str]:
run: Run = inputs["run"]
example: Optional[Example] = inputs.get("example")
evaluate_strings_inputs = self.run_mapper(run)
if not self.string_evaluator.requires_input:
# Hide warning about unused input
evaluate_strings_inputs.pop("input", None)
if example and self.example_mapper and self.string_evaluator.requires_reference:
evaluate_strings_inputs.update(self.example_mapper(example))
elif self.string_evaluator.requires_reference:
raise ValueError(
f"Evaluator {self.name} requires an reference"
" example from the dataset,"
f" but none was provided for run {run.id}."
)
return evaluate_strings_inputs
def _prepare_output(self, output: Dict[str, Any]) -> Dict[str, Any]:
evaluation_result = EvaluationResult(
key=self.name, comment=output.get("reasoning"), **output
)
if RUN_KEY in output:
# TODO: Not currently surfaced. Update
evaluation_result.evaluator_info[RUN_KEY] = output[RUN_KEY]
return {"feedback": evaluation_result}
def _call(
self,
inputs: Dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Call the evaluation chain."""
evaluate_strings_inputs = self._prepare_input(inputs)
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
chain_output = self.string_evaluator.evaluate_strings(
**evaluate_strings_inputs,
callbacks=callbacks,
include_run_info=True,
)
return self._prepare_output(chain_output)
async def _acall(
self,
inputs: Dict[str, str],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Call the evaluation chain."""
evaluate_strings_inputs = self._prepare_input(inputs)
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
chain_output = await self.string_evaluator.aevaluate_strings(
**evaluate_strings_inputs,
callbacks=callbacks,
include_run_info=True,
)
return self._prepare_output(chain_output)
def _prepare_evaluator_output(self, output: Dict[str, Any]) -> EvaluationResult:
feedback: EvaluationResult = output["feedback"]
if RUN_KEY not in feedback.evaluator_info:
feedback.evaluator_info[RUN_KEY] = output[RUN_KEY]
return feedback
def evaluate_run(
self, run: Run, example: Optional[Example] = None
) -> EvaluationResult:
"""Evaluate an example."""
try:
result = self({"run": run, "example": example}, include_run_info=True)
return self._prepare_evaluator_output(result)
except Exception as e:
return EvaluationResult(
key=self.string_evaluator.evaluation_name,
comment=f"Error evaluating run {run.id}: {e}",
# TODO: Add run ID once we can declare it via callbacks
)
async def aevaluate_run(
self, run: Run, example: Optional[Example] = None
) -> EvaluationResult:
"""Evaluate an example."""
try:
result = await self.acall(
{"run": run, "example": example}, include_run_info=True
)
return self._prepare_evaluator_output(result)
except Exception as e:
return EvaluationResult(
key=self.string_evaluator.evaluation_name,
comment=f"Error evaluating run {run.id}: {e}",
)
@classmethod
def from_run_and_data_type(
cls,
evaluator: StringEvaluator,
run_type: str,
data_type: DataType,
input_key: Optional[str] = None,
prediction_key: Optional[str] = None,
reference_key: Optional[str] = None,
tags: Optional[List[str]] = None,
) -> StringRunEvaluatorChain:
"""
Create a StringRunEvaluatorChain from an evaluator and the run and dataset types.
This method provides an easy way to instantiate a StringRunEvaluatorChain, by
taking an evaluator and information about the type of run and the data.
The method supports LLM and chain runs.
Args:
evaluator (StringEvaluator): The string evaluator to use.
run_type (str): The type of run being evaluated.
Supported types are LLM and Chain.
data_type (DataType): The type of dataset used in the run.
input_key (str, optional): The key used to map the input from the run.
prediction_key (str, optional): The key used to map the prediction from the run.
reference_key (str, optional): The key used to map the reference from the dataset.
tags (List[str], optional): List of tags to attach to the evaluation chain.
Returns:
StringRunEvaluatorChain: The instantiated evaluation chain.
Raises:
ValueError: If the run type is not supported, or if the evaluator requires a
reference from the dataset but the reference key is not provided.
""" # noqa: E501
# Configure how run inputs/predictions are passed to the evaluator
if run_type == "llm":
run_mapper: StringRunMapper = LLMStringRunMapper()
elif run_type == "chain":
run_mapper = ChainStringRunMapper(
input_key=input_key, prediction_key=prediction_key
)
else:
raise ValueError(
f"Unsupported run type {run_type}. Expected one of 'llm' or 'chain'."
)
# Configure how example rows are fed as a reference string to the evaluator
if (
reference_key is not None
or data_type in (DataType.llm, DataType.chat)
or evaluator.requires_reference
):
example_mapper = StringExampleMapper(reference_key=reference_key)
elif evaluator.requires_reference:
raise ValueError(
f"Evaluator {evaluator.evaluation_name} requires a reference"
" example from the dataset. Please specify the reference key from"
" amongst the dataset outputs keys."
)
else:
example_mapper = None
return cls(
name=evaluator.evaluation_name,
run_mapper=run_mapper,
example_mapper=example_mapper,
string_evaluator=evaluator,
tags=tags,
)
| [] |
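A minimal sketch of wiring a built-in string evaluator into this chain via `from_run_and_data_type`; the criteria evaluator, data type, and key names are illustrative choices, and the loaded evaluator needs an LLM configured (e.g. OpenAI credentials) to actually run.

from langchain.evaluation import load_evaluator
from langchain.smith.evaluation.string_run_evaluator import StringRunEvaluatorChain
from langsmith.schemas import DataType

evaluator = load_evaluator("criteria", criteria="conciseness")

run_evaluator = StringRunEvaluatorChain.from_run_and_data_type(
    evaluator,
    run_type="chain",
    data_type=DataType.kv,
    input_key="question",     # illustrative: the evaluated chain's input key
    prediction_key="answer",  # illustrative: the evaluated chain's output key
)
# feedback = run_evaluator.evaluate_run(run, example)  # run/example come from LangSmith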
2024-01-10 | mth93/langchain | libs~community~langchain_community~tools~slack~send_message.py | from typing import Optional, Type
from libs.core.langchain_core.callbacks import CallbackManagerForToolRun
from libs.core.langchain_core.pydantic_v1 import BaseModel, Field
from langchain_community.tools.slack.base import SlackBaseTool
class SendMessageSchema(BaseModel):
"""Input for SendMessageTool."""
message: str = Field(
...,
description="The message to be sent.",
)
channel: str = Field(
...,
description="The channel, private group, or IM channel to send message to.",
)
class SlackSendMessage(SlackBaseTool):
"""Tool for sending a message in Slack."""
name: str = "send_message"
description: str = (
"Use this tool to send a message with the provided message fields."
)
args_schema: Type[SendMessageSchema] = SendMessageSchema
def _run(
self,
message: str,
channel: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
try:
result = self.client.chat_postMessage(channel=channel, text=message)
output = "Message sent: " + str(result)
return output
except Exception as e:
return "Error creating conversation: {}".format(e)
| [] |
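A minimal sketch of invoking the tool directly, assuming a Slack bot token is available; the `SLACK_BOT_TOKEN` variable name and the channel are placeholders, and the client can also be supplied by the Slack toolkit instead of being constructed by hand.

import os

from slack_sdk import WebClient

from langchain_community.tools.slack.send_message import SlackSendMessage

client = WebClient(token=os.environ["SLACK_BOT_TOKEN"])  # placeholder token source
tool = SlackSendMessage(client=client)

result = tool.run({"message": "Deployment finished", "channel": "#general"})
print(result)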
2024-01-10 | mth93/langchain | libs~partners~nvidia-trt~langchain_nvidia_trt~llms.py | from __future__ import annotations
import json
import queue
import random
import time
from functools import partial
from typing import Any, Dict, Iterator, List, Optional, Sequence, Union
import google.protobuf.json_format
import numpy as np
import tritonclient.grpc as grpcclient
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models import BaseLLM
from libs.core.langchain_core.outputs import Generation, GenerationChunk, LLMResult
from libs.core.langchain_core.pydantic_v1 import Field, root_validator
from tritonclient.grpc.service_pb2 import ModelInferResponse
from tritonclient.utils import np_to_triton_dtype
class TritonTensorRTError(Exception):
"""Base exception for TritonTensorRT."""
class TritonTensorRTRuntimeError(TritonTensorRTError, RuntimeError):
"""Runtime error for TritonTensorRT."""
class TritonTensorRTLLM(BaseLLM):
"""TRTLLM triton models.
Arguments:
server_url: (str) The URL of the Triton inference server to use.
model_name: (str) The name of the Triton TRT model to use.
        temperature: (float) Temperature to use for sampling
        top_p: (float) The top-p value to use for sampling
        top_k: (int) The top-k value to use for sampling
        beam_width: (int) The number of beams to use for beam search
        repetition_penalty: (float) The penalty to apply to repeated tokens
        length_penalty: (float) The penalty to apply to the output length
tokens: (int) The maximum number of tokens to generate.
client: The client object used to communicate with the inference server
Example:
.. code-block:: python
from langchain_nvidia_trt import TritonTensorRTLLM
model = TritonTensorRTLLM()
"""
server_url: Optional[str] = Field(None, alias="server_url")
model_name: str = Field(
..., description="The name of the model to use, such as 'ensemble'."
)
## Optional args for the model
temperature: float = 1.0
top_p: float = 0
top_k: int = 1
tokens: int = 100
beam_width: int = 1
repetition_penalty: float = 1.0
length_penalty: float = 1.0
client: grpcclient.InferenceServerClient
stop: List[str] = Field(
default_factory=lambda: ["</s>"], description="Stop tokens."
)
seed: int = Field(42, description="The seed to use for random generation.")
load_model: bool = Field(
True,
description="Request the inference server to load the specified model.\
Certain Triton configurations do not allow for this operation.",
)
def __del__(self):
"""Ensure the client streaming connection is properly shutdown"""
self.client.close()
@root_validator(pre=True, allow_reuse=True)
def validate_environment(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Validate that python package exists in environment."""
if not values.get("client"):
values["client"] = grpcclient.InferenceServerClient(values["server_url"])
return values
@property
def _llm_type(self) -> str:
"""Return type of LLM."""
return "nvidia-trt-llm"
@property
def _model_default_parameters(self) -> Dict[str, Any]:
return {
"tokens": self.tokens,
"top_k": self.top_k,
"top_p": self.top_p,
"temperature": self.temperature,
"repetition_penalty": self.repetition_penalty,
"length_penalty": self.length_penalty,
"beam_width": self.beam_width,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get all the identifying parameters."""
return {
"server_url": self.server_url,
"model_name": self.model_name,
**self._model_default_parameters,
}
def _get_invocation_params(self, **kwargs: Any) -> Dict[str, Any]:
return {**self._model_default_parameters, **kwargs}
def get_model_list(self) -> List[str]:
"""Get a list of models loaded in the triton server."""
res = self.client.get_model_repository_index(as_json=True)
return [model["name"] for model in res["models"]]
def _load_model(self, model_name: str, timeout: int = 1000) -> None:
"""Load a model into the server."""
if self.client.is_model_ready(model_name):
return
self.client.load_model(model_name)
t0 = time.perf_counter()
t1 = t0
while not self.client.is_model_ready(model_name) and t1 - t0 < timeout:
t1 = time.perf_counter()
if not self.client.is_model_ready(model_name):
raise TritonTensorRTRuntimeError(
f"Failed to load {model_name} on Triton in {timeout}s"
)
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
self._load_model(self.model_name)
invocation_params = self._get_invocation_params(**kwargs)
stop_words = stop if stop is not None else self.stop
generations = []
# TODO: We should handle the native batching instead.
for prompt in prompts:
invoc_params = {**invocation_params, "prompt": [[prompt]]}
result: str = self._request(
self.model_name,
stop=stop_words,
**invoc_params,
)
generations.append([Generation(text=result, generation_info={})])
return LLMResult(generations=generations)
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
self._load_model(self.model_name)
invocation_params = self._get_invocation_params(**kwargs, prompt=[[prompt]])
stop_words = stop if stop is not None else self.stop
inputs = self._generate_inputs(stream=True, **invocation_params)
outputs = self._generate_outputs()
result_queue = self._invoke_triton(self.model_name, inputs, outputs, stop_words)
for token in result_queue:
yield GenerationChunk(text=token)
if run_manager:
run_manager.on_llm_new_token(token)
self.client.stop_stream()
##### BELOW ARE METHODS PREVIOUSLY ONLY IN THE GRPC CLIENT
def _request(
self,
model_name: str,
prompt: Sequence[Sequence[str]],
stop: Optional[List[str]] = None,
**params: Any,
) -> str:
"""Request inferencing from the triton server."""
# create model inputs and outputs
inputs = self._generate_inputs(stream=False, prompt=prompt, **params)
outputs = self._generate_outputs()
result_queue = self._invoke_triton(self.model_name, inputs, outputs, stop)
result_str = ""
for token in result_queue:
result_str += token
self.client.stop_stream()
return result_str
def _invoke_triton(self, model_name, inputs, outputs, stop_words):
if not self.client.is_model_ready(model_name):
raise RuntimeError("Cannot request streaming, model is not loaded")
request_id = str(random.randint(1, 9999999)) # nosec
result_queue = StreamingResponseGenerator(
self,
request_id,
force_batch=False,
stop_words=stop_words,
)
self.client.start_stream(
callback=partial(
self._stream_callback,
result_queue,
stop_words=stop_words,
)
)
        # Even though this request may not be a streaming request, certain
        # configurations in Triton prevent the GRPC server from accepting
        # non-streaming connections. Therefore we call the streaming API and
        # combine the streamed results.
self.client.async_stream_infer(
model_name=model_name,
inputs=inputs,
outputs=outputs,
request_id=request_id,
)
return result_queue
def _generate_outputs(
self,
) -> List[grpcclient.InferRequestedOutput]:
"""Generate the expected output structure."""
return [grpcclient.InferRequestedOutput("text_output")]
def _prepare_tensor(
self, name: str, input_data: np.ndarray
) -> grpcclient.InferInput:
"""Prepare an input data structure."""
t = grpcclient.InferInput(
name, input_data.shape, np_to_triton_dtype(input_data.dtype)
)
t.set_data_from_numpy(input_data)
return t
def _generate_inputs(
self,
prompt: Sequence[Sequence[str]],
tokens: int = 300,
temperature: float = 1.0,
top_k: float = 1,
top_p: float = 0,
beam_width: int = 1,
repetition_penalty: float = 1,
length_penalty: float = 1.0,
stream: bool = True,
    ) -> List[grpcclient.InferInput]:
"""Create the input for the triton inference server."""
query = np.array(prompt).astype(object)
request_output_len = np.array([tokens]).astype(np.uint32).reshape((1, -1))
runtime_top_k = np.array([top_k]).astype(np.uint32).reshape((1, -1))
runtime_top_p = np.array([top_p]).astype(np.float32).reshape((1, -1))
temperature_array = np.array([temperature]).astype(np.float32).reshape((1, -1))
len_penalty = np.array([length_penalty]).astype(np.float32).reshape((1, -1))
repetition_penalty_array = (
np.array([repetition_penalty]).astype(np.float32).reshape((1, -1))
)
random_seed = np.array([self.seed]).astype(np.uint64).reshape((1, -1))
beam_width_array = np.array([beam_width]).astype(np.uint32).reshape((1, -1))
streaming_data = np.array([[stream]], dtype=bool)
inputs = [
self._prepare_tensor("text_input", query),
self._prepare_tensor("max_tokens", request_output_len),
self._prepare_tensor("top_k", runtime_top_k),
self._prepare_tensor("top_p", runtime_top_p),
self._prepare_tensor("temperature", temperature_array),
self._prepare_tensor("length_penalty", len_penalty),
self._prepare_tensor("repetition_penalty", repetition_penalty_array),
self._prepare_tensor("random_seed", random_seed),
self._prepare_tensor("beam_width", beam_width_array),
self._prepare_tensor("stream", streaming_data),
]
return inputs
def _send_stop_signals(self, model_name: str, request_id: str) -> None:
"""Send the stop signal to the Triton Inference server."""
stop_inputs = self._generate_stop_signals()
self.client.async_stream_infer(
model_name,
stop_inputs,
request_id=request_id,
parameters={"Streaming": True},
)
def _generate_stop_signals(
self,
) -> List[grpcclient.InferInput]:
"""Generate the signal to stop the stream."""
inputs = [
grpcclient.InferInput("input_ids", [1, 1], "INT32"),
grpcclient.InferInput("input_lengths", [1, 1], "INT32"),
grpcclient.InferInput("request_output_len", [1, 1], "UINT32"),
grpcclient.InferInput("stop", [1, 1], "BOOL"),
]
inputs[0].set_data_from_numpy(np.empty([1, 1], dtype=np.int32))
inputs[1].set_data_from_numpy(np.zeros([1, 1], dtype=np.int32))
inputs[2].set_data_from_numpy(np.array([[0]], dtype=np.uint32))
inputs[3].set_data_from_numpy(np.array([[True]], dtype="bool"))
return inputs
@staticmethod
def _process_result(result: Dict[str, str]) -> str:
"""Post-process the result from the server."""
message = ModelInferResponse()
google.protobuf.json_format.Parse(json.dumps(result), message)
infer_result = grpcclient.InferResult(message)
np_res = infer_result.as_numpy("text_output")
generated_text = ""
if np_res is not None:
generated_text = "".join([token.decode() for token in np_res])
return generated_text
def _stream_callback(
self,
result_queue: queue.Queue[Union[Optional[Dict[str, str]], str]],
result: grpcclient.InferResult,
error: str,
stop_words: List[str],
) -> None:
"""Add streamed result to queue."""
if error:
result_queue.put(error)
else:
response_raw: dict = result.get_response(as_json=True)
# TODO: Check the response is a map rather than a string
if "outputs" in response_raw:
# the very last response might have no output, just the final flag
response = self._process_result(response_raw)
if response in stop_words:
result_queue.put(None)
else:
result_queue.put(response)
if response_raw["parameters"]["triton_final_response"]["bool_param"]:
# end of the generation
result_queue.put(None)
def stop_stream(
self, model_name: str, request_id: str, signal: bool = True
) -> None:
"""Close the streaming connection."""
if signal:
self._send_stop_signals(model_name, request_id)
self.client.stop_stream()
class StreamingResponseGenerator(queue.Queue):
"""A Generator that provides the inference results from an LLM."""
def __init__(
self,
client: grpcclient.InferenceServerClient,
request_id: str,
force_batch: bool,
stop_words: Sequence[str],
) -> None:
"""Instantiate the generator class."""
super().__init__()
self.client = client
self.request_id = request_id
self._batch = force_batch
self._stop_words = stop_words
def __iter__(self) -> StreamingResponseGenerator:
"""Return self as a generator."""
return self
def __next__(self) -> str:
"""Return the next retrieved token."""
val = self.get()
if val is None or val in self._stop_words:
self.client.stop_stream(
"tensorrt_llm", self.request_id, signal=not self._batch
)
raise StopIteration()
return val
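# A minimal end-to-end sketch of how the pieces above fit together, assuming the
# enclosing class is the Triton TensorRT-LLM wrapper (the class name
# `TritonTensorRTLLM` and the server URL below are illustrative assumptions, not
# taken from this file):
#
#   llm = TritonTensorRTLLM(server_url="localhost:8001", model_name="tensorrt_llm")
#   for chunk in llm.stream("What does Triton Inference Server do?"):
#       print(chunk, end="", flush=True)
#
# Each streamed token is pushed by `_stream_callback` into the
# `StreamingResponseGenerator` queue; `__next__` pops tokens until a stop word or
# the `triton_final_response` flag enqueues the `None` sentinel, which ends iteration.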
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~chat_models~human.py | """ChatModel wrapper which returns user input as the response."""
from io import StringIO
from typing import Any, Callable, Dict, List, Mapping, Optional
import yaml
from libs.core.langchain_core.callbacks import (
CallbackManagerForLLMRun,
)
from libs.core.langchain_core.language_models.chat_models import BaseChatModel
from libs.core.langchain_core.messages import (
BaseMessage,
HumanMessage,
_message_from_dict,
messages_to_dict,
)
from libs.core.langchain_core.outputs import ChatGeneration, ChatResult
from libs.core.langchain_core.pydantic_v1 import Field
from langchain_community.llms.utils import enforce_stop_tokens
def _display_messages(messages: List[BaseMessage]) -> None:
dict_messages = messages_to_dict(messages)
for message in dict_messages:
yaml_string = yaml.dump(
message,
default_flow_style=False,
sort_keys=False,
allow_unicode=True,
width=10000,
line_break=None,
)
print("\n", "======= start of message =======", "\n\n")
print(yaml_string)
print("======= end of message =======", "\n\n")
def _collect_yaml_input(
messages: List[BaseMessage], stop: Optional[List[str]] = None
) -> BaseMessage:
"""Collects and returns user input as a single string."""
lines = []
while True:
line = input()
if not line.strip():
break
if stop and any(seq in line for seq in stop):
break
lines.append(line)
yaml_string = "\n".join(lines)
# Try to parse the input string as YAML
try:
message = _message_from_dict(yaml.safe_load(StringIO(yaml_string)))
if message is None:
return HumanMessage(content="")
if stop:
if isinstance(message.content, str):
message.content = enforce_stop_tokens(message.content, stop)
else:
raise ValueError("Cannot use when output is not a string.")
return message
except yaml.YAMLError:
raise ValueError("Invalid YAML string entered.")
except ValueError:
raise ValueError("Invalid message entered.")
class HumanInputChatModel(BaseChatModel):
"""ChatModel which returns user input as the response."""
input_func: Callable = Field(default_factory=lambda: _collect_yaml_input)
message_func: Callable = Field(default_factory=lambda: _display_messages)
separator: str = "\n"
input_kwargs: Mapping[str, Any] = {}
message_kwargs: Mapping[str, Any] = {}
@property
def _identifying_params(self) -> Dict[str, Any]:
return {
"input_func": self.input_func.__name__,
"message_func": self.message_func.__name__,
}
@property
def _llm_type(self) -> str:
"""Returns the type of LLM."""
return "human-input-chat-model"
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""
Displays the messages to the user and returns their input as a response.
Args:
messages (List[BaseMessage]): The messages to be displayed to the user.
stop (Optional[List[str]]): A list of stop strings.
run_manager (Optional[CallbackManagerForLLMRun]): Currently not used.
Returns:
ChatResult: The user's input as a response.
"""
self.message_func(messages, **self.message_kwargs)
user_input = self.input_func(messages, stop=stop, **self.input_kwargs)
return ChatResult(generations=[ChatGeneration(message=user_input)])
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~petals.py | import logging
from typing import Any, Dict, List, Mapping, Optional
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.pydantic_v1 import Extra, Field, SecretStr, root_validator
from libs.core.langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
class Petals(LLM):
"""Petals Bloom models.
To use, you should have the ``petals`` python package installed, and the
environment variable ``HUGGINGFACE_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain_community.llms import petals
petals = Petals()
"""
client: Any
"""The client to use for the API calls."""
tokenizer: Any
"""The tokenizer to use for the API calls."""
model_name: str = "bigscience/bloom-petals"
"""The model to use."""
temperature: float = 0.7
"""What sampling temperature to use"""
max_new_tokens: int = 256
"""The maximum number of new tokens to generate in the completion."""
top_p: float = 0.9
"""The cumulative probability for top-p sampling."""
top_k: Optional[int] = None
"""The number of highest probability vocabulary tokens
to keep for top-k-filtering."""
do_sample: bool = True
"""Whether or not to use sampling; use greedy decoding otherwise."""
max_length: Optional[int] = None
"""The maximum length of the sequence to be generated."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call
not explicitly specified."""
huggingface_api_key: Optional[SecretStr] = None
class Config:
"""Configuration for this pydantic config."""
extra = Extra.forbid
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingface_api_key = convert_to_secret_str(
get_from_dict_or_env(values, "huggingface_api_key", "HUGGINGFACE_API_KEY")
)
try:
from petals import AutoDistributedModelForCausalLM
from transformers import AutoTokenizer
model_name = values["model_name"]
values["tokenizer"] = AutoTokenizer.from_pretrained(model_name)
values["client"] = AutoDistributedModelForCausalLM.from_pretrained(
model_name
)
values["huggingface_api_key"] = huggingface_api_key.get_secret_value()
except ImportError:
raise ImportError(
"Could not import transformers or petals python package."
"Please install with `pip install -U transformers petals`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Petals API."""
normal_params = {
"temperature": self.temperature,
"max_new_tokens": self.max_new_tokens,
"top_p": self.top_p,
"top_k": self.top_k,
"do_sample": self.do_sample,
"max_length": self.max_length,
}
return {**normal_params, **self.model_kwargs}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "petals"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the Petals API."""
params = self._default_params
params = {**params, **kwargs}
inputs = self.tokenizer(prompt, return_tensors="pt")["input_ids"]
outputs = self.client.generate(inputs, **params)
text = self.tokenizer.decode(outputs[0])
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
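# A minimal usage sketch (assumes the `petals` and `transformers` packages are
# installed and `HUGGINGFACE_API_KEY` is set; the prompt is illustrative):
#
#   from langchain_community.llms import Petals
#
#   llm = Petals(model_name="bigscience/bloom-petals", max_new_tokens=64)
#   print(llm.invoke("What is a distributed swarm?"))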
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~baiducloud_bos_file.py | import logging
import os
import tempfile
from typing import Any, Iterator, List
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.document_loaders.unstructured import UnstructuredFileLoader
logger = logging.getLogger(__name__)
class BaiduBOSFileLoader(BaseLoader):
"""Load from `Baidu Cloud BOS` file."""
def __init__(self, conf: Any, bucket: str, key: str):
"""Initialize with BOS config, bucket and key name.
:param conf(BceClientConfiguration): BOS config.
:param bucket(str): BOS bucket.
:param key(str): BOS file key.
"""
self.conf = conf
self.bucket = bucket
self.key = key
def load(self) -> List[Document]:
return list(self.lazy_load())
def lazy_load(self) -> Iterator[Document]:
"""Load documents."""
try:
from baidubce.services.bos.bos_client import BosClient
except ImportError:
raise ImportError(
"Please using `pip install bce-python-sdk`"
+ " before import bos related package."
)
# Initialize BOS Client
client = BosClient(self.conf)
with tempfile.TemporaryDirectory() as temp_dir:
file_path = f"{temp_dir}/{self.bucket}/{self.key}"
os.makedirs(os.path.dirname(file_path), exist_ok=True)
# Download the file to a destination
logger.debug(f"get object key {self.key} to file {file_path}")
client.get_object_to_file(self.bucket, self.key, file_path)
try:
loader = UnstructuredFileLoader(file_path)
documents = loader.load()
return iter(documents)
except Exception as ex:
logger.error(f"load document error = {ex}")
return iter([Document(page_content="")])
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~parsers~grobid.py | import logging
from typing import Dict, Iterator, List, Union
import requests
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseBlobParser
from langchain_community.document_loaders.blob_loaders import Blob
logger = logging.getLogger(__name__)
class ServerUnavailableException(Exception):
"""Exception raised when the Grobid server is unavailable."""
pass
class GrobidParser(BaseBlobParser):
"""Load article `PDF` files using `Grobid`."""
def __init__(
self,
segment_sentences: bool,
grobid_server: str = "http://localhost:8070/api/processFulltextDocument",
) -> None:
self.segment_sentences = segment_sentences
self.grobid_server = grobid_server
try:
requests.get(grobid_server)
except requests.exceptions.RequestException:
logger.error(
"GROBID server does not appear up and running, \
please ensure Grobid is installed and the server is running"
)
raise ServerUnavailableException
def process_xml(
self, file_path: str, xml_data: str, segment_sentences: bool
) -> Iterator[Document]:
"""Process the XML file from Grobin."""
try:
from bs4 import BeautifulSoup
except ImportError:
raise ImportError(
"`bs4` package not found, please install it with " "`pip install bs4`"
)
soup = BeautifulSoup(xml_data, "xml")
sections = soup.find_all("div")
title = soup.find_all("title")[0].text
chunks = []
for section in sections:
sect = section.find("head")
if sect is not None:
for i, paragraph in enumerate(section.find_all("p")):
chunk_bboxes = []
paragraph_text = []
for i, sentence in enumerate(paragraph.find_all("s")):
paragraph_text.append(sentence.text)
sbboxes = []
for bbox in sentence.get("coords").split(";"):
box = bbox.split(",")
sbboxes.append(
{
"page": box[0],
"x": box[1],
"y": box[2],
"h": box[3],
"w": box[4],
}
)
chunk_bboxes.append(sbboxes)
if segment_sentences is True:
fpage, lpage = sbboxes[0]["page"], sbboxes[-1]["page"]
sentence_dict = {
"text": sentence.text,
"para": str(i),
"bboxes": [sbboxes],
"section_title": sect.text,
"section_number": sect.get("n"),
"pages": (fpage, lpage),
}
chunks.append(sentence_dict)
if segment_sentences is not True:
fpage, lpage = (
chunk_bboxes[0][0]["page"],
chunk_bboxes[-1][-1]["page"],
)
paragraph_dict = {
"text": "".join(paragraph_text),
"para": str(i),
"bboxes": chunk_bboxes,
"section_title": sect.text,
"section_number": sect.get("n"),
"pages": (fpage, lpage),
}
chunks.append(paragraph_dict)
yield from [
Document(
page_content=chunk["text"],
metadata=dict(
{
"text": str(chunk["text"]),
"para": str(chunk["para"]),
"bboxes": str(chunk["bboxes"]),
"pages": str(chunk["pages"]),
"section_title": str(chunk["section_title"]),
"section_number": str(chunk["section_number"]),
"paper_title": str(title),
"file_path": str(file_path),
}
),
)
for chunk in chunks
]
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
file_path = blob.source
if file_path is None:
raise ValueError("blob.source cannot be None.")
pdf = open(file_path, "rb")
files = {"input": (file_path, pdf, "application/pdf", {"Expires": "0"})}
try:
data: Dict[str, Union[str, List[str]]] = {}
for param in ["generateIDs", "consolidateHeader", "segmentSentences"]:
data[param] = "1"
data["teiCoordinates"] = ["head", "s"]
files = files or {}
r = requests.request(
"POST",
self.grobid_server,
headers=None,
params=None,
files=files,
data=data,
timeout=60,
)
xml_data = r.text
except requests.exceptions.ReadTimeout:
logger.error("GROBID server timed out. Return None.")
xml_data = None
if xml_data is None:
return iter([])
else:
return self.process_xml(file_path, xml_data, self.segment_sentences)
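# A minimal usage sketch (assumes a Grobid server is reachable at the default URL;
# `GenericLoader` is just one convenient way to feed PDF blobs into this parser):
#
#   from langchain_community.document_loaders.generic import GenericLoader
#
#   loader = GenericLoader.from_filesystem(
#       "./papers/",
#       glob="*",
#       suffixes=[".pdf"],
#       parser=GrobidParser(segment_sentences=False),
#   )
#   docs = loader.load()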
| [] |
2024-01-10 | mth93/langchain | libs~langchain~tests~integration_tests~memory~test_upstash_redis.py | import json
import pytest
from libs.core.langchain_core.messages import message_to_dict
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories.upstash_redis import (
UpstashRedisChatMessageHistory,
)
URL = "<UPSTASH_REDIS_REST_URL>"
TOKEN = "<UPSTASH_REDIS_REST_TOKEN>"
@pytest.mark.requires("upstash_redis")
def test_memory_with_message_store() -> None:
"""Test the memory with a message store."""
# setup Upstash Redis as a message store
message_history = UpstashRedisChatMessageHistory(
url=URL, token=TOKEN, ttl=10, session_id="my-test-session"
)
memory = ConversationBufferMemory(
memory_key="baz", chat_memory=message_history, return_messages=True
)
# add some messages
memory.chat_memory.add_ai_message("This is me, the AI")
memory.chat_memory.add_user_message("This is me, the human")
# get the message history from the memory store and turn it into a json
messages = memory.chat_memory.messages
messages_json = json.dumps([message_to_dict(msg) for msg in messages])
assert "This is me, the AI" in messages_json
assert "This is me, the human" in messages_json
# remove the record from Redis, so the next test run won't pick it up
memory.chat_memory.clear()
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~callbacks~argilla_callback.py | import os
import warnings
from typing import Any, Dict, List, Optional
from libs.core.langchain_core.agents import AgentAction, AgentFinish
from libs.core.langchain_core.callbacks import BaseCallbackHandler
from libs.core.langchain_core.outputs import LLMResult
from packaging.version import parse
class ArgillaCallbackHandler(BaseCallbackHandler):
"""Callback Handler that logs into Argilla.
Args:
dataset_name: name of the `FeedbackDataset` in Argilla. Note that it must
exist in advance. If you need help on how to create a `FeedbackDataset` in
Argilla, please visit
https://docs.argilla.io/en/latest/guides/llms/practical_guides/use_argilla_callback_in_langchain.html.
workspace_name: name of the workspace in Argilla where the specified
`FeedbackDataset` lives in. Defaults to `None`, which means that the
default workspace will be used.
api_url: URL of the Argilla Server that we want to use, and where the
`FeedbackDataset` lives in. Defaults to `None`, which means that either
`ARGILLA_API_URL` environment variable or the default will be used.
api_key: API Key to connect to the Argilla Server. Defaults to `None`, which
means that either `ARGILLA_API_KEY` environment variable or the default
will be used.
Raises:
ImportError: if the `argilla` package is not installed.
ConnectionError: if the connection to Argilla fails.
FileNotFoundError: if the `FeedbackDataset` retrieval from Argilla fails.
Examples:
>>> from langchain_community.llms import OpenAI
>>> from langchain_community.callbacks import ArgillaCallbackHandler
>>> argilla_callback = ArgillaCallbackHandler(
... dataset_name="my-dataset",
... workspace_name="my-workspace",
... api_url="http://localhost:6900",
... api_key="argilla.apikey",
... )
>>> llm = OpenAI(
... temperature=0,
... callbacks=[argilla_callback],
... verbose=True,
... openai_api_key="API_KEY_HERE",
... )
>>> llm.generate([
... "What is the best NLP-annotation tool out there? (no bias at all)",
... ])
"Argilla, no doubt about it."
"""
REPO_URL: str = "https://github.com/argilla-io/argilla"
ISSUES_URL: str = f"{REPO_URL}/issues"
BLOG_URL: str = "https://docs.argilla.io/en/latest/guides/llms/practical_guides/use_argilla_callback_in_langchain.html" # noqa: E501
DEFAULT_API_URL: str = "http://localhost:6900"
def __init__(
self,
dataset_name: str,
workspace_name: Optional[str] = None,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
) -> None:
"""Initializes the `ArgillaCallbackHandler`.
Args:
dataset_name: name of the `FeedbackDataset` in Argilla. Note that it must
exist in advance. If you need help on how to create a `FeedbackDataset`
in Argilla, please visit
https://docs.argilla.io/en/latest/guides/llms/practical_guides/use_argilla_callback_in_langchain.html.
workspace_name: name of the workspace in Argilla where the specified
`FeedbackDataset` lives in. Defaults to `None`, which means that the
default workspace will be used.
api_url: URL of the Argilla Server that we want to use, and where the
`FeedbackDataset` lives in. Defaults to `None`, which means that either
`ARGILLA_API_URL` environment variable or the default will be used.
api_key: API Key to connect to the Argilla Server. Defaults to `None`, which
means that either `ARGILLA_API_KEY` environment variable or the default
will be used.
Raises:
ImportError: if the `argilla` package is not installed.
ConnectionError: if the connection to Argilla fails.
FileNotFoundError: if the `FeedbackDataset` retrieval from Argilla fails.
"""
super().__init__()
# Import Argilla (not via `import_argilla` to keep hints in IDEs)
try:
import argilla as rg # noqa: F401
self.ARGILLA_VERSION = rg.__version__
except ImportError:
raise ImportError(
"To use the Argilla callback manager you need to have the `argilla` "
"Python package installed. Please install it with `pip install argilla`"
)
# Check whether the Argilla version is compatible
if parse(self.ARGILLA_VERSION) < parse("1.8.0"):
raise ImportError(
f"The installed `argilla` version is {self.ARGILLA_VERSION} but "
"`ArgillaCallbackHandler` requires at least version 1.8.0. Please "
"upgrade `argilla` with `pip install --upgrade argilla`."
)
# Show a warning message if Argilla will assume the default values will be used
if api_url is None and os.getenv("ARGILLA_API_URL") is None:
warnings.warn(
(
"Since `api_url` is None, and the env var `ARGILLA_API_URL` is not"
f" set, it will default to `{self.DEFAULT_API_URL}`, which is the"
" default API URL in Argilla Quickstart."
),
)
api_url = self.DEFAULT_API_URL
if api_key is None and os.getenv("ARGILLA_API_KEY") is None:
self.DEFAULT_API_KEY = (
"admin.apikey"
if parse(self.ARGILLA_VERSION) < parse("1.11.0")
else "owner.apikey"
)
warnings.warn(
(
"Since `api_key` is None, and the env var `ARGILLA_API_KEY` is not"
f" set, it will default to `{self.DEFAULT_API_KEY}`, which is the"
" default API key in Argilla Quickstart."
),
)
            api_key = self.DEFAULT_API_KEY
# Connect to Argilla with the provided credentials, if applicable
try:
rg.init(api_key=api_key, api_url=api_url)
except Exception as e:
raise ConnectionError(
f"Could not connect to Argilla with exception: '{e}'.\n"
"Please check your `api_key` and `api_url`, and make sure that "
"the Argilla server is up and running. If the problem persists "
f"please report it to {self.ISSUES_URL} as an `integration` issue."
) from e
# Set the Argilla variables
self.dataset_name = dataset_name
self.workspace_name = workspace_name or rg.get_workspace()
# Retrieve the `FeedbackDataset` from Argilla (without existing records)
try:
extra_args = {}
if parse(self.ARGILLA_VERSION) < parse("1.14.0"):
warnings.warn(
f"You have Argilla {self.ARGILLA_VERSION}, but Argilla 1.14.0 or"
" higher is recommended.",
UserWarning,
)
extra_args = {"with_records": False}
self.dataset = rg.FeedbackDataset.from_argilla(
name=self.dataset_name,
workspace=self.workspace_name,
**extra_args,
)
except Exception as e:
raise FileNotFoundError(
f"`FeedbackDataset` retrieval from Argilla failed with exception `{e}`."
f"\nPlease check that the dataset with name={self.dataset_name} in the"
f" workspace={self.workspace_name} exists in advance. If you need help"
" on how to create a `langchain`-compatible `FeedbackDataset` in"
f" Argilla, please visit {self.BLOG_URL}. If the problem persists"
f" please report it to {self.ISSUES_URL} as an `integration` issue."
) from e
supported_fields = ["prompt", "response"]
if supported_fields != [field.name for field in self.dataset.fields]:
raise ValueError(
f"`FeedbackDataset` with name={self.dataset_name} in the workspace="
f"{self.workspace_name} had fields that are not supported yet for the"
f"`langchain` integration. Supported fields are: {supported_fields},"
f" and the current `FeedbackDataset` fields are {[field.name for field in self.dataset.fields]}." # noqa: E501
" For more information on how to create a `langchain`-compatible"
f" `FeedbackDataset` in Argilla, please visit {self.BLOG_URL}."
)
self.prompts: Dict[str, List[str]] = {}
warnings.warn(
(
"The `ArgillaCallbackHandler` is currently in beta and is subject to"
" change based on updates to `langchain`. Please report any issues to"
f" {self.ISSUES_URL} as an `integration` issue."
),
)
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Save the prompts in memory when an LLM starts."""
self.prompts.update({str(kwargs["parent_run_id"] or kwargs["run_id"]): prompts})
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Do nothing when a new token is generated."""
pass
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Log records to Argilla when an LLM ends."""
# Do nothing if there's a parent_run_id, since we will log the records when
# the chain ends
if kwargs["parent_run_id"]:
return
# Creates the records and adds them to the `FeedbackDataset`
prompts = self.prompts[str(kwargs["run_id"])]
for prompt, generations in zip(prompts, response.generations):
self.dataset.add_records(
records=[
{
"fields": {
"prompt": prompt,
"response": generation.text.strip(),
},
}
for generation in generations
]
)
        # Pop current run from `self.prompts`
self.prompts.pop(str(kwargs["run_id"]))
if parse(self.ARGILLA_VERSION) < parse("1.14.0"):
# Push the records to Argilla
self.dataset.push_to_argilla()
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing when LLM outputs an error."""
pass
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""If the key `input` is in `inputs`, then save it in `self.prompts` using
either the `parent_run_id` or the `run_id` as the key. This is done so that
we don't log the same input prompt twice, once when the LLM starts and once
when the chain starts.
"""
if "input" in inputs:
self.prompts.update(
{
str(kwargs["parent_run_id"] or kwargs["run_id"]): (
inputs["input"]
if isinstance(inputs["input"], list)
else [inputs["input"]]
)
}
)
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""If either the `parent_run_id` or the `run_id` is in `self.prompts`, then
log the outputs to Argilla, and pop the run from `self.prompts`. The behavior
differs if the output is a list or not.
"""
if not any(
key in self.prompts
for key in [str(kwargs["parent_run_id"]), str(kwargs["run_id"])]
):
return
prompts = self.prompts.get(str(kwargs["parent_run_id"])) or self.prompts.get(
str(kwargs["run_id"])
)
for chain_output_key, chain_output_val in outputs.items():
if isinstance(chain_output_val, list):
# Creates the records and adds them to the `FeedbackDataset`
self.dataset.add_records(
records=[
{
"fields": {
"prompt": prompt,
"response": output["text"].strip(),
},
}
for prompt, output in zip(
prompts, # type: ignore
chain_output_val,
)
]
)
else:
# Creates the records and adds them to the `FeedbackDataset`
self.dataset.add_records(
records=[
{
"fields": {
"prompt": " ".join(prompts), # type: ignore
"response": chain_output_val.strip(),
},
}
]
)
        # Pop current run from `self.prompts`
if str(kwargs["parent_run_id"]) in self.prompts:
self.prompts.pop(str(kwargs["parent_run_id"]))
if str(kwargs["run_id"]) in self.prompts:
self.prompts.pop(str(kwargs["run_id"]))
if parse(self.ARGILLA_VERSION) < parse("1.14.0"):
# Push the records to Argilla
self.dataset.push_to_argilla()
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing when LLM chain outputs an error."""
pass
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
**kwargs: Any,
) -> None:
"""Do nothing when tool starts."""
pass
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Do nothing when agent takes a specific action."""
pass
def on_tool_end(
self,
output: str,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""Do nothing when tool ends."""
pass
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing when tool outputs an error."""
pass
def on_text(self, text: str, **kwargs: Any) -> None:
"""Do nothing"""
pass
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Do nothing"""
pass
| [
"parent_run_id",
"run_id"
] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~utilities~test_wikipedia_api.py | """Integration test for Wikipedia API Wrapper."""
from typing import List
import pytest
from libs.core.langchain_core.documents import Document
from langchain_community.utilities import WikipediaAPIWrapper
@pytest.fixture
def api_client() -> WikipediaAPIWrapper:
return WikipediaAPIWrapper()
def test_run_success(api_client: WikipediaAPIWrapper) -> None:
output = api_client.run("HUNTER X HUNTER")
assert "Yoshihiro Togashi" in output
def test_run_no_result(api_client: WikipediaAPIWrapper) -> None:
output = api_client.run(
"NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL"
)
assert "No good Wikipedia Search Result was found" == output
def assert_docs(docs: List[Document], all_meta: bool = False) -> None:
for doc in docs:
assert doc.page_content
assert doc.metadata
main_meta = {"title", "summary", "source"}
assert set(doc.metadata).issuperset(main_meta)
if all_meta:
assert len(set(doc.metadata)) > len(main_meta)
else:
assert len(set(doc.metadata)) == len(main_meta)
def test_load_success(api_client: WikipediaAPIWrapper) -> None:
docs = api_client.load("HUNTER X HUNTER")
assert len(docs) > 1
assert len(docs) <= 3
assert_docs(docs, all_meta=False)
def test_load_success_all_meta(api_client: WikipediaAPIWrapper) -> None:
api_client.load_all_available_meta = True
docs = api_client.load("HUNTER X HUNTER")
assert len(docs) > 1
assert len(docs) <= 3
assert_docs(docs, all_meta=True)
def test_load_more_docs_success(api_client: WikipediaAPIWrapper) -> None:
top_k_results = 20
api_client = WikipediaAPIWrapper(top_k_results=top_k_results)
docs = api_client.load("HUNTER X HUNTER")
assert len(docs) > 10
assert len(docs) <= top_k_results
assert_docs(docs, all_meta=False)
def test_load_no_result(api_client: WikipediaAPIWrapper) -> None:
docs = api_client.load(
"NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL"
)
assert not docs
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~tools~file_management~copy.py | import shutil
from typing import Optional, Type
from libs.core.langchain_core.callbacks import CallbackManagerForToolRun
from libs.core.langchain_core.pydantic_v1 import BaseModel, Field
from libs.core.langchain_core.tools import BaseTool
from langchain_community.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileToolMixin,
FileValidationError,
)
class FileCopyInput(BaseModel):
"""Input for CopyFileTool."""
source_path: str = Field(..., description="Path of the file to copy")
destination_path: str = Field(..., description="Path to save the copied file")
class CopyFileTool(BaseFileToolMixin, BaseTool):
"""Tool that copies a file."""
name: str = "copy_file"
args_schema: Type[BaseModel] = FileCopyInput
description: str = "Create a copy of a file in a specified location"
def _run(
self,
source_path: str,
destination_path: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
try:
source_path_ = self.get_relative_path(source_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(
arg_name="source_path", value=source_path
)
try:
destination_path_ = self.get_relative_path(destination_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(
arg_name="destination_path", value=destination_path
)
try:
shutil.copy2(source_path_, destination_path_, follow_symlinks=False)
return f"File copied successfully from {source_path} to {destination_path}."
except Exception as e:
return "Error: " + str(e)
# TODO: Add aiofiles method
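# A minimal usage sketch (paths are resolved relative to `root_dir`, which scopes
# what the tool is allowed to touch; the paths below are placeholders):
#
#   tool = CopyFileTool(root_dir="/tmp/workspace")
#   result = tool.run({"source_path": "a.txt", "destination_path": "backup/a.txt"})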
| [
"Create a copy of a file in a specified location"
] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~vectorstores~test_deeplake.py | """Test Deep Lake functionality."""
import pytest
from libs.core.langchain_core.documents import Document
from pytest import FixtureRequest
from langchain_community.vectorstores import DeepLake
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
@pytest.fixture
def deeplake_datastore() -> DeepLake:
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = DeepLake.from_texts(
dataset_path="./test_path",
texts=texts,
metadatas=metadatas,
embedding_function=FakeEmbeddings(),
overwrite=True,
)
return docsearch
@pytest.fixture(params=["L1", "L2", "max", "cos"])
def distance_metric(request: FixtureRequest) -> str:
return request.param
def test_deeplake() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docsearch = DeepLake.from_texts(
dataset_path="mem://test_path", texts=texts, embedding=FakeEmbeddings()
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_deeplake_with_metadatas() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = DeepLake.from_texts(
dataset_path="mem://test_path",
texts=texts,
embedding=FakeEmbeddings(),
metadatas=metadatas,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"page": "0"})]
def test_deeplakewith_persistence() -> None:
"""Test end to end construction and search, with persistence."""
import deeplake
dataset_path = "./tests/persist_dir"
if deeplake.exists(dataset_path):
deeplake.delete(dataset_path)
texts = ["foo", "bar", "baz"]
docsearch = DeepLake.from_texts(
dataset_path=dataset_path,
texts=texts,
embedding=FakeEmbeddings(),
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
# Get a new VectorStore from the persisted directory
docsearch = DeepLake(
dataset_path=dataset_path,
embedding_function=FakeEmbeddings(),
)
output = docsearch.similarity_search("foo", k=1)
# Clean up
docsearch.delete_dataset()
# Persist doesn't need to be called again
# Data will be automatically persisted on object deletion
# Or on program exit
def test_deeplake_overwrite_flag() -> None:
"""Test overwrite behavior"""
import deeplake
dataset_path = "./tests/persist_dir"
if deeplake.exists(dataset_path):
deeplake.delete(dataset_path)
texts = ["foo", "bar", "baz"]
docsearch = DeepLake.from_texts(
dataset_path=dataset_path,
texts=texts,
embedding=FakeEmbeddings(),
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
# Get a new VectorStore from the persisted directory, with no overwrite (implicit)
docsearch = DeepLake(
dataset_path=dataset_path,
embedding_function=FakeEmbeddings(),
)
output = docsearch.similarity_search("foo", k=1)
# assert page still present
assert output == [Document(page_content="foo")]
# Get a new VectorStore from the persisted directory, with no overwrite (explicit)
docsearch = DeepLake(
dataset_path=dataset_path,
embedding_function=FakeEmbeddings(),
overwrite=False,
)
output = docsearch.similarity_search("foo", k=1)
# assert page still present
assert output == [Document(page_content="foo")]
# Get a new VectorStore from the persisted directory, with overwrite
docsearch = DeepLake(
dataset_path=dataset_path,
embedding_function=FakeEmbeddings(),
overwrite=True,
)
with pytest.raises(ValueError):
output = docsearch.similarity_search("foo", k=1)
def test_similarity_search(deeplake_datastore: DeepLake, distance_metric: str) -> None:
"""Test similarity search."""
output = deeplake_datastore.similarity_search(
"foo", k=1, distance_metric=distance_metric
)
assert output == [Document(page_content="foo", metadata={"page": "0"})]
tql_query = (
f"SELECT * WHERE "
f"id=='{deeplake_datastore.vectorstore.dataset.id[0].numpy()[0]}'"
)
output = deeplake_datastore.similarity_search(
query="foo", tql_query=tql_query, k=1, distance_metric=distance_metric
)
assert len(output) == 1
deeplake_datastore.delete_dataset()
def test_similarity_search_by_vector(
deeplake_datastore: DeepLake, distance_metric: str
) -> None:
"""Test similarity search by vector."""
embeddings = FakeEmbeddings().embed_documents(["foo", "bar", "baz"])
output = deeplake_datastore.similarity_search_by_vector(
embeddings[1], k=1, distance_metric=distance_metric
)
assert output == [Document(page_content="bar", metadata={"page": "1"})]
deeplake_datastore.delete_dataset()
def test_similarity_search_with_score(
deeplake_datastore: DeepLake, distance_metric: str
) -> None:
"""Test similarity search with score."""
output, score = deeplake_datastore.similarity_search_with_score(
"foo", k=1, distance_metric=distance_metric
)[0]
assert output == Document(page_content="foo", metadata={"page": "0"})
if distance_metric == "cos":
assert score == 1.0
else:
assert score == 0.0
deeplake_datastore.delete_dataset()
def test_similarity_search_with_filter(
deeplake_datastore: DeepLake, distance_metric: str
) -> None:
"""Test similarity search."""
output = deeplake_datastore.similarity_search(
"foo",
k=1,
distance_metric=distance_metric,
filter={"metadata": {"page": "1"}},
)
assert output == [Document(page_content="bar", metadata={"page": "1"})]
deeplake_datastore.delete_dataset()
def test_max_marginal_relevance_search(deeplake_datastore: DeepLake) -> None:
"""Test max marginal relevance search by vector."""
output = deeplake_datastore.max_marginal_relevance_search("foo", k=1, fetch_k=2)
assert output == [Document(page_content="foo", metadata={"page": "0"})]
embeddings = FakeEmbeddings().embed_documents(["foo", "bar", "baz"])
output = deeplake_datastore.max_marginal_relevance_search_by_vector(
embeddings[0], k=1, fetch_k=2
)
assert output == [Document(page_content="foo", metadata={"page": "0"})]
deeplake_datastore.delete_dataset()
def test_delete_dataset_by_ids(deeplake_datastore: DeepLake) -> None:
"""Test delete dataset."""
id = deeplake_datastore.vectorstore.dataset.id.data()["value"][0]
deeplake_datastore.delete(ids=[id])
assert (
deeplake_datastore.similarity_search(
"foo", k=1, filter={"metadata": {"page": "0"}}
)
== []
)
assert len(deeplake_datastore.vectorstore) == 2
deeplake_datastore.delete_dataset()
def test_delete_dataset_by_filter(deeplake_datastore: DeepLake) -> None:
"""Test delete dataset."""
deeplake_datastore.delete(filter={"metadata": {"page": "1"}})
assert (
deeplake_datastore.similarity_search(
"bar", k=1, filter={"metadata": {"page": "1"}}
)
== []
)
assert len(deeplake_datastore.vectorstore.dataset) == 2
deeplake_datastore.delete_dataset()
def test_delete_by_path(deeplake_datastore: DeepLake) -> None:
"""Test delete dataset."""
import deeplake
path = deeplake_datastore.dataset_path
DeepLake.force_delete_by_path(path)
assert not deeplake.exists(path)
def test_add_texts(deeplake_datastore: DeepLake) -> None:
"""Test add_texts dataset."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
deeplake_datastore.add_texts(
texts=texts,
metadatas=metadatas,
)
with pytest.raises(TypeError):
deeplake_datastore.add_texts(
texts=texts,
metada=metadatas,
)
def test_ids_backwards_compatibility() -> None:
"""Test that ids are backwards compatible."""
db = DeepLake(
dataset_path="mem://test_path",
embedding_function=FakeEmbeddings(),
tensor_params=[
{"name": "ids", "htype": "text"},
{"name": "text", "htype": "text"},
{"name": "embedding", "htype": "embedding"},
{"name": "metadata", "htype": "json"},
],
)
db.vectorstore.add(
ids=["1", "2", "3"],
text=["foo", "bar", "baz"],
embedding=FakeEmbeddings().embed_documents(["foo", "bar", "baz"]),
metadata=[{"page": str(i)} for i in range(3)],
)
output = db.similarity_search("foo", k=1)
assert len(output) == 1
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~chains~question_answering~map_rerank_prompt.py | # flake8: noqa
from langchain.output_parsers.regex import RegexParser
from libs.core.langchain_core.prompts import PromptTemplate
output_parser = RegexParser(
regex=r"(.*?)\nScore: (\d*)",
output_keys=["answer", "score"],
)
prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
In addition to giving an answer, also return a score of how fully it answered the user's question. This should be in the following format:
Question: [question here]
Helpful Answer: [answer here]
Score: [score between 0 and 100]
How to determine the score:
- Higher is a better answer
- Better responds fully to the asked question, with sufficient level of detail
- If you do not know the answer based on the context, that should be a score of 0
- Don't be overconfident!
Example #1
Context:
---------
Apples are red
---------
Question: what color are apples?
Helpful Answer: red
Score: 100
Example #2
Context:
---------
it was night and the witness forgot his glasses. he was not sure if it was a sports car or an suv
---------
Question: what type was the car?
Helpful Answer: a sports car or an suv
Score: 60
Example #3
Context:
---------
Pears are either red or orange
---------
Question: what color are apples?
Helpful Answer: This document does not answer the question
Score: 0
Begin!
Context:
---------
{context}
---------
Question: {question}
Helpful Answer:"""
PROMPT = PromptTemplate(
template=prompt_template,
input_variables=["context", "question"],
output_parser=output_parser,
)
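# Illustration of how the attached RegexParser splits a well-formed completion into
# the declared output keys (the values shown are made up):
#
#   output_parser.parse("red\nScore: 100")
#   # -> {"answer": "red", "score": "100"}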
| [
"Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\n\nIn addition to giving an answer, also return a score of how fully it answered the user's question. This should be in the following format:\n\nQuestion: [question here]\nHelpful Answer: [answer here]\nScore: [score between 0 and 100]\n\nHow to determine the score:\n- Higher is a better answer\n- Better responds fully to the asked question, with sufficient level of detail\n- If you do not know the answer based on the context, that should be a score of 0\n- Don't be overconfident!\n\nExample #1\n\nContext:\n---------\nApples are red\n---------\nQuestion: what color are apples?\nHelpful Answer: red\nScore: 100\n\nExample #2\n\nContext:\n---------\nit was night and the witness forgot his glasses. he was not sure if it was a sports car or an suv\n---------\nQuestion: what type was the car?\nHelpful Answer: a sports car or an suv\nScore: 60\n\nExample #3\n\nContext:\n---------\nPears are either red or orange\n---------\nQuestion: what color are apples?\nHelpful Answer: This document does not answer the question\nScore: 0\n\nBegin!\n\nContext:\n---------\n{context}\n---------\nQuestion: {question}\nHelpful Answer:",
"context",
"question",
"t know the answer, just say that you don"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~tools~playwright~navigate_back.py | from __future__ import annotations
from typing import Optional, Type
from libs.core.langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from libs.core.langchain_core.pydantic_v1 import BaseModel
from langchain_community.tools.playwright.base import BaseBrowserTool
from langchain_community.tools.playwright.utils import (
aget_current_page,
get_current_page,
)
class NavigateBackTool(BaseBrowserTool):
"""Navigate back to the previous page in the browser history."""
name: str = "previous_webpage"
description: str = "Navigate back to the previous page in the browser history"
args_schema: Type[BaseModel] = BaseModel
def _run(self, run_manager: Optional[CallbackManagerForToolRun] = None) -> str:
"""Use the tool."""
if self.sync_browser is None:
raise ValueError(f"Synchronous browser not provided to {self.name}")
page = get_current_page(self.sync_browser)
response = page.go_back()
if response:
return (
f"Navigated back to the previous page with URL '{response.url}'."
f" Status code {response.status}"
)
else:
return "Unable to navigate back; no previous page in the history"
async def _arun(
self,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.async_browser is None:
raise ValueError(f"Asynchronous browser not provided to {self.name}")
page = await aget_current_page(self.async_browser)
response = await page.go_back()
if response:
return (
f"Navigated back to the previous page with URL '{response.url}'."
f" Status code {response.status}"
)
else:
return "Unable to navigate back; no previous page in the history"
| [
"Navigate back to the previous page in the browser history"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_transformers~nuclia_text_transform.py | import asyncio
import json
import uuid
from typing import Any, Sequence
from libs.core.langchain_core.documents import BaseDocumentTransformer, Document
from langchain_community.tools.nuclia.tool import NucliaUnderstandingAPI
class NucliaTextTransformer(BaseDocumentTransformer):
"""
    The Nuclia Understanding API splits the text into paragraphs and sentences,
    identifies entities, provides a summary of the text, and generates
    embeddings for all sentences.
"""
def __init__(self, nua: NucliaUnderstandingAPI):
self.nua = nua
def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
raise NotImplementedError
async def atransform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
tasks = [
self.nua.arun(
{
"action": "push",
"id": str(uuid.uuid4()),
"text": doc.page_content,
"path": None,
}
)
for doc in documents
]
results = await asyncio.gather(*tasks)
for doc, result in zip(documents, results):
obj = json.loads(result)
metadata = {
"file": obj["file_extracted_data"][0],
"metadata": obj["field_metadata"][0],
}
doc.metadata["nuclia"] = metadata
return documents
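# A minimal usage sketch (assumes Nuclia NUA credentials are configured for
# `NucliaUnderstandingAPI`; it must be run through an event loop since only the
# async transform is implemented):
#
#   import asyncio
#   from libs.core.langchain_core.documents import Document
#
#   nua = NucliaUnderstandingAPI(enable_ml=True)
#   transformer = NucliaTextTransformer(nua)
#   docs = [Document(page_content="The Eiffel Tower is located in Paris.")]
#   enriched = asyncio.run(transformer.atransform_documents(docs))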
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~docugami.py | import hashlib
import io
import logging
import os
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional, Sequence, Union
import requests
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.pydantic_v1 import BaseModel, root_validator
from langchain_community.document_loaders.base import BaseLoader
TABLE_NAME = "{http://www.w3.org/1999/xhtml}table"
XPATH_KEY = "xpath"
ID_KEY = "id"
DOCUMENT_SOURCE_KEY = "source"
DOCUMENT_NAME_KEY = "name"
STRUCTURE_KEY = "structure"
TAG_KEY = "tag"
PROJECTS_KEY = "projects"
DEFAULT_API_ENDPOINT = "https://api.docugami.com/v1preview1"
logger = logging.getLogger(__name__)
class DocugamiLoader(BaseLoader, BaseModel):
"""Load from `Docugami`.
To use, you should have the ``dgml-utils`` python package installed.
"""
api: str = DEFAULT_API_ENDPOINT
"""The Docugami API endpoint to use."""
access_token: Optional[str] = os.environ.get("DOCUGAMI_API_KEY")
"""The Docugami API access token to use."""
    max_text_length: int = 4096
"""Max length of chunk text returned."""
min_text_length: int = 32
"""Threshold under which chunks are appended to next to avoid over-chunking."""
    max_metadata_length: int = 512
"""Max length of metadata text returned."""
include_xml_tags: bool = False
"""Set to true for XML tags in chunk output text."""
parent_hierarchy_levels: int = 0
"""Set appropriately to get parent chunks using the chunk hierarchy."""
parent_id_key: str = "doc_id"
"""Metadata key for parent doc ID."""
sub_chunk_tables: bool = False
"""Set to True to return sub-chunks within tables."""
whitespace_normalize_text: bool = True
"""Set to False if you want to full whitespace formatting in the original
XML doc, including indentation."""
docset_id: Optional[str]
"""The Docugami API docset ID to use."""
document_ids: Optional[Sequence[str]]
"""The Docugami API document IDs to use."""
file_paths: Optional[Sequence[Union[Path, str]]]
"""The local file paths to use."""
include_project_metadata_in_doc_metadata: bool = True
"""Set to True if you want to include the project metadata in the doc metadata."""
@root_validator
def validate_local_or_remote(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Validate that either local file paths are given, or remote API docset ID.
Args:
values: The values to validate.
Returns:
The validated values.
"""
if values.get("file_paths") and values.get("docset_id"):
raise ValueError("Cannot specify both file_paths and remote API docset_id")
if not values.get("file_paths") and not values.get("docset_id"):
raise ValueError("Must specify either file_paths or remote API docset_id")
if values.get("docset_id") and not values.get("access_token"):
raise ValueError("Must specify access token if using remote API docset_id")
return values
def _parse_dgml(
self,
content: bytes,
document_name: Optional[str] = None,
additional_doc_metadata: Optional[Mapping] = None,
) -> List[Document]:
"""Parse a single DGML document into a list of Documents."""
try:
from lxml import etree
except ImportError:
raise ImportError(
"Could not import lxml python package. "
"Please install it with `pip install lxml`."
)
try:
from dgml_utils.models import Chunk
from dgml_utils.segmentation import get_chunks
except ImportError:
raise ImportError(
"Could not import from dgml-utils python package. "
"Please install it with `pip install dgml-utils`."
)
def _build_framework_chunk(dg_chunk: Chunk) -> Document:
# Stable IDs for chunks with the same text.
_hashed_id = hashlib.md5(dg_chunk.text.encode()).hexdigest()
metadata = {
XPATH_KEY: dg_chunk.xpath,
ID_KEY: _hashed_id,
DOCUMENT_NAME_KEY: document_name,
DOCUMENT_SOURCE_KEY: document_name,
STRUCTURE_KEY: dg_chunk.structure,
TAG_KEY: dg_chunk.tag,
}
text = dg_chunk.text
if additional_doc_metadata:
if self.include_project_metadata_in_doc_metadata:
metadata.update(additional_doc_metadata)
return Document(
page_content=text[: self.max_text_length],
metadata=metadata,
)
# Parse the tree and return chunks
tree = etree.parse(io.BytesIO(content))
root = tree.getroot()
dg_chunks = get_chunks(
root,
min_text_length=self.min_text_length,
max_text_length=self.max_text_length,
whitespace_normalize_text=self.whitespace_normalize_text,
sub_chunk_tables=self.sub_chunk_tables,
include_xml_tags=self.include_xml_tags,
parent_hierarchy_levels=self.parent_hierarchy_levels,
)
framework_chunks: Dict[str, Document] = {}
for dg_chunk in dg_chunks:
framework_chunk = _build_framework_chunk(dg_chunk)
chunk_id = framework_chunk.metadata.get(ID_KEY)
if chunk_id:
framework_chunks[chunk_id] = framework_chunk
if dg_chunk.parent:
framework_parent_chunk = _build_framework_chunk(dg_chunk.parent)
parent_id = framework_parent_chunk.metadata.get(ID_KEY)
if parent_id and framework_parent_chunk.page_content:
framework_chunk.metadata[self.parent_id_key] = parent_id
framework_chunks[parent_id] = framework_parent_chunk
return list(framework_chunks.values())
def _document_details_for_docset_id(self, docset_id: str) -> List[Dict]:
"""Gets all document details for the given docset ID"""
url = f"{self.api}/docsets/{docset_id}/documents"
all_documents = []
while url:
response = requests.get(
url,
headers={"Authorization": f"Bearer {self.access_token}"},
)
if response.ok:
data = response.json()
all_documents.extend(data["documents"])
url = data.get("next", None)
else:
raise Exception(
f"Failed to download {url} (status: {response.status_code})"
)
return all_documents
def _project_details_for_docset_id(self, docset_id: str) -> List[Dict]:
"""Gets all project details for the given docset ID"""
url = f"{self.api}/projects?docset.id={docset_id}"
all_projects = []
while url:
response = requests.request(
"GET",
url,
headers={"Authorization": f"Bearer {self.access_token}"},
data={},
)
if response.ok:
data = response.json()
all_projects.extend(data["projects"])
url = data.get("next", None)
else:
raise Exception(
f"Failed to download {url} (status: {response.status_code})"
)
return all_projects
def _metadata_for_project(self, project: Dict) -> Dict:
"""Gets project metadata for all files"""
project_id = project.get(ID_KEY)
url = f"{self.api}/projects/{project_id}/artifacts/latest"
all_artifacts = []
per_file_metadata: Dict = {}
while url:
response = requests.request(
"GET",
url,
headers={"Authorization": f"Bearer {self.access_token}"},
data={},
)
if response.ok:
data = response.json()
all_artifacts.extend(data["artifacts"])
url = data.get("next", None)
elif response.status_code == 404:
# Not found is ok, just means no published projects
return per_file_metadata
else:
raise Exception(
f"Failed to download {url} (status: {response.status_code})"
)
for artifact in all_artifacts:
artifact_name = artifact.get("name")
artifact_url = artifact.get("url")
artifact_doc = artifact.get("document")
if artifact_name == "report-values.xml" and artifact_url and artifact_doc:
doc_id = artifact_doc[ID_KEY]
metadata: Dict = {}
# The evaluated XML for each document is named after the project
response = requests.request(
"GET",
f"{artifact_url}/content",
headers={"Authorization": f"Bearer {self.access_token}"},
data={},
)
if response.ok:
try:
from lxml import etree
except ImportError:
raise ImportError(
"Could not import lxml python package. "
"Please install it with `pip install lxml`."
)
artifact_tree = etree.parse(io.BytesIO(response.content))
artifact_root = artifact_tree.getroot()
ns = artifact_root.nsmap
entries = artifact_root.xpath("//pr:Entry", namespaces=ns)
for entry in entries:
heading = entry.xpath("./pr:Heading", namespaces=ns)[0].text
value = " ".join(
entry.xpath("./pr:Value", namespaces=ns)[0].itertext()
).strip()
metadata[heading] = value[: self.max_metadata_length]
per_file_metadata[doc_id] = metadata
else:
raise Exception(
f"Failed to download {artifact_url}/content "
+ "(status: {response.status_code})"
)
return per_file_metadata
def _load_chunks_for_document(
self,
document_id: str,
docset_id: str,
document_name: Optional[str] = None,
additional_metadata: Optional[Mapping] = None,
) -> List[Document]:
"""Load chunks for a document."""
url = f"{self.api}/docsets/{docset_id}/documents/{document_id}/dgml"
response = requests.request(
"GET",
url,
headers={"Authorization": f"Bearer {self.access_token}"},
data={},
)
if response.ok:
return self._parse_dgml(
content=response.content,
document_name=document_name,
additional_doc_metadata=additional_metadata,
)
else:
raise Exception(
f"Failed to download {url} (status: {response.status_code})"
)
def load(self) -> List[Document]:
"""Load documents."""
chunks: List[Document] = []
if self.access_token and self.docset_id:
# Remote mode
_document_details = self._document_details_for_docset_id(self.docset_id)
if self.document_ids:
_document_details = [
d for d in _document_details if d[ID_KEY] in self.document_ids
]
_project_details = self._project_details_for_docset_id(self.docset_id)
combined_project_metadata: Dict[str, Dict] = {}
if _project_details and self.include_project_metadata_in_doc_metadata:
# If there are any projects for this docset and the caller requested
# project metadata, load it.
for project in _project_details:
metadata = self._metadata_for_project(project)
for file_id in metadata:
if file_id not in combined_project_metadata:
combined_project_metadata[file_id] = metadata[file_id]
else:
combined_project_metadata[file_id].update(metadata[file_id])
for doc in _document_details:
doc_id = doc[ID_KEY]
doc_name = doc.get(DOCUMENT_NAME_KEY)
doc_metadata = combined_project_metadata.get(doc_id)
chunks += self._load_chunks_for_document(
document_id=doc_id,
docset_id=self.docset_id,
document_name=doc_name,
additional_metadata=doc_metadata,
)
elif self.file_paths:
# Local mode (for integration testing, or pre-downloaded XML)
for path in self.file_paths:
path = Path(path)
with open(path, "rb") as file:
chunks += self._parse_dgml(
content=file.read(),
document_name=path.name,
)
return chunks
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~symblai_nebula.py | import json
import logging
from typing import Any, Callable, Dict, List, Mapping, Optional
import requests
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.pydantic_v1 import Extra, SecretStr, root_validator
from libs.core.langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from requests import ConnectTimeout, ReadTimeout, RequestException
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain_community.llms.utils import enforce_stop_tokens
DEFAULT_NEBULA_SERVICE_URL = "https://api-nebula.symbl.ai"
DEFAULT_NEBULA_SERVICE_PATH = "/v1/model/generate"
logger = logging.getLogger(__name__)
class Nebula(LLM):
"""Nebula Service models.
To use, you should have the environment variable ``NEBULA_SERVICE_URL``,
``NEBULA_SERVICE_PATH`` and ``NEBULA_API_KEY`` set with your Nebula
Service, or pass it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain_community.llms import Nebula
nebula = Nebula(
nebula_service_url="NEBULA_SERVICE_URL",
nebula_service_path="NEBULA_SERVICE_PATH",
nebula_api_key="NEBULA_API_KEY",
)
""" # noqa: E501
"""Key/value arguments to pass to the model. Reserved for future use"""
model_kwargs: Optional[dict] = None
"""Optional"""
nebula_service_url: Optional[str] = None
nebula_service_path: Optional[str] = None
nebula_api_key: Optional[SecretStr] = None
model: Optional[str] = None
max_new_tokens: Optional[int] = 128
temperature: Optional[float] = 0.6
top_p: Optional[float] = 0.95
repetition_penalty: Optional[float] = 1.0
top_k: Optional[int] = 1
stop_sequences: Optional[List[str]] = None
max_retries: Optional[int] = 10
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
nebula_service_url = get_from_dict_or_env(
values,
"nebula_service_url",
"NEBULA_SERVICE_URL",
DEFAULT_NEBULA_SERVICE_URL,
)
nebula_service_path = get_from_dict_or_env(
values,
"nebula_service_path",
"NEBULA_SERVICE_PATH",
DEFAULT_NEBULA_SERVICE_PATH,
)
nebula_api_key = convert_to_secret_str(
get_from_dict_or_env(values, "nebula_api_key", "NEBULA_API_KEY", None)
)
if nebula_service_url.endswith("/"):
nebula_service_url = nebula_service_url[:-1]
if not nebula_service_path.startswith("/"):
nebula_service_path = "/" + nebula_service_path
values["nebula_service_url"] = nebula_service_url
values["nebula_service_path"] = nebula_service_path
values["nebula_api_key"] = nebula_api_key
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Cohere API."""
return {
"max_new_tokens": self.max_new_tokens,
"temperature": self.temperature,
"top_k": self.top_k,
"top_p": self.top_p,
"repetition_penalty": self.repetition_penalty,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
"nebula_service_url": self.nebula_service_url,
"nebula_service_path": self.nebula_service_path,
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "nebula"
def _invocation_params(
self, stop_sequences: Optional[List[str]], **kwargs: Any
) -> dict:
params = self._default_params
if self.stop_sequences is not None and stop_sequences is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop_sequences is not None:
params["stop_sequences"] = self.stop_sequences
else:
params["stop_sequences"] = stop_sequences
return {**params, **kwargs}
@staticmethod
def _process_response(response: Any, stop: Optional[List[str]]) -> str:
text = response["output"]["text"]
if stop:
text = enforce_stop_tokens(text, stop)
return text
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Nebula Service endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = nebula("Tell me a joke.")
"""
params = self._invocation_params(stop, **kwargs)
prompt = prompt.strip()
response = completion_with_retry(
self,
prompt=prompt,
params=params,
url=f"{self.nebula_service_url}{self.nebula_service_path}",
)
_stop = params.get("stop_sequences")
return self._process_response(response, _stop)
def make_request(
self: Nebula,
prompt: str,
url: str = f"{DEFAULT_NEBULA_SERVICE_URL}{DEFAULT_NEBULA_SERVICE_PATH}",
params: Optional[Dict] = None,
) -> Any:
"""Generate text from the model."""
params = params or {}
api_key = None
if self.nebula_api_key is not None:
api_key = self.nebula_api_key.get_secret_value()
headers = {
"Content-Type": "application/json",
"ApiKey": f"{api_key}",
}
body = {"prompt": prompt}
# add params to body
for key, value in params.items():
body[key] = value
# make request
response = requests.post(url, headers=headers, json=body)
if response.status_code != 200:
raise Exception(
f"Request failed with status code {response.status_code}"
f" and message {response.text}"
)
return json.loads(response.text)
def _create_retry_decorator(llm: Nebula) -> Callable[[Any], Any]:
min_seconds = 4
max_seconds = 10
    # Exponential backoff: wait 2^x * 1 second between retries,
    # clamped between 4 and 10 seconds.
max_retries = llm.max_retries if llm.max_retries is not None else 3
return retry(
reraise=True,
stop=stop_after_attempt(max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type((RequestException, ConnectTimeout, ReadTimeout))
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def completion_with_retry(llm: Nebula, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
def _completion_with_retry(**_kwargs: Any) -> Any:
return make_request(llm, **_kwargs)
return _completion_with_retry(**kwargs)
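# --- Hedged usage sketch (editor addition, not part of the upstream module). ---
# Ties the pieces above together: calling the Nebula instance routes through
# `completion_with_retry`, which wraps `make_request` in the tenacity retry policy.
# The API key below is a placeholder; a valid Symbl.ai Nebula key is required for
# the request to actually succeed.
def _example_nebula_completion() -> str:
    llm = Nebula(
        nebula_api_key="NEBULA_API_KEY_PLACEHOLDER",  # placeholder credential
        max_new_tokens=64,
        temperature=0.2,
    )
    return llm("Summarize the key decisions from the meeting transcript.")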
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~tencent_cos_file.py | import os
import tempfile
from typing import Any, Iterator, List
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.document_loaders.unstructured import UnstructuredFileLoader
class TencentCOSFileLoader(BaseLoader):
"""Load from `Tencent Cloud COS` file."""
def __init__(self, conf: Any, bucket: str, key: str):
"""Initialize with COS config, bucket and key name.
:param conf(CosConfig): COS config.
:param bucket(str): COS bucket.
:param key(str): COS file key.
"""
self.conf = conf
self.bucket = bucket
self.key = key
def load(self) -> List[Document]:
return list(self.lazy_load())
def lazy_load(self) -> Iterator[Document]:
"""Load documents."""
try:
from qcloud_cos import CosS3Client
except ImportError:
raise ImportError(
"Could not import cos-python-sdk-v5 python package. "
"Please install it with `pip install cos-python-sdk-v5`."
)
# initialize a client
client = CosS3Client(self.conf)
with tempfile.TemporaryDirectory() as temp_dir:
file_path = f"{temp_dir}/{self.bucket}/{self.key}"
os.makedirs(os.path.dirname(file_path), exist_ok=True)
# Download the file to a destination
client.download_file(
Bucket=self.bucket, Key=self.key, DestFilePath=file_path
)
loader = UnstructuredFileLoader(file_path)
            # UnstructuredFileLoader does not implement lazy_load yet
return iter(loader.load())
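# --- Hedged usage sketch (editor addition). ---
# Shows how a CosConfig is wired into the loader. Region, bucket, key and
# credentials are placeholders; this assumes `qcloud_cos.CosConfig` accepts the
# Region/SecretId/SecretKey keyword arguments used by recent cos-python-sdk-v5.
def _example_load_cos_file() -> List[Document]:
    from qcloud_cos import CosConfig
    conf = CosConfig(
        Region="ap-guangzhou",        # placeholder region
        SecretId="YOUR_SECRET_ID",    # placeholder credential
        SecretKey="YOUR_SECRET_KEY",  # placeholder credential
    )
    loader = TencentCOSFileLoader(
        conf=conf,
        bucket="examplebucket-1250000000",  # placeholder bucket
        key="docs/report.pdf",              # placeholder object key
    )
    return loader.load()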
| [] |
2024-01-10 | mth93/langchain | libs~langchain~tests~unit_tests~callbacks~fake_callback_handler.py | """A fake callback handler for testing purposes."""
from itertools import chain
from typing import Any, Dict, List, Optional, Union
from uuid import UUID
from libs.core.langchain_core.messages import BaseMessage
from libs.core.langchain_core.pydantic_v1 import BaseModel
from langchain.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler
class BaseFakeCallbackHandler(BaseModel):
"""Base fake callback handler for testing."""
starts: int = 0
ends: int = 0
errors: int = 0
text: int = 0
ignore_llm_: bool = False
ignore_chain_: bool = False
ignore_agent_: bool = False
ignore_retriever_: bool = False
ignore_chat_model_: bool = False
    # to allow for similar callback handlers that are not technically equal
fake_id: Union[str, None] = None
# add finer-grained counters for easier debugging of failing tests
chain_starts: int = 0
chain_ends: int = 0
llm_starts: int = 0
llm_ends: int = 0
llm_streams: int = 0
tool_starts: int = 0
tool_ends: int = 0
agent_actions: int = 0
agent_ends: int = 0
chat_model_starts: int = 0
retriever_starts: int = 0
retriever_ends: int = 0
retriever_errors: int = 0
retries: int = 0
class BaseFakeCallbackHandlerMixin(BaseFakeCallbackHandler):
"""Base fake callback handler mixin for testing."""
def on_llm_start_common(self) -> None:
self.llm_starts += 1
self.starts += 1
def on_llm_end_common(self) -> None:
self.llm_ends += 1
self.ends += 1
def on_llm_error_common(self) -> None:
self.errors += 1
def on_llm_new_token_common(self) -> None:
self.llm_streams += 1
def on_retry_common(self) -> None:
self.retries += 1
def on_chain_start_common(self) -> None:
self.chain_starts += 1
self.starts += 1
def on_chain_end_common(self) -> None:
self.chain_ends += 1
self.ends += 1
def on_chain_error_common(self) -> None:
self.errors += 1
def on_tool_start_common(self) -> None:
self.tool_starts += 1
self.starts += 1
def on_tool_end_common(self) -> None:
self.tool_ends += 1
self.ends += 1
def on_tool_error_common(self) -> None:
self.errors += 1
def on_agent_action_common(self) -> None:
self.agent_actions += 1
self.starts += 1
def on_agent_finish_common(self) -> None:
self.agent_ends += 1
self.ends += 1
def on_chat_model_start_common(self) -> None:
self.chat_model_starts += 1
self.starts += 1
def on_text_common(self) -> None:
self.text += 1
def on_retriever_start_common(self) -> None:
self.starts += 1
self.retriever_starts += 1
def on_retriever_end_common(self) -> None:
self.ends += 1
self.retriever_ends += 1
def on_retriever_error_common(self) -> None:
self.errors += 1
self.retriever_errors += 1
class FakeCallbackHandler(BaseCallbackHandler, BaseFakeCallbackHandlerMixin):
"""Fake callback handler for testing."""
@property
def ignore_llm(self) -> bool:
"""Whether to ignore LLM callbacks."""
return self.ignore_llm_
@property
def ignore_chain(self) -> bool:
"""Whether to ignore chain callbacks."""
return self.ignore_chain_
@property
def ignore_agent(self) -> bool:
"""Whether to ignore agent callbacks."""
return self.ignore_agent_
@property
def ignore_retriever(self) -> bool:
"""Whether to ignore retriever callbacks."""
return self.ignore_retriever_
def on_llm_start(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_llm_start_common()
def on_llm_new_token(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_llm_new_token_common()
def on_llm_end(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_llm_end_common()
def on_llm_error(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_llm_error_common()
def on_retry(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_retry_common()
def on_chain_start(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_chain_start_common()
def on_chain_end(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_chain_end_common()
def on_chain_error(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_chain_error_common()
def on_tool_start(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_tool_start_common()
def on_tool_end(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_tool_end_common()
def on_tool_error(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_tool_error_common()
def on_agent_action(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_agent_action_common()
def on_agent_finish(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_agent_finish_common()
def on_text(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_text_common()
def on_retriever_start(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_retriever_start_common()
def on_retriever_end(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_retriever_end_common()
def on_retriever_error(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_retriever_error_common()
def __deepcopy__(self, memo: dict) -> "FakeCallbackHandler":
return self
class FakeCallbackHandlerWithChatStart(FakeCallbackHandler):
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
assert all(isinstance(m, BaseMessage) for m in chain(*messages))
self.on_chat_model_start_common()
class FakeAsyncCallbackHandler(AsyncCallbackHandler, BaseFakeCallbackHandlerMixin):
"""Fake async callback handler for testing."""
@property
def ignore_llm(self) -> bool:
"""Whether to ignore LLM callbacks."""
return self.ignore_llm_
@property
def ignore_chain(self) -> bool:
"""Whether to ignore chain callbacks."""
return self.ignore_chain_
@property
def ignore_agent(self) -> bool:
"""Whether to ignore agent callbacks."""
return self.ignore_agent_
async def on_retry(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_retry_common()
async def on_llm_start(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_llm_start_common()
async def on_llm_new_token(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_llm_new_token_common()
async def on_llm_end(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_llm_end_common()
async def on_llm_error(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_llm_error_common()
async def on_chain_start(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_chain_start_common()
async def on_chain_end(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_chain_end_common()
async def on_chain_error(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_chain_error_common()
async def on_tool_start(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_tool_start_common()
async def on_tool_end(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_tool_end_common()
async def on_tool_error(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_tool_error_common()
async def on_agent_action(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_agent_action_common()
async def on_agent_finish(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_agent_finish_common()
async def on_text(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_text_common()
def __deepcopy__(self, memo: dict) -> "FakeAsyncCallbackHandler":
return self
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~output_parsers~structured.py | from __future__ import annotations
from typing import Any, List
from libs.core.langchain_core.output_parsers import BaseOutputParser
from libs.core.langchain_core.pydantic_v1 import BaseModel
from langchain.output_parsers.format_instructions import (
STRUCTURED_FORMAT_INSTRUCTIONS,
STRUCTURED_FORMAT_SIMPLE_INSTRUCTIONS,
)
from langchain.output_parsers.json import parse_and_check_json_markdown
line_template = '\t"{name}": {type} // {description}'
class ResponseSchema(BaseModel):
"""A schema for a response from a structured output parser."""
name: str
"""The name of the schema."""
description: str
"""The description of the schema."""
type: str = "string"
"""The type of the response."""
def _get_sub_string(schema: ResponseSchema) -> str:
return line_template.format(
name=schema.name, description=schema.description, type=schema.type
)
class StructuredOutputParser(BaseOutputParser):
"""Parse the output of an LLM call to a structured output."""
response_schemas: List[ResponseSchema]
"""The schemas for the response."""
@classmethod
def from_response_schemas(
cls, response_schemas: List[ResponseSchema]
) -> StructuredOutputParser:
return cls(response_schemas=response_schemas)
def get_format_instructions(self, only_json: bool = False) -> str:
"""Get format instructions for the output parser.
example:
```python
from langchain.output_parsers.structured import (
StructuredOutputParser, ResponseSchema
)
response_schemas = [
ResponseSchema(
name="foo",
description="a list of strings",
type="List[string]"
),
ResponseSchema(
name="bar",
description="a string",
type="string"
),
]
parser = StructuredOutputParser.from_response_schemas(response_schemas)
print(parser.get_format_instructions())
output:
# The output should be a Markdown code snippet formatted in the following
# schema, including the leading and trailing "```json" and "```":
#
# ```json
# {
# "foo": List[string] // a list of strings
# "bar": string // a string
# }
# ```
Args:
only_json (bool): If True, only the json in the Markdown code snippet
will be returned, without the introducing text. Defaults to False.
"""
schema_str = "\n".join(
[_get_sub_string(schema) for schema in self.response_schemas]
)
if only_json:
return STRUCTURED_FORMAT_SIMPLE_INSTRUCTIONS.format(format=schema_str)
else:
return STRUCTURED_FORMAT_INSTRUCTIONS.format(format=schema_str)
def parse(self, text: str) -> Any:
expected_keys = [rs.name for rs in self.response_schemas]
return parse_and_check_json_markdown(text, expected_keys)
@property
def _type(self) -> str:
return "structured"
| [
"\t\"{name}\": {type} // {description}"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~tools~playwright~current_page.py | from __future__ import annotations
from typing import Optional, Type
from libs.core.langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from libs.core.langchain_core.pydantic_v1 import BaseModel
from langchain_community.tools.playwright.base import BaseBrowserTool
from langchain_community.tools.playwright.utils import (
aget_current_page,
get_current_page,
)
class CurrentWebPageTool(BaseBrowserTool):
"""Tool for getting the URL of the current webpage."""
name: str = "current_webpage"
description: str = "Returns the URL of the current page"
args_schema: Type[BaseModel] = BaseModel
def _run(
self,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.sync_browser is None:
raise ValueError(f"Synchronous browser not provided to {self.name}")
page = get_current_page(self.sync_browser)
return str(page.url)
async def _arun(
self,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.async_browser is None:
raise ValueError(f"Asynchronous browser not provided to {self.name}")
page = await aget_current_page(self.async_browser)
return str(page.url)
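# --- Hedged usage sketch (editor addition). ---
# Sync path end to end. Assumes `create_sync_playwright_browser` is available in
# the sibling `utils` module (it is in upstream langchain_community) and that the
# Playwright browsers have been installed (`playwright install`). A fresh browser
# reports "about:blank"; in real use another browser tool navigates first.
def _example_current_url() -> str:
    from langchain_community.tools.playwright.utils import (
        create_sync_playwright_browser,
    )
    browser = create_sync_playwright_browser()
    tool = CurrentWebPageTool.from_browser(sync_browser=browser)
    return tool.run({})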
| [
"Returns the URL of the current page"
] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~agents~loading.py | """Functionality for loading agents."""
import json
import logging
from pathlib import Path
from typing import Any, List, Optional, Union
import yaml
from libs.core.langchain_core.language_models import BaseLanguageModel
from langchain.agents.agent import BaseMultiActionAgent, BaseSingleActionAgent
from langchain.agents.tools import Tool
from langchain.agents.types import AGENT_TO_CLASS
from langchain.chains.loading import load_chain, load_chain_from_config
from langchain.utilities.loading import try_load_from_hub
logger = logging.getLogger(__file__)
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/agents/"
def _load_agent_from_tools(
config: dict, llm: BaseLanguageModel, tools: List[Tool], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
config_type = config.pop("_type")
if config_type not in AGENT_TO_CLASS:
raise ValueError(f"Loading {config_type} agent not supported")
agent_cls = AGENT_TO_CLASS[config_type]
combined_config = {**config, **kwargs}
return agent_cls.from_llm_and_tools(llm, tools, **combined_config)
def load_agent_from_config(
config: dict,
llm: Optional[BaseLanguageModel] = None,
tools: Optional[List[Tool]] = None,
**kwargs: Any,
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Load agent from Config Dict.
Args:
config: Config dict to load agent from.
llm: Language model to use as the agent.
tools: List of tools this agent has access to.
**kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
"""
if "_type" not in config:
raise ValueError("Must specify an agent Type in config")
load_from_tools = config.pop("load_from_llm_and_tools", False)
if load_from_tools:
if llm is None:
raise ValueError(
"If `load_from_llm_and_tools` is set to True, "
"then LLM must be provided"
)
if tools is None:
raise ValueError(
"If `load_from_llm_and_tools` is set to True, "
"then tools must be provided"
)
return _load_agent_from_tools(config, llm, tools, **kwargs)
config_type = config.pop("_type")
if config_type not in AGENT_TO_CLASS:
raise ValueError(f"Loading {config_type} agent not supported")
agent_cls = AGENT_TO_CLASS[config_type]
if "llm_chain" in config:
config["llm_chain"] = load_chain_from_config(config.pop("llm_chain"))
elif "llm_chain_path" in config:
config["llm_chain"] = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` and `llm_chain_path` should be specified.")
if "output_parser" in config:
logger.warning(
"Currently loading output parsers on agent is not supported, "
"will just use the default one."
)
del config["output_parser"]
combined_config = {**config, **kwargs}
return agent_cls(**combined_config) # type: ignore
def load_agent(
path: Union[str, Path], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Unified method for loading an agent from LangChainHub or local fs.
Args:
path: Path to the agent file.
**kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
"""
valid_suffixes = {"json", "yaml"}
if hub_result := try_load_from_hub(
path, _load_agent_from_file, "agents", valid_suffixes
):
return hub_result
else:
return _load_agent_from_file(path, **kwargs)
def _load_agent_from_file(
file: Union[str, Path], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Load agent from file."""
valid_suffixes = {"json", "yaml"}
# Convert file to Path object.
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
# Load from either json or yaml.
if file_path.suffix[1:] == "json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix[1:] == "yaml":
with open(file_path, "r") as f:
config = yaml.safe_load(f)
else:
raise ValueError(f"Unsupported file type, must be one of {valid_suffixes}.")
# Load the agent from the config now.
return load_agent_from_config(config, **kwargs)
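# --- Hedged usage sketch (editor addition). ---
# Exercises the `load_from_llm_and_tools` branch above with an in-memory config.
# The "zero-shot-react-description" type string is an assumption about what
# AGENT_TO_CLASS registers (it does in upstream langchain); llm and tools must be
# supplied by the caller.
def _example_agent_from_config(
    llm: BaseLanguageModel, tools: List[Tool]
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
    config = {
        "_type": "zero-shot-react-description",
        "load_from_llm_and_tools": True,
    }
    return load_agent_from_config(config, llm=llm, tools=tools)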
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~notebook.py | """Loads .ipynb notebook files."""
import json
from pathlib import Path
from typing import Any, List
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
def concatenate_cells(
cell: dict, include_outputs: bool, max_output_length: int, traceback: bool
) -> str:
"""Combine cells information in a readable format ready to be used.
Args:
        cell: A dictionary representing an individual notebook cell.
include_outputs: Whether to include the outputs of the cell.
max_output_length: Maximum length of the output to be displayed.
traceback: Whether to return a traceback of the error.
Returns:
A string with the cell information.
"""
cell_type = cell["cell_type"]
source = cell["source"]
output = cell["outputs"]
if include_outputs and cell_type == "code" and output:
if "ename" in output[0].keys():
error_name = output[0]["ename"]
error_value = output[0]["evalue"]
if traceback:
traceback = output[0]["traceback"]
return (
f"'{cell_type}' cell: '{source}'\n, gives error '{error_name}',"
f" with description '{error_value}'\n"
f"and traceback '{traceback}'\n\n"
)
else:
return (
f"'{cell_type}' cell: '{source}'\n, gives error '{error_name}',"
f"with description '{error_value}'\n\n"
)
elif output[0]["output_type"] == "stream":
output = output[0]["text"]
min_output = min(max_output_length, len(output))
return (
f"'{cell_type}' cell: '{source}'\n with "
f"output: '{output[:min_output]}'\n\n"
)
else:
return f"'{cell_type}' cell: '{source}'\n\n"
return ""
def remove_newlines(x: Any) -> Any:
"""Recursively remove newlines, no matter the data structure they are stored in."""
import pandas as pd
if isinstance(x, str):
return x.replace("\n", "")
elif isinstance(x, list):
return [remove_newlines(elem) for elem in x]
elif isinstance(x, pd.DataFrame):
return x.applymap(remove_newlines)
else:
return x
class NotebookLoader(BaseLoader):
"""Load `Jupyter notebook` (.ipynb) files."""
def __init__(
self,
path: str,
include_outputs: bool = False,
max_output_length: int = 10,
remove_newline: bool = False,
traceback: bool = False,
):
"""Initialize with a path.
Args:
path: The path to load the notebook from.
include_outputs: Whether to include the outputs of the cell.
Defaults to False.
max_output_length: Maximum length of the output to be displayed.
Defaults to 10.
remove_newline: Whether to remove newlines from the notebook.
Defaults to False.
traceback: Whether to return a traceback of the error.
Defaults to False.
"""
self.file_path = path
self.include_outputs = include_outputs
self.max_output_length = max_output_length
self.remove_newline = remove_newline
self.traceback = traceback
def load(
self,
) -> List[Document]:
"""Load documents."""
try:
import pandas as pd
except ImportError:
raise ImportError(
"pandas is needed for Notebook Loader, "
"please install with `pip install pandas`"
)
p = Path(self.file_path)
with open(p, encoding="utf8") as f:
d = json.load(f)
data = pd.json_normalize(d["cells"])
filtered_data = data[["cell_type", "source", "outputs"]]
if self.remove_newline:
filtered_data = filtered_data.applymap(remove_newlines)
text = filtered_data.apply(
lambda x: concatenate_cells(
x, self.include_outputs, self.max_output_length, self.traceback
),
axis=1,
).str.cat(sep=" ")
metadata = {"source": str(p)}
return [Document(page_content=text, metadata=metadata)]
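# --- Hedged usage sketch (editor addition). ---
# Loads a notebook with cell outputs folded into the page content; the path is a
# placeholder and pandas must be installed for `load()` to run.
def _example_load_notebook() -> List[Document]:
    loader = NotebookLoader(
        "example_data/notebook.ipynb",  # placeholder path
        include_outputs=True,
        max_output_length=20,
        remove_newline=True,
    )
    return loader.load()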
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~callbacks~manager.py | from __future__ import annotations
import logging
from contextlib import contextmanager
from contextvars import ContextVar
from typing import (
Generator,
Optional,
)
from libs.core.langchain_core.tracers.context import register_configure_hook
from langchain_community.callbacks.openai_info import OpenAICallbackHandler
from langchain_community.callbacks.tracers.wandb import WandbTracer
logger = logging.getLogger(__name__)
openai_callback_var: ContextVar[Optional[OpenAICallbackHandler]] = ContextVar(
"openai_callback", default=None
)
wandb_tracing_callback_var: ContextVar[Optional[WandbTracer]] = ContextVar( # noqa: E501
"tracing_wandb_callback", default=None
)
register_configure_hook(openai_callback_var, True)
register_configure_hook(
wandb_tracing_callback_var, True, WandbTracer, "LANGCHAIN_WANDB_TRACING"
)
@contextmanager
def get_openai_callback() -> Generator[OpenAICallbackHandler, None, None]:
"""Get the OpenAI callback handler in a context manager.
which conveniently exposes token and cost information.
Returns:
OpenAICallbackHandler: The OpenAI callback handler.
Example:
>>> with get_openai_callback() as cb:
... # Use the OpenAI callback handler
"""
cb = OpenAICallbackHandler()
openai_callback_var.set(cb)
yield cb
openai_callback_var.set(None)
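# --- Hedged usage sketch (editor addition). ---
# Wraps an LLM call so token usage is accumulated on the handler. Assumes the
# passed model reports OpenAI-style token usage; `total_tokens` and `total_cost`
# are attributes of OpenAICallbackHandler.
def _example_token_accounting(llm, prompt: str = "Hello") -> float:
    with get_openai_callback() as cb:
        llm.invoke(prompt)
    return cb.total_cost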
@contextmanager
def wandb_tracing_enabled(
session_name: str = "default",
) -> Generator[None, None, None]:
"""Get the WandbTracer in a context manager.
Args:
session_name (str, optional): The name of the session.
Defaults to "default".
Returns:
None
Example:
>>> with wandb_tracing_enabled() as session:
... # Use the WandbTracer session
"""
cb = WandbTracer()
wandb_tracing_callback_var.set(cb)
yield None
wandb_tracing_callback_var.set(None)
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~obs_directory.py | # coding:utf-8
from typing import List, Optional
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.document_loaders.obs_file import OBSFileLoader
class OBSDirectoryLoader(BaseLoader):
"""Load from `Huawei OBS directory`."""
def __init__(
self,
bucket: str,
endpoint: str,
config: Optional[dict] = None,
prefix: str = "",
):
"""Initialize the OBSDirectoryLoader with the specified settings.
Args:
bucket (str): The name of the OBS bucket to be used.
endpoint (str): The endpoint URL of your OBS bucket.
config (dict): The parameters for connecting to OBS, provided as a dictionary. The dictionary could have the following keys:
- "ak" (str, optional): Your OBS access key (required if `get_token_from_ecs` is False and bucket policy is not public read).
- "sk" (str, optional): Your OBS secret key (required if `get_token_from_ecs` is False and bucket policy is not public read).
- "token" (str, optional): Your security token (required if using temporary credentials).
- "get_token_from_ecs" (bool, optional): Whether to retrieve the security token from ECS. Defaults to False if not provided. If set to True, `ak`, `sk`, and `token` will be ignored.
prefix (str, optional): The prefix to be added to the OBS key. Defaults to "".
Note:
Before using this class, make sure you have registered with OBS and have the necessary credentials. The `ak`, `sk`, and `endpoint` values are mandatory unless `get_token_from_ecs` is True or the bucket policy is public read. `token` is required when using temporary credentials.
Example:
To create a new OBSDirectoryLoader:
```
config = {
"ak": "your-access-key",
"sk": "your-secret-key"
}
```
        directory_loader = OBSDirectoryLoader("your-bucket-name", "your-endpoint", config, "your-prefix")
""" # noqa: E501
try:
from obs import ObsClient
except ImportError:
raise ImportError(
"Could not import esdk-obs-python python package. "
"Please install it with `pip install esdk-obs-python`."
)
if not config:
config = dict()
if config.get("get_token_from_ecs"):
self.client = ObsClient(server=endpoint, security_provider_policy="ECS")
else:
self.client = ObsClient(
access_key_id=config.get("ak"),
secret_access_key=config.get("sk"),
security_token=config.get("token"),
server=endpoint,
)
self.bucket = bucket
self.prefix = prefix
def load(self) -> List[Document]:
"""Load documents."""
max_num = 1000
mark = None
docs = []
while True:
resp = self.client.listObjects(
self.bucket, prefix=self.prefix, marker=mark, max_keys=max_num
)
if resp.status < 300:
for content in resp.body.contents:
loader = OBSFileLoader(self.bucket, content.key, client=self.client)
docs.extend(loader.load())
if resp.body.is_truncated is True:
mark = resp.body.next_marker
else:
break
return docs
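# --- Hedged usage sketch (editor addition). ---
# Bucket, endpoint, prefix and credentials are placeholders; with
# `get_token_from_ecs` set, the loader would instead pull a security token from
# the ECS metadata service and ignore ak/sk.
def _example_load_obs_prefix() -> List[Document]:
    config = {"ak": "your-access-key", "sk": "your-secret-key"}
    loader = OBSDirectoryLoader(
        bucket="your-bucket-name",
        endpoint="obs.cn-north-4.myhuaweicloud.com",  # placeholder endpoint
        config=config,
        prefix="reports/",
    )
    return loader.load()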
| [] |
2024-01-10 | mth93/langchain | libs~langchain~tests~unit_tests~llms~fake_chat_model.py | """Fake Chat Model wrapper for testing purposes."""
from typing import Any, Dict, List, Optional
from libs.core.langchain_core.language_models.chat_models import SimpleChatModel
from libs.core.langchain_core.messages import AIMessage, BaseMessage
from libs.core.langchain_core.outputs import ChatGeneration, ChatResult
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
class FakeChatModel(SimpleChatModel):
"""Fake Chat Model wrapper for testing purposes."""
def _call(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
return "fake response"
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
output_str = "fake response"
message = AIMessage(content=output_str)
generation = ChatGeneration(message=message)
return ChatResult(generations=[generation])
@property
def _llm_type(self) -> str:
return "fake-chat-model"
@property
def _identifying_params(self) -> Dict[str, Any]:
return {"key": "fake"}
| [
"fake response"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~vectorstores~astradb.py | from __future__ import annotations
import uuid
import warnings
from concurrent.futures import ThreadPoolExecutor
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Set,
Tuple,
Type,
TypeVar,
)
import numpy as np
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.utils.iter import batch_iterate
from libs.core.langchain_core.vectorstores import VectorStore
from langchain_community.vectorstores.utils import maximal_marginal_relevance
ADBVST = TypeVar("ADBVST", bound="AstraDB")
T = TypeVar("T")
U = TypeVar("U")
DocDict = Dict[str, Any] # dicts expressing entries to insert
# Batch/concurrency default values (if parameters not provided):
# Size of batches for bulk insertions:
# (20 is the max batch size for the HTTP API at the time of writing)
DEFAULT_BATCH_SIZE = 20
# Number of threads to insert batches concurrently:
DEFAULT_BULK_INSERT_BATCH_CONCURRENCY = 16
# Number of threads in a batch to insert pre-existing entries:
DEFAULT_BULK_INSERT_OVERWRITE_CONCURRENCY = 10
# Number of threads (for deleting multiple rows concurrently):
DEFAULT_BULK_DELETE_CONCURRENCY = 20
def _unique_list(lst: List[T], key: Callable[[T], U]) -> List[T]:
visited_keys: Set[U] = set()
new_lst = []
for item in lst:
item_key = key(item)
if item_key not in visited_keys:
visited_keys.add(item_key)
new_lst.append(item)
return new_lst
class AstraDB(VectorStore):
"""Wrapper around DataStax Astra DB for vector-store workloads.
To use it, you need a recent installation of the `astrapy` library
and an Astra DB cloud database.
For quickstart and details, visit:
docs.datastax.com/en/astra/home/astra.html
Example:
.. code-block:: python
from langchain_community.vectorstores import AstraDB
from langchain_community.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vectorstore = AstraDB(
embedding=embeddings,
collection_name="my_store",
token="AstraCS:...",
api_endpoint="https://<DB-ID>-us-east1.apps.astra.datastax.com"
)
vectorstore.add_texts(["Giraffes", "All good here"])
results = vectorstore.similarity_search("Everything's ok", k=1)
Constructor Args (only keyword-arguments accepted):
embedding (Embeddings): embedding function to use.
collection_name (str): name of the Astra DB collection to create/use.
token (Optional[str]): API token for Astra DB usage.
api_endpoint (Optional[str]): full URL to the API endpoint,
such as "https://<DB-ID>-us-east1.apps.astra.datastax.com".
astra_db_client (Optional[Any]): *alternative to token+api_endpoint*,
you can pass an already-created 'astrapy.db.AstraDB' instance.
namespace (Optional[str]): namespace (aka keyspace) where the
collection is created. Defaults to the database's "default namespace".
metric (Optional[str]): similarity function to use out of those
available in Astra DB. If left out, it will use Astra DB API's
defaults (i.e. "cosine" - but, for performance reasons,
"dot_product" is suggested if embeddings are normalized to one).
Advanced arguments (coming with sensible defaults):
batch_size (Optional[int]): Size of batches for bulk insertions.
bulk_insert_batch_concurrency (Optional[int]): Number of threads
to insert batches concurrently.
bulk_insert_overwrite_concurrency (Optional[int]): Number of
threads in a batch to insert pre-existing entries.
bulk_delete_concurrency (Optional[int]): Number of threads
(for deleting multiple rows concurrently).
pre_delete_collection (Optional[bool]): whether to delete the collection
before creating it. If False and the collection already exists,
the collection will be used as is.
A note on concurrency: as a rule of thumb, on a typical client machine
it is suggested to keep the quantity
bulk_insert_batch_concurrency * bulk_insert_overwrite_concurrency
much below 1000 to avoid exhausting the client multithreading/networking
resources. The hardcoded defaults are somewhat conservative to meet
most machines' specs, but a sensible choice to test may be:
bulk_insert_batch_concurrency = 80
bulk_insert_overwrite_concurrency = 10
A bit of experimentation is required to nail the best results here,
depending on both the machine/network specs and the expected workload
(specifically, how often a write is an update of an existing id).
Remember you can pass concurrency settings to individual calls to
add_texts and add_documents as well.
"""
@staticmethod
def _filter_to_metadata(filter_dict: Optional[Dict[str, str]]) -> Dict[str, Any]:
if filter_dict is None:
return {}
else:
return {f"metadata.{mdk}": mdv for mdk, mdv in filter_dict.items()}
def __init__(
self,
*,
embedding: Embeddings,
collection_name: str,
token: Optional[str] = None,
api_endpoint: Optional[str] = None,
astra_db_client: Optional[Any] = None, # 'astrapy.db.AstraDB' if passed
namespace: Optional[str] = None,
metric: Optional[str] = None,
batch_size: Optional[int] = None,
bulk_insert_batch_concurrency: Optional[int] = None,
bulk_insert_overwrite_concurrency: Optional[int] = None,
bulk_delete_concurrency: Optional[int] = None,
pre_delete_collection: bool = False,
) -> None:
"""
Create an AstraDB vector store object. See class docstring for help.
"""
try:
from astrapy.db import (
AstraDB as LibAstraDB,
)
from astrapy.db import (
AstraDBCollection as LibAstraDBCollection,
)
except (ImportError, ModuleNotFoundError):
raise ImportError(
"Could not import a recent astrapy python package. "
"Please install it with `pip install --upgrade astrapy`."
)
# Conflicting-arg checks:
if astra_db_client is not None:
if token is not None or api_endpoint is not None:
raise ValueError(
"You cannot pass 'astra_db_client' to AstraDB if passing "
"'token' and 'api_endpoint'."
)
self.embedding = embedding
self.collection_name = collection_name
self.token = token
self.api_endpoint = api_endpoint
self.namespace = namespace
# Concurrency settings
self.batch_size: int = batch_size or DEFAULT_BATCH_SIZE
self.bulk_insert_batch_concurrency: int = (
bulk_insert_batch_concurrency or DEFAULT_BULK_INSERT_BATCH_CONCURRENCY
)
self.bulk_insert_overwrite_concurrency: int = (
bulk_insert_overwrite_concurrency
or DEFAULT_BULK_INSERT_OVERWRITE_CONCURRENCY
)
self.bulk_delete_concurrency: int = (
bulk_delete_concurrency or DEFAULT_BULK_DELETE_CONCURRENCY
)
# "vector-related" settings
self._embedding_dimension: Optional[int] = None
self.metric = metric
if astra_db_client is not None:
self.astra_db = astra_db_client
else:
self.astra_db = LibAstraDB(
token=self.token,
api_endpoint=self.api_endpoint,
namespace=self.namespace,
)
if not pre_delete_collection:
self._provision_collection()
else:
self.clear()
self.collection = LibAstraDBCollection(
collection_name=self.collection_name,
astra_db=self.astra_db,
)
def _get_embedding_dimension(self) -> int:
if self._embedding_dimension is None:
self._embedding_dimension = len(
self.embedding.embed_query("This is a sample sentence.")
)
return self._embedding_dimension
def _drop_collection(self) -> None:
"""
Drop the collection from storage.
This is meant as an internal-usage method, no members
are set other than actual deletion on the backend.
"""
_ = self.astra_db.delete_collection(
collection_name=self.collection_name,
)
return None
def _provision_collection(self) -> None:
"""
Run the API invocation to create the collection on the backend.
Internal-usage method, no object members are set,
other than working on the underlying actual storage.
"""
_ = self.astra_db.create_collection(
dimension=self._get_embedding_dimension(),
collection_name=self.collection_name,
metric=self.metric,
)
return None
@property
def embeddings(self) -> Embeddings:
return self.embedding
@staticmethod
def _dont_flip_the_cos_score(similarity0to1: float) -> float:
"""Keep similarity from client unchanged ad it's in [0:1] already."""
return similarity0to1
def _select_relevance_score_fn(self) -> Callable[[float], float]:
"""
The underlying API calls already returns a "score proper",
i.e. one in [0, 1] where higher means more *similar*,
so here the final score transformation is not reversing the interval:
"""
return self._dont_flip_the_cos_score
def clear(self) -> None:
"""Empty the collection of all its stored entries."""
self._drop_collection()
self._provision_collection()
return None
def delete_by_document_id(self, document_id: str) -> bool:
"""
Remove a single document from the store, given its document_id (str).
Return True if a document has indeed been deleted, False if ID not found.
"""
deletion_response = self.collection.delete(document_id)
return ((deletion_response or {}).get("status") or {}).get(
"deletedCount", 0
) == 1
def delete(
self,
ids: Optional[List[str]] = None,
concurrency: Optional[int] = None,
**kwargs: Any,
) -> Optional[bool]:
"""Delete by vector ids.
Args:
ids (Optional[List[str]]): List of ids to delete.
concurrency (Optional[int]): max number of threads issuing
single-doc delete requests. Defaults to instance-level setting.
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
"""
if kwargs:
warnings.warn(
"Method 'delete' of AstraDB vector store invoked with "
f"unsupported arguments ({', '.join(sorted(kwargs.keys()))}), "
"which will be ignored."
)
if ids is None:
raise ValueError("No ids provided to delete.")
_max_workers = concurrency or self.bulk_delete_concurrency
with ThreadPoolExecutor(max_workers=_max_workers) as tpe:
_ = list(
tpe.map(
self.delete_by_document_id,
ids,
)
)
return True
def delete_collection(self) -> None:
"""
Completely delete the collection from the database (as opposed
to 'clear()', which empties it only).
Stored data is lost and unrecoverable, resources are freed.
Use with caution.
"""
self._drop_collection()
return None
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
*,
batch_size: Optional[int] = None,
batch_concurrency: Optional[int] = None,
overwrite_concurrency: Optional[int] = None,
**kwargs: Any,
) -> List[str]:
"""Run texts through the embeddings and add them to the vectorstore.
If passing explicit ids, those entries whose id is in the store already
will be replaced.
Args:
texts (Iterable[str]): Texts to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
ids (Optional[List[str]], optional): Optional list of ids.
batch_size (Optional[int]): Number of documents in each API call.
Check the underlying Astra DB HTTP API specs for the max value
(20 at the time of writing this). If not provided, defaults
to the instance-level setting.
batch_concurrency (Optional[int]): number of threads to process
insertion batches concurrently. Defaults to instance-level
setting if not provided.
overwrite_concurrency (Optional[int]): number of threads to process
pre-existing documents in each batch (which require individual
API calls). Defaults to instance-level setting if not provided.
A note on metadata: there are constraints on the allowed field names
in this dictionary, coming from the underlying Astra DB API.
For instance, the `$` (dollar sign) cannot be used in the dict keys.
See this document for details:
docs.datastax.com/en/astra-serverless/docs/develop/dev-with-json.html
Returns:
List[str]: List of ids of the added texts.
"""
if kwargs:
warnings.warn(
"Method 'add_texts' of AstraDB vector store invoked with "
f"unsupported arguments ({', '.join(sorted(kwargs.keys()))}), "
"which will be ignored."
)
_texts = list(texts)
if ids is None:
ids = [uuid.uuid4().hex for _ in _texts]
if metadatas is None:
metadatas = [{} for _ in _texts]
#
embedding_vectors = self.embedding.embed_documents(_texts)
documents_to_insert = [
{
"content": b_txt,
"_id": b_id,
"$vector": b_emb,
"metadata": b_md,
}
for b_txt, b_emb, b_id, b_md in zip(
_texts,
embedding_vectors,
ids,
metadatas,
)
]
# make unique by id, keeping the last
uniqued_documents_to_insert = _unique_list(
documents_to_insert[::-1],
lambda document: document["_id"],
)[::-1]
all_ids = []
def _handle_batch(document_batch: List[DocDict]) -> List[str]:
im_result = self.collection.insert_many(
documents=document_batch,
options={"ordered": False},
partial_failures_allowed=True,
)
if "status" not in im_result:
raise ValueError(
f"API Exception while running bulk insertion: {str(im_result)}"
)
batch_inserted = im_result["status"]["insertedIds"]
# estimation of the preexisting documents that failed
missed_inserted_ids = {
document["_id"] for document in document_batch
} - set(batch_inserted)
errors = im_result.get("errors", [])
# careful for other sources of error other than "doc already exists"
num_errors = len(errors)
unexpected_errors = any(
error.get("errorCode") != "DOCUMENT_ALREADY_EXISTS" for error in errors
)
if num_errors != len(missed_inserted_ids) or unexpected_errors:
raise ValueError(
f"API Exception while running bulk insertion: {str(errors)}"
)
# deal with the missing insertions as upserts
missing_from_batch = [
document
for document in document_batch
if document["_id"] in missed_inserted_ids
]
def _handle_missing_document(missing_document: DocDict) -> str:
replacement_result = self.collection.find_one_and_replace(
filter={"_id": missing_document["_id"]},
replacement=missing_document,
)
return replacement_result["data"]["document"]["_id"]
_u_max_workers = (
overwrite_concurrency or self.bulk_insert_overwrite_concurrency
)
with ThreadPoolExecutor(max_workers=_u_max_workers) as tpe2:
batch_replaced = list(
tpe2.map(
_handle_missing_document,
missing_from_batch,
)
)
upsert_ids = batch_inserted + batch_replaced
return upsert_ids
_b_max_workers = batch_concurrency or self.bulk_insert_batch_concurrency
with ThreadPoolExecutor(max_workers=_b_max_workers) as tpe:
all_ids_nested = tpe.map(
_handle_batch,
batch_iterate(
batch_size or self.batch_size,
uniqued_documents_to_insert,
),
)
all_ids = [iid for id_list in all_ids_nested for iid in id_list]
return all_ids
def similarity_search_with_score_id_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Dict[str, str]] = None,
) -> List[Tuple[Document, float, str]]:
"""Return docs most similar to embedding vector.
Args:
            embedding (List[float]): Embedding to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
Returns:
List of (Document, score, id), the most similar to the query vector.
"""
metadata_parameter = self._filter_to_metadata(filter)
#
hits = list(
self.collection.paginated_find(
filter=metadata_parameter,
sort={"$vector": embedding},
options={"limit": k, "includeSimilarity": True},
projection={
"_id": 1,
"content": 1,
"metadata": 1,
},
)
)
#
return [
(
Document(
page_content=hit["content"],
metadata=hit["metadata"],
),
hit["$similarity"],
hit["_id"],
)
for hit in hits
]
def similarity_search_with_score_id(
self,
query: str,
k: int = 4,
filter: Optional[Dict[str, str]] = None,
) -> List[Tuple[Document, float, str]]:
embedding_vector = self.embedding.embed_query(query)
return self.similarity_search_with_score_id_by_vector(
embedding=embedding_vector,
k=k,
filter=filter,
)
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Dict[str, str]] = None,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to embedding vector.
Args:
            embedding (List[float]): Embedding to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
Returns:
List of (Document, score), the most similar to the query vector.
"""
return [
(doc, score)
for (doc, score, doc_id) in self.similarity_search_with_score_id_by_vector(
embedding=embedding,
k=k,
filter=filter,
)
]
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
embedding_vector = self.embedding.embed_query(query)
return self.similarity_search_by_vector(
embedding_vector,
k,
filter=filter,
)
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
return [
doc
for doc, _ in self.similarity_search_with_score_by_vector(
embedding,
k,
filter=filter,
)
]
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[Dict[str, str]] = None,
) -> List[Tuple[Document, float]]:
embedding_vector = self.embedding.embed_query(query)
return self.similarity_search_with_score_by_vector(
embedding_vector,
k,
filter=filter,
)
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Returns:
List of Documents selected by maximal marginal relevance.
"""
metadata_parameter = self._filter_to_metadata(filter)
prefetch_hits = list(
self.collection.paginated_find(
filter=metadata_parameter,
sort={"$vector": embedding},
options={"limit": fetch_k, "includeSimilarity": True},
projection={
"_id": 1,
"content": 1,
"metadata": 1,
"$vector": 1,
},
)
)
mmr_chosen_indices = maximal_marginal_relevance(
np.array(embedding, dtype=np.float32),
[prefetch_hit["$vector"] for prefetch_hit in prefetch_hits],
k=k,
lambda_mult=lambda_mult,
)
mmr_hits = [
prefetch_hit
for prefetch_index, prefetch_hit in enumerate(prefetch_hits)
if prefetch_index in mmr_chosen_indices
]
return [
Document(
page_content=hit["content"],
metadata=hit["metadata"],
)
for hit in mmr_hits
]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query (str): Text to look up documents similar to.
k (int = 4): Number of Documents to return.
fetch_k (int = 20): Number of Documents to fetch to pass to MMR algorithm.
lambda_mult (float = 0.5): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Optional.
Returns:
List of Documents selected by maximal marginal relevance.
"""
embedding_vector = self.embedding.embed_query(query)
return self.max_marginal_relevance_search_by_vector(
embedding_vector,
k,
fetch_k,
lambda_mult=lambda_mult,
filter=filter,
)
@classmethod
def from_texts(
cls: Type[ADBVST],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> ADBVST:
"""Create an Astra DB vectorstore from raw texts.
Args:
texts (List[str]): the texts to insert.
embedding (Embeddings): the embedding function to use in the store.
metadatas (Optional[List[dict]]): metadata dicts for the texts.
ids (Optional[List[str]]): ids to associate to the texts.
*Additional arguments*: you can pass any argument that you would
to 'add_texts' and/or to the 'AstraDB' class constructor
(see these methods for details). These arguments will be
routed to the respective methods as they are.
Returns:
            an `AstraDB` vectorstore.
"""
known_kwargs = {
"collection_name",
"token",
"api_endpoint",
"astra_db_client",
"namespace",
"metric",
"batch_size",
"bulk_insert_batch_concurrency",
"bulk_insert_overwrite_concurrency",
"bulk_delete_concurrency",
"batch_concurrency",
"overwrite_concurrency",
}
if kwargs:
unknown_kwargs = set(kwargs.keys()) - known_kwargs
if unknown_kwargs:
warnings.warn(
"Method 'from_texts' of AstraDB vector store invoked with "
f"unsupported arguments ({', '.join(sorted(unknown_kwargs))}), "
"which will be ignored."
)
collection_name: str = kwargs["collection_name"]
token = kwargs.get("token")
api_endpoint = kwargs.get("api_endpoint")
astra_db_client = kwargs.get("astra_db_client")
namespace = kwargs.get("namespace")
metric = kwargs.get("metric")
astra_db_store = cls(
embedding=embedding,
collection_name=collection_name,
token=token,
api_endpoint=api_endpoint,
astra_db_client=astra_db_client,
namespace=namespace,
metric=metric,
batch_size=kwargs.get("batch_size"),
bulk_insert_batch_concurrency=kwargs.get("bulk_insert_batch_concurrency"),
bulk_insert_overwrite_concurrency=kwargs.get(
"bulk_insert_overwrite_concurrency"
),
bulk_delete_concurrency=kwargs.get("bulk_delete_concurrency"),
)
astra_db_store.add_texts(
texts=texts,
metadatas=metadatas,
ids=ids,
batch_size=kwargs.get("batch_size"),
batch_concurrency=kwargs.get("batch_concurrency"),
overwrite_concurrency=kwargs.get("overwrite_concurrency"),
)
return astra_db_store
@classmethod
def from_documents(
cls: Type[ADBVST],
documents: List[Document],
embedding: Embeddings,
**kwargs: Any,
) -> ADBVST:
"""Create an Astra DB vectorstore from a document list.
Utility method that defers to 'from_texts' (see that one).
Args: see 'from_texts', except here you have to supply 'documents'
in place of 'texts' and 'metadatas'.
Returns:
an `AstraDB` vectorstore.
"""
return super().from_documents(documents, embedding, **kwargs)
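# --- Hedged usage sketch (editor addition). ---
# Shows insertion with per-call concurrency overrides followed by an MMR search.
# Token and API endpoint are placeholders; any Embeddings implementation with a
# fixed dimension can be passed in. Running this performs real API calls.
def _example_astra_mmr(embedding: Embeddings) -> List[Document]:
    store = AstraDB(
        embedding=embedding,
        collection_name="demo_docs",
        token="AstraCS:placeholder",  # placeholder credential
        api_endpoint="https://<DB-ID>-us-east1.apps.astra.datastax.com",
    )
    store.add_texts(
        ["Giraffes are tall", "Elephants are heavy", "Giraffes have long necks"],
        batch_size=20,
        batch_concurrency=4,
        overwrite_concurrency=2,
    )
    return store.max_marginal_relevance_search("tall animals", k=2, fetch_k=3)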
| [
"1"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~embeddings~mosaicml.py | from typing import Any, Dict, List, Mapping, Optional, Tuple
import requests
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra, root_validator
from libs.core.langchain_core.utils import get_from_dict_or_env
class MosaicMLInstructorEmbeddings(BaseModel, Embeddings):
"""MosaicML embedding service.
To use, you should have the
environment variable ``MOSAICML_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain_community.llms import MosaicMLInstructorEmbeddings
endpoint_url = (
"https://models.hosted-on.mosaicml.hosting/instructor-large/v1/predict"
)
mosaic_llm = MosaicMLInstructorEmbeddings(
endpoint_url=endpoint_url,
mosaicml_api_token="my-api-key"
)
"""
endpoint_url: str = (
"https://models.hosted-on.mosaicml.hosting/instructor-xl/v1/predict"
)
"""Endpoint URL to use."""
embed_instruction: str = "Represent the document for retrieval: "
"""Instruction used to embed documents."""
query_instruction: str = (
"Represent the question for retrieving supporting documents: "
)
"""Instruction used to embed the query."""
retry_sleep: float = 1.0
"""How long to try sleeping for if a rate limit is encountered"""
mosaicml_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
mosaicml_api_token = get_from_dict_or_env(
values, "mosaicml_api_token", "MOSAICML_API_TOKEN"
)
values["mosaicml_api_token"] = mosaicml_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {"endpoint_url": self.endpoint_url}
def _embed(
self, input: List[Tuple[str, str]], is_retry: bool = False
) -> List[List[float]]:
payload = {"inputs": input}
# HTTP headers for authorization
headers = {
"Authorization": f"{self.mosaicml_api_token}",
"Content-Type": "application/json",
}
# send request
try:
response = requests.post(self.endpoint_url, headers=headers, json=payload)
except requests.exceptions.RequestException as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
try:
if response.status_code == 429:
if not is_retry:
import time
time.sleep(self.retry_sleep)
return self._embed(input, is_retry=True)
raise ValueError(
f"Error raised by inference API: rate limit exceeded.\nResponse: "
f"{response.text}"
)
parsed_response = response.json()
# The inference API has changed a couple of times, so we add some handling
# to be robust to multiple response formats.
if isinstance(parsed_response, dict):
output_keys = ["data", "output", "outputs"]
for key in output_keys:
if key in parsed_response:
output_item = parsed_response[key]
break
else:
raise ValueError(
f"No key data or output in response: {parsed_response}"
)
if isinstance(output_item, list) and isinstance(output_item[0], list):
embeddings = output_item
else:
embeddings = [output_item]
else:
raise ValueError(f"Unexpected response type: {parsed_response}")
except requests.exceptions.JSONDecodeError as e:
raise ValueError(
f"Error raised by inference API: {e}.\nResponse: {response.text}"
)
return embeddings
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed documents using a MosaicML deployed instructor embedding model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
instruction_pairs = [(self.embed_instruction, text) for text in texts]
embeddings = self._embed(instruction_pairs)
return embeddings
def embed_query(self, text: str) -> List[float]:
"""Embed a query using a MosaicML deployed instructor embedding model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
instruction_pair = (self.query_instruction, text)
embedding = self._embed([instruction_pair])[0]
return embedding
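# A minimal usage sketch, assuming the MOSAICML_API_TOKEN environment variable
# is set and the default instructor-xl endpoint is reachable; the sample texts
# are illustrative only.
if __name__ == "__main__":
    embedder = MosaicMLInstructorEmbeddings()
    doc_vectors = embedder.embed_documents(["MosaicML hosts instructor models."])
    query_vector = embedder.embed_query("Which service hosts instructor models?")
    print(len(doc_vectors), len(query_vector))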
| [] |
2024-01-10 | mth93/langchain | libs~langchain~tests~integration_tests~memory~test_mongodb.py | import json
import os
from libs.core.langchain_core.messages import message_to_dict
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import MongoDBChatMessageHistory
# Replace these with your mongodb connection string
connection_string = os.environ.get("MONGODB_CONNECTION_STRING", "")
def test_memory_with_message_store() -> None:
"""Test the memory with a message store."""
# setup MongoDB as a message store
message_history = MongoDBChatMessageHistory(
connection_string=connection_string, session_id="test-session"
)
memory = ConversationBufferMemory(
memory_key="baz", chat_memory=message_history, return_messages=True
)
# add some messages
memory.chat_memory.add_ai_message("This is me, the AI")
memory.chat_memory.add_user_message("This is me, the human")
# get the message history from the memory store and turn it into a json
messages = memory.chat_memory.messages
messages_json = json.dumps([message_to_dict(msg) for msg in messages])
assert "This is me, the AI" in messages_json
assert "This is me, the human" in messages_json
    # remove the record from MongoDB, so the next test run won't pick it up
memory.chat_memory.clear()
assert memory.chat_memory.messages == []
| [] |
2024-01-10 | mth93/langchain | libs~langchain~tests~unit_tests~load~test_dump.py | """Test for Serializable base class"""
from typing import Any, Dict
import pytest
from libs.core.langchain_core.load.dump import dumps
from libs.core.langchain_core.load.serializable import Serializable
from libs.core.langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from libs.core.langchain_core.prompts.prompt import PromptTemplate
from libs.core.langchain_core.tracers.langchain import LangChainTracer
from langchain.chains.llm import LLMChain
from langchain.chat_models.openai import ChatOpenAI
from langchain.llms.openai import OpenAI
class Person(Serializable):
secret: str
you_can_see_me: str = "hello"
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@property
def lc_secrets(self) -> Dict[str, str]:
return {"secret": "SECRET"}
@property
def lc_attributes(self) -> Dict[str, str]:
return {"you_can_see_me": self.you_can_see_me}
class SpecialPerson(Person):
another_secret: str
another_visible: str = "bye"
# Gets merged with parent class's secrets
@property
def lc_secrets(self) -> Dict[str, str]:
return {"another_secret": "ANOTHER_SECRET"}
# Gets merged with parent class's attributes
@property
def lc_attributes(self) -> Dict[str, str]:
return {"another_visible": self.another_visible}
class NotSerializable:
pass
def test_person(snapshot: Any) -> None:
p = Person(secret="hello")
assert dumps(p, pretty=True) == snapshot
sp = SpecialPerson(another_secret="Wooo", secret="Hmm")
assert dumps(sp, pretty=True) == snapshot
assert Person.lc_id() == ["tests", "unit_tests", "load", "test_dump", "Person"]
def test_typeerror() -> None:
assert (
dumps({(1, 2): 3})
== """{"lc": 1, "type": "not_implemented", "id": ["builtins", "dict"], "repr": "{(1, 2): 3}"}""" # noqa: E501
)
@pytest.mark.requires("openai")
def test_serialize_openai_llm(snapshot: Any) -> None:
llm = OpenAI(
model="davinci",
temperature=0.5,
openai_api_key="hello",
# This is excluded from serialization
callbacks=[LangChainTracer()],
)
llm.temperature = 0.7 # this is reflected in serialization
assert dumps(llm, pretty=True) == snapshot
@pytest.mark.requires("openai")
def test_serialize_llmchain(snapshot: Any) -> None:
llm = OpenAI(model="davinci", temperature=0.5, openai_api_key="hello")
prompt = PromptTemplate.from_template("hello {name}!")
chain = LLMChain(llm=llm, prompt=prompt)
assert dumps(chain, pretty=True) == snapshot
@pytest.mark.requires("openai")
def test_serialize_llmchain_env() -> None:
llm = OpenAI(model="davinci", temperature=0.5, openai_api_key="hello")
prompt = PromptTemplate.from_template("hello {name}!")
chain = LLMChain(llm=llm, prompt=prompt)
import os
has_env = "OPENAI_API_KEY" in os.environ
if not has_env:
os.environ["OPENAI_API_KEY"] = "env_variable"
llm_2 = OpenAI(model="davinci", temperature=0.5)
prompt_2 = PromptTemplate.from_template("hello {name}!")
chain_2 = LLMChain(llm=llm_2, prompt=prompt_2)
assert dumps(chain_2, pretty=True) == dumps(chain, pretty=True)
if not has_env:
del os.environ["OPENAI_API_KEY"]
@pytest.mark.requires("openai")
def test_serialize_llmchain_chat(snapshot: Any) -> None:
llm = ChatOpenAI(model="davinci", temperature=0.5, openai_api_key="hello")
prompt = ChatPromptTemplate.from_messages(
[HumanMessagePromptTemplate.from_template("hello {name}!")]
)
chain = LLMChain(llm=llm, prompt=prompt)
assert dumps(chain, pretty=True) == snapshot
import os
has_env = "OPENAI_API_KEY" in os.environ
if not has_env:
os.environ["OPENAI_API_KEY"] = "env_variable"
llm_2 = ChatOpenAI(model="davinci", temperature=0.5)
prompt_2 = ChatPromptTemplate.from_messages(
[HumanMessagePromptTemplate.from_template("hello {name}!")]
)
chain_2 = LLMChain(llm=llm_2, prompt=prompt_2)
assert dumps(chain_2, pretty=True) == dumps(chain, pretty=True)
if not has_env:
del os.environ["OPENAI_API_KEY"]
@pytest.mark.requires("openai")
def test_serialize_llmchain_with_non_serializable_arg(snapshot: Any) -> None:
llm = OpenAI(
model="davinci",
temperature=0.5,
openai_api_key="hello",
client=NotSerializable,
)
prompt = PromptTemplate.from_template("hello {name}!")
chain = LLMChain(llm=llm, prompt=prompt)
assert dumps(chain, pretty=True) == snapshot
def test_person_with_kwargs(snapshot: Any) -> None:
person = Person(secret="hello")
assert dumps(person, separators=(",", ":")) == snapshot
def test_person_with_invalid_kwargs() -> None:
person = Person(secret="hello")
with pytest.raises(TypeError):
dumps(person, invalid_kwarg="hello")
| [
"hello {name}!"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~utilities~google_finance.py | """Util that calls Google Finance Search."""
from typing import Any, Dict, Optional, cast
from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra, SecretStr, root_validator
from libs.core.langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
class GoogleFinanceAPIWrapper(BaseModel):
"""Wrapper for SerpApi's Google Finance API
    You can create a SerpApi.com key by signing up at: https://serpapi.com/users/sign_up.
The wrapper uses the SerpApi.com python package:
https://serpapi.com/integrations/python
To use, you should have the environment variable ``SERPAPI_API_KEY``
set with your API key, or pass `serp_api_key` as a named parameter
to the constructor.
Example:
.. code-block:: python
from langchain_community.utilities import GoogleFinanceAPIWrapper
google_Finance = GoogleFinanceAPIWrapper()
google_Finance.run('langchain')
"""
serp_search_engine: Any
serp_api_key: Optional[SecretStr] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["serp_api_key"] = convert_to_secret_str(
get_from_dict_or_env(values, "serp_api_key", "SERPAPI_API_KEY")
)
try:
from serpapi import SerpApiClient
except ImportError:
raise ImportError(
"google-search-results is not installed. "
"Please install it with `pip install google-search-results"
">=2.4.2`"
)
serp_search_engine = SerpApiClient
values["serp_search_engine"] = serp_search_engine
return values
def run(self, query: str) -> str:
"""Run query through Google Finance with Serpapi"""
serpapi_api_key = cast(SecretStr, self.serp_api_key)
params = {
"engine": "google_finance",
"api_key": serpapi_api_key.get_secret_value(),
"q": query,
}
total_results = {}
client = self.serp_search_engine(params)
total_results = client.get_dict()
if not total_results:
return "Nothing was found from the query: " + query
markets = total_results.get("markets", {})
res = "\nQuery: " + query + "\n"
if "futures_chain" in total_results:
futures_chain = total_results.get("futures_chain", [])[0]
stock = futures_chain["stock"]
price = futures_chain["price"]
temp = futures_chain["price_movement"]
percentage = temp["percentage"]
movement = temp["movement"]
res += (
f"stock: {stock}\n"
+ f"price: {price}\n"
+ f"percentage: {percentage}\n"
+ f"movement: {movement}\n"
)
else:
res += "No summary information\n"
for key in markets:
if (key == "us") or (key == "asia") or (key == "europe"):
res += key
res += ": price = "
res += str(markets[key][0]["price"])
res += ", movement = "
res += markets[key][0]["price_movement"]["movement"]
res += "\n"
return res
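# A minimal usage sketch, assuming SERPAPI_API_KEY is set in the environment and
# the google-search-results package is installed; the query below is
# illustrative.
if __name__ == "__main__":
    google_finance = GoogleFinanceAPIWrapper()
    print(google_finance.run("GOOG"))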
| [] |
2024-01-10 | mth93/langchain | libs~langchain~tests~unit_tests~test_globals.py | import warnings
from libs.core.langchain_core.globals import get_debug as core_get_debug
from libs.core.langchain_core.globals import get_verbose as core_get_verbose
from libs.core.langchain_core.globals import set_debug as core_set_debug
from libs.core.langchain_core.globals import set_verbose as core_set_verbose
from langchain.globals import get_debug, get_verbose, set_debug, set_verbose
def test_no_warning() -> None:
with warnings.catch_warnings():
warnings.simplefilter("error")
get_debug()
set_debug(False)
get_verbose()
set_verbose(False)
core_get_debug()
core_set_debug(False)
core_get_verbose()
core_set_verbose(False)
def test_debug_is_settable_directly() -> None:
from libs.core.langchain_core.callbacks.manager import _get_debug
import langchain
previous_value = langchain.debug
previous_fn_reading = _get_debug()
assert previous_value == previous_fn_reading
# Flip the value of the flag.
langchain.debug = not previous_value
new_value = langchain.debug
new_fn_reading = _get_debug()
try:
# We successfully changed the value of `debug`.
assert new_value != previous_value
# If we access `debug` via a function used elsewhere in langchain,
# it also sees the same new value.
assert new_value == new_fn_reading
# If we access `debug` via `get_debug()` we also get the same value.
assert new_value == get_debug()
finally:
# Make sure we don't alter global state, even if the test fails.
# Always reset `debug` to the value it had before.
set_debug(previous_value)
def test_debug_is_settable_via_setter() -> None:
from libs.core.langchain_core.callbacks.manager import _get_debug
from langchain import globals
previous_value = globals._debug
previous_fn_reading = _get_debug()
assert previous_value == previous_fn_reading
# Flip the value of the flag.
set_debug(not previous_value)
new_value = globals._debug
new_fn_reading = _get_debug()
try:
# We successfully changed the value of `debug`.
assert new_value != previous_value
# If we access `debug` via a function used elsewhere in langchain,
# it also sees the same new value.
assert new_value == new_fn_reading
# If we access `debug` via `get_debug()` we also get the same value.
assert new_value == get_debug()
finally:
# Make sure we don't alter global state, even if the test fails.
# Always reset `debug` to the value it had before.
set_debug(previous_value)
def test_verbose_is_settable_directly() -> None:
import langchain
from langchain.chains.base import _get_verbosity
previous_value = langchain.verbose
previous_fn_reading = _get_verbosity()
assert previous_value == previous_fn_reading
# Flip the value of the flag.
langchain.verbose = not previous_value
new_value = langchain.verbose
new_fn_reading = _get_verbosity()
try:
# We successfully changed the value of `verbose`.
assert new_value != previous_value
# If we access `verbose` via a function used elsewhere in langchain,
# it also sees the same new value.
assert new_value == new_fn_reading
# If we access `verbose` via `get_verbose()` we also get the same value.
assert new_value == get_verbose()
finally:
# Make sure we don't alter global state, even if the test fails.
# Always reset `verbose` to the value it had before.
set_verbose(previous_value)
def test_verbose_is_settable_via_setter() -> None:
from langchain import globals
from langchain.chains.base import _get_verbosity
previous_value = globals._verbose
previous_fn_reading = _get_verbosity()
assert previous_value == previous_fn_reading
# Flip the value of the flag.
set_verbose(not previous_value)
new_value = globals._verbose
new_fn_reading = _get_verbosity()
try:
# We successfully changed the value of `verbose`.
assert new_value != previous_value
# If we access `verbose` via a function used elsewhere in langchain,
# it also sees the same new value.
assert new_value == new_fn_reading
# If we access `verbose` via `get_verbose()` we also get the same value.
assert new_value == get_verbose()
finally:
# Make sure we don't alter global state, even if the test fails.
# Always reset `verbose` to the value it had before.
set_verbose(previous_value)
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~whatsapp_chat.py | import re
from pathlib import Path
from typing import List
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
def concatenate_rows(date: str, sender: str, text: str) -> str:
"""Combine message information in a readable format ready to be used."""
return f"{sender} on {date}: {text}\n\n"
class WhatsAppChatLoader(BaseLoader):
"""Load `WhatsApp` messages text file."""
def __init__(self, path: str):
"""Initialize with path."""
self.file_path = path
def load(self) -> List[Document]:
"""Load documents."""
p = Path(self.file_path)
text_content = ""
with open(p, encoding="utf8") as f:
lines = f.readlines()
message_line_regex = r"""
\[?
(
\d{1,4}
[\/.]
\d{1,2}
[\/.]
\d{1,4}
,\s
\d{1,2}
:\d{2}
(?:
:\d{2}
)?
(?:[\s_](?:AM|PM))?
)
\]?
[\s-]*
([~\w\s]+)
[:]+
\s
(.+)
"""
ignore_lines = ["This message was deleted", "<Media omitted>"]
for line in lines:
result = re.match(
message_line_regex, line.strip(), flags=re.VERBOSE | re.IGNORECASE
)
if result:
date, sender, text = result.groups()
if text not in ignore_lines:
text_content += concatenate_rows(date, sender, text)
metadata = {"source": str(p)}
return [Document(page_content=text_content, metadata=metadata)]
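# A minimal usage sketch; "chat.txt" is an illustrative path to an exported
# WhatsApp chat text file.
if __name__ == "__main__":
    loader = WhatsAppChatLoader("chat.txt")
    docs = loader.load()
    print(docs[0].page_content[:200])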
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~chat_models~azureml_endpoint.py | import json
from typing import Any, Dict, List, Optional, cast
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.chat_models import SimpleChatModel
from libs.core.langchain_core.messages import (
AIMessage,
BaseMessage,
ChatMessage,
HumanMessage,
SystemMessage,
)
from libs.core.langchain_core.pydantic_v1 import SecretStr, validator
from libs.core.langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from langchain_community.llms.azureml_endpoint import (
AzureMLEndpointClient,
ContentFormatterBase,
)
class LlamaContentFormatter(ContentFormatterBase):
"""Content formatter for `LLaMA`."""
SUPPORTED_ROLES: List[str] = ["user", "assistant", "system"]
@staticmethod
def _convert_message_to_dict(message: BaseMessage) -> Dict:
"""Converts message to a dict according to role"""
content = cast(str, message.content)
if isinstance(message, HumanMessage):
return {
"role": "user",
"content": ContentFormatterBase.escape_special_characters(content),
}
elif isinstance(message, AIMessage):
return {
"role": "assistant",
"content": ContentFormatterBase.escape_special_characters(content),
}
elif isinstance(message, SystemMessage):
return {
"role": "system",
"content": ContentFormatterBase.escape_special_characters(content),
}
elif (
isinstance(message, ChatMessage)
and message.role in LlamaContentFormatter.SUPPORTED_ROLES
):
return {
"role": message.role,
"content": ContentFormatterBase.escape_special_characters(content),
}
else:
supported = ",".join(
[role for role in LlamaContentFormatter.SUPPORTED_ROLES]
)
raise ValueError(
f"""Received unsupported role.
Supported roles for the LLaMa Foundation Model: {supported}"""
)
def _format_request_payload(
self, messages: List[BaseMessage], model_kwargs: Dict
) -> bytes:
chat_messages = [
LlamaContentFormatter._convert_message_to_dict(message)
for message in messages
]
prompt = json.dumps(
{"input_data": {"input_string": chat_messages, "parameters": model_kwargs}}
)
return self.format_request_payload(prompt=prompt, model_kwargs=model_kwargs)
def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes:
"""Formats the request according to the chosen api"""
return str.encode(prompt)
def format_response_payload(self, output: bytes) -> str:
"""Formats response"""
return json.loads(output)["output"]
class AzureMLChatOnlineEndpoint(SimpleChatModel):
"""`AzureML` Chat models API.
Example:
.. code-block:: python
azure_chat = AzureMLChatOnlineEndpoint(
endpoint_url="https://<your-endpoint>.<your_region>.inference.ml.azure.com/score",
endpoint_api_key="my-api-key",
content_formatter=content_formatter,
)
"""
endpoint_url: str = ""
"""URL of pre-existing Endpoint. Should be passed to constructor or specified as
env var `AZUREML_ENDPOINT_URL`."""
endpoint_api_key: SecretStr = convert_to_secret_str("")
"""Authentication Key for Endpoint. Should be passed to constructor or specified as
env var `AZUREML_ENDPOINT_API_KEY`."""
http_client: Any = None #: :meta private:
content_formatter: Any = None
"""The content formatter that provides an input and output
transform function to handle formats between the LLM and
the endpoint"""
model_kwargs: Optional[dict] = None
"""Keyword arguments to pass to the model."""
@validator("http_client", always=True, allow_reuse=True)
@classmethod
def validate_client(cls, field_value: Any, values: Dict) -> AzureMLEndpointClient:
"""Validate that api key and python package exist in environment."""
values["endpoint_api_key"] = convert_to_secret_str(
get_from_dict_or_env(values, "endpoint_api_key", "AZUREML_ENDPOINT_API_KEY")
)
endpoint_url = get_from_dict_or_env(
values, "endpoint_url", "AZUREML_ENDPOINT_URL"
)
http_client = AzureMLEndpointClient(
endpoint_url, values["endpoint_api_key"].get_secret_value()
)
return http_client
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "azureml_chat_endpoint"
def _call(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to an AzureML Managed Online endpoint.
Args:
messages: The messages in the conversation with the chat model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = azureml_model("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
request_payload = self.content_formatter._format_request_payload(
messages, _model_kwargs
)
response_payload = self.http_client.call(request_payload, **kwargs)
generated_text = self.content_formatter.format_response_payload(
response_payload
)
return generated_text
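# A minimal usage sketch, assuming AZUREML_ENDPOINT_URL and
# AZUREML_ENDPOINT_API_KEY are set in the environment and that the deployed
# model accepts the LLaMA chat payload built by LlamaContentFormatter above.
if __name__ == "__main__":
    chat = AzureMLChatOnlineEndpoint(content_formatter=LlamaContentFormatter())
    print(chat([HumanMessage(content="Tell me a joke.")]))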
| [
"input_string",
"parameters",
"input_data"
] |
2024-01-10 | mth93/langchain | libs~community~tests~unit_tests~document_loaders~test_mongodb.py | from typing import Dict, List
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.mongodb import MongodbLoader
@pytest.fixture
def raw_docs() -> List[Dict]:
return [
{"_id": "1", "address": {"building": "1", "room": "1"}},
{"_id": "2", "address": {"building": "2", "room": "2"}},
]
@pytest.fixture
def expected_documents() -> List[Document]:
return [
Document(
page_content="{'_id': '1', 'address': {'building': '1', 'room': '1'}}",
metadata={"database": "sample_restaurants", "collection": "restaurants"},
),
Document(
page_content="{'_id': '2', 'address': {'building': '2', 'room': '2'}}",
metadata={"database": "sample_restaurants", "collection": "restaurants"},
),
]
@pytest.mark.requires("motor")
async def test_load_mocked(expected_documents: List[Document]) -> None:
mock_async_load = AsyncMock()
mock_async_load.return_value = expected_documents
mock_find = AsyncMock()
mock_find.return_value = iter(expected_documents)
mock_count_documents = MagicMock()
mock_count_documents.return_value = len(expected_documents)
mock_collection = MagicMock()
mock_collection.find = mock_find
mock_collection.count_documents = mock_count_documents
with patch(
"motor.motor_asyncio.AsyncIOMotorClient", return_value=MagicMock()
), patch(
"langchain_community.document_loaders.mongodb.MongodbLoader.aload",
new=mock_async_load,
):
loader = MongodbLoader(
"mongodb://localhost:27017", "test_db", "test_collection"
)
loader.collection = mock_collection
documents = await loader.aload()
assert documents == expected_documents
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~vectorstores~meilisearch.py | from __future__ import annotations
import uuid
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Type
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.utils import get_from_env
from libs.core.langchain_core.vectorstores import VectorStore
if TYPE_CHECKING:
from meilisearch import Client
def _create_client(
client: Optional[Client] = None,
url: Optional[str] = None,
api_key: Optional[str] = None,
) -> Client:
try:
import meilisearch
except ImportError:
raise ImportError(
"Could not import meilisearch python package. "
"Please install it with `pip install meilisearch`."
)
if not client:
url = url or get_from_env("url", "MEILI_HTTP_ADDR")
try:
api_key = api_key or get_from_env("api_key", "MEILI_MASTER_KEY")
except Exception:
pass
client = meilisearch.Client(url=url, api_key=api_key)
elif not isinstance(client, meilisearch.Client):
raise ValueError(
f"client should be an instance of meilisearch.Client, "
f"got {type(client)}"
)
try:
client.version()
except ValueError as e:
raise ValueError(f"Failed to connect to Meilisearch: {e}")
return client
class Meilisearch(VectorStore):
"""`Meilisearch` vector store.
To use this, you need to have `meilisearch` python package installed,
and a running Meilisearch instance.
To learn more about Meilisearch Python, refer to the in-depth
Meilisearch Python documentation: https://meilisearch.github.io/meilisearch-python/.
See the following documentation for how to run a Meilisearch instance:
https://www.meilisearch.com/docs/learn/getting_started/quick_start.
Example:
.. code-block:: python
from langchain_community.vectorstores import Meilisearch
from langchain_community.embeddings.openai import OpenAIEmbeddings
import meilisearch
# api_key is optional; provide it if your meilisearch instance requires it
client = meilisearch.Client(url='http://127.0.0.1:7700', api_key='***')
embeddings = OpenAIEmbeddings()
vectorstore = Meilisearch(
embedding=embeddings,
client=client,
index_name='langchain_demo',
text_key='text')
"""
def __init__(
self,
embedding: Embeddings,
client: Optional[Client] = None,
url: Optional[str] = None,
api_key: Optional[str] = None,
index_name: str = "langchain-demo",
text_key: str = "text",
metadata_key: str = "metadata",
):
"""Initialize with Meilisearch client."""
client = _create_client(client=client, url=url, api_key=api_key)
self._client = client
self._index_name = index_name
self._embedding = embedding
self._text_key = text_key
self._metadata_key = metadata_key
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embedding and add them to the vector store.
Args:
texts (Iterable[str]): Iterable of strings/text to add to the vectorstore.
metadatas (Optional[List[dict]]): Optional list of metadata.
Defaults to None.
ids Optional[List[str]]: Optional list of IDs.
Defaults to None.
Returns:
List[str]: List of IDs of the texts added to the vectorstore.
"""
texts = list(texts)
# Embed and create the documents
docs = []
if ids is None:
ids = [uuid.uuid4().hex for _ in texts]
if metadatas is None:
metadatas = [{} for _ in texts]
embedding_vectors = self._embedding.embed_documents(texts)
for i, text in enumerate(texts):
id = ids[i]
metadata = metadatas[i]
metadata[self._text_key] = text
embedding = embedding_vectors[i]
docs.append(
{
"id": id,
"_vectors": embedding,
f"{self._metadata_key}": metadata,
}
)
# Send to Meilisearch
self._client.index(str(self._index_name)).add_documents(docs)
return ids
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return meilisearch documents most similar to the query.
Args:
query (str): Query text for which to find similar documents.
k (int): Number of documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata.
Defaults to None.
Returns:
List[Document]: List of Documents most similar to the query
text and score for each.
"""
docs_and_scores = self.similarity_search_with_score(
query=query,
k=k,
filter=filter,
kwargs=kwargs,
)
return [doc for doc, _ in docs_and_scores]
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return meilisearch documents most similar to the query, along with scores.
Args:
query (str): Query text for which to find similar documents.
k (int): Number of documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata.
Defaults to None.
Returns:
List[Document]: List of Documents most similar to the query
text and score for each.
"""
_query = self._embedding.embed_query(query)
docs = self.similarity_search_by_vector_with_scores(
embedding=_query,
k=k,
filter=filter,
kwargs=kwargs,
)
return docs
def similarity_search_by_vector_with_scores(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return meilisearch documents most similar to embedding vector.
Args:
embedding (List[float]): Embedding to look up similar documents.
k (int): Number of documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata.
Defaults to None.
Returns:
List[Document]: List of Documents most similar to the query
vector and score for each.
"""
docs = []
results = self._client.index(str(self._index_name)).search(
"", {"vector": embedding, "limit": k, "filter": filter}
)
for result in results["hits"]:
metadata = result[self._metadata_key]
if self._text_key in metadata:
text = metadata.pop(self._text_key)
semantic_score = result["_semanticScore"]
docs.append(
(Document(page_content=text, metadata=metadata), semantic_score)
)
return docs
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return meilisearch documents most similar to embedding vector.
Args:
embedding (List[float]): Embedding to look up similar documents.
k (int): Number of documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata.
Defaults to None.
Returns:
List[Document]: List of Documents most similar to the query
vector and score for each.
"""
docs = self.similarity_search_by_vector_with_scores(
embedding=embedding,
k=k,
filter=filter,
kwargs=kwargs,
)
return [doc for doc, _ in docs]
@classmethod
def from_texts(
cls: Type[Meilisearch],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
client: Optional[Client] = None,
url: Optional[str] = None,
api_key: Optional[str] = None,
index_name: str = "langchain-demo",
ids: Optional[List[str]] = None,
text_key: Optional[str] = "text",
metadata_key: Optional[str] = "metadata",
**kwargs: Any,
) -> Meilisearch:
"""Construct Meilisearch wrapper from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Adds the documents to a provided Meilisearch index.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain_community.vectorstores import Meilisearch
from langchain_community.embeddings import OpenAIEmbeddings
import meilisearch
                # Provide the URL of your Meilisearch instance and, if it
                # requires one, the API key
                client = meilisearch.Client(url='http://127.0.0.1:7700', api_key='***')
                embeddings = OpenAIEmbeddings()
                docsearch = Meilisearch.from_texts(
                    texts=texts,
                    embedding=embeddings,
                    client=client,
)
"""
client = _create_client(client=client, url=url, api_key=api_key)
vectorstore = cls(
embedding=embedding,
client=client,
index_name=index_name,
)
vectorstore.add_texts(
texts=texts,
metadatas=metadatas,
ids=ids,
text_key=text_key,
metadata_key=metadata_key,
)
return vectorstore
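# A minimal usage sketch, assuming a Meilisearch instance is running at the URL
# below and that OPENAI_API_KEY is set for the embeddings; all values are
# illustrative.
if __name__ == "__main__":
    from langchain_community.embeddings import OpenAIEmbeddings
    store = Meilisearch.from_texts(
        texts=["Meilisearch is an open-source search engine."],
        embedding=OpenAIEmbeddings(),
        url="http://127.0.0.1:7700",
        index_name="langchain-demo",
    )
    print(store.similarity_search("search engine", k=1))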
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~runnables~hub.py | from typing import Any, Optional
from libs.core.langchain_core.runnables.base import Input, Output, RunnableBindingBase
class HubRunnable(RunnableBindingBase[Input, Output]):
"""
An instance of a runnable stored in the LangChain Hub.
"""
owner_repo_commit: str
def __init__(
self,
owner_repo_commit: str,
*,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
**kwargs: Any,
) -> None:
from langchain.hub import pull
pulled = pull(owner_repo_commit, api_url=api_url, api_key=api_key)
super_kwargs = {
"kwargs": {},
"config": {},
**kwargs,
"bound": pulled,
"owner_repo_commit": owner_repo_commit,
}
super().__init__(**super_kwargs)
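# A minimal usage sketch, assuming the langchainhub package is installed and the
# LangChain Hub is reachable; the handle below refers to a public prompt and the
# input keys are illustrative.
if __name__ == "__main__":
    rag_prompt = HubRunnable("rlm/rag-prompt")
    result = rag_prompt.invoke(
        {"context": "LangChain Hub stores prompts.", "question": "What does the Hub store?"}
    )
    print(result)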
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~chat_models~test_gpt_router.py | """Test GPTRouter API wrapper."""
from typing import List
import pytest
from libs.core.langchain_core.callbacks import (
CallbackManager,
)
from libs.core.langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from libs.core.langchain_core.outputs import ChatGeneration, LLMResult
from langchain_community.chat_models.gpt_router import GPTRouter, GPTRouterModel
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
def test_gpt_router_call() -> None:
"""Test valid call to GPTRouter."""
anthropic_claude = GPTRouterModel(
name="claude-instant-1.2", provider_name="anthropic"
)
chat = GPTRouter(models_priority_list=[anthropic_claude])
message = HumanMessage(content="Hello World")
response = chat([message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
def test_gpt_router_call_incorrect_model() -> None:
"""Test invalid modelName"""
anthropic_claude = GPTRouterModel(
name="model_does_not_exist", provider_name="anthropic"
)
chat = GPTRouter(models_priority_list=[anthropic_claude])
message = HumanMessage(content="Hello World")
with pytest.raises(Exception):
chat([message])
def test_gpt_router_generate() -> None:
"""Test generate method of GPTRouter."""
anthropic_claude = GPTRouterModel(
name="claude-instant-1.2", provider_name="anthropic"
)
chat = GPTRouter(models_priority_list=[anthropic_claude])
chat_messages: List[List[BaseMessage]] = [
[HumanMessage(content="If (5 + x = 18), what is x?")]
]
messages_copy = [messages.copy() for messages in chat_messages]
result: LLMResult = chat.generate(chat_messages)
assert isinstance(result, LLMResult)
for response in result.generations[0]:
assert isinstance(response, ChatGeneration)
assert isinstance(response.text, str)
assert response.text == response.message.content
assert chat_messages == messages_copy
def test_gpt_router_streaming() -> None:
"""Test streaming tokens from GPTRouter."""
anthropic_claude = GPTRouterModel(
name="claude-instant-1.2", provider_name="anthropic"
)
chat = GPTRouter(models_priority_list=[anthropic_claude], streaming=True)
message = HumanMessage(content="Hello")
response = chat([message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
def test_gpt_router_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
anthropic_claude = GPTRouterModel(
name="claude-instant-1.2", provider_name="anthropic"
)
chat = GPTRouter(
models_priority_list=[anthropic_claude],
streaming=True,
callback_manager=callback_manager,
verbose=True,
)
message = HumanMessage(content="Write me a 5 line poem.")
chat([message])
assert callback_handler.llm_streams > 1
| [
"If (5 + x = 18), what is x?",
"Write me a 5 line poem.",
"Hello",
"Hello World"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~stochasticai.py | import logging
import time
from typing import Any, Dict, List, Mapping, Optional
import requests
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.pydantic_v1 import Extra, Field, SecretStr, root_validator
from libs.core.langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
class StochasticAI(LLM):
"""StochasticAI large language models.
To use, you should have the environment variable ``STOCHASTICAI_API_KEY``
set with your API key.
Example:
.. code-block:: python
from langchain_community.llms import StochasticAI
stochasticai = StochasticAI(api_url="")
"""
api_url: str = ""
"""Model name to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not
explicitly specified."""
stochasticai_api_key: Optional[SecretStr] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
stochasticai_api_key = convert_to_secret_str(
get_from_dict_or_env(values, "stochasticai_api_key", "STOCHASTICAI_API_KEY")
)
values["stochasticai_api_key"] = stochasticai_api_key
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"endpoint_url": self.api_url},
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "stochasticai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to StochasticAI's complete endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
                response = stochasticai("Tell me a joke.")
"""
params = self.model_kwargs or {}
params = {**params, **kwargs}
response_post = requests.post(
url=self.api_url,
json={"prompt": prompt, "params": params},
headers={
"apiKey": f"{self.stochasticai_api_key.get_secret_value()}",
"Accept": "application/json",
"Content-Type": "application/json",
},
)
response_post.raise_for_status()
response_post_json = response_post.json()
completed = False
while not completed:
response_get = requests.get(
url=response_post_json["data"]["responseUrl"],
headers={
"apiKey": f"{self.stochasticai_api_key.get_secret_value()}",
"Accept": "application/json",
"Content-Type": "application/json",
},
)
response_get.raise_for_status()
response_get_json = response_get.json()["data"]
text = response_get_json.get("completion")
completed = text is not None
time.sleep(0.5)
text = text[0]
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
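# A minimal usage sketch, assuming STOCHASTICAI_API_KEY is set in the
# environment; the api_url below is an illustrative placeholder for a deployed
# model endpoint, not a real URL.
if __name__ == "__main__":
    llm = StochasticAI(api_url="https://api.stochastic.ai/v1/modelApi/submit/flan-t5")
    print(llm("Tell me a joke."))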
| [] |
2024-01-10 | mth93/langchain | libs~langchain~tests~unit_tests~smith~evaluation~test_runner_utils.py | """Test the LangSmith evaluation helpers."""
import uuid
from datetime import datetime
from typing import Any, Dict, Iterator, List, Optional, Union
from unittest import mock
import pytest
from freezegun import freeze_time
from libs.core.langchain_core.language_models import BaseLanguageModel
from langsmith.client import Client
from langsmith.schemas import Dataset, Example
from langchain.chains.base import Chain
from langchain.chains.transform import TransformChain
from langchain.smith.evaluation.runner_utils import (
InputFormatError,
_get_messages,
_get_prompt,
_run_llm,
_run_llm_or_chain,
_validate_example_inputs_for_chain,
_validate_example_inputs_for_language_model,
arun_on_dataset,
)
from tests.unit_tests.llms.fake_chat_model import FakeChatModel
from tests.unit_tests.llms.fake_llm import FakeLLM
_CREATED_AT = datetime(2015, 1, 1, 0, 0, 0)
_TENANT_ID = "7a3d2b56-cd5b-44e5-846f-7eb6e8144ce4"
_EXAMPLE_MESSAGE = {
"data": {"content": "Foo", "example": False, "additional_kwargs": {}},
"type": "human",
}
_VALID_MESSAGES = [
{"messages": [_EXAMPLE_MESSAGE], "other_key": "value"},
{"messages": [], "other_key": "value"},
{
"messages": [[_EXAMPLE_MESSAGE, _EXAMPLE_MESSAGE]],
"other_key": "value",
},
{"any_key": [_EXAMPLE_MESSAGE]},
{"any_key": [[_EXAMPLE_MESSAGE, _EXAMPLE_MESSAGE]]},
]
_VALID_PROMPTS = [
{"prompts": ["foo"], "other_key": "value"},
{"prompt": "foo", "other_key": ["bar", "baz"]},
{"some_key": "foo"},
{"some_key": ["foo"]},
]
_INVALID_PROMPTS = (
[
{"prompts": "foo"},
{"prompt": ["foo"]},
{"some_key": 3},
{"some_key": "foo", "other_key": "bar"},
],
)
@pytest.mark.parametrize(
"inputs",
_VALID_MESSAGES,
)
def test__get_messages_valid(inputs: Dict[str, Any]) -> None:
{"messages": []}
_get_messages(inputs)
@pytest.mark.parametrize(
"inputs",
_VALID_PROMPTS,
)
def test__get_prompts_valid(inputs: Dict[str, Any]) -> None:
_get_prompt(inputs)
@pytest.mark.parametrize(
"inputs",
_VALID_PROMPTS,
)
def test__validate_example_inputs_for_language_model(inputs: Dict[str, Any]) -> None:
mock_ = mock.MagicMock()
mock_.inputs = inputs
_validate_example_inputs_for_language_model(mock_, None)
@pytest.mark.parametrize(
"inputs",
_INVALID_PROMPTS,
)
def test__validate_example_inputs_for_language_model_invalid(
inputs: Dict[str, Any],
) -> None:
mock_ = mock.MagicMock()
mock_.inputs = inputs
with pytest.raises(InputFormatError):
_validate_example_inputs_for_language_model(mock_, None)
def test__validate_example_inputs_for_chain_single_input() -> None:
mock_ = mock.MagicMock()
mock_.inputs = {"foo": "bar"}
chain = mock.MagicMock()
chain.input_keys = ["def not foo"]
_validate_example_inputs_for_chain(mock_, chain, None)
def test__validate_example_inputs_for_chain_input_mapper() -> None:
mock_ = mock.MagicMock()
mock_.inputs = {"foo": "bar", "baz": "qux"}
chain = mock.MagicMock()
chain.input_keys = ["not foo", "not baz", "not qux"]
def wrong_output_format(inputs: dict) -> str:
assert "foo" in inputs
assert "baz" in inputs
return "hehe"
with pytest.raises(InputFormatError, match="must be a dictionary"):
_validate_example_inputs_for_chain(mock_, chain, wrong_output_format)
def wrong_output_keys(inputs: dict) -> dict:
assert "foo" in inputs
assert "baz" in inputs
return {"not foo": "foo", "not baz": "baz"}
with pytest.raises(InputFormatError, match="Missing keys after loading example"):
_validate_example_inputs_for_chain(mock_, chain, wrong_output_keys)
def input_mapper(inputs: dict) -> dict:
assert "foo" in inputs
assert "baz" in inputs
return {"not foo": inputs["foo"], "not baz": inputs["baz"], "not qux": "qux"}
_validate_example_inputs_for_chain(mock_, chain, input_mapper)
def test__validate_example_inputs_for_chain_multi_io() -> None:
mock_ = mock.MagicMock()
mock_.inputs = {"foo": "bar", "baz": "qux"}
chain = mock.MagicMock()
chain.input_keys = ["foo", "baz"]
_validate_example_inputs_for_chain(mock_, chain, None)
def test__validate_example_inputs_for_chain_single_input_multi_expect() -> None:
mock_ = mock.MagicMock()
mock_.inputs = {"foo": "bar"}
chain = mock.MagicMock()
chain.input_keys = ["def not foo", "oh here is another"]
with pytest.raises(InputFormatError, match="Example inputs missing expected"):
_validate_example_inputs_for_chain(mock_, chain, None)
@pytest.mark.parametrize("inputs", _INVALID_PROMPTS)
def test__get_prompts_invalid(inputs: Dict[str, Any]) -> None:
with pytest.raises(InputFormatError):
_get_prompt(inputs)
def test_run_llm_or_chain_with_input_mapper() -> None:
example = Example(
id=uuid.uuid4(),
created_at=_CREATED_AT,
inputs={"the wrong input": "1", "another key": "2"},
outputs={"output": "2"},
dataset_id=str(uuid.uuid4()),
)
def run_val(inputs: dict) -> dict:
assert "the right input" in inputs
return {"output": "2"}
mock_chain = TransformChain(
input_variables=["the right input"],
output_variables=["output"],
transform=run_val,
)
def input_mapper(inputs: dict) -> dict:
assert "the wrong input" in inputs
return {"the right input": inputs["the wrong input"]}
result = _run_llm_or_chain(
example,
{"callbacks": [], "tags": []},
llm_or_chain_factory=lambda: mock_chain,
input_mapper=input_mapper,
)
assert result == {"output": "2", "the right input": "1"}
bad_result = _run_llm_or_chain(
example, {"callbacks": [], "tags": []}, llm_or_chain_factory=lambda: mock_chain
)
assert "Error" in bad_result
# Try with LLM
def llm_input_mapper(inputs: dict) -> str:
assert "the wrong input" in inputs
return "the right input"
mock_llm = FakeLLM(queries={"the right input": "somenumber"})
llm_result = _run_llm_or_chain(
example,
{"callbacks": [], "tags": []},
llm_or_chain_factory=mock_llm,
input_mapper=llm_input_mapper,
)
assert isinstance(llm_result, str)
assert llm_result == "somenumber"
@pytest.mark.parametrize(
"inputs",
[
{"one_key": [_EXAMPLE_MESSAGE], "other_key": "value"},
{
"messages": [[_EXAMPLE_MESSAGE, _EXAMPLE_MESSAGE], _EXAMPLE_MESSAGE],
"other_key": "value",
},
{"prompts": "foo"},
{},
],
)
def test__get_messages_invalid(inputs: Dict[str, Any]) -> None:
with pytest.raises(InputFormatError):
_get_messages(inputs)
@pytest.mark.parametrize("inputs", _VALID_PROMPTS + _VALID_MESSAGES)
def test_run_llm_all_formats(inputs: Dict[str, Any]) -> None:
llm = FakeLLM()
_run_llm(llm, inputs, mock.MagicMock())
@pytest.mark.parametrize("inputs", _VALID_MESSAGES + _VALID_PROMPTS)
def test_run_chat_model_all_formats(inputs: Dict[str, Any]) -> None:
llm = FakeChatModel()
_run_llm(llm, inputs, mock.MagicMock())
@freeze_time("2023-01-01")
async def test_arun_on_dataset(monkeypatch: pytest.MonkeyPatch) -> None:
dataset = Dataset(
id=uuid.uuid4(),
name="test",
description="Test dataset",
owner_id="owner",
created_at=_CREATED_AT,
tenant_id=_TENANT_ID,
_host_url="http://localhost:1984",
)
uuids = [
"0c193153-2309-4704-9a47-17aee4fb25c8",
"0d11b5fd-8e66-4485-b696-4b55155c0c05",
"90d696f0-f10d-4fd0-b88b-bfee6df08b84",
"4ce2c6d8-5124-4c0c-8292-db7bdebcf167",
"7b5a524c-80fa-4960-888e-7d380f9a11ee",
]
examples = [
Example(
id=uuids[0],
created_at=_CREATED_AT,
inputs={"input": "1"},
outputs={"output": "2"},
dataset_id=str(uuid.uuid4()),
),
Example(
id=uuids[1],
created_at=_CREATED_AT,
inputs={"input": "3"},
outputs={"output": "4"},
dataset_id=str(uuid.uuid4()),
),
Example(
id=uuids[2],
created_at=_CREATED_AT,
inputs={"input": "5"},
outputs={"output": "6"},
dataset_id=str(uuid.uuid4()),
),
Example(
id=uuids[3],
created_at=_CREATED_AT,
inputs={"input": "7"},
outputs={"output": "8"},
dataset_id=str(uuid.uuid4()),
),
Example(
id=uuids[4],
created_at=_CREATED_AT,
inputs={"input": "9"},
outputs={"output": "10"},
dataset_id=str(uuid.uuid4()),
),
]
def mock_read_dataset(*args: Any, **kwargs: Any) -> Dataset:
return dataset
def mock_list_examples(*args: Any, **kwargs: Any) -> Iterator[Example]:
return iter(examples)
async def mock_arun_chain(
example: Example,
llm_or_chain: Union[BaseLanguageModel, Chain],
tags: Optional[List[str]] = None,
callbacks: Optional[Any] = None,
**kwargs: Any,
) -> Dict[str, Any]:
return {"result": f"Result for example {example.id}"}
def mock_create_project(*args: Any, **kwargs: Any) -> Any:
proj = mock.MagicMock()
proj.id = "123"
return proj
with mock.patch.object(
Client, "read_dataset", new=mock_read_dataset
), mock.patch.object(Client, "list_examples", new=mock_list_examples), mock.patch(
"langchain.smith.evaluation.runner_utils._arun_llm_or_chain",
new=mock_arun_chain,
), mock.patch.object(Client, "create_project", new=mock_create_project):
client = Client(api_url="http://localhost:1984", api_key="123")
chain = mock.MagicMock()
chain.input_keys = ["foothing"]
results = await arun_on_dataset(
dataset_name="test",
llm_or_chain_factory=lambda: chain,
concurrency_level=2,
project_name="test_project",
client=client,
)
expected = {
str(example.id): {
"output": {
"result": f"Result for example {uuid.UUID(str(example.id))}"
},
"input": {"input": example.inputs["input"]},
"reference": {
"output": example.outputs["output"]
if example.outputs is not None
else None
},
"feedback": [],
# No run since we mock the call to the llm above
"execution_time": None,
"run_id": None,
}
for example in examples
}
assert results["results"] == expected
| [
"([{'prompts': 'foo'}, {'prompt': ['foo']}, {'some_key': 3}, {'some_key': 'foo', 'other_key': 'bar'}],)",
"Foo",
"[{'prompts': ['foo'], 'other_key': 'value'}, {'prompt': 'foo', 'other_key': ['bar', 'baz']}, {'some_key': 'foo'}, {'some_key': ['foo']}]"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~cloudflare_workersai.py | import json
import logging
from typing import Any, Dict, Iterator, List, Optional
import requests
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.outputs import GenerationChunk
logger = logging.getLogger(__name__)
class CloudflareWorkersAI(LLM):
"""Langchain LLM class to help to access Cloudflare Workers AI service.
To use, you must provide an API token and
account ID to access Cloudflare Workers AI, and
pass it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain_community.llms.cloudflare_workersai import CloudflareWorkersAI
my_account_id = "my_account_id"
my_api_token = "my_secret_api_token"
llm_model = "@cf/meta/llama-2-7b-chat-int8"
cf_ai = CloudflareWorkersAI(
account_id=my_account_id,
api_token=my_api_token,
model=llm_model
)
""" # noqa: E501
account_id: str
api_token: str
model: str = "@cf/meta/llama-2-7b-chat-int8"
base_url: str = "https://api.cloudflare.com/client/v4/accounts"
streaming: bool = False
endpoint_url: str = ""
def __init__(self, **kwargs: Any) -> None:
"""Initialize the Cloudflare Workers AI class."""
super().__init__(**kwargs)
self.endpoint_url = f"{self.base_url}/{self.account_id}/ai/run/{self.model}"
@property
def _llm_type(self) -> str:
"""Return type of LLM."""
return "cloudflare"
@property
def _default_params(self) -> Dict[str, Any]:
"""Default parameters"""
return {}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Identifying parameters"""
return {
"account_id": self.account_id,
"api_token": self.api_token,
"model": self.model,
"base_url": self.base_url,
}
def _call_api(self, prompt: str, params: Dict[str, Any]) -> requests.Response:
"""Call Cloudflare Workers API"""
headers = {"Authorization": f"Bearer {self.api_token}"}
data = {"prompt": prompt, "stream": self.streaming, **params}
response = requests.post(self.endpoint_url, headers=headers, json=data)
return response
def _process_response(self, response: requests.Response) -> str:
"""Process API response"""
if response.ok:
data = response.json()
return data["result"]["response"]
else:
raise ValueError(f"Request failed with status {response.status_code}")
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
"""Streaming prediction"""
        original_streaming: bool = self.streaming
self.streaming = True
_response_prefix_count = len("data: ")
_response_stream_end = b"data: [DONE]"
for chunk in self._call_api(prompt, kwargs).iter_lines():
if chunk == _response_stream_end:
break
if len(chunk) > _response_prefix_count:
try:
data = json.loads(chunk[_response_prefix_count:])
except Exception as e:
logger.debug(chunk)
raise e
if data is not None and "response" in data:
yield GenerationChunk(text=data["response"])
if run_manager:
run_manager.on_llm_new_token(data["response"])
logger.debug("stream end")
        self.streaming = original_streaming
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Regular prediction"""
if self.streaming:
return "".join(
[c.text for c in self._stream(prompt, stop, run_manager, **kwargs)]
)
else:
response = self._call_api(prompt, kwargs)
return self._process_response(response)
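# A minimal usage sketch; the account id and API token below are placeholders
# that must be replaced with real Cloudflare credentials.
if __name__ == "__main__":
    llm = CloudflareWorkersAI(
        account_id="my_account_id",  # placeholder
        api_token="my_secret_api_token",  # placeholder
        model="@cf/meta/llama-2-7b-chat-int8",
    )
    print(llm("Tell me a joke about Cloudflare Workers"))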
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~unit_tests~tools~file_management~test_toolkit.py | """Test the FileManagementToolkit."""
from tempfile import TemporaryDirectory
import pytest
from libs.core.langchain_core.tools import BaseTool
from langchain_community.agent_toolkits.file_management.toolkit import (
FileManagementToolkit,
)
def test_file_toolkit_get_tools() -> None:
"""Test the get_tools method of FileManagementToolkit."""
with TemporaryDirectory() as temp_dir:
toolkit = FileManagementToolkit(root_dir=temp_dir)
tools = toolkit.get_tools()
assert len(tools) > 0
assert all(isinstance(tool, BaseTool) for tool in tools)
def test_file_toolkit_get_tools_with_selection() -> None:
"""Test the get_tools method of FileManagementToolkit with selected_tools."""
with TemporaryDirectory() as temp_dir:
toolkit = FileManagementToolkit(
root_dir=temp_dir, selected_tools=["read_file", "write_file"]
)
tools = toolkit.get_tools()
assert len(tools) == 2
tool_names = [tool.name for tool in tools]
assert "read_file" in tool_names
assert "write_file" in tool_names
def test_file_toolkit_invalid_tool() -> None:
"""Test the FileManagementToolkit with an invalid tool."""
with TemporaryDirectory() as temp_dir:
with pytest.raises(ValueError):
FileManagementToolkit(root_dir=temp_dir, selected_tools=["invalid_tool"])
def test_file_toolkit_root_dir() -> None:
"""Test the FileManagementToolkit root_dir handling."""
with TemporaryDirectory() as temp_dir:
toolkit = FileManagementToolkit(root_dir=temp_dir)
tools = toolkit.get_tools()
root_dirs = [tool.root_dir for tool in tools if hasattr(tool, "root_dir")]
assert all(root_dir == temp_dir for root_dir in root_dirs)
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~open_city_data.py | from typing import Iterator, List
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class OpenCityDataLoader(BaseLoader):
"""Load from `Open City`."""
def __init__(self, city_id: str, dataset_id: str, limit: int):
"""Initialize with dataset_id.
Example: https://dev.socrata.com/foundry/data.sfgov.org/vw6y-z8j6
e.g., city_id = data.sfgov.org
e.g., dataset_id = vw6y-z8j6
Args:
city_id: The Open City city identifier.
dataset_id: The Open City dataset identifier.
limit: The maximum number of documents to load.
"""
self.city_id = city_id
self.dataset_id = dataset_id
self.limit = limit
def lazy_load(self) -> Iterator[Document]:
"""Lazy load records."""
from sodapy import Socrata
client = Socrata(self.city_id, None)
results = client.get(self.dataset_id, limit=self.limit)
for record in results:
yield Document(
page_content=str(record),
metadata={
"source": self.city_id + "_" + self.dataset_id,
},
)
def load(self) -> List[Document]:
"""Load records."""
return list(self.lazy_load())
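# A minimal usage sketch using the example dataset referenced in the docstring
# above; requires the sodapy package and network access.
if __name__ == "__main__":
    loader = OpenCityDataLoader(city_id="data.sfgov.org", dataset_id="vw6y-z8j6", limit=2)
    for doc in loader.lazy_load():
        print(doc.metadata["source"])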
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~unit_tests~test_dependencies.py | """A unit test meant to catch accidental introduction of non-optional dependencies."""
from pathlib import Path
from typing import Any, Dict, Mapping
import pytest
import toml
HERE = Path(__file__).parent
PYPROJECT_TOML = HERE / "../../pyproject.toml"
@pytest.fixture()
def poetry_conf() -> Dict[str, Any]:
"""Load the pyproject.toml file."""
with open(PYPROJECT_TOML) as f:
return toml.load(f)["tool"]["poetry"]
def test_required_dependencies(poetry_conf: Mapping[str, Any]) -> None:
"""A test that checks if a new non-optional dependency is being introduced.
If this test is triggered, it means that a contributor is trying to introduce a new
required dependency. This should be avoided in most situations.
"""
# Get the dependencies from the [tool.poetry.dependencies] section
dependencies = poetry_conf["dependencies"]
is_required = {
package_name: isinstance(requirements, str)
or not requirements.get("optional", False)
for package_name, requirements in dependencies.items()
}
required_dependencies = [
package_name for package_name, required in is_required.items() if required
]
assert sorted(required_dependencies) == sorted(
[
"PyYAML",
"SQLAlchemy",
"aiohttp",
"dataclasses-json",
"langchain-core",
"langsmith",
"numpy",
"python",
"requests",
"tenacity",
]
)
unrequired_dependencies = [
package_name for package_name, required in is_required.items() if not required
]
in_extras = [dep for group in poetry_conf["extras"].values() for dep in group]
assert set(unrequired_dependencies) == set(in_extras)
def test_test_group_dependencies(poetry_conf: Mapping[str, Any]) -> None:
"""Check if someone is attempting to add additional test dependencies.
Only dependencies associated with test running infrastructure should be added
to the test group; e.g., pytest, pytest-cov etc.
Examples of dependencies that should NOT be included: boto3, azure, postgres, etc.
"""
test_group_deps = sorted(poetry_conf["group"]["test"]["dependencies"])
assert test_group_deps == sorted(
[
"duckdb-engine",
"freezegun",
"langchain-core",
"lark",
"pandas",
"pytest",
"pytest-asyncio",
"pytest-cov",
"pytest-dotenv",
"pytest-mock",
"pytest-socket",
"pytest-watcher",
"responses",
"syrupy",
"requests-mock",
]
)
def test_imports() -> None:
"""Test that you can import all top level things okay."""
from libs.core.langchain_core.prompts import BasePromptTemplate # noqa: F401
from langchain_community.callbacks import OpenAICallbackHandler # noqa: F401
from langchain_community.chat_models import ChatOpenAI # noqa: F401
from langchain_community.document_loaders import BSHTMLLoader # noqa: F401
from langchain_community.embeddings import OpenAIEmbeddings # noqa: F401
from langchain_community.llms import OpenAI # noqa: F401
from langchain_community.retrievers import VespaRetriever # noqa: F401
from langchain_community.tools import DuckDuckGoSearchResults # noqa: F401
from langchain_community.utilities import (
SearchApiAPIWrapper, # noqa: F401
SerpAPIWrapper, # noqa: F401
)
from langchain_community.vectorstores import FAISS # noqa: F401
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~vectorstores~tencentvectordb.py | """Wrapper around the Tencent vector database."""
from __future__ import annotations
import json
import logging
import time
from typing import Any, Dict, Iterable, List, Optional, Tuple
import numpy as np
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.utils import guard_import
from libs.core.langchain_core.vectorstores import VectorStore
from langchain_community.vectorstores.utils import maximal_marginal_relevance
logger = logging.getLogger(__name__)
class ConnectionParams:
"""Tencent vector DB Connection params.
See the following documentation for details:
https://cloud.tencent.com/document/product/1709/95820
    Attributes:
url (str) : The access address of the vector database server
that the client needs to connect to.
key (str): API key for client to access the vector database server,
which is used for authentication.
username (str) : Account for client to access the vector database server.
timeout (int) : Request Timeout.
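    Example:
        A construction sketch; the url and key below are placeholders, not
        real credentials.
        .. code-block:: python
            conn = ConnectionParams(
                url="http://10.0.X.X", key="your-api-key", username="root", timeout=20
            )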
"""
def __init__(self, url: str, key: str, username: str = "root", timeout: int = 10):
self.url = url
self.key = key
self.username = username
self.timeout = timeout
class IndexParams:
"""Tencent vector DB Index params.
See the following documentation for details:
https://cloud.tencent.com/document/product/1709/95826
"""
def __init__(
self,
dimension: int,
shard: int = 1,
replicas: int = 2,
index_type: str = "HNSW",
metric_type: str = "L2",
params: Optional[Dict] = None,
):
self.dimension = dimension
self.shard = shard
self.replicas = replicas
self.index_type = index_type
self.metric_type = metric_type
self.params = params
class TencentVectorDB(VectorStore):
"""Tencent VectorDB as a vector store.
In order to use this you need to have a database instance.
See the following documentation for details:
https://cloud.tencent.com/document/product/1709/94951
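    Example:
        A minimal usage sketch; the connection url and key are placeholders,
        and FakeEmbeddings is used only for illustration.
        .. code-block:: python
            from langchain_community.embeddings.fake import FakeEmbeddings
            from langchain_community.vectorstores.tencentvectordb import (
                ConnectionParams,
                TencentVectorDB,
            )
            vector_db = TencentVectorDB.from_texts(
                texts=["foo", "bar", "baz"],
                embedding=FakeEmbeddings(size=128),
                connection_params=ConnectionParams(
                    url="http://10.0.X.X", key="your-api-key"
                ),
            )
            docs = vector_db.similarity_search("foo", k=1)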
"""
field_id: str = "id"
field_vector: str = "vector"
field_text: str = "text"
field_metadata: str = "metadata"
def __init__(
self,
embedding: Embeddings,
connection_params: ConnectionParams,
index_params: IndexParams = IndexParams(128),
database_name: str = "LangChainDatabase",
collection_name: str = "LangChainCollection",
drop_old: Optional[bool] = False,
):
self.document = guard_import("tcvectordb.model.document")
tcvectordb = guard_import("tcvectordb")
self.embedding_func = embedding
self.index_params = index_params
self.vdb_client = tcvectordb.VectorDBClient(
url=connection_params.url,
username=connection_params.username,
key=connection_params.key,
timeout=connection_params.timeout,
)
db_list = self.vdb_client.list_databases()
db_exist: bool = False
for db in db_list:
if database_name == db.database_name:
db_exist = True
break
if db_exist:
self.database = self.vdb_client.database(database_name)
else:
self.database = self.vdb_client.create_database(database_name)
try:
self.collection = self.database.describe_collection(collection_name)
if drop_old:
self.database.drop_collection(collection_name)
self._create_collection(collection_name)
except tcvectordb.exceptions.VectorDBException:
self._create_collection(collection_name)
def _create_collection(self, collection_name: str) -> None:
enum = guard_import("tcvectordb.model.enum")
vdb_index = guard_import("tcvectordb.model.index")
index_type = None
for k, v in enum.IndexType.__members__.items():
if k == self.index_params.index_type:
index_type = v
if index_type is None:
raise ValueError("unsupported index_type")
metric_type = None
for k, v in enum.MetricType.__members__.items():
if k == self.index_params.metric_type:
metric_type = v
if metric_type is None:
raise ValueError("unsupported metric_type")
if self.index_params.params is None:
params = vdb_index.HNSWParams(m=16, efconstruction=200)
else:
params = vdb_index.HNSWParams(
m=self.index_params.params.get("M", 16),
efconstruction=self.index_params.params.get("efConstruction", 200),
)
index = vdb_index.Index(
vdb_index.FilterIndex(
self.field_id, enum.FieldType.String, enum.IndexType.PRIMARY_KEY
),
vdb_index.VectorIndex(
self.field_vector,
self.index_params.dimension,
index_type,
metric_type,
params,
),
vdb_index.FilterIndex(
self.field_text, enum.FieldType.String, enum.IndexType.FILTER
),
vdb_index.FilterIndex(
self.field_metadata, enum.FieldType.String, enum.IndexType.FILTER
),
)
self.collection = self.database.create_collection(
name=collection_name,
shard=self.index_params.shard,
replicas=self.index_params.replicas,
description="Collection for LangChain",
index=index,
)
@property
def embeddings(self) -> Embeddings:
return self.embedding_func
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
connection_params: Optional[ConnectionParams] = None,
index_params: Optional[IndexParams] = None,
database_name: str = "LangChainDatabase",
collection_name: str = "LangChainCollection",
drop_old: Optional[bool] = False,
**kwargs: Any,
) -> TencentVectorDB:
"""Create a collection, indexes it with HNSW, and insert data."""
if len(texts) == 0:
raise ValueError("texts is empty")
if connection_params is None:
raise ValueError("connection_params is empty")
try:
embeddings = embedding.embed_documents(texts[0:1])
except NotImplementedError:
embeddings = [embedding.embed_query(texts[0])]
dimension = len(embeddings[0])
if index_params is None:
index_params = IndexParams(dimension=dimension)
else:
index_params.dimension = dimension
vector_db = cls(
embedding=embedding,
connection_params=connection_params,
index_params=index_params,
database_name=database_name,
collection_name=collection_name,
drop_old=drop_old,
)
vector_db.add_texts(texts=texts, metadatas=metadatas)
return vector_db
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
timeout: Optional[int] = None,
batch_size: int = 1000,
**kwargs: Any,
) -> List[str]:
"""Insert text data into TencentVectorDB."""
texts = list(texts)
try:
embeddings = self.embedding_func.embed_documents(texts)
except NotImplementedError:
embeddings = [self.embedding_func.embed_query(x) for x in texts]
if len(embeddings) == 0:
logger.debug("Nothing to insert, skipping.")
return []
pks: list[str] = []
total_count = len(embeddings)
for start in range(0, total_count, batch_size):
# Grab end index
docs = []
end = min(start + batch_size, total_count)
for id in range(start, end, 1):
metadata = "{}"
if metadatas is not None:
metadata = json.dumps(metadatas[id])
doc = self.document.Document(
id="{}-{}-{}".format(time.time_ns(), hash(texts[id]), id),
vector=embeddings[id],
text=texts[id],
metadata=metadata,
)
docs.append(doc)
pks.append(str(id))
self.collection.upsert(docs, timeout)
return pks
def similarity_search(
self,
query: str,
k: int = 4,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a similarity search against the query string."""
res = self.similarity_search_with_score(
query=query, k=k, param=param, expr=expr, timeout=timeout, **kwargs
)
return [doc for doc, _ in res]
def similarity_search_with_score(
self,
query: str,
k: int = 4,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Perform a search on a query string and return results with score."""
# Embed the query text.
embedding = self.embedding_func.embed_query(query)
res = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, param=param, expr=expr, timeout=timeout, **kwargs
)
return res
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a similarity search against the query string."""
res = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, param=param, expr=expr, timeout=timeout, **kwargs
)
return [doc for doc, _ in res]
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Perform a search on a query string and return results with score."""
filter = None if expr is None else self.document.Filter(expr)
ef = 10 if param is None else param.get("ef", 10)
res: List[List[Dict]] = self.collection.search(
vectors=[embedding],
filter=filter,
params=self.document.HNSWSearchParams(ef=ef),
retrieve_vector=False,
limit=k,
timeout=timeout,
)
# Organize results.
ret: List[Tuple[Document, float]] = []
if res is None or len(res) == 0:
return ret
for result in res[0]:
meta = result.get(self.field_metadata)
if meta is not None:
meta = json.loads(meta)
doc = Document(page_content=result.get(self.field_text), metadata=meta)
pair = (doc, result.get("score", 0.0))
ret.append(pair)
return ret
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a search and return results that are reordered by MMR."""
embedding = self.embedding_func.embed_query(query)
return self.max_marginal_relevance_search_by_vector(
embedding=embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
param=param,
expr=expr,
timeout=timeout,
**kwargs,
)
def max_marginal_relevance_search_by_vector(
self,
embedding: list[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a search and return results that are reordered by MMR."""
filter = None if expr is None else self.document.Filter(expr)
ef = 10 if param is None else param.get("ef", 10)
res: List[List[Dict]] = self.collection.search(
vectors=[embedding],
filter=filter,
params=self.document.HNSWSearchParams(ef=ef),
retrieve_vector=True,
limit=fetch_k,
timeout=timeout,
)
# Organize results.
documents = []
ordered_result_embeddings = []
for result in res[0]:
meta = result.get(self.field_metadata)
if meta is not None:
meta = json.loads(meta)
doc = Document(page_content=result.get(self.field_text), metadata=meta)
documents.append(doc)
ordered_result_embeddings.append(result.get(self.field_vector))
# Get the new order of results.
new_ordering = maximal_marginal_relevance(
np.array(embedding), ordered_result_embeddings, k=k, lambda_mult=lambda_mult
)
# Reorder the values and return.
ret = []
for x in new_ordering:
# Function can return -1 index
if x == -1:
break
else:
ret.append(documents[x])
return ret
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~vectorstores~test_weaviate.py | """Test Weaviate functionality."""
import logging
import os
import uuid
from typing import Generator, Union
import pytest
from libs.core.langchain_core.documents import Document
from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.vectorstores.weaviate import Weaviate
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
logging.basicConfig(level=logging.DEBUG)
"""
cd tests/integration_tests/vectorstores/docker-compose
docker compose -f weaviate.yml up
"""
class TestWeaviate:
@classmethod
def setup_class(cls) -> None:
if not os.getenv("OPENAI_API_KEY"):
raise ValueError("OPENAI_API_KEY environment variable is not set")
@pytest.fixture(scope="class", autouse=True)
def weaviate_url(self) -> Union[str, Generator[str, None, None]]:
"""Return the weaviate url."""
from weaviate import Client
url = "http://localhost:8080"
yield url
# Clear the test index
client = Client(url)
client.schema.delete_all()
@pytest.mark.vcr(ignore_localhost=True)
def test_similarity_search_without_metadata(
self, weaviate_url: str, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end construction and search without metadata."""
texts = ["foo", "bar", "baz"]
docsearch = Weaviate.from_texts(
texts,
embedding_openai,
weaviate_url=weaviate_url,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
@pytest.mark.vcr(ignore_localhost=True)
def test_similarity_search_with_metadata(
self, weaviate_url: str, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end construction and search with metadata."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = Weaviate.from_texts(
texts, embedding_openai, metadatas=metadatas, weaviate_url=weaviate_url
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"page": 0})]
@pytest.mark.vcr(ignore_localhost=True)
def test_similarity_search_with_metadata_and_filter(
self, weaviate_url: str, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end construction and search with metadata."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = Weaviate.from_texts(
texts, embedding_openai, metadatas=metadatas, weaviate_url=weaviate_url
)
output = docsearch.similarity_search(
"foo",
k=2,
where_filter={"path": ["page"], "operator": "Equal", "valueNumber": 0},
)
assert output == [Document(page_content="foo", metadata={"page": 0})]
@pytest.mark.vcr(ignore_localhost=True)
def test_similarity_search_with_metadata_and_additional(
self, weaviate_url: str, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end construction and search with metadata and additional."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = Weaviate.from_texts(
texts, embedding_openai, metadatas=metadatas, weaviate_url=weaviate_url
)
output = docsearch.similarity_search(
"foo",
k=1,
additional=["certainty"],
)
assert output == [
Document(
page_content="foo",
metadata={"page": 0, "_additional": {"certainty": 1}},
)
]
@pytest.mark.vcr(ignore_localhost=True)
def test_similarity_search_with_uuids(
self, weaviate_url: str, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end construction and search with uuids."""
texts = ["foo", "bar", "baz"]
# Weaviate replaces the object if the UUID already exists
uuids = [uuid.uuid5(uuid.NAMESPACE_DNS, "same-name") for text in texts]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = Weaviate.from_texts(
texts,
embedding_openai,
metadatas=metadatas,
weaviate_url=weaviate_url,
uuids=uuids,
)
output = docsearch.similarity_search("foo", k=2)
assert len(output) == 1
@pytest.mark.vcr(ignore_localhost=True)
def test_max_marginal_relevance_search(
self, weaviate_url: str, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end construction and MRR search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = Weaviate.from_texts(
texts, embedding_openai, metadatas=metadatas, weaviate_url=weaviate_url
)
# if lambda=1 the algorithm should be equivalent to standard ranking
standard_ranking = docsearch.similarity_search("foo", k=2)
output = docsearch.max_marginal_relevance_search(
"foo", k=2, fetch_k=3, lambda_mult=1.0
)
assert output == standard_ranking
# if lambda=0 the algorithm should favour maximal diversity
output = docsearch.max_marginal_relevance_search(
"foo", k=2, fetch_k=3, lambda_mult=0.0
)
assert output == [
Document(page_content="foo", metadata={"page": 0}),
Document(page_content="bar", metadata={"page": 1}),
]
@pytest.mark.vcr(ignore_localhost=True)
def test_max_marginal_relevance_search_by_vector(
self, weaviate_url: str, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end construction and MRR search by vector."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = Weaviate.from_texts(
texts, embedding_openai, metadatas=metadatas, weaviate_url=weaviate_url
)
foo_embedding = embedding_openai.embed_query("foo")
# if lambda=1 the algorithm should be equivalent to standard ranking
standard_ranking = docsearch.similarity_search("foo", k=2)
output = docsearch.max_marginal_relevance_search_by_vector(
foo_embedding, k=2, fetch_k=3, lambda_mult=1.0
)
assert output == standard_ranking
# if lambda=0 the algorithm should favour maximal diversity
output = docsearch.max_marginal_relevance_search_by_vector(
foo_embedding, k=2, fetch_k=3, lambda_mult=0.0
)
assert output == [
Document(page_content="foo", metadata={"page": 0}),
Document(page_content="bar", metadata={"page": 1}),
]
@pytest.mark.vcr(ignore_localhost=True)
def test_max_marginal_relevance_search_with_filter(
self, weaviate_url: str, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end construction and MRR search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = Weaviate.from_texts(
texts, embedding_openai, metadatas=metadatas, weaviate_url=weaviate_url
)
where_filter = {"path": ["page"], "operator": "Equal", "valueNumber": 0}
# if lambda=1 the algorithm should be equivalent to standard ranking
standard_ranking = docsearch.similarity_search(
"foo", k=2, where_filter=where_filter
)
output = docsearch.max_marginal_relevance_search(
"foo", k=2, fetch_k=3, lambda_mult=1.0, where_filter=where_filter
)
assert output == standard_ranking
# if lambda=0 the algorithm should favour maximal diversity
output = docsearch.max_marginal_relevance_search(
"foo", k=2, fetch_k=3, lambda_mult=0.0, where_filter=where_filter
)
assert output == [
Document(page_content="foo", metadata={"page": 0}),
]
def test_add_texts_with_given_embedding(self, weaviate_url: str) -> None:
texts = ["foo", "bar", "baz"]
embedding = FakeEmbeddings()
docsearch = Weaviate.from_texts(
texts, embedding=embedding, weaviate_url=weaviate_url
)
docsearch.add_texts(["foo"])
output = docsearch.similarity_search_by_vector(
embedding.embed_query("foo"), k=2
)
assert output == [
Document(page_content="foo"),
Document(page_content="foo"),
]
def test_add_texts_with_given_uuids(self, weaviate_url: str) -> None:
texts = ["foo", "bar", "baz"]
embedding = FakeEmbeddings()
uuids = [uuid.uuid5(uuid.NAMESPACE_DNS, text) for text in texts]
docsearch = Weaviate.from_texts(
texts,
embedding=embedding,
weaviate_url=weaviate_url,
uuids=uuids,
)
# Weaviate replaces the object if the UUID already exists
docsearch.add_texts(["foo"], uuids=[uuids[0]])
output = docsearch.similarity_search_by_vector(
embedding.embed_query("foo"), k=2
)
assert output[0] == Document(page_content="foo")
assert output[1] != Document(page_content="foo")
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~utilities~serpapi.py | """Chain that calls SerpAPI.
Heavily borrowed from https://github.com/ofirpress/self-ask
"""
import os
import sys
from typing import Any, Dict, Optional, Tuple
import aiohttp
from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator
from libs.core.langchain_core.utils import get_from_dict_or_env
class HiddenPrints:
"""Context manager to hide prints."""
def __enter__(self) -> None:
"""Open file to pipe stdout to."""
self._original_stdout = sys.stdout
sys.stdout = open(os.devnull, "w")
def __exit__(self, *_: Any) -> None:
"""Close file that stdout was piped to."""
sys.stdout.close()
sys.stdout = self._original_stdout
class SerpAPIWrapper(BaseModel):
"""Wrapper around SerpAPI.
To use, you should have the ``google-search-results`` python package installed,
and the environment variable ``SERPAPI_API_KEY`` set with your API key, or pass
`serpapi_api_key` as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain_community.utilities import SerpAPIWrapper
serpapi = SerpAPIWrapper()
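            # a query sketch; returns the parsed answer as a string
            result = serpapi.run("What is the capital of France?")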
"""
search_engine: Any #: :meta private:
params: dict = Field(
default={
"engine": "google",
"google_domain": "google.com",
"gl": "us",
"hl": "en",
}
)
serpapi_api_key: Optional[str] = None
aiosession: Optional[aiohttp.ClientSession] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
serpapi_api_key = get_from_dict_or_env(
values, "serpapi_api_key", "SERPAPI_API_KEY"
)
values["serpapi_api_key"] = serpapi_api_key
try:
from serpapi import GoogleSearch
values["search_engine"] = GoogleSearch
except ImportError:
raise ValueError(
"Could not import serpapi python package. "
"Please install it with `pip install google-search-results`."
)
return values
async def arun(self, query: str, **kwargs: Any) -> str:
"""Run query through SerpAPI and parse result async."""
return self._process_response(await self.aresults(query))
def run(self, query: str, **kwargs: Any) -> str:
"""Run query through SerpAPI and parse result."""
return self._process_response(self.results(query))
def results(self, query: str) -> dict:
"""Run query through SerpAPI and return the raw result."""
params = self.get_params(query)
with HiddenPrints():
search = self.search_engine(params)
res = search.get_dict()
return res
async def aresults(self, query: str) -> dict:
"""Use aiohttp to run query through SerpAPI and return the results async."""
def construct_url_and_params() -> Tuple[str, Dict[str, str]]:
params = self.get_params(query)
params["source"] = "python"
if self.serpapi_api_key:
params["serp_api_key"] = self.serpapi_api_key
params["output"] = "json"
url = "https://serpapi.com/search"
return url, params
url, params = construct_url_and_params()
if not self.aiosession:
async with aiohttp.ClientSession() as session:
async with session.get(url, params=params) as response:
res = await response.json()
else:
async with self.aiosession.get(url, params=params) as response:
res = await response.json()
return res
def get_params(self, query: str) -> Dict[str, str]:
"""Get parameters for SerpAPI."""
_params = {
"api_key": self.serpapi_api_key,
"q": query,
}
params = {**self.params, **_params}
return params
@staticmethod
def _process_response(res: dict) -> str:
"""Process response from SerpAPI."""
if "error" in res.keys():
raise ValueError(f"Got error from SerpAPI: {res['error']}")
if "answer_box_list" in res.keys():
res["answer_box"] = res["answer_box_list"]
if "answer_box" in res.keys():
answer_box = res["answer_box"]
if isinstance(answer_box, list):
answer_box = answer_box[0]
if "result" in answer_box.keys():
return answer_box["result"]
elif "answer" in answer_box.keys():
return answer_box["answer"]
elif "snippet" in answer_box.keys():
return answer_box["snippet"]
elif "snippet_highlighted_words" in answer_box.keys():
return answer_box["snippet_highlighted_words"]
else:
answer = {}
for key, value in answer_box.items():
if not isinstance(value, (list, dict)) and not (
isinstance(value, str) and value.startswith("http")
):
answer[key] = value
return str(answer)
elif "events_results" in res.keys():
return res["events_results"][:10]
elif "sports_results" in res.keys():
return res["sports_results"]
elif "top_stories" in res.keys():
return res["top_stories"]
elif "news_results" in res.keys():
return res["news_results"]
elif "jobs_results" in res.keys() and "jobs" in res["jobs_results"].keys():
return res["jobs_results"]["jobs"]
elif (
"shopping_results" in res.keys()
and "title" in res["shopping_results"][0].keys()
):
return res["shopping_results"][:3]
elif "questions_and_answers" in res.keys():
return res["questions_and_answers"]
elif (
"popular_destinations" in res.keys()
and "destinations" in res["popular_destinations"].keys()
):
return res["popular_destinations"]["destinations"]
elif "top_sights" in res.keys() and "sights" in res["top_sights"].keys():
return res["top_sights"]["sights"]
elif (
"images_results" in res.keys()
and "thumbnail" in res["images_results"][0].keys()
):
return str([item["thumbnail"] for item in res["images_results"][:10]])
snippets = []
if "knowledge_graph" in res.keys():
knowledge_graph = res["knowledge_graph"]
title = knowledge_graph["title"] if "title" in knowledge_graph else ""
if "description" in knowledge_graph.keys():
snippets.append(knowledge_graph["description"])
for key, value in knowledge_graph.items():
if (
isinstance(key, str)
and isinstance(value, str)
and key not in ["title", "description"]
and not key.endswith("_stick")
and not key.endswith("_link")
and not value.startswith("http")
):
snippets.append(f"{title} {key}: {value}.")
for organic_result in res.get("organic_results", []):
if "snippet" in organic_result.keys():
snippets.append(organic_result["snippet"])
elif "snippet_highlighted_words" in organic_result.keys():
snippets.append(organic_result["snippet_highlighted_words"])
elif "rich_snippet" in organic_result.keys():
snippets.append(organic_result["rich_snippet"])
elif "rich_snippet_table" in organic_result.keys():
snippets.append(organic_result["rich_snippet_table"])
elif "link" in organic_result.keys():
snippets.append(organic_result["link"])
if "buying_guide" in res.keys():
snippets.append(res["buying_guide"])
if "local_results" in res.keys() and "places" in res["local_results"].keys():
snippets.append(res["local_results"]["places"])
if len(snippets) > 0:
return str(snippets)
else:
return "No good search result found"
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~chat_models~cohere.py | from typing import Any, AsyncIterator, Dict, Iterator, List, Optional
from libs.core.langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from libs.core.langchain_core.language_models.chat_models import (
BaseChatModel,
agenerate_from_stream,
generate_from_stream,
)
from libs.core.langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
ChatMessage,
HumanMessage,
SystemMessage,
)
from libs.core.langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_community.llms.cohere import BaseCohere
def get_role(message: BaseMessage) -> str:
"""Get the role of the message.
Args:
message: The message.
Returns:
The role of the message.
Raises:
ValueError: If the message is of an unknown type.
"""
if isinstance(message, ChatMessage) or isinstance(message, HumanMessage):
return "User"
elif isinstance(message, AIMessage):
return "Chatbot"
elif isinstance(message, SystemMessage):
return "System"
else:
raise ValueError(f"Got unknown type {message}")
def get_cohere_chat_request(
messages: List[BaseMessage],
*,
connectors: Optional[List[Dict[str, str]]] = None,
**kwargs: Any,
) -> Dict[str, Any]:
"""Get the request for the Cohere chat API.
Args:
messages: The messages.
connectors: The connectors.
**kwargs: The keyword arguments.
Returns:
The request for the Cohere chat API.
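    Example:
        A sketch of building a request payload from a single human message.
        .. code-block:: python
            from libs.core.langchain_core.messages import HumanMessage
            request = get_cohere_chat_request([HumanMessage(content="Hello")])
            # request["message"] == "Hello" and request["chat_history"] == []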
"""
documents = (
None
if "source_documents" not in kwargs
else [
{
"snippet": doc.page_content,
"id": doc.metadata.get("id") or f"doc-{str(i)}",
}
for i, doc in enumerate(kwargs["source_documents"])
]
)
kwargs.pop("source_documents", None)
maybe_connectors = connectors if documents is None else None
# by enabling automatic prompt truncation, the probability of request failure is
# reduced with minimal impact on response quality
prompt_truncation = (
"AUTO" if documents is not None or connectors is not None else None
)
return {
"message": messages[-1].content,
"chat_history": [
{"role": get_role(x), "message": x.content} for x in messages[:-1]
],
"documents": documents,
"connectors": maybe_connectors,
"prompt_truncation": prompt_truncation,
**kwargs,
}
class ChatCohere(BaseChatModel, BaseCohere):
"""`Cohere` chat large language models.
To use, you should have the ``cohere`` python package installed, and the
environment variable ``COHERE_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain_community.chat_models import ChatCohere
from libs.core.langchain_core.messages import HumanMessage
chat = ChatCohere(model="foo")
result = chat([HumanMessage(content="Hello")])
print(result.content)
"""
class Config:
"""Configuration for this pydantic object."""
allow_population_by_field_name = True
arbitrary_types_allowed = True
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "cohere-chat"
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Cohere API."""
return {
"temperature": self.temperature,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
request = get_cohere_chat_request(messages, **self._default_params, **kwargs)
stream = self.client.chat(**request, stream=True)
for data in stream:
if data.event_type == "text-generation":
delta = data.text
yield ChatGenerationChunk(message=AIMessageChunk(content=delta))
if run_manager:
run_manager.on_llm_new_token(delta)
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
request = get_cohere_chat_request(messages, **self._default_params, **kwargs)
stream = await self.async_client.chat(**request, stream=True)
async for data in stream:
if data.event_type == "text-generation":
delta = data.text
yield ChatGenerationChunk(message=AIMessageChunk(content=delta))
if run_manager:
await run_manager.on_llm_new_token(delta)
def _get_generation_info(self, response: Any) -> Dict[str, Any]:
"""Get the generation info from cohere API response."""
return {
"documents": response.documents,
"citations": response.citations,
"search_results": response.search_results,
"search_queries": response.search_queries,
"token_count": response.token_count,
}
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
request = get_cohere_chat_request(messages, **self._default_params, **kwargs)
response = self.client.chat(**request)
message = AIMessage(content=response.text)
generation_info = None
if hasattr(response, "documents"):
generation_info = self._get_generation_info(response)
return ChatResult(
generations=[
ChatGeneration(message=message, generation_info=generation_info)
]
)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._astream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return await agenerate_from_stream(stream_iter)
request = get_cohere_chat_request(messages, **self._default_params, **kwargs)
response = self.client.chat(**request, stream=False)
message = AIMessage(content=response.text)
generation_info = None
if hasattr(response, "documents"):
generation_info = self._get_generation_info(response)
return ChatResult(
generations=[
ChatGeneration(message=message, generation_info=generation_info)
]
)
def get_num_tokens(self, text: str) -> int:
"""Calculate number of tokens."""
return len(self.client.tokenize(text).tokens)
| [
"AUTO"
] |
2024-01-10 | mth93/langchain | libs~langchain~tests~unit_tests~chains~test_natbot.py | """Test functionality related to natbot."""
from typing import Any, Dict, List, Optional
from libs.core.langchain_core.language_models.llms import LLM
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.chains.natbot.base import NatBotChain
class FakeLLM(LLM):
"""Fake LLM wrapper for testing purposes."""
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Return `foo` if longer than 10000 words, else `bar`."""
if len(prompt) > 10000:
return "foo"
else:
return "bar"
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "fake"
def get_num_tokens(self, text: str) -> int:
return len(text.split())
@property
def _identifying_params(self) -> Dict[str, Any]:
return {}
def test_proper_inputs() -> None:
"""Test that natbot shortens inputs correctly."""
nat_bot_chain = NatBotChain.from_llm(FakeLLM(), objective="testing")
url = "foo" * 10000
browser_content = "foo" * 10000
output = nat_bot_chain.execute(url, browser_content)
assert output == "bar"
def test_variable_key_naming() -> None:
"""Test that natbot handles variable key naming correctly."""
nat_bot_chain = NatBotChain.from_llm(
FakeLLM(),
objective="testing",
input_url_key="u",
input_browser_content_key="b",
output_key="c",
)
output = nat_bot_chain.execute("foo", "foo")
assert output == "bar"
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~retrievers~milvus.py | """Milvus Retriever"""
import warnings
from typing import Any, Dict, List, Optional
from libs.core.langchain_core.callbacks import CallbackManagerForRetrieverRun
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.pydantic_v1 import root_validator
from libs.core.langchain_core.retrievers import BaseRetriever
from langchain_community.vectorstores.milvus import Milvus
# TODO: Update to MilvusClient + Hybrid Search when available
class MilvusRetriever(BaseRetriever):
"""`Milvus API` retriever."""
embedding_function: Embeddings
collection_name: str = "LangChainCollection"
connection_args: Optional[Dict[str, Any]] = None
consistency_level: str = "Session"
search_params: Optional[dict] = None
store: Milvus
retriever: BaseRetriever
@root_validator(pre=True)
def create_retriever(cls, values: Dict) -> Dict:
"""Create the Milvus store and retriever."""
values["store"] = Milvus(
values["embedding_function"],
values["collection_name"],
values["connection_args"],
values["consistency_level"],
)
values["retriever"] = values["store"].as_retriever(
search_kwargs={"param": values["search_params"]}
)
return values
def add_texts(
self, texts: List[str], metadatas: Optional[List[dict]] = None
) -> None:
"""Add text to the Milvus store
Args:
texts (List[str]): The text
metadatas (List[dict]): Metadata dicts, must line up with existing store
"""
self.store.add_texts(texts, metadatas)
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
**kwargs: Any,
) -> List[Document]:
return self.retriever.get_relevant_documents(
query, run_manager=run_manager.get_child(), **kwargs
)
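# Example usage (a sketch; assumes a running Milvus instance and an embeddings
# object named `embeddings` -- both are placeholders):
#
#     retriever = MilvusRetriever(
#         embedding_function=embeddings,
#         collection_name="LangChainCollection",
#         connection_args={"host": "localhost", "port": "19530"},
#         consistency_level="Session",
#         search_params=None,
#     )
#     docs = retriever.get_relevant_documents("What is Milvus?")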
def MilvusRetreiver(*args: Any, **kwargs: Any) -> MilvusRetriever:
"""Deprecated MilvusRetreiver. Please use MilvusRetriever ('i' before 'e') instead.
Args:
*args:
**kwargs:
Returns:
MilvusRetriever
"""
warnings.warn(
"MilvusRetreiver will be deprecated in the future. "
"Please use MilvusRetriever ('i' before 'e') instead.",
DeprecationWarning,
)
return MilvusRetriever(*args, **kwargs)
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~textgen.py | import json
import logging
from typing import Any, AsyncIterator, Dict, Iterator, List, Optional
import requests
from libs.core.langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.outputs import GenerationChunk
from libs.core.langchain_core.pydantic_v1 import Field
logger = logging.getLogger(__name__)
class TextGen(LLM):
"""Text generation models from WebUI.
To use, you should have the text-generation-webui installed, a model loaded,
and --api added as a command-line option.
Suggested installation, use one-click installer for your OS:
https://github.com/oobabooga/text-generation-webui#one-click-installers
Parameters below taken from text-generation-webui api example:
https://github.com/oobabooga/text-generation-webui/blob/main/api-examples/api-example.py
Example:
.. code-block:: python
from langchain_community.llms import TextGen
llm = TextGen(model_url="http://localhost:8500")
"""
model_url: str
"""The full URL to the textgen webui including http[s]://host:port """
preset: Optional[str] = None
"""The preset to use in the textgen webui """
max_new_tokens: Optional[int] = 250
"""The maximum number of tokens to generate."""
do_sample: bool = Field(True, alias="do_sample")
"""Do sample"""
temperature: Optional[float] = 1.3
"""Primary factor to control randomness of outputs. 0 = deterministic
(only the most likely token is used). Higher value = more randomness."""
top_p: Optional[float] = 0.1
"""If not set to 1, select tokens with probabilities adding up to less than this
number. Higher value = higher range of possible random results."""
typical_p: Optional[float] = 1
"""If not set to 1, select only tokens that are at least this much more likely to
appear than random tokens, given the prior text."""
epsilon_cutoff: Optional[float] = 0 # In units of 1e-4
"""Epsilon cutoff"""
eta_cutoff: Optional[float] = 0 # In units of 1e-4
"""ETA cutoff"""
repetition_penalty: Optional[float] = 1.18
"""Exponential penalty factor for repeating prior tokens. 1 means no penalty,
higher value = less repetition, lower value = more repetition."""
top_k: Optional[float] = 40
"""Similar to top_p, but select instead only the top_k most likely tokens.
Higher value = higher range of possible random results."""
min_length: Optional[int] = 0
"""Minimum generation length in tokens."""
no_repeat_ngram_size: Optional[int] = 0
"""If not set to 0, specifies the length of token sets that are completely blocked
from repeating at all. Higher values = blocks larger phrases,
lower values = blocks words or letters from repeating.
Only 0 or high values are a good idea in most cases."""
num_beams: Optional[int] = 1
"""Number of beams"""
penalty_alpha: Optional[float] = 0
"""Penalty Alpha"""
length_penalty: Optional[float] = 1
"""Length Penalty"""
early_stopping: bool = Field(False, alias="early_stopping")
"""Early stopping"""
seed: int = Field(-1, alias="seed")
"""Seed (-1 for random)"""
add_bos_token: bool = Field(True, alias="add_bos_token")
"""Add the bos_token to the beginning of prompts.
Disabling this can make the replies more creative."""
truncation_length: Optional[int] = 2048
"""Truncate the prompt up to this length. The leftmost tokens are removed if
the prompt exceeds this length. Most models require this to be at most 2048."""
ban_eos_token: bool = Field(False, alias="ban_eos_token")
"""Ban the eos_token. Forces the model to never end the generation prematurely."""
skip_special_tokens: bool = Field(True, alias="skip_special_tokens")
"""Skip special tokens. Some specific models need this unset."""
stopping_strings: Optional[List[str]] = []
"""A list of strings to stop generation when encountered."""
streaming: bool = False
"""Whether to stream the results, token by token."""
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling textgen."""
return {
"max_new_tokens": self.max_new_tokens,
"do_sample": self.do_sample,
"temperature": self.temperature,
"top_p": self.top_p,
"typical_p": self.typical_p,
"epsilon_cutoff": self.epsilon_cutoff,
"eta_cutoff": self.eta_cutoff,
"repetition_penalty": self.repetition_penalty,
"top_k": self.top_k,
"min_length": self.min_length,
"no_repeat_ngram_size": self.no_repeat_ngram_size,
"num_beams": self.num_beams,
"penalty_alpha": self.penalty_alpha,
"length_penalty": self.length_penalty,
"early_stopping": self.early_stopping,
"seed": self.seed,
"add_bos_token": self.add_bos_token,
"truncation_length": self.truncation_length,
"ban_eos_token": self.ban_eos_token,
"skip_special_tokens": self.skip_special_tokens,
"stopping_strings": self.stopping_strings,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model_url": self.model_url}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "textgen"
def _get_parameters(self, stop: Optional[List[str]] = None) -> Dict[str, Any]:
"""
        Performs a sanity check and prepares parameters in the format needed by textgen.
Args:
stop (Optional[List[str]]): List of stop sequences for textgen.
Returns:
Dictionary containing the combined parameters.
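        Example:
            A behaviour sketch assuming default construction (no preset and no
            class-level stopping strings).
            .. code-block:: python
                llm = TextGen(model_url="http://localhost:5000")
                params = llm._get_parameters(stop=["\n"])
                # params now holds the sampling defaults plus
                # params["stopping_strings"] == ["\n"]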
"""
# Raise error if stop sequences are in both input and default params
# if self.stop and stop is not None:
if self.stopping_strings and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
if self.preset is None:
params = self._default_params
else:
params = {"preset": self.preset}
        # Use the configured stopping strings if set, otherwise fall back to `stop` or an empty list:
params["stopping_strings"] = self.stopping_strings or stop or []
return params
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the textgen web API and return the output.
Args:
prompt: The prompt to use for generation.
stop: A list of strings to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
from langchain_community.llms import TextGen
llm = TextGen(model_url="http://localhost:5000")
llm("Write a story about llamas.")
"""
if self.streaming:
combined_text_output = ""
for chunk in self._stream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
combined_text_output += chunk.text
result = combined_text_output
else:
url = f"{self.model_url}/api/v1/generate"
params = self._get_parameters(stop)
request = params.copy()
request["prompt"] = prompt
response = requests.post(url, json=request)
if response.status_code == 200:
result = response.json()["results"][0]["text"]
else:
print(f"ERROR: response: {response}")
result = ""
return result
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the textgen web API and return the output.
Args:
prompt: The prompt to use for generation.
stop: A list of strings to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
from langchain_community.llms import TextGen
llm = TextGen(model_url="http://localhost:5000")
llm("Write a story about llamas.")
"""
if self.streaming:
combined_text_output = ""
async for chunk in self._astream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
combined_text_output += chunk.text
result = combined_text_output
else:
url = f"{self.model_url}/api/v1/generate"
params = self._get_parameters(stop)
request = params.copy()
request["prompt"] = prompt
response = requests.post(url, json=request)
if response.status_code == 200:
result = response.json()["results"][0]["text"]
else:
print(f"ERROR: response: {response}")
result = ""
return result
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
"""Yields results objects as they are generated in real time.
It also calls the callback manager's on_llm_new_token event with
similar parameters to the OpenAI LLM class method of the same name.
Args:
prompt: The prompts to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
A generator representing the stream of tokens being generated.
Yields:
A dictionary like objects containing a string token and metadata.
See text-generation-webui docs and below for more.
Example:
.. code-block:: python
from langchain_community.llms import TextGen
llm = TextGen(
model_url = "ws://localhost:5005"
streaming=True
)
for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'",
stop=["'","\n"]):
print(chunk, end='', flush=True)
"""
try:
import websocket
except ImportError:
raise ImportError(
"The `websocket-client` package is required for streaming."
)
params = {**self._get_parameters(stop), **kwargs}
url = f"{self.model_url}/api/v1/stream"
request = params.copy()
request["prompt"] = prompt
websocket_client = websocket.WebSocket()
websocket_client.connect(url)
websocket_client.send(json.dumps(request))
while True:
result = websocket_client.recv()
result = json.loads(result)
if result["event"] == "text_stream":
chunk = GenerationChunk(
text=result["text"],
generation_info=None,
)
yield chunk
elif result["event"] == "stream_end":
websocket_client.close()
return
if run_manager:
run_manager.on_llm_new_token(token=chunk.text)
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
"""Yields results objects as they are generated in real time.
It also calls the callback manager's on_llm_new_token event with
similar parameters to the OpenAI LLM class method of the same name.
Args:
prompt: The prompts to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
A generator representing the stream of tokens being generated.
Yields:
A dictionary like objects containing a string token and metadata.
See text-generation-webui docs and below for more.
Example:
.. code-block:: python
from langchain_community.llms import TextGen
llm = TextGen(
model_url = "ws://localhost:5005"
streaming=True
)
for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'",
stop=["'","\n"]):
print(chunk, end='', flush=True)
"""
try:
import websocket
except ImportError:
raise ImportError(
"The `websocket-client` package is required for streaming."
)
params = {**self._get_parameters(stop), **kwargs}
url = f"{self.model_url}/api/v1/stream"
request = params.copy()
request["prompt"] = prompt
websocket_client = websocket.WebSocket()
websocket_client.connect(url)
websocket_client.send(json.dumps(request))
while True:
result = websocket_client.recv()
result = json.loads(result)
if result["event"] == "text_stream":
chunk = GenerationChunk(
text=result["text"],
generation_info=None,
)
yield chunk
elif result["event"] == "stream_end":
websocket_client.close()
return
if run_manager:
await run_manager.on_llm_new_token(token=chunk.text)
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~tools~file_management~list_dir.py | import os
from typing import Optional, Type
from libs.core.langchain_core.callbacks import CallbackManagerForToolRun
from libs.core.langchain_core.pydantic_v1 import BaseModel, Field
from libs.core.langchain_core.tools import BaseTool
from langchain_community.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileToolMixin,
FileValidationError,
)
class DirectoryListingInput(BaseModel):
"""Input for ListDirectoryTool."""
dir_path: str = Field(default=".", description="Subdirectory to list.")
class ListDirectoryTool(BaseFileToolMixin, BaseTool):
"""Tool that lists files and directories in a specified folder."""
name: str = "list_directory"
args_schema: Type[BaseModel] = DirectoryListingInput
description: str = "List files and directories in a specified folder"
def _run(
self,
dir_path: str = ".",
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
try:
dir_path_ = self.get_relative_path(dir_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(arg_name="dir_path", value=dir_path)
try:
entries = os.listdir(dir_path_)
if entries:
return "\n".join(entries)
else:
return f"No files found in directory {dir_path}"
except Exception as e:
return "Error: " + str(e)
# TODO: Add aiofiles method
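# Example usage (a sketch; the root directory is a placeholder):
#
#     tool = ListDirectoryTool(root_dir="/tmp/scratch")
#     print(tool.run({"dir_path": "."}))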
| [
"List files and directories in a specified folder"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~cerebriumai.py | import logging
from typing import Any, Dict, List, Mapping, Optional, cast
import requests
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.pydantic_v1 import Extra, Field, SecretStr, root_validator
from libs.core.langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
class CerebriumAI(LLM):
"""CerebriumAI large language models.
To use, you should have the ``cerebrium`` python package installed.
You should also have the environment variable ``CEREBRIUMAI_API_KEY``
set with your API key or pass it as a named argument in the constructor.
Any parameters that are valid to be passed to the call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain_community.llms import CerebriumAI
cerebrium = CerebriumAI(endpoint_url="", cerebriumai_api_key="my-api-key")
"""
endpoint_url: str = ""
"""model endpoint to use"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not
explicitly specified."""
cerebriumai_api_key: Optional[SecretStr] = None
class Config:
"""Configuration for this pydantic config."""
extra = Extra.forbid
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
cerebriumai_api_key = convert_to_secret_str(
get_from_dict_or_env(values, "cerebriumai_api_key", "CEREBRIUMAI_API_KEY")
)
values["cerebriumai_api_key"] = cerebriumai_api_key
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"endpoint_url": self.endpoint_url},
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "cerebriumai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
headers: Dict = {
"Authorization": cast(
SecretStr, self.cerebriumai_api_key
).get_secret_value(),
"Content-Type": "application/json",
}
params = self.model_kwargs or {}
payload = {"prompt": prompt, **params, **kwargs}
response = requests.post(self.endpoint_url, json=payload, headers=headers)
if response.status_code == 200:
data = response.json()
text = data["result"]
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
else:
response.raise_for_status()
return ""
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~web_base.py | """Web base loader class."""
import asyncio
import logging
import warnings
from typing import Any, Dict, Iterator, List, Optional, Sequence, Union
import aiohttp
import requests
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
default_header_template = {
"User-Agent": "",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*"
";q=0.8",
"Accept-Language": "en-US,en;q=0.5",
"Referer": "https://www.google.com/",
"DNT": "1",
"Connection": "keep-alive",
"Upgrade-Insecure-Requests": "1",
}
def _build_metadata(soup: Any, url: str) -> dict:
"""Build metadata from BeautifulSoup output."""
metadata = {"source": url}
if title := soup.find("title"):
metadata["title"] = title.get_text()
if description := soup.find("meta", attrs={"name": "description"}):
metadata["description"] = description.get("content", "No description found.")
if html := soup.find("html"):
metadata["language"] = html.get("lang", "No language found.")
return metadata
class WebBaseLoader(BaseLoader):
"""Load HTML pages using `urllib` and parse them with `BeautifulSoup'."""
def __init__(
self,
web_path: Union[str, Sequence[str]] = "",
header_template: Optional[dict] = None,
verify_ssl: bool = True,
proxies: Optional[dict] = None,
continue_on_failure: bool = False,
autoset_encoding: bool = True,
encoding: Optional[str] = None,
web_paths: Sequence[str] = (),
requests_per_second: int = 2,
default_parser: str = "html.parser",
requests_kwargs: Optional[Dict[str, Any]] = None,
raise_for_status: bool = False,
bs_get_text_kwargs: Optional[Dict[str, Any]] = None,
bs_kwargs: Optional[Dict[str, Any]] = None,
session: Any = None,
) -> None:
"""Initialize loader.
Args:
web_paths: Web paths to load from.
requests_per_second: Max number of concurrent requests to make.
default_parser: Default parser to use for BeautifulSoup.
requests_kwargs: kwargs for requests
raise_for_status: Raise an exception if http status code denotes an error.
            bs_get_text_kwargs: kwargs for beautifulsoup4 get_text
            bs_kwargs: kwargs for beautifulsoup4 web page parsing
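        Example:
            A fetch-and-parse sketch; the URL is illustrative.
            .. code-block:: python
                loader = WebBaseLoader(web_paths=["https://example.com"])
                docs = loader.load()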
"""
# web_path kept for backwards-compatibility.
if web_path and web_paths:
raise ValueError(
"Received web_path and web_paths. Only one can be specified. "
"web_path is deprecated, web_paths should be used."
)
if web_paths:
self.web_paths = list(web_paths)
elif isinstance(web_path, str):
self.web_paths = [web_path]
elif isinstance(web_path, Sequence):
self.web_paths = list(web_path)
else:
raise TypeError(
f"web_path must be str or Sequence[str] got ({type(web_path)}) or"
f" web_paths must be Sequence[str] got ({type(web_paths)})"
)
self.requests_per_second = requests_per_second
self.default_parser = default_parser
self.requests_kwargs = requests_kwargs or {}
self.raise_for_status = raise_for_status
self.bs_get_text_kwargs = bs_get_text_kwargs or {}
self.bs_kwargs = bs_kwargs or {}
if session:
self.session = session
else:
session = requests.Session()
header_template = header_template or default_header_template.copy()
if not header_template.get("User-Agent"):
try:
from fake_useragent import UserAgent
header_template["User-Agent"] = UserAgent().random
except ImportError:
logger.info(
"fake_useragent not found, using default user agent."
"To get a realistic header for requests, "
"`pip install fake_useragent`."
)
session.headers = dict(header_template)
session.verify = verify_ssl
if proxies:
session.proxies.update(proxies)
self.session = session
self.continue_on_failure = continue_on_failure
self.autoset_encoding = autoset_encoding
self.encoding = encoding
@property
def web_path(self) -> str:
if len(self.web_paths) > 1:
raise ValueError("Multiple webpaths found.")
return self.web_paths[0]
async def _fetch(
self, url: str, retries: int = 3, cooldown: int = 2, backoff: float = 1.5
) -> str:
async with aiohttp.ClientSession() as session:
for i in range(retries):
try:
async with session.get(
url,
headers=self.session.headers,
ssl=None if self.session.verify else False,
) as response:
return await response.text()
except aiohttp.ClientConnectionError as e:
if i == retries - 1:
raise
else:
logger.warning(
f"Error fetching {url} with attempt "
f"{i + 1}/{retries}: {e}. Retrying..."
)
await asyncio.sleep(cooldown * backoff**i)
raise ValueError("retry count exceeded")
async def _fetch_with_rate_limit(
self, url: str, semaphore: asyncio.Semaphore
) -> str:
async with semaphore:
try:
return await self._fetch(url)
except Exception as e:
if self.continue_on_failure:
logger.warning(
f"Error fetching {url}, skipping due to"
f" continue_on_failure=True"
)
return ""
logger.exception(
f"Error fetching {url} and aborting, use continue_on_failure=True "
"to continue loading urls after encountering an error."
)
raise e
async def fetch_all(self, urls: List[str]) -> Any:
"""Fetch all urls concurrently with rate limiting."""
semaphore = asyncio.Semaphore(self.requests_per_second)
tasks = []
for url in urls:
task = asyncio.ensure_future(self._fetch_with_rate_limit(url, semaphore))
tasks.append(task)
try:
from tqdm.asyncio import tqdm_asyncio
return await tqdm_asyncio.gather(
*tasks, desc="Fetching pages", ascii=True, mininterval=1
)
except ImportError:
warnings.warn("For better logging of progress, `pip install tqdm`")
return await asyncio.gather(*tasks)
@staticmethod
def _check_parser(parser: str) -> None:
"""Check that parser is valid for bs4."""
valid_parsers = ["html.parser", "lxml", "xml", "lxml-xml", "html5lib"]
if parser not in valid_parsers:
raise ValueError(
"`parser` must be one of " + ", ".join(valid_parsers) + "."
)
def scrape_all(self, urls: List[str], parser: Union[str, None] = None) -> List[Any]:
"""Fetch all urls, then return soups for all results."""
from bs4 import BeautifulSoup
results = asyncio.run(self.fetch_all(urls))
final_results = []
        for i, result in enumerate(results):
            url = urls[i]
            # Pick the parser per URL so that an ".xml" URL does not force the
            # XML parser onto the remaining HTML pages in the same batch.
            if parser is None:
                url_parser = "xml" if url.endswith(".xml") else self.default_parser
            else:
                url_parser = parser
            self._check_parser(url_parser)
            final_results.append(BeautifulSoup(result, url_parser, **self.bs_kwargs))
return final_results
def _scrape(
self,
url: str,
parser: Union[str, None] = None,
bs_kwargs: Optional[dict] = None,
) -> Any:
from bs4 import BeautifulSoup
if parser is None:
if url.endswith(".xml"):
parser = "xml"
else:
parser = self.default_parser
self._check_parser(parser)
html_doc = self.session.get(url, **self.requests_kwargs)
if self.raise_for_status:
html_doc.raise_for_status()
if self.encoding is not None:
html_doc.encoding = self.encoding
elif self.autoset_encoding:
html_doc.encoding = html_doc.apparent_encoding
return BeautifulSoup(html_doc.text, parser, **(bs_kwargs or {}))
def scrape(self, parser: Union[str, None] = None) -> Any:
"""Scrape data from webpage and return it in BeautifulSoup format."""
if parser is None:
parser = self.default_parser
return self._scrape(self.web_path, parser=parser, bs_kwargs=self.bs_kwargs)
def lazy_load(self) -> Iterator[Document]:
"""Lazy load text from the url(s) in web_path."""
for path in self.web_paths:
soup = self._scrape(path, bs_kwargs=self.bs_kwargs)
text = soup.get_text(**self.bs_get_text_kwargs)
metadata = _build_metadata(soup, path)
yield Document(page_content=text, metadata=metadata)
def load(self) -> List[Document]:
"""Load text from the url(s) in web_path."""
return list(self.lazy_load())
def aload(self) -> List[Document]:
"""Load text from the urls in web_path async into Documents."""
results = self.scrape_all(self.web_paths)
docs = []
for path, soup in zip(self.web_paths, results):
text = soup.get_text(**self.bs_get_text_kwargs)
metadata = _build_metadata(soup, path)
docs.append(Document(page_content=text, metadata=metadata))
return docs
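# --- Illustrative usage sketch (not part of the loader) ----------------------
# Assumes the enclosing class is exported as ``WebBaseLoader`` (the class name
# is not visible in this excerpt) and uses placeholder URLs.
#
# loader = WebBaseLoader(
#     web_paths=["https://example.com", "https://example.com/feed.xml"],
#     requests_per_second=2,
# )
# docs = loader.load()    # sequential fetching through requests.Session
# docs = loader.aload()   # concurrent fetching with a semaphore-based rate limit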
| [
"{'User-Agent': '', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8', 'Accept-Language': 'en-US,en;q=0.5', 'Referer': 'https://www.google.com/', 'DNT': '1', 'Connection': 'keep-alive', 'Upgrade-Insecure-Requests': '1'}"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~tools~ainetwork~transfer.py | import json
from typing import Optional, Type
from libs.core.langchain_core.callbacks import AsyncCallbackManagerForToolRun
from libs.core.langchain_core.pydantic_v1 import BaseModel, Field
from langchain_community.tools.ainetwork.base import AINBaseTool
class TransferSchema(BaseModel):
"""Schema for transfer operations."""
address: str = Field(..., description="Address to transfer AIN to")
amount: int = Field(..., description="Amount of AIN to transfer")
class AINTransfer(AINBaseTool):
"""Tool for transfer operations."""
name: str = "AINtransfer"
description: str = "Transfers AIN to a specified address"
args_schema: Type[TransferSchema] = TransferSchema
async def _arun(
self,
address: str,
amount: int,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
try:
res = await self.interface.wallet.transfer(address, amount, nonce=-1)
return json.dumps(res, ensure_ascii=False)
except Exception as e:
return f"{type(e).__name__}: {str(e)}"
| [
"Transfers AIN to a specified address"
] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~llms~test_nlpcloud.py | """Test NLPCloud API wrapper."""
from pathlib import Path
from typing import cast
from libs.core.langchain_core.pydantic_v1 import SecretStr
from pytest import CaptureFixture, MonkeyPatch
from langchain_community.llms.loading import load_llm
from langchain_community.llms.nlpcloud import NLPCloud
from tests.integration_tests.llms.utils import assert_llm_equality
def test_nlpcloud_call() -> None:
"""Test valid call to nlpcloud."""
llm = NLPCloud(max_length=10)
output = llm("Say foo:")
assert isinstance(output, str)
def test_saving_loading_llm(tmp_path: Path) -> None:
"""Test saving/loading an NLPCloud LLM."""
llm = NLPCloud(max_length=10)
llm.save(file_path=tmp_path / "nlpcloud.yaml")
loaded_llm = load_llm(tmp_path / "nlpcloud.yaml")
assert_llm_equality(llm, loaded_llm)
def test_nlpcloud_api_key(monkeypatch: MonkeyPatch, capsys: CaptureFixture) -> None:
"""Test that nlpcloud api key is a secret key."""
# test initialization from init
assert isinstance(NLPCloud(nlpcloud_api_key="1").nlpcloud_api_key, SecretStr)
monkeypatch.setenv("NLPCLOUD_API_KEY", "secret-api-key")
llm = NLPCloud()
assert isinstance(llm.nlpcloud_api_key, SecretStr)
assert cast(SecretStr, llm.nlpcloud_api_key).get_secret_value() == "secret-api-key"
print(llm.nlpcloud_api_key, end="")
captured = capsys.readouterr()
assert captured.out == "**********"
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~tools~amadeus~flight_search.py | import logging
from datetime import datetime as dt
from typing import Dict, Optional, Type
from libs.core.langchain_core.callbacks import CallbackManagerForToolRun
from libs.core.langchain_core.pydantic_v1 import BaseModel, Field
from langchain_community.tools.amadeus.base import AmadeusBaseTool
logger = logging.getLogger(__name__)
class FlightSearchSchema(BaseModel):
"""Schema for the AmadeusFlightSearch tool."""
originLocationCode: str = Field(
description=(
" The three letter International Air Transport "
" Association (IATA) Location Identifier for the "
" search's origin airport. "
)
)
destinationLocationCode: str = Field(
description=(
" The three letter International Air Transport "
" Association (IATA) Location Identifier for the "
" search's destination airport. "
)
)
departureDateTimeEarliest: str = Field(
description=(
" The earliest departure datetime from the origin airport "
" for the flight search in the following format: "
' "YYYY-MM-DDTHH:MM", where "T" separates the date and time '
' components. For example: "2023-06-09T10:30:00" represents '
" June 9th, 2023, at 10:30 AM. "
)
)
departureDateTimeLatest: str = Field(
description=(
" The latest departure datetime from the origin airport "
" for the flight search in the following format: "
' "YYYY-MM-DDTHH:MM", where "T" separates the date and time '
' components. For example: "2023-06-09T10:30:00" represents '
" June 9th, 2023, at 10:30 AM. "
)
)
page_number: int = Field(
default=1,
description="The specific page number of flight results to retrieve",
)
class AmadeusFlightSearch(AmadeusBaseTool):
"""Tool for searching for a single flight between two airports."""
name: str = "single_flight_search"
description: str = (
" Use this tool to search for a single flight between the origin and "
" destination airports at a departure between an earliest and "
" latest datetime. "
)
args_schema: Type[FlightSearchSchema] = FlightSearchSchema
def _run(
self,
originLocationCode: str,
destinationLocationCode: str,
departureDateTimeEarliest: str,
departureDateTimeLatest: str,
page_number: int = 1,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> list:
try:
from amadeus import ResponseError
except ImportError as e:
raise ImportError(
"Unable to import amadeus, please install with `pip install amadeus`."
) from e
RESULTS_PER_PAGE = 10
# Authenticate and retrieve a client
client = self.client
# Check that earliest and latest dates are in the same day
earliestDeparture = dt.strptime(departureDateTimeEarliest, "%Y-%m-%dT%H:%M:%S")
latestDeparture = dt.strptime(departureDateTimeLatest, "%Y-%m-%dT%H:%M:%S")
if earliestDeparture.date() != latestDeparture.date():
logger.error(
" Error: Earliest and latest departure dates need to be the "
" same date. If you're trying to search for round-trip "
" flights, call this function for the outbound flight first, "
" and then call again for the return flight. "
)
return [None]
# Collect all results from the Amadeus Flight Offers Search API
        try:
            response = client.shopping.flight_offers_search.get(
                originLocationCode=originLocationCode,
                destinationLocationCode=destinationLocationCode,
                departureDate=latestDeparture.strftime("%Y-%m-%d"),
                adults=1,
            )
        except ResponseError as error:
            # Return early: the code below would otherwise fail on an
            # undefined `response` variable.
            logger.error(error)
            return []
# Generate output dictionary
output = []
for offer in response.data:
itinerary: Dict = {}
itinerary["price"] = {}
itinerary["price"]["total"] = offer["price"]["total"]
currency = offer["price"]["currency"]
currency = response.result["dictionaries"]["currencies"][currency]
itinerary["price"]["currency"] = {}
itinerary["price"]["currency"] = currency
segments = []
for segment in offer["itineraries"][0]["segments"]:
flight = {}
flight["departure"] = segment["departure"]
flight["arrival"] = segment["arrival"]
flight["flightNumber"] = segment["number"]
carrier = segment["carrierCode"]
carrier = response.result["dictionaries"]["carriers"][carrier]
flight["carrier"] = carrier
segments.append(flight)
itinerary["segments"] = []
itinerary["segments"] = segments
output.append(itinerary)
        # Filter out flights departing after the latest departure time. Use a
        # list comprehension rather than popping while iterating, which would
        # skip elements.
        output = [
            offer
            for offer in output
            if dt.strptime(
                offer["segments"][0]["departure"]["at"], "%Y-%m-%dT%H:%M:%S"
            )
            <= latestDeparture
        ]
# Return the paginated results
startIndex = (page_number - 1) * RESULTS_PER_PAGE
endIndex = startIndex + RESULTS_PER_PAGE
return output[startIndex:endIndex]
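# Illustrative usage sketch (not part of the tool). It assumes valid Amadeus
# credentials are available to the toolkit (e.g. via environment variables);
# the airport codes and dates below are placeholders.
#
# tool = AmadeusFlightSearch()
# offers = tool.run(
#     {
#         "originLocationCode": "JFK",
#         "destinationLocationCode": "LHR",
#         "departureDateTimeEarliest": "2024-06-09T08:00:00",
#         "departureDateTimeLatest": "2024-06-09T20:00:00",
#     }
# )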
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~chat_models~test_anthropic.py | """Test Anthropic API wrapper."""
from typing import List
import pytest
from libs.core.langchain_core.callbacks import CallbackManager
from libs.core.langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from libs.core.langchain_core.outputs import ChatGeneration, LLMResult
from langchain_community.chat_models.anthropic import (
ChatAnthropic,
)
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
@pytest.mark.scheduled
def test_anthropic_call() -> None:
"""Test valid call to anthropic."""
chat = ChatAnthropic(model="test")
message = HumanMessage(content="Hello")
response = chat([message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
@pytest.mark.scheduled
def test_anthropic_generate() -> None:
"""Test generate method of anthropic."""
chat = ChatAnthropic(model="test")
chat_messages: List[List[BaseMessage]] = [
[HumanMessage(content="How many toes do dogs have?")]
]
messages_copy = [messages.copy() for messages in chat_messages]
result: LLMResult = chat.generate(chat_messages)
assert isinstance(result, LLMResult)
for response in result.generations[0]:
assert isinstance(response, ChatGeneration)
assert isinstance(response.text, str)
assert response.text == response.message.content
assert chat_messages == messages_copy
@pytest.mark.scheduled
def test_anthropic_streaming() -> None:
"""Test streaming tokens from anthropic."""
chat = ChatAnthropic(model="test", streaming=True)
message = HumanMessage(content="Hello")
response = chat([message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
@pytest.mark.scheduled
def test_anthropic_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
chat = ChatAnthropic(
model="test",
streaming=True,
callback_manager=callback_manager,
verbose=True,
)
message = HumanMessage(content="Write me a sentence with 10 words.")
chat([message])
assert callback_handler.llm_streams > 1
@pytest.mark.scheduled
async def test_anthropic_async_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
chat = ChatAnthropic(
model="test",
streaming=True,
callback_manager=callback_manager,
verbose=True,
)
chat_messages: List[BaseMessage] = [
HumanMessage(content="How many toes do dogs have?")
]
result: LLMResult = await chat.agenerate([chat_messages])
assert callback_handler.llm_streams > 1
assert isinstance(result, LLMResult)
for response in result.generations[0]:
assert isinstance(response, ChatGeneration)
assert isinstance(response.text, str)
assert response.text == response.message.content
| [
"How many toes do dogs have?",
"Write me a sentence with 10 words.",
"Hello"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~toml.py | import json
from pathlib import Path
from typing import Iterator, List, Union
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class TomlLoader(BaseLoader):
"""Load `TOML` files.
It can load a single source file or several files in a single
directory.
"""
def __init__(self, source: Union[str, Path]):
"""Initialize the TomlLoader with a source file or directory."""
self.source = Path(source)
def load(self) -> List[Document]:
"""Load and return all documents."""
return list(self.lazy_load())
def lazy_load(self) -> Iterator[Document]:
"""Lazily load the TOML documents from the source file or directory."""
import tomli
if self.source.is_file() and self.source.suffix == ".toml":
files = [self.source]
elif self.source.is_dir():
files = list(self.source.glob("**/*.toml"))
else:
raise ValueError("Invalid source path or file type")
for file_path in files:
with file_path.open("r", encoding="utf-8") as file:
content = file.read()
try:
data = tomli.loads(content)
doc = Document(
page_content=json.dumps(data),
metadata={"source": str(file_path)},
)
yield doc
except tomli.TOMLDecodeError as e:
print(f"Error parsing TOML file {file_path}: {e}")
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~tools~edenai~audio_speech_to_text.py | from __future__ import annotations
import json
import logging
import time
from typing import List, Optional
import requests
from libs.core.langchain_core.callbacks import CallbackManagerForToolRun
from libs.core.langchain_core.pydantic_v1 import validator
from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool
logger = logging.getLogger(__name__)
class EdenAiSpeechToTextTool(EdenaiTool):
"""Tool that queries the Eden AI Speech To Text API.
for api reference check edenai documentation:
https://app.edenai.run/bricks/speech/asynchronous-speech-to-text.
To use, you should have
the environment variable ``EDENAI_API_KEY`` set with your API token.
You can find your token here: https://app.edenai.run/admin/account/settings
"""
edenai_api_key: Optional[str] = None
name = "edenai_speech_to_text"
description = (
"A wrapper around edenai Services speech to text "
"Useful for when you have to convert audio to text."
"Input should be a url to an audio file."
)
is_async = True
language: Optional[str] = "en"
speakers: Optional[int]
profanity_filter: bool = False
custom_vocabulary: Optional[List[str]]
feature: str = "audio"
subfeature: str = "speech_to_text_async"
base_url = "https://api.edenai.run/v2/audio/speech_to_text_async/"
@validator("providers")
def check_only_one_provider_selected(cls, v: List[str]) -> List[str]:
"""
This tool has no feature to combine providers results.
Therefore we only allow one provider
"""
if len(v) > 1:
raise ValueError(
"Please select only one provider. "
"The feature to combine providers results is not available "
"for this tool."
)
return v
def _wait_processing(self, url: str) -> requests.Response:
for _ in range(10):
time.sleep(1)
audio_analysis_result = self._get_edenai(url)
temp = audio_analysis_result.json()
if temp["status"] == "finished":
if temp["results"][self.providers[0]]["error"] is not None:
raise Exception(
f"""EdenAI returned an unexpected response
{temp['results'][self.providers[0]]['error']}"""
)
else:
return audio_analysis_result
raise Exception("Edenai speech to text job id processing Timed out")
def _parse_response(self, response: dict) -> str:
return response["public_id"]
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
all_params = {
"file_url": query,
"language": self.language,
"speakers": self.speakers,
"profanity_filter": self.profanity_filter,
"custom_vocabulary": self.custom_vocabulary,
}
        # filter out None values so they are not sent to the API
query_params = {k: v for k, v in all_params.items() if v is not None}
job_id = self._call_eden_ai(query_params)
url = self.base_url + job_id
audio_analysis_result = self._wait_processing(url)
result = audio_analysis_result.text
formatted_text = json.loads(result)
return formatted_text["results"][self.providers[0]]["text"]
| [] |
2024-01-10 | mth93/langchain | libs~langchain~tests~unit_tests~vectorstores~test_imports.py | from libs.core.langchain_core.vectorstores import VectorStore
from langchain import vectorstores
def test_all_imports() -> None:
"""Simple test to make sure all things can be imported."""
for cls in vectorstores.__all__:
if cls not in [
"AlibabaCloudOpenSearchSettings",
"ClickhouseSettings",
"MyScaleSettings",
]:
assert issubclass(getattr(vectorstores, cls), VectorStore)
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~chat_loaders~gmail.py | import base64
import re
from typing import Any, Iterator
from libs.core.langchain_core.chat_sessions import ChatSession
from libs.core.langchain_core.messages import HumanMessage
from langchain_community.chat_loaders.base import BaseChatLoader
def _extract_email_content(msg: Any) -> HumanMessage:
from_email = None
for values in msg["payload"]["headers"]:
name = values["name"]
if name == "From":
from_email = values["value"]
if from_email is None:
        raise ValueError("No 'From' header found in the email payload.")
for part in msg["payload"]["parts"]:
if part["mimeType"] == "text/plain":
data = part["body"]["data"]
data = base64.urlsafe_b64decode(data).decode("utf-8")
# Regular expression to split the email body at the first
# occurrence of a line that starts with "On ... wrote:"
pattern = re.compile(r"\r\nOn .+(\r\n)*wrote:\r\n")
# Split the email body and extract the first part
newest_response = re.split(pattern, data)[0]
message = HumanMessage(
content=newest_response, additional_kwargs={"sender": from_email}
)
return message
    raise ValueError("No 'text/plain' part found in the email payload.")
def _get_message_data(service: Any, message: Any) -> ChatSession:
msg = service.users().messages().get(userId="me", id=message["id"]).execute()
message_content = _extract_email_content(msg)
in_reply_to = None
email_data = msg["payload"]["headers"]
for values in email_data:
name = values["name"]
if name == "In-Reply-To":
in_reply_to = values["value"]
if in_reply_to is None:
        raise ValueError("Message has no 'In-Reply-To' header; it is not a reply.")
thread_id = msg["threadId"]
thread = service.users().threads().get(userId="me", id=thread_id).execute()
messages = thread["messages"]
response_email = None
for message in messages:
email_data = message["payload"]["headers"]
for values in email_data:
if values["name"] == "Message-ID":
message_id = values["value"]
if message_id == in_reply_to:
response_email = message
if response_email is None:
        raise ValueError("Could not find the email this message replies to.")
starter_content = _extract_email_content(response_email)
return ChatSession(messages=[starter_content, message_content])
class GMailLoader(BaseChatLoader):
"""Load data from `GMail`.
There are many ways you could want to load data from GMail.
This loader is currently fairly opinionated in how to do so.
The way it does it is it first looks for all messages that you have sent.
It then looks for messages where you are responding to a previous email.
It then fetches that previous email, and creates a training example
of that email, followed by your email.
Note that there are clear limitations here. For example,
all examples created are only looking at the previous email for context.
To use:
- Set up a Google Developer Account:
Go to the Google Developer Console, create a project,
and enable the Gmail API for that project.
This will give you a credentials.json file that you'll need later.
"""
def __init__(self, creds: Any, n: int = 100, raise_error: bool = False) -> None:
super().__init__()
self.creds = creds
self.n = n
self.raise_error = raise_error
def lazy_load(self) -> Iterator[ChatSession]:
from googleapiclient.discovery import build
service = build("gmail", "v1", credentials=self.creds)
results = (
service.users()
.messages()
.list(userId="me", labelIds=["SENT"], maxResults=self.n)
.execute()
)
messages = results.get("messages", [])
for message in messages:
try:
yield _get_message_data(service, message)
except Exception as e:
# TODO: handle errors better
if self.raise_error:
raise e
else:
pass
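# Illustrative usage sketch (not part of the loader). Assumes OAuth user
# credentials were previously saved to "email_token.json"; the path and scope
# below are placeholders.
#
# from google.oauth2.credentials import Credentials
#
# creds = Credentials.from_authorized_user_file(
#     "email_token.json",
#     scopes=["https://www.googleapis.com/auth/gmail.readonly"],
# )
# chat_sessions = GMailLoader(creds=creds, n=25).load()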
| [] |
2024-01-10 | mth93/langchain | libs~partners~nvidia-ai-endpoints~tests~integration_tests~test_chat_models.py | """Test ChatNVIDIA chat model."""
from libs.core.langchain_core.messages import BaseMessage, HumanMessage, SystemMessage
from langchain_nvidia_ai_endpoints.chat_models import ChatNVIDIA
def test_chat_ai_endpoints() -> None:
"""Test ChatNVIDIA wrapper."""
chat = ChatNVIDIA(
model="llama2_13b",
temperature=0.7,
)
message = HumanMessage(content="Hello")
response = chat([message])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_chat_ai_endpoints_model() -> None:
"""Test wrapper handles model."""
chat = ChatNVIDIA(model="mistral")
assert chat.model == "mistral"
def test_chat_ai_endpoints_system_message() -> None:
"""Test wrapper with system message."""
chat = ChatNVIDIA(model="llama2_13b", max_tokens=36)
system_message = SystemMessage(content="You are to chat with the user.")
human_message = HumanMessage(content="Hello")
response = chat([system_message, human_message])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
## TODO: Not sure if we want to support the n syntax. Trash or keep test
def test_ai_endpoints_streaming() -> None:
"""Test streaming tokens from ai endpoints."""
llm = ChatNVIDIA(model="llama2_13b", max_tokens=36)
for token in llm.stream("I'm Pickle Rick"):
assert isinstance(token.content, str)
async def test_ai_endpoints_astream() -> None:
"""Test streaming tokens from ai endpoints."""
llm = ChatNVIDIA(model="llama2_13b", max_tokens=35)
async for token in llm.astream("I'm Pickle Rick"):
assert isinstance(token.content, str)
async def test_ai_endpoints_abatch() -> None:
"""Test streaming tokens."""
llm = ChatNVIDIA(model="llama2_13b", max_tokens=36)
result = await llm.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token.content, str)
async def test_ai_endpoints_abatch_tags() -> None:
"""Test batch tokens."""
llm = ChatNVIDIA(model="llama2_13b", max_tokens=55)
result = await llm.abatch(
["I'm Pickle Rick", "I'm not Pickle Rick"], config={"tags": ["foo"]}
)
for token in result:
assert isinstance(token.content, str)
def test_ai_endpoints_batch() -> None:
"""Test batch tokens."""
llm = ChatNVIDIA(model="llama2_13b", max_tokens=60)
result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token.content, str)
async def test_ai_endpoints_ainvoke() -> None:
"""Test invoke tokens."""
llm = ChatNVIDIA(model="llama2_13b", max_tokens=60)
result = await llm.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]})
assert isinstance(result.content, str)
def test_ai_endpoints_invoke() -> None:
"""Test invoke tokens."""
llm = ChatNVIDIA(model="llama2_13b", max_tokens=60)
result = llm.invoke("I'm Pickle Rick", config=dict(tags=["foo"]))
assert isinstance(result.content, str)
| [
"Hello",
"You are to chat with the user."
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~gcs_file.py | import os
import tempfile
from typing import Callable, List, Optional
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.document_loaders.unstructured import UnstructuredFileLoader
from langchain_community.utilities.vertexai import get_client_info
class GCSFileLoader(BaseLoader):
"""Load from GCS file."""
def __init__(
self,
project_name: str,
bucket: str,
blob: str,
loader_func: Optional[Callable[[str], BaseLoader]] = None,
):
"""Initialize with bucket and key name.
Args:
project_name: The name of the project to load
bucket: The name of the GCS bucket.
blob: The name of the GCS blob to load.
loader_func: A loader function that instantiates a loader based on a
file_path argument. If nothing is provided, the
UnstructuredFileLoader is used.
Examples:
To use an alternative PDF loader:
            >> from langchain_community.document_loaders import PyPDFLoader
>> loader = GCSFileLoader(..., loader_func=PyPDFLoader)
To use UnstructuredFileLoader with additional arguments:
>> loader = GCSFileLoader(...,
>> loader_func=lambda x: UnstructuredFileLoader(x, mode="elements"))
"""
self.bucket = bucket
self.blob = blob
self.project_name = project_name
def default_loader_func(file_path: str) -> BaseLoader:
return UnstructuredFileLoader(file_path)
self._loader_func = loader_func if loader_func else default_loader_func
def load(self) -> List[Document]:
"""Load documents."""
try:
from google.cloud import storage
except ImportError:
raise ImportError(
"Could not import google-cloud-storage python package. "
"Please install it with `pip install google-cloud-storage`."
)
# initialize a client
storage_client = storage.Client(
self.project_name, client_info=get_client_info("google-cloud-storage")
)
# Create a bucket object for our bucket
bucket = storage_client.get_bucket(self.bucket)
# Create a blob object from the filepath
blob = bucket.blob(self.blob)
# retrieve custom metadata associated with the blob
metadata = bucket.get_blob(self.blob).metadata
with tempfile.TemporaryDirectory() as temp_dir:
file_path = f"{temp_dir}/{self.blob}"
os.makedirs(os.path.dirname(file_path), exist_ok=True)
# Download the file to a destination
blob.download_to_filename(file_path)
loader = self._loader_func(file_path)
docs = loader.load()
for doc in docs:
if "source" in doc.metadata:
doc.metadata["source"] = f"gs://{self.bucket}/{self.blob}"
if metadata:
doc.metadata.update(metadata)
return docs
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~chains~openai_functions~citation_fuzzy_match.py | from typing import Iterator, List
from libs.core.langchain_core.language_models import BaseLanguageModel
from libs.core.langchain_core.messages import HumanMessage, SystemMessage
from libs.core.langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from libs.core.langchain_core.pydantic_v1 import BaseModel, Field
from langchain.chains.llm import LLMChain
from langchain.chains.openai_functions.utils import get_llm_kwargs
from langchain.output_parsers.openai_functions import (
PydanticOutputFunctionsParser,
)
class FactWithEvidence(BaseModel):
"""Class representing a single statement.
Each fact has a body and a list of sources.
If there are multiple facts make sure to break them apart
such that each one only uses a set of sources that are relevant to it.
"""
fact: str = Field(..., description="Body of the sentence, as part of a response")
substring_quote: List[str] = Field(
...,
description=(
"Each source should be a direct quote from the context, "
"as a substring of the original content"
),
)
def _get_span(self, quote: str, context: str, errs: int = 100) -> Iterator[str]:
import regex
minor = quote
major = context
errs_ = 0
s = regex.search(f"({minor}){{e<={errs_}}}", major)
while s is None and errs_ <= errs:
errs_ += 1
s = regex.search(f"({minor}){{e<={errs_}}}", major)
if s is not None:
yield from s.spans()
def get_spans(self, context: str) -> Iterator[str]:
for quote in self.substring_quote:
yield from self._get_span(quote, context)
class QuestionAnswer(BaseModel):
"""A question and its answer as a list of facts each one should have a source.
each sentence contains a body and a list of sources."""
question: str = Field(..., description="Question that was asked")
answer: List[FactWithEvidence] = Field(
...,
description=(
"Body of the answer, each fact should be "
"its separate object with a body and a list of sources"
),
)
def create_citation_fuzzy_match_chain(llm: BaseLanguageModel) -> LLMChain:
"""Create a citation fuzzy match chain.
Args:
llm: Language model to use for the chain.
Returns:
Chain (LLMChain) that can be used to answer questions with citations.
"""
output_parser = PydanticOutputFunctionsParser(pydantic_schema=QuestionAnswer)
schema = QuestionAnswer.schema()
function = {
"name": schema["title"],
"description": schema["description"],
"parameters": schema,
}
llm_kwargs = get_llm_kwargs(function)
messages = [
SystemMessage(
content=(
"You are a world class algorithm to answer "
"questions with correct and exact citations."
)
),
HumanMessage(content="Answer question using the following context"),
HumanMessagePromptTemplate.from_template("{context}"),
HumanMessagePromptTemplate.from_template("Question: {question}"),
HumanMessage(
content=(
"Tips: Make sure to cite your sources, "
"and use the exact words from the context."
)
),
]
prompt = ChatPromptTemplate(messages=messages)
chain = LLMChain(
llm=llm,
prompt=prompt,
llm_kwargs=llm_kwargs,
output_parser=output_parser,
)
return chain
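# Illustrative usage sketch (not part of the chain factory). Assumes an
# OpenAI-functions capable chat model and OPENAI_API_KEY in the environment;
# the question and context below are placeholders.
#
# from langchain_community.chat_models import ChatOpenAI
#
# chain = create_citation_fuzzy_match_chain(ChatOpenAI(model="gpt-3.5-turbo"))
# result = chain.run(
#     question="Where was Ada Lovelace born?",
#     context="Ada Lovelace was born in London in 1815.",
# )
# print(result.answer[0].fact, result.answer[0].substring_quote)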
| [
"{context}",
"Question: {question}",
"You are a world class algorithm to answer questions with correct and exact citations.",
"Answer question using the following context",
"Tips: Make sure to cite your sources, and use the exact words from the context."
] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~vectorstores~qdrant~async_api~test_from_texts.py | import uuid
from typing import Optional
import pytest
from libs.core.langchain_core.documents import Document
from langchain_community.vectorstores import Qdrant
from langchain_community.vectorstores.qdrant import QdrantException
from tests.integration_tests.vectorstores.fake_embeddings import (
ConsistentFakeEmbeddings,
)
from tests.integration_tests.vectorstores.qdrant.async_api.fixtures import (
qdrant_locations,
)
from tests.integration_tests.vectorstores.qdrant.common import qdrant_is_not_running
@pytest.mark.parametrize("qdrant_location", qdrant_locations())
async def test_qdrant_from_texts_stores_duplicated_texts(qdrant_location: str) -> None:
"""Test end to end Qdrant.afrom_texts stores duplicated texts separately."""
collection_name = uuid.uuid4().hex
vec_store = await Qdrant.afrom_texts(
["abc", "abc"],
ConsistentFakeEmbeddings(),
collection_name=collection_name,
location=qdrant_location,
)
client = vec_store.client
assert 2 == client.count(collection_name).count
@pytest.mark.parametrize("batch_size", [1, 64])
@pytest.mark.parametrize("vector_name", [None, "my-vector"])
@pytest.mark.parametrize("qdrant_location", qdrant_locations())
async def test_qdrant_from_texts_stores_ids(
batch_size: int, vector_name: Optional[str], qdrant_location: str
) -> None:
"""Test end to end Qdrant.afrom_texts stores provided ids."""
collection_name = uuid.uuid4().hex
ids = [
"fa38d572-4c31-4579-aedc-1960d79df6df",
"cdc1aa36-d6ab-4fb2-8a94-56674fd27484",
]
vec_store = await Qdrant.afrom_texts(
["abc", "def"],
ConsistentFakeEmbeddings(),
ids=ids,
collection_name=collection_name,
batch_size=batch_size,
vector_name=vector_name,
location=qdrant_location,
)
client = vec_store.client
assert 2 == client.count(collection_name).count
stored_ids = [point.id for point in client.scroll(collection_name)[0]]
assert set(ids) == set(stored_ids)
@pytest.mark.parametrize("vector_name", ["custom-vector"])
@pytest.mark.parametrize("qdrant_location", qdrant_locations())
async def test_qdrant_from_texts_stores_embeddings_as_named_vectors(
vector_name: str,
qdrant_location: str,
) -> None:
"""Test end to end Qdrant.afrom_texts stores named vectors if name is provided."""
collection_name = uuid.uuid4().hex
vec_store = await Qdrant.afrom_texts(
["lorem", "ipsum", "dolor", "sit", "amet"],
ConsistentFakeEmbeddings(),
collection_name=collection_name,
vector_name=vector_name,
location=qdrant_location,
)
client = vec_store.client
assert 5 == client.count(collection_name).count
assert all(
vector_name in point.vector # type: ignore[operator]
for point in client.scroll(collection_name, with_vectors=True)[0]
)
@pytest.mark.parametrize("vector_name", [None, "custom-vector"])
@pytest.mark.skipif(qdrant_is_not_running(), reason="Qdrant is not running")
async def test_qdrant_from_texts_reuses_same_collection(
vector_name: Optional[str],
) -> None:
"""Test if Qdrant.afrom_texts reuses the same collection"""
collection_name = uuid.uuid4().hex
embeddings = ConsistentFakeEmbeddings()
await Qdrant.afrom_texts(
["lorem", "ipsum", "dolor", "sit", "amet"],
embeddings,
collection_name=collection_name,
vector_name=vector_name,
)
vec_store = await Qdrant.afrom_texts(
["foo", "bar"],
embeddings,
collection_name=collection_name,
vector_name=vector_name,
)
client = vec_store.client
assert 7 == client.count(collection_name).count
@pytest.mark.parametrize("vector_name", [None, "custom-vector"])
@pytest.mark.skipif(qdrant_is_not_running(), reason="Qdrant is not running")
async def test_qdrant_from_texts_raises_error_on_different_dimensionality(
vector_name: Optional[str],
) -> None:
"""Test if Qdrant.afrom_texts raises an exception if dimensionality does not
match"""
collection_name = uuid.uuid4().hex
await Qdrant.afrom_texts(
["lorem", "ipsum", "dolor", "sit", "amet"],
ConsistentFakeEmbeddings(dimensionality=10),
collection_name=collection_name,
vector_name=vector_name,
)
with pytest.raises(QdrantException):
await Qdrant.afrom_texts(
["foo", "bar"],
ConsistentFakeEmbeddings(dimensionality=5),
collection_name=collection_name,
vector_name=vector_name,
)
@pytest.mark.parametrize(
["first_vector_name", "second_vector_name"],
[
(None, "custom-vector"),
("custom-vector", None),
("my-first-vector", "my-second_vector"),
],
)
@pytest.mark.skipif(qdrant_is_not_running(), reason="Qdrant is not running")
async def test_qdrant_from_texts_raises_error_on_different_vector_name(
first_vector_name: Optional[str],
second_vector_name: Optional[str],
) -> None:
"""Test if Qdrant.afrom_texts raises an exception if vector name does not match"""
collection_name = uuid.uuid4().hex
await Qdrant.afrom_texts(
["lorem", "ipsum", "dolor", "sit", "amet"],
ConsistentFakeEmbeddings(dimensionality=10),
collection_name=collection_name,
vector_name=first_vector_name,
)
with pytest.raises(QdrantException):
await Qdrant.afrom_texts(
["foo", "bar"],
ConsistentFakeEmbeddings(dimensionality=5),
collection_name=collection_name,
vector_name=second_vector_name,
)
@pytest.mark.skipif(qdrant_is_not_running(), reason="Qdrant is not running")
async def test_qdrant_from_texts_raises_error_on_different_distance() -> None:
"""Test if Qdrant.afrom_texts raises an exception if distance does not match"""
collection_name = uuid.uuid4().hex
await Qdrant.afrom_texts(
["lorem", "ipsum", "dolor", "sit", "amet"],
ConsistentFakeEmbeddings(dimensionality=10),
collection_name=collection_name,
distance_func="Cosine",
)
with pytest.raises(QdrantException):
await Qdrant.afrom_texts(
["foo", "bar"],
ConsistentFakeEmbeddings(dimensionality=5),
collection_name=collection_name,
distance_func="Euclid",
)
@pytest.mark.parametrize("vector_name", [None, "custom-vector"])
@pytest.mark.skipif(qdrant_is_not_running(), reason="Qdrant is not running")
async def test_qdrant_from_texts_recreates_collection_on_force_recreate(
vector_name: Optional[str],
) -> None:
"""Test if Qdrant.afrom_texts recreates the collection even if config mismatches"""
from qdrant_client import QdrantClient
collection_name = uuid.uuid4().hex
await Qdrant.afrom_texts(
["lorem", "ipsum", "dolor", "sit", "amet"],
ConsistentFakeEmbeddings(dimensionality=10),
collection_name=collection_name,
vector_name=vector_name,
)
await Qdrant.afrom_texts(
["foo", "bar"],
ConsistentFakeEmbeddings(dimensionality=5),
collection_name=collection_name,
vector_name=vector_name,
force_recreate=True,
)
client = QdrantClient()
assert 2 == client.count(collection_name).count
vector_params = client.get_collection(collection_name).config.params.vectors
if vector_name is not None:
vector_params = vector_params[vector_name] # type: ignore[index]
assert 5 == vector_params.size # type: ignore[union-attr]
@pytest.mark.parametrize("batch_size", [1, 64])
@pytest.mark.parametrize("content_payload_key", [Qdrant.CONTENT_KEY, "foo"])
@pytest.mark.parametrize("metadata_payload_key", [Qdrant.METADATA_KEY, "bar"])
@pytest.mark.parametrize("qdrant_location", qdrant_locations())
async def test_qdrant_from_texts_stores_metadatas(
batch_size: int,
content_payload_key: str,
metadata_payload_key: str,
qdrant_location: str,
) -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = await Qdrant.afrom_texts(
texts,
ConsistentFakeEmbeddings(),
metadatas=metadatas,
content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key,
batch_size=batch_size,
location=qdrant_location,
)
output = await docsearch.asimilarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"page": 0})]
| [] |
2024-01-10 | mth93/langchain | libs~langchain~tests~unit_tests~chains~test_graph_qa.py | from typing import Any, Dict, List
import pandas as pd
from libs.core.langchain_core.prompts import PromptTemplate
from langchain.chains.graph_qa.cypher import (
GraphCypherQAChain,
construct_schema,
extract_cypher,
)
from langchain.chains.graph_qa.cypher_utils import CypherQueryCorrector, Schema
from langchain.chains.graph_qa.prompts import CYPHER_GENERATION_PROMPT, CYPHER_QA_PROMPT
from langchain.graphs.graph_document import GraphDocument
from langchain.graphs.graph_store import GraphStore
from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory
from tests.unit_tests.llms.fake_llm import FakeLLM
class FakeGraphStore(GraphStore):
@property
def get_schema(self) -> str:
"""Returns the schema of the Graph database"""
return ""
@property
def get_structured_schema(self) -> Dict[str, Any]:
"""Returns the schema of the Graph database"""
return {}
def query(self, query: str, params: dict = {}) -> List[Dict[str, Any]]:
"""Query the graph."""
return []
def refresh_schema(self) -> None:
"""Refreshes the graph schema information."""
pass
def add_graph_documents(
self, graph_documents: List[GraphDocument], include_source: bool = False
) -> None:
"""Take GraphDocument as input as uses it to construct a graph."""
pass
def test_graph_cypher_qa_chain_prompt_selection_1() -> None:
    # Pass prompts directly. No kwargs are specified.
qa_prompt_template = "QA Prompt"
cypher_prompt_template = "Cypher Prompt"
qa_prompt = PromptTemplate(template=qa_prompt_template, input_variables=[])
cypher_prompt = PromptTemplate(template=cypher_prompt_template, input_variables=[])
chain = GraphCypherQAChain.from_llm(
llm=FakeLLM(),
graph=FakeGraphStore(),
verbose=True,
return_intermediate_steps=False,
qa_prompt=qa_prompt,
cypher_prompt=cypher_prompt,
)
assert chain.qa_chain.prompt == qa_prompt
assert chain.cypher_generation_chain.prompt == cypher_prompt
def test_graph_cypher_qa_chain_prompt_selection_2() -> None:
# Default case. Pass nothing
chain = GraphCypherQAChain.from_llm(
llm=FakeLLM(),
graph=FakeGraphStore(),
verbose=True,
return_intermediate_steps=False,
)
assert chain.qa_chain.prompt == CYPHER_QA_PROMPT
assert chain.cypher_generation_chain.prompt == CYPHER_GENERATION_PROMPT
def test_graph_cypher_qa_chain_prompt_selection_3() -> None:
# Pass non-prompt args only to sub-chains via kwargs
memory = ConversationBufferMemory(memory_key="chat_history")
readonlymemory = ReadOnlySharedMemory(memory=memory)
chain = GraphCypherQAChain.from_llm(
llm=FakeLLM(),
graph=FakeGraphStore(),
verbose=True,
return_intermediate_steps=False,
cypher_llm_kwargs={"memory": readonlymemory},
qa_llm_kwargs={"memory": readonlymemory},
)
assert chain.qa_chain.prompt == CYPHER_QA_PROMPT
assert chain.cypher_generation_chain.prompt == CYPHER_GENERATION_PROMPT
def test_graph_cypher_qa_chain_prompt_selection_4() -> None:
# Pass prompt, non-prompt args to subchains via kwargs
qa_prompt_template = "QA Prompt"
cypher_prompt_template = "Cypher Prompt"
memory = ConversationBufferMemory(memory_key="chat_history")
readonlymemory = ReadOnlySharedMemory(memory=memory)
qa_prompt = PromptTemplate(template=qa_prompt_template, input_variables=[])
cypher_prompt = PromptTemplate(template=cypher_prompt_template, input_variables=[])
chain = GraphCypherQAChain.from_llm(
llm=FakeLLM(),
graph=FakeGraphStore(),
verbose=True,
return_intermediate_steps=False,
cypher_llm_kwargs={"prompt": cypher_prompt, "memory": readonlymemory},
qa_llm_kwargs={"prompt": qa_prompt, "memory": readonlymemory},
)
assert chain.qa_chain.prompt == qa_prompt
assert chain.cypher_generation_chain.prompt == cypher_prompt
def test_graph_cypher_qa_chain_prompt_selection_5() -> None:
# Can't pass both prompt and kwargs at the same time
qa_prompt_template = "QA Prompt"
cypher_prompt_template = "Cypher Prompt"
memory = ConversationBufferMemory(memory_key="chat_history")
readonlymemory = ReadOnlySharedMemory(memory=memory)
qa_prompt = PromptTemplate(template=qa_prompt_template, input_variables=[])
cypher_prompt = PromptTemplate(template=cypher_prompt_template, input_variables=[])
try:
GraphCypherQAChain.from_llm(
llm=FakeLLM(),
graph=FakeGraphStore(),
verbose=True,
return_intermediate_steps=False,
qa_prompt=qa_prompt,
cypher_prompt=cypher_prompt,
cypher_llm_kwargs={"memory": readonlymemory},
qa_llm_kwargs={"memory": readonlymemory},
)
assert False
except ValueError:
assert True
def test_graph_cypher_qa_chain() -> None:
template = """You are a nice chatbot having a conversation with a human.
Schema:
{schema}
Previous conversation:
{chat_history}
New human question: {question}
Response:"""
prompt = PromptTemplate(
input_variables=["schema", "question", "chat_history"], template=template
)
memory = ConversationBufferMemory(memory_key="chat_history")
readonlymemory = ReadOnlySharedMemory(memory=memory)
prompt1 = (
"You are a nice chatbot having a conversation with a human.\n\n "
"Schema:\n Node properties are the following:\n\nRelationship "
"properties are the following:\n\nThe relationships are the "
"following:\n\n\n "
"Previous conversation:\n \n\n New human question: "
"Test question\n Response:"
)
prompt2 = (
"You are a nice chatbot having a conversation with a human.\n\n "
"Schema:\n Node properties are the following:\n\nRelationship "
"properties are the following:\n\nThe relationships are the "
"following:\n\n\n "
"Previous conversation:\n Human: Test question\nAI: foo\n\n "
"New human question: Test new question\n Response:"
)
llm = FakeLLM(queries={prompt1: "answer1", prompt2: "answer2"})
chain = GraphCypherQAChain.from_llm(
cypher_llm=llm,
qa_llm=FakeLLM(),
graph=FakeGraphStore(),
verbose=True,
return_intermediate_steps=False,
cypher_llm_kwargs={"prompt": prompt, "memory": readonlymemory},
memory=memory,
)
chain.run("Test question")
chain.run("Test new question")
# If we get here without a key error, that means memory
# was used properly to create prompts.
assert True
def test_no_backticks() -> None:
"""Test if there are no backticks, so the original text should be returned."""
query = "MATCH (n) RETURN n"
output = extract_cypher(query)
assert output == query
def test_backticks() -> None:
"""Test if there are backticks. Query from within backticks should be returned."""
query = "You can use the following query: ```MATCH (n) RETURN n```"
output = extract_cypher(query)
assert output == "MATCH (n) RETURN n"
def test_exclude_types() -> None:
structured_schema = {
"node_props": {
"Movie": [{"property": "title", "type": "STRING"}],
"Actor": [{"property": "name", "type": "STRING"}],
"Person": [{"property": "name", "type": "STRING"}],
},
"rel_props": {},
"relationships": [
{"start": "Actor", "end": "Movie", "type": "ACTED_IN"},
{"start": "Person", "end": "Movie", "type": "DIRECTED"},
],
}
exclude_types = ["Person", "DIRECTED"]
output = construct_schema(structured_schema, [], exclude_types)
expected_schema = (
"Node properties are the following:\n"
"Movie {title: STRING},Actor {name: STRING}\n"
"Relationship properties are the following:\n\n"
"The relationships are the following:\n"
"(:Actor)-[:ACTED_IN]->(:Movie)"
)
assert output == expected_schema
def test_include_types() -> None:
structured_schema = {
"node_props": {
"Movie": [{"property": "title", "type": "STRING"}],
"Actor": [{"property": "name", "type": "STRING"}],
"Person": [{"property": "name", "type": "STRING"}],
},
"rel_props": {},
"relationships": [
{"start": "Actor", "end": "Movie", "type": "ACTED_IN"},
{"start": "Person", "end": "Movie", "type": "DIRECTED"},
],
}
include_types = ["Movie", "Actor", "ACTED_IN"]
output = construct_schema(structured_schema, include_types, [])
expected_schema = (
"Node properties are the following:\n"
"Movie {title: STRING},Actor {name: STRING}\n"
"Relationship properties are the following:\n\n"
"The relationships are the following:\n"
"(:Actor)-[:ACTED_IN]->(:Movie)"
)
assert output == expected_schema
def test_include_types2() -> None:
structured_schema = {
"node_props": {
"Movie": [{"property": "title", "type": "STRING"}],
"Actor": [{"property": "name", "type": "STRING"}],
"Person": [{"property": "name", "type": "STRING"}],
},
"rel_props": {},
"relationships": [
{"start": "Actor", "end": "Movie", "type": "ACTED_IN"},
{"start": "Person", "end": "Movie", "type": "DIRECTED"},
],
}
include_types = ["Movie", "Actor"]
output = construct_schema(structured_schema, include_types, [])
expected_schema = (
"Node properties are the following:\n"
"Movie {title: STRING},Actor {name: STRING}\n"
"Relationship properties are the following:\n\n"
"The relationships are the following:\n"
)
assert output == expected_schema
def test_include_types3() -> None:
structured_schema = {
"node_props": {
"Movie": [{"property": "title", "type": "STRING"}],
"Actor": [{"property": "name", "type": "STRING"}],
"Person": [{"property": "name", "type": "STRING"}],
},
"rel_props": {},
"relationships": [
{"start": "Actor", "end": "Movie", "type": "ACTED_IN"},
{"start": "Person", "end": "Movie", "type": "DIRECTED"},
],
}
include_types = ["Movie", "Actor", "ACTED_IN"]
output = construct_schema(structured_schema, include_types, [])
expected_schema = (
"Node properties are the following:\n"
"Movie {title: STRING},Actor {name: STRING}\n"
"Relationship properties are the following:\n\n"
"The relationships are the following:\n"
"(:Actor)-[:ACTED_IN]->(:Movie)"
)
assert output == expected_schema
def test_validating_cypher_statements() -> None:
cypher_file = "tests/unit_tests/data/cypher_corrector.csv"
examples = pd.read_csv(cypher_file)
examples.fillna("", inplace=True)
for _, row in examples.iterrows():
schema = load_schemas(row["schema"])
corrector = CypherQueryCorrector(schema)
assert corrector(row["statement"]) == row["correct_query"]
def load_schemas(str_schemas: str) -> List[Schema]:
"""
Args:
str_schemas: string of schemas
"""
values = str_schemas.replace("(", "").replace(")", "").split(",")
schemas = []
for i in range(len(values) // 3):
schemas.append(
Schema(
values[i * 3].strip(),
values[i * 3 + 1].strip(),
values[i * 3 + 2].strip(),
)
)
return schemas
| [
"You are a nice chatbot having a conversation with a human.\n\n Schema:\n Node properties are the following:\n\nRelationship properties are the following:\n\nThe relationships are the following:\n\n\n Previous conversation:\n \n\n New human question: Test question\n Response:",
"Cypher Prompt",
"You are a nice chatbot having a conversation with a human.\n\n Schema:\n Node properties are the following:\n\nRelationship properties are the following:\n\nThe relationships are the following:\n\n\n Previous conversation:\n Human: Test question\nAI: foo\n\n New human question: Test new question\n Response:",
"question",
"chat_history",
"QA Prompt",
"You are a nice chatbot having a conversation with a human.\n\n Schema:\n {schema}\n\n Previous conversation:\n {chat_history}\n\n New human question: {question}\n Response:"
] |
2024-01-10 | mth93/langchain | libs~community~tests~unit_tests~llms~test_symblai_nebula.py | """Test the Nebula model by Symbl.ai"""
from libs.core.langchain_core.pydantic_v1 import SecretStr
from pytest import CaptureFixture, MonkeyPatch
from langchain_community.llms.symblai_nebula import Nebula
def test_api_key_is_secret_string() -> None:
llm = Nebula(nebula_api_key="secret-api-key")
assert isinstance(llm.nebula_api_key, SecretStr)
assert llm.nebula_api_key.get_secret_value() == "secret-api-key"
def test_api_key_masked_when_passed_from_env(
monkeypatch: MonkeyPatch, capsys: CaptureFixture
) -> None:
monkeypatch.setenv("NEBULA_API_KEY", "secret-api-key")
llm = Nebula()
print(llm.nebula_api_key, end="")
captured = capsys.readouterr()
assert captured.out == "**********"
def test_api_key_masked_when_passed_via_constructor(capsys: CaptureFixture) -> None:
llm = Nebula(nebula_api_key="secret-api-key")
print(llm.nebula_api_key, end="")
captured = capsys.readouterr()
assert captured.out == "**********"
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~vectorstores~bageldb.py | from __future__ import annotations
import uuid
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Tuple,
Type,
)
if TYPE_CHECKING:
import bagel
import bagel.config
from bagel.api.types import ID, OneOrMany, Where, WhereDocument
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.utils import xor_args
from libs.core.langchain_core.vectorstores import VectorStore
DEFAULT_K = 5
def _results_to_docs(results: Any) -> List[Document]:
return [doc for doc, _ in _results_to_docs_and_scores(results)]
def _results_to_docs_and_scores(results: Any) -> List[Tuple[Document, float]]:
return [
(Document(page_content=result[0], metadata=result[1] or {}), result[2])
for result in zip(
results["documents"][0],
results["metadatas"][0],
results["distances"][0],
)
]
class Bagel(VectorStore):
"""``BagelDB.ai`` vector store.
To use, you should have the ``betabageldb`` python package installed.
Example:
.. code-block:: python
from langchain_community.vectorstores import Bagel
vectorstore = Bagel(cluster_name="langchain_store")
"""
_LANGCHAIN_DEFAULT_CLUSTER_NAME = "langchain"
def __init__(
self,
cluster_name: str = _LANGCHAIN_DEFAULT_CLUSTER_NAME,
client_settings: Optional[bagel.config.Settings] = None,
embedding_function: Optional[Embeddings] = None,
cluster_metadata: Optional[Dict] = None,
client: Optional[bagel.Client] = None,
relevance_score_fn: Optional[Callable[[float], float]] = None,
) -> None:
"""Initialize with bagel client"""
try:
import bagel
import bagel.config
except ImportError:
raise ImportError("Please install bagel `pip install betabageldb`.")
if client is not None:
self._client_settings = client_settings
self._client = client
else:
if client_settings:
_client_settings = client_settings
else:
_client_settings = bagel.config.Settings(
bagel_api_impl="rest",
bagel_server_host="api.bageldb.ai",
)
self._client_settings = _client_settings
self._client = bagel.Client(_client_settings)
self._cluster = self._client.get_or_create_cluster(
name=cluster_name,
metadata=cluster_metadata,
)
self.override_relevance_score_fn = relevance_score_fn
self._embedding_function = embedding_function
@property
def embeddings(self) -> Optional[Embeddings]:
return self._embedding_function
@xor_args(("query_texts", "query_embeddings"))
def __query_cluster(
self,
query_texts: Optional[List[str]] = None,
query_embeddings: Optional[List[List[float]]] = None,
n_results: int = 4,
where: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Query the BagelDB cluster based on the provided parameters."""
try:
import bagel # noqa: F401
except ImportError:
raise ImportError("Please install bagel `pip install betabageldb`.")
return self._cluster.find(
query_texts=query_texts,
query_embeddings=query_embeddings,
n_results=n_results,
where=where,
**kwargs,
)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
embeddings: Optional[List[List[float]]] = None,
**kwargs: Any,
) -> List[str]:
"""
Add texts along with their corresponding embeddings and optional
metadata to the BagelDB cluster.
Args:
texts (Iterable[str]): Texts to be added.
            embeddings (Optional[List[List[float]]]): List of embedding vectors.
            metadatas (Optional[List[dict]]): Optional list of metadatas.
            ids (Optional[List[str]]): List of unique IDs for the texts.
        Returns:
            List[str]: List of unique IDs representing the added texts.
"""
# creating unique ids if None
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
texts = list(texts)
if self._embedding_function and embeddings is None and texts:
embeddings = self._embedding_function.embed_documents(texts)
if metadatas:
length_diff = len(texts) - len(metadatas)
if length_diff:
metadatas = metadatas + [{}] * length_diff
empty_ids = []
non_empty_ids = []
for idx, metadata in enumerate(metadatas):
if metadata:
non_empty_ids.append(idx)
else:
empty_ids.append(idx)
if non_empty_ids:
metadatas = [metadatas[idx] for idx in non_empty_ids]
texts_with_metadatas = [texts[idx] for idx in non_empty_ids]
embeddings_with_metadatas = (
[embeddings[idx] for idx in non_empty_ids] if embeddings else None
)
ids_with_metadata = [ids[idx] for idx in non_empty_ids]
self._cluster.upsert(
embeddings=embeddings_with_metadatas,
metadatas=metadatas,
documents=texts_with_metadatas,
ids=ids_with_metadata,
)
if empty_ids:
texts_without_metadatas = [texts[j] for j in empty_ids]
embeddings_without_metadatas = (
[embeddings[j] for j in empty_ids] if embeddings else None
)
ids_without_metadatas = [ids[j] for j in empty_ids]
self._cluster.upsert(
embeddings=embeddings_without_metadatas,
documents=texts_without_metadatas,
ids=ids_without_metadatas,
)
else:
metadatas = [{}] * len(texts)
self._cluster.upsert(
embeddings=embeddings,
documents=texts,
metadatas=metadatas,
ids=ids,
)
return ids
def similarity_search(
self,
query: str,
k: int = DEFAULT_K,
where: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""
Run a similarity search with BagelDB.
Args:
query (str): The query text to search for similar documents/texts.
k (int): The number of results to return.
where (Optional[Dict[str, str]]): Metadata filters to narrow down.
Returns:
List[Document]: List of documents objects representing
the documents most similar to the query text.
"""
docs_and_scores = self.similarity_search_with_score(query, k, where=where)
return [doc for doc, _ in docs_and_scores]
def similarity_search_with_score(
self,
query: str,
k: int = DEFAULT_K,
where: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""
Run a similarity search with BagelDB and return documents with their
corresponding similarity scores.
Args:
query (str): The query text to search for similar documents.
k (int): The number of results to return.
where (Optional[Dict[str, str]]): Filter using metadata.
Returns:
List[Tuple[Document, float]]: List of tuples, each containing a
Document object representing a similar document and its
corresponding similarity score.
"""
results = self.__query_cluster(query_texts=[query], n_results=k, where=where)
return _results_to_docs_and_scores(results)
@classmethod
def from_texts(
cls: Type[Bagel],
texts: List[str],
embedding: Optional[Embeddings] = None,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
cluster_name: str = _LANGCHAIN_DEFAULT_CLUSTER_NAME,
client_settings: Optional[bagel.config.Settings] = None,
cluster_metadata: Optional[Dict] = None,
client: Optional[bagel.Client] = None,
text_embeddings: Optional[List[List[float]]] = None,
**kwargs: Any,
) -> Bagel:
"""
Create and initialize a Bagel instance from list of texts.
Args:
texts (List[str]): List of text content to be added.
cluster_name (str): The name of the BagelDB cluster.
client_settings (Optional[bagel.config.Settings]): Client settings.
cluster_metadata (Optional[Dict]): Metadata of the cluster.
            embedding (Optional[Embeddings]): Embedding function. Defaults to None.
            metadatas (Optional[List[dict]]): List of metadata.
            ids (Optional[List[str]]): List of unique IDs. Defaults to None.
client (Optional[bagel.Client]): Bagel client instance.
Returns:
Bagel: Bagel vectorstore.
"""
bagel_cluster = cls(
cluster_name=cluster_name,
embedding_function=embedding,
client_settings=client_settings,
client=client,
cluster_metadata=cluster_metadata,
**kwargs,
)
_ = bagel_cluster.add_texts(
texts=texts, embeddings=text_embeddings, metadatas=metadatas, ids=ids
)
return bagel_cluster
def delete_cluster(self) -> None:
"""Delete the cluster."""
self._client.delete_cluster(self._cluster.name)
def similarity_search_by_vector_with_relevance_scores(
self,
query_embeddings: List[float],
k: int = DEFAULT_K,
where: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""
Return docs most similar to embedding vector and similarity score.
"""
results = self.__query_cluster(
query_embeddings=query_embeddings, n_results=k, where=where
)
return _results_to_docs_and_scores(results)
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = DEFAULT_K,
where: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector."""
results = self.__query_cluster(
query_embeddings=embedding, n_results=k, where=where
)
return _results_to_docs(results)
def _select_relevance_score_fn(self) -> Callable[[float], float]:
"""
Select and return the appropriate relevance score function based
on the distance metric used in the BagelDB cluster.
"""
if self.override_relevance_score_fn:
return self.override_relevance_score_fn
distance = "l2"
distance_key = "hnsw:space"
metadata = self._cluster.metadata
if metadata and distance_key in metadata:
distance = metadata[distance_key]
if distance == "cosine":
return self._cosine_relevance_score_fn
elif distance == "l2":
return self._euclidean_relevance_score_fn
elif distance == "ip":
return self._max_inner_product_relevance_score_fn
else:
raise ValueError(
"No supported normalization function for distance"
f" metric of type: {distance}. Consider providing"
" relevance_score_fn to Bagel constructor."
)
@classmethod
def from_documents(
cls: Type[Bagel],
documents: List[Document],
embedding: Optional[Embeddings] = None,
ids: Optional[List[str]] = None,
cluster_name: str = _LANGCHAIN_DEFAULT_CLUSTER_NAME,
client_settings: Optional[bagel.config.Settings] = None,
client: Optional[bagel.Client] = None,
cluster_metadata: Optional[Dict] = None,
**kwargs: Any,
) -> Bagel:
"""
Create a Bagel vectorstore from a list of documents.
Args:
documents (List[Document]): List of Document objects to add to the
Bagel vectorstore.
            embedding (Optional[Embeddings]): Embedding function to use.
ids (Optional[List[str]]): List of IDs. Defaults to None.
cluster_name (str): The name of the BagelDB cluster.
client_settings (Optional[bagel.config.Settings]): Client settings.
client (Optional[bagel.Client]): Bagel client instance.
cluster_metadata (Optional[Dict]): Metadata associated with the
Bagel cluster. Defaults to None.
Returns:
Bagel: Bagel vectorstore.
"""
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return cls.from_texts(
texts=texts,
embedding=embedding,
metadatas=metadatas,
ids=ids,
cluster_name=cluster_name,
client_settings=client_settings,
client=client,
cluster_metadata=cluster_metadata,
**kwargs,
)
def update_document(self, document_id: str, document: Document) -> None:
"""Update a document in the cluster.
Args:
document_id (str): ID of the document to update.
document (Document): Document to update.
"""
text = document.page_content
metadata = document.metadata
self._cluster.update(
ids=[document_id],
documents=[text],
metadatas=[metadata],
)
def get(
self,
ids: Optional[OneOrMany[ID]] = None,
where: Optional[Where] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
where_document: Optional[WhereDocument] = None,
include: Optional[List[str]] = None,
) -> Dict[str, Any]:
"""Gets the collection."""
kwargs = {
"ids": ids,
"where": where,
"limit": limit,
"offset": offset,
"where_document": where_document,
}
if include is not None:
kwargs["include"] = include
return self._cluster.get(**kwargs)
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None:
"""
Delete by IDs.
Args:
ids: List of ids to delete.
"""
self._cluster.delete(ids=ids)
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~pai_eas_endpoint.py | import json
import logging
from typing import Any, Dict, Iterator, List, Mapping, Optional
import requests
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.outputs import GenerationChunk
from libs.core.langchain_core.pydantic_v1 import root_validator
from libs.core.langchain_core.utils import get_from_dict_or_env
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
class PaiEasEndpoint(LLM):
"""Langchain LLM class to help to access eass llm service.
To use this endpoint, must have a deployed eas chat llm service on PAI AliCloud.
One can set the environment variable ``eas_service_url`` and ``eas_service_token``.
The environment variables can set with your eas service url and service token.
Example:
.. code-block:: python
from langchain_community.llms.pai_eas_endpoint import PaiEasEndpoint
            eas_endpoint = PaiEasEndpoint(
eas_service_url="your_service_url",
eas_service_token="your_service_token"
)
"""
"""PAI-EAS Service URL"""
eas_service_url: str
"""PAI-EAS Service TOKEN"""
eas_service_token: str
"""PAI-EAS Service Infer Params"""
max_new_tokens: Optional[int] = 512
temperature: Optional[float] = 0.95
top_p: Optional[float] = 0.1
top_k: Optional[int] = 0
stop_sequences: Optional[List[str]] = None
"""Enable stream chat mode."""
streaming: bool = False
"""Key/value arguments to pass to the model. Reserved for future use"""
model_kwargs: Optional[dict] = None
version: Optional[str] = "2.0"
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["eas_service_url"] = get_from_dict_or_env(
values, "eas_service_url", "EAS_SERVICE_URL"
)
values["eas_service_token"] = get_from_dict_or_env(
values, "eas_service_token", "EAS_SERVICE_TOKEN"
)
return values
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "pai_eas_endpoint"
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Cohere API."""
return {
"max_new_tokens": self.max_new_tokens,
"temperature": self.temperature,
"top_k": self.top_k,
"top_p": self.top_p,
"stop_sequences": [],
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
"eas_service_url": self.eas_service_url,
"eas_service_token": self.eas_service_token,
**_model_kwargs,
}
def _invocation_params(
self, stop_sequences: Optional[List[str]], **kwargs: Any
) -> dict:
params = self._default_params
if self.stop_sequences is not None and stop_sequences is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop_sequences is not None:
params["stop"] = self.stop_sequences
else:
params["stop"] = stop_sequences
if self.model_kwargs:
params.update(self.model_kwargs)
return {**params, **kwargs}
@staticmethod
def _process_response(
response: Any, stop: Optional[List[str]], version: Optional[str]
) -> str:
if version == "1.0":
text = response
else:
text = response["response"]
if stop:
text = enforce_stop_tokens(text, stop)
return "".join(text)
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
params = self._invocation_params(stop, **kwargs)
prompt = prompt.strip()
response = None
try:
if self.streaming:
completion = ""
for chunk in self._stream(prompt, stop, run_manager, **params):
completion += chunk.text
return completion
else:
response = self._call_eas(prompt, params)
_stop = params.get("stop")
return self._process_response(response, _stop, self.version)
except Exception as error:
raise ValueError(f"Error raised by the service: {error}")
def _call_eas(self, prompt: str = "", params: Dict = {}) -> Any:
"""Generate text from the eas service."""
headers = {
"Content-Type": "application/json",
"Authorization": f"{self.eas_service_token}",
}
if self.version == "1.0":
body = {
"input_ids": f"{prompt}",
}
else:
body = {
"prompt": f"{prompt}",
}
# add params to body
for key, value in params.items():
body[key] = value
# make request
response = requests.post(self.eas_service_url, headers=headers, json=body)
if response.status_code != 200:
raise Exception(
f"Request failed with status code {response.status_code}"
f" and message {response.text}"
)
try:
return json.loads(response.text)
except Exception as e:
if isinstance(e, json.decoder.JSONDecodeError):
return response.text
raise e
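    # Illustrative request bodies produced by _call_eas (parameter values shown are
    # the class defaults; the exact server-side schema is an assumption):
    #   version "1.0": {"input_ids": "<prompt>", "max_new_tokens": 512, ...}
    #   otherwise:     {"prompt": "<prompt>", "max_new_tokens": 512, ...}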
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
invocation_params = self._invocation_params(stop, **kwargs)
headers = {
"User-Agent": "Test Client",
"Authorization": f"{self.eas_service_token}",
}
if self.version == "1.0":
pload = {"input_ids": prompt, **invocation_params}
response = requests.post(
self.eas_service_url, headers=headers, json=pload, stream=True
)
res = GenerationChunk(text=response.text)
if run_manager:
run_manager.on_llm_new_token(res.text)
# yield text, if any
yield res
else:
pload = {"prompt": prompt, "use_stream_chat": "True", **invocation_params}
response = requests.post(
self.eas_service_url, headers=headers, json=pload, stream=True
)
for chunk in response.iter_lines(
chunk_size=8192, decode_unicode=False, delimiter=b"\0"
):
if chunk:
data = json.loads(chunk.decode("utf-8"))
output = data["response"]
# identify stop sequence in generated text, if any
stop_seq_found: Optional[str] = None
for stop_seq in invocation_params["stop"]:
if stop_seq in output:
stop_seq_found = stop_seq
# identify text to yield
text: Optional[str] = None
if stop_seq_found:
text = output[: output.index(stop_seq_found)]
else:
text = output
# yield text, if any
if text:
res = GenerationChunk(text=text)
yield res
if run_manager:
run_manager.on_llm_new_token(res.text)
# break if stop sequence found
if stop_seq_found:
break
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~rocksetdb.py | from typing import Any, Callable, Iterator, List, Optional, Tuple
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
def default_joiner(docs: List[Tuple[str, Any]]) -> str:
"""Default joiner for content columns."""
return "\n".join([doc[1] for doc in docs])
class ColumnNotFoundError(Exception):
"""Column not found error."""
def __init__(self, missing_key: str, query: str):
super().__init__(f'Column "{missing_key}" not selected in query:\n{query}')
class RocksetLoader(BaseLoader):
"""Load from a `Rockset` database.
To use, you should have the `rockset` python package installed.
Example:
.. code-block:: python
# This code will load 3 records from the "langchain_demo"
# collection as Documents, with the `text` column used as
# the content
from langchain_community.document_loaders import RocksetLoader
from rockset import RocksetClient, Regions, models
loader = RocksetLoader(
RocksetClient(Regions.usw2a1, "<api key>"),
models.QueryRequestSql(
query="select * from langchain_demo limit 3"
),
["text"]
)
"""
def __init__(
self,
client: Any,
query: Any,
content_keys: List[str],
metadata_keys: Optional[List[str]] = None,
content_columns_joiner: Callable[[List[Tuple[str, Any]]], str] = default_joiner,
):
"""Initialize with Rockset client.
Args:
client: Rockset client object.
query: Rockset query object.
content_keys: The collection columns to be written into the `page_content`
of the Documents.
metadata_keys: The collection columns to be written into the `metadata` of
the Documents. By default, this is all the keys in the document.
            content_columns_joiner: Method that joins content_keys and their values into a
                string. It's a method that takes in a List[Tuple[str, Any]],
representing a list of tuples of (column name, column value).
By default, this is a method that joins each column value with a new
line. This method is only relevant if there are multiple content_keys.
"""
try:
from rockset import QueryPaginator, RocksetClient
from rockset.models import QueryRequestSql
except ImportError:
raise ImportError(
"Could not import rockset client python package. "
"Please install it with `pip install rockset`."
)
if not isinstance(client, RocksetClient):
raise ValueError(
f"client should be an instance of rockset.RocksetClient, "
f"got {type(client)}"
)
if not isinstance(query, QueryRequestSql):
raise ValueError(
f"query should be an instance of rockset.model.QueryRequestSql, "
f"got {type(query)}"
)
self.client = client
self.query = query
self.content_keys = content_keys
self.content_columns_joiner = content_columns_joiner
self.metadata_keys = metadata_keys
self.paginator = QueryPaginator
self.request_model = QueryRequestSql
try:
self.client.set_application("langchain")
except AttributeError:
# ignore
pass
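    # A minimal sketch of a custom content_columns_joiner, assuming the query selected
    # two hypothetical columns named "title" and "body":
    #
    #     def title_body_joiner(cols: List[Tuple[str, Any]]) -> str:
    #         return "\n\n".join(f"{name}: {value}" for name, value in cols)
    #
    #     loader = RocksetLoader(client, query, ["title", "body"],
    #                            content_columns_joiner=title_body_joiner)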
def load(self) -> List[Document]:
return list(self.lazy_load())
def lazy_load(self) -> Iterator[Document]:
query_results = self.client.Queries.query(
sql=self.query
).results # execute the SQL query
for doc in query_results: # for each doc in the response
try:
yield Document(
page_content=self.content_columns_joiner(
[(col, doc[col]) for col in self.content_keys]
),
metadata={col: doc[col] for col in self.metadata_keys}
if self.metadata_keys is not None
else doc,
) # try to yield the Document
except (
KeyError
) as e: # either content_columns or metadata_columns is invalid
raise ColumnNotFoundError(
e.args[0], self.query
) # raise that the column isn't in the db schema
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~chat_models~ollama.py | import json
from typing import Any, AsyncIterator, Dict, Iterator, List, Optional, Union
from libs.core.langchain_core._api import deprecated
from libs.core.langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from libs.core.langchain_core.language_models.chat_models import BaseChatModel
from libs.core.langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
ChatMessage,
HumanMessage,
SystemMessage,
)
from libs.core.langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_community.llms.ollama import OllamaEndpointNotFoundError, _OllamaCommon
@deprecated("0.0.3", alternative="_chat_stream_response_to_chat_generation_chunk")
def _stream_response_to_chat_generation_chunk(
stream_response: str,
) -> ChatGenerationChunk:
"""Convert a stream response to a generation chunk."""
parsed_response = json.loads(stream_response)
generation_info = parsed_response if parsed_response.get("done") is True else None
return ChatGenerationChunk(
message=AIMessageChunk(content=parsed_response.get("response", "")),
generation_info=generation_info,
)
def _chat_stream_response_to_chat_generation_chunk(
stream_response: str,
) -> ChatGenerationChunk:
"""Convert a stream response to a generation chunk."""
parsed_response = json.loads(stream_response)
generation_info = parsed_response if parsed_response.get("done") is True else None
return ChatGenerationChunk(
message=AIMessageChunk(
content=parsed_response.get("message", {}).get("content", "")
),
generation_info=generation_info,
)
class ChatOllama(BaseChatModel, _OllamaCommon):
"""Ollama locally runs large language models.
To use, follow the instructions at https://ollama.ai/.
Example:
.. code-block:: python
from langchain_community.chat_models import ChatOllama
ollama = ChatOllama(model="llama2")
"""
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "ollama-chat"
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this model can be serialized by Langchain."""
return False
@deprecated("0.0.3", alternative="_convert_messages_to_ollama_messages")
def _format_message_as_text(self, message: BaseMessage) -> str:
if isinstance(message, ChatMessage):
message_text = f"\n\n{message.role.capitalize()}: {message.content}"
        elif isinstance(message, HumanMessage):
            if isinstance(message.content, str):
                message_text = f"[INST] {message.content} [/INST]"
            elif message.content[0].get("type") == "text":
                message_text = f"[INST] {message.content[0]['text']} [/INST]"
            elif message.content[0].get("type") == "image_url":
                message_text = message.content[0]["image_url"]["url"]
            else:
                raise ValueError("Unsupported HumanMessage content type.")
elif isinstance(message, AIMessage):
message_text = f"{message.content}"
elif isinstance(message, SystemMessage):
message_text = f"<<SYS>> {message.content} <</SYS>>"
else:
raise ValueError(f"Got unknown type {message}")
return message_text
def _format_messages_as_text(self, messages: List[BaseMessage]) -> str:
return "\n".join(
[self._format_message_as_text(message) for message in messages]
)
def _convert_messages_to_ollama_messages(
self, messages: List[BaseMessage]
) -> List[Dict[str, Union[str, List[str]]]]:
ollama_messages = []
for message in messages:
role = ""
if isinstance(message, HumanMessage):
role = "user"
elif isinstance(message, AIMessage):
role = "assistant"
elif isinstance(message, SystemMessage):
role = "system"
else:
raise ValueError("Received unsupported message type for Ollama.")
content = ""
images = []
if isinstance(message.content, str):
content = message.content
else:
for content_part in message.content:
if content_part.get("type") == "text":
content += f"\n{content_part['text']}"
elif content_part.get("type") == "image_url":
if isinstance(content_part.get("image_url"), str):
image_url_components = content_part["image_url"].split(",")
# Support data:image/jpeg;base64,<image> format
# and base64 strings
if len(image_url_components) > 1:
images.append(image_url_components[1])
else:
images.append(image_url_components[0])
else:
raise ValueError(
"Only string image_url " "content parts are supported."
)
else:
raise ValueError(
"Unsupported message content type. "
"Must either have type 'text' or type 'image_url' "
"with a string 'image_url' field."
)
ollama_messages.append(
{
"role": role,
"content": content,
"images": images,
}
)
return ollama_messages
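    # Rough sketch of the conversion performed above (illustrative values):
    #   HumanMessage(content="hi")     -> {"role": "user", "content": "hi", "images": []}
    #   AIMessage(content="hello")     -> {"role": "assistant", "content": "hello", "images": []}
    #   SystemMessage(content="obey")  -> {"role": "system", "content": "obey", "images": []}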
def _create_chat_stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> Iterator[str]:
payload = {
"messages": self._convert_messages_to_ollama_messages(messages),
}
yield from self._create_stream(
payload=payload, stop=stop, api_url=f"{self.base_url}/api/chat/", **kwargs
)
async def _acreate_chat_stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> AsyncIterator[str]:
payload = {
"messages": self._convert_messages_to_ollama_messages(messages),
}
async for stream_resp in self._acreate_stream(
payload=payload, stop=stop, api_url=f"{self.base_url}/api/chat/", **kwargs
):
yield stream_resp
def _chat_stream_with_aggregation(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
verbose: bool = False,
**kwargs: Any,
) -> ChatGenerationChunk:
final_chunk: Optional[ChatGenerationChunk] = None
for stream_resp in self._create_chat_stream(messages, stop, **kwargs):
if stream_resp:
chunk = _chat_stream_response_to_chat_generation_chunk(stream_resp)
if final_chunk is None:
final_chunk = chunk
else:
final_chunk += chunk
if run_manager:
run_manager.on_llm_new_token(
chunk.text,
verbose=verbose,
)
if final_chunk is None:
raise ValueError("No data received from Ollama stream.")
return final_chunk
async def _achat_stream_with_aggregation(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
verbose: bool = False,
**kwargs: Any,
) -> ChatGenerationChunk:
final_chunk: Optional[ChatGenerationChunk] = None
async for stream_resp in self._acreate_chat_stream(messages, stop, **kwargs):
if stream_resp:
chunk = _chat_stream_response_to_chat_generation_chunk(stream_resp)
if final_chunk is None:
final_chunk = chunk
else:
final_chunk += chunk
if run_manager:
await run_manager.on_llm_new_token(
chunk.text,
verbose=verbose,
)
if final_chunk is None:
raise ValueError("No data received from Ollama stream.")
return final_chunk
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Call out to Ollama's generate endpoint.
Args:
messages: The list of base messages to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
Chat generations from the model
Example:
.. code-block:: python
response = ollama([
HumanMessage(content="Tell me about the history of AI")
])
"""
final_chunk = self._chat_stream_with_aggregation(
messages,
stop=stop,
run_manager=run_manager,
verbose=self.verbose,
**kwargs,
)
chat_generation = ChatGeneration(
message=AIMessage(content=final_chunk.text),
generation_info=final_chunk.generation_info,
)
return ChatResult(generations=[chat_generation])
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Call out to Ollama's generate endpoint.
Args:
messages: The list of base messages to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
Chat generations from the model
Example:
.. code-block:: python
response = ollama([
HumanMessage(content="Tell me about the history of AI")
])
"""
final_chunk = await self._achat_stream_with_aggregation(
messages,
stop=stop,
run_manager=run_manager,
verbose=self.verbose,
**kwargs,
)
chat_generation = ChatGeneration(
message=AIMessage(content=final_chunk.text),
generation_info=final_chunk.generation_info,
)
return ChatResult(generations=[chat_generation])
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
try:
for stream_resp in self._create_chat_stream(messages, stop, **kwargs):
if stream_resp:
chunk = _chat_stream_response_to_chat_generation_chunk(stream_resp)
yield chunk
if run_manager:
run_manager.on_llm_new_token(
chunk.text,
verbose=self.verbose,
)
except OllamaEndpointNotFoundError:
yield from self._legacy_stream(messages, stop, **kwargs)
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
try:
async for stream_resp in self._acreate_chat_stream(
messages, stop, **kwargs
):
if stream_resp:
chunk = _chat_stream_response_to_chat_generation_chunk(stream_resp)
yield chunk
if run_manager:
await run_manager.on_llm_new_token(
chunk.text,
verbose=self.verbose,
)
except OllamaEndpointNotFoundError:
async for chunk in self._legacy_astream(messages, stop, **kwargs):
yield chunk
@deprecated("0.0.3", alternative="_stream")
def _legacy_stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
prompt = self._format_messages_as_text(messages)
for stream_resp in self._create_generate_stream(prompt, stop, **kwargs):
if stream_resp:
chunk = _stream_response_to_chat_generation_chunk(stream_resp)
yield chunk
if run_manager:
run_manager.on_llm_new_token(
chunk.text,
verbose=self.verbose,
)
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~model_laboratory.py | """Experiment with different models."""
from __future__ import annotations
from typing import List, Optional, Sequence
from libs.core.langchain_core.language_models.llms import BaseLLM
from libs.core.langchain_core.prompts.prompt import PromptTemplate
from libs.core.langchain_core.utils.input import get_color_mapping, print_text
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
class ModelLaboratory:
"""Experiment with different models."""
def __init__(self, chains: Sequence[Chain], names: Optional[List[str]] = None):
"""Initialize with chains to experiment with.
Args:
chains: list of chains to experiment with.
"""
for chain in chains:
if not isinstance(chain, Chain):
raise ValueError(
"ModelLaboratory should now be initialized with Chains. "
"If you want to initialize with LLMs, use the `from_llms` method "
"instead (`ModelLaboratory.from_llms(...)`)"
)
if len(chain.input_keys) != 1:
raise ValueError(
"Currently only support chains with one input variable, "
f"got {chain.input_keys}"
)
if len(chain.output_keys) != 1:
raise ValueError(
"Currently only support chains with one output variable, "
f"got {chain.output_keys}"
)
if names is not None:
if len(names) != len(chains):
raise ValueError("Length of chains does not match length of names.")
self.chains = chains
chain_range = [str(i) for i in range(len(self.chains))]
self.chain_colors = get_color_mapping(chain_range)
self.names = names
@classmethod
def from_llms(
cls, llms: List[BaseLLM], prompt: Optional[PromptTemplate] = None
) -> ModelLaboratory:
"""Initialize with LLMs to experiment with and optional prompt.
Args:
llms: list of LLMs to experiment with
prompt: Optional prompt to use to prompt the LLMs. Defaults to None.
If a prompt was provided, it should only have one input variable.
"""
if prompt is None:
prompt = PromptTemplate(input_variables=["_input"], template="{_input}")
chains = [LLMChain(llm=llm, prompt=prompt) for llm in llms]
names = [str(llm) for llm in llms]
return cls(chains, names=names)
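    # A minimal usage sketch, assuming llm_a and llm_b are hypothetical BaseLLM
    # instances configured elsewhere:
    #
    #     lab = ModelLaboratory.from_llms([llm_a, llm_b])
    #     lab.compare("What color is a ripe banana?")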
def compare(self, text: str) -> None:
"""Compare model outputs on an input text.
        If a prompt was provided when starting the laboratory, then this text will be
fed into the prompt. If no prompt was provided, then the input text is the
entire prompt.
Args:
text: input text to run all models on.
"""
print(f"\033[1mInput:\033[0m\n{text}\n")
for i, chain in enumerate(self.chains):
if self.names is not None:
name = self.names[i]
else:
name = str(chain)
print_text(name, end="\n")
output = chain.run(text)
print_text(output, color=self.chain_colors[str(i)], end="\n\n")
| [
"_input",
"{_input}"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~embeddings~mlflow_gateway.py | from __future__ import annotations
import warnings
from typing import Any, Iterator, List, Optional
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.pydantic_v1 import BaseModel
def _chunk(texts: List[str], size: int) -> Iterator[List[str]]:
for i in range(0, len(texts), size):
yield texts[i : i + size]
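# For example, list(_chunk(["a", "b", "c"], 2)) yields [["a", "b"], ["c"]].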
class MlflowAIGatewayEmbeddings(Embeddings, BaseModel):
"""
    Wrapper around embedding models served by the MLflow AI Gateway.
To use, you should have the ``mlflow[gateway]`` python package installed.
For more information, see https://mlflow.org/docs/latest/gateway/index.html.
Example:
.. code-block:: python
from langchain_community.embeddings import MlflowAIGatewayEmbeddings
embeddings = MlflowAIGatewayEmbeddings(
gateway_uri="<your-mlflow-ai-gateway-uri>",
route="<your-mlflow-ai-gateway-embeddings-route>"
)
"""
route: str
"""The route to use for the MLflow AI Gateway API."""
gateway_uri: Optional[str] = None
"""The URI for the MLflow AI Gateway API."""
def __init__(self, **kwargs: Any):
warnings.warn(
"`MlflowAIGatewayEmbeddings` is deprecated. Use `MlflowEmbeddings` or "
"`DatabricksEmbeddings` instead.",
DeprecationWarning,
)
try:
import mlflow.gateway
except ImportError as e:
raise ImportError(
"Could not import `mlflow.gateway` module. "
"Please install it with `pip install mlflow[gateway]`."
) from e
super().__init__(**kwargs)
if self.gateway_uri:
mlflow.gateway.set_gateway_uri(self.gateway_uri)
def _query(self, texts: List[str]) -> List[List[float]]:
try:
import mlflow.gateway
except ImportError as e:
raise ImportError(
"Could not import `mlflow.gateway` module. "
"Please install it with `pip install mlflow[gateway]`."
) from e
embeddings = []
for txt in _chunk(texts, 20):
resp = mlflow.gateway.query(self.route, data={"text": txt})
embeddings.append(resp["embeddings"])
return embeddings
def embed_documents(self, texts: List[str]) -> List[List[float]]:
return self._query(texts)
def embed_query(self, text: str) -> List[float]:
return self._query([text])[0]
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~document_loaders~test_dataframe.py | import pandas as pd
import pytest
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders import DataFrameLoader
@pytest.fixture
def sample_data_frame() -> pd.DataFrame:
data = {
"text": ["Hello", "World"],
"author": ["Alice", "Bob"],
"date": ["2022-01-01", "2022-01-02"],
}
return pd.DataFrame(data)
def test_load_returns_list_of_documents(sample_data_frame: pd.DataFrame) -> None:
loader = DataFrameLoader(sample_data_frame)
docs = loader.load()
assert isinstance(docs, list)
assert all(isinstance(doc, Document) for doc in docs)
assert len(docs) == 2
def test_load_converts_dataframe_columns_to_document_metadata(
sample_data_frame: pd.DataFrame,
) -> None:
loader = DataFrameLoader(sample_data_frame)
docs = loader.load()
for i, doc in enumerate(docs):
assert doc.metadata["author"] == sample_data_frame.loc[i, "author"]
assert doc.metadata["date"] == sample_data_frame.loc[i, "date"]
def test_load_uses_page_content_column_to_create_document_text(
sample_data_frame: pd.DataFrame,
) -> None:
sample_data_frame = sample_data_frame.rename(columns={"text": "dummy_test_column"})
    loader = DataFrameLoader(
        sample_data_frame, page_content_column="dummy_test_column"
    )
docs = loader.load()
assert docs[0].page_content == "Hello"
assert docs[1].page_content == "World"
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~chat_loaders~whatsapp.py | import logging
import os
import re
import zipfile
from typing import Iterator, List, Union
from libs.core.langchain_core.chat_sessions import ChatSession
from libs.core.langchain_core.messages import AIMessage, HumanMessage
from langchain_community.chat_loaders.base import BaseChatLoader
logger = logging.getLogger(__name__)
class WhatsAppChatLoader(BaseChatLoader):
"""Load `WhatsApp` conversations from a dump zip file or directory."""
def __init__(self, path: str):
"""Initialize the WhatsAppChatLoader.
Args:
path (str): Path to the exported WhatsApp chat
zip directory, folder, or file.
To generate the dump, open the chat, click the three dots in the top
right corner, and select "More". Then select "Export chat" and
choose "Without media".
"""
self.path = path
ignore_lines = [
"This message was deleted",
"<Media omitted>",
"image omitted",
"Messages and calls are end-to-end encrypted. No one outside of this chat,"
" not even WhatsApp, can read or listen to them.",
]
self._ignore_lines = re.compile(
r"(" + "|".join([r"\u200E*" + line for line in ignore_lines]) + r")",
flags=re.IGNORECASE,
)
self._message_line_regex = re.compile(
r"\u200E*\[?(\d{1,2}/\d{1,2}/\d{2,4}, \d{1,2}:\d{2}:\d{2} (?:AM|PM))\]?[ \u200E]*([^:]+): (.+)", # noqa
flags=re.IGNORECASE,
)
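        # A line this regex is intended to match looks roughly like the following
        # (format varies by locale and export version; this sample is an assumption):
        #   [1/23/23, 3:19:17 PM] Alice: Hey, are we still on for tomorrow?
        # capturing the timestamp, the sender, and the message text respectively.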
def _load_single_chat_session(self, file_path: str) -> ChatSession:
"""Load a single chat session from a file.
Args:
file_path (str): Path to the chat file.
Returns:
ChatSession: The loaded chat session.
"""
with open(file_path, "r", encoding="utf-8") as file:
txt = file.read()
# Split messages by newlines, but keep multi-line messages grouped
chat_lines: List[str] = []
current_message = ""
for line in txt.split("\n"):
if self._message_line_regex.match(line):
if current_message:
chat_lines.append(current_message)
current_message = line
else:
current_message += " " + line.strip()
if current_message:
chat_lines.append(current_message)
results: List[Union[HumanMessage, AIMessage]] = []
for line in chat_lines:
result = self._message_line_regex.match(line.strip())
if result:
timestamp, sender, text = result.groups()
if not self._ignore_lines.match(text.strip()):
results.append(
HumanMessage(
role=sender,
content=text,
additional_kwargs={
"sender": sender,
"events": [{"message_time": timestamp}],
},
)
)
else:
logger.debug(f"Could not parse line: {line}")
return ChatSession(messages=results)
def _iterate_files(self, path: str) -> Iterator[str]:
"""Iterate over the files in a directory or zip file.
Args:
path (str): Path to the directory or zip file.
Yields:
str: The path to each file.
"""
if os.path.isfile(path):
yield path
elif os.path.isdir(path):
for root, _, files in os.walk(path):
for file in files:
if file.endswith(".txt"):
yield os.path.join(root, file)
elif zipfile.is_zipfile(path):
with zipfile.ZipFile(path) as zip_file:
for file in zip_file.namelist():
if file.endswith(".txt"):
yield zip_file.extract(file)
def lazy_load(self) -> Iterator[ChatSession]:
"""Lazy load the messages from the chat file and yield
them as chat sessions.
Yields:
Iterator[ChatSession]: The loaded chat sessions.
"""
yield self._load_single_chat_session(self.path)
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~spreedly.py | import json
import urllib.request
from typing import List
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.utils import stringify_dict
from langchain_community.document_loaders.base import BaseLoader
SPREEDLY_ENDPOINTS = {
"gateways_options": "https://core.spreedly.com/v1/gateways_options.json",
"gateways": "https://core.spreedly.com/v1/gateways.json",
"receivers_options": "https://core.spreedly.com/v1/receivers_options.json",
"receivers": "https://core.spreedly.com/v1/receivers.json",
"payment_methods": "https://core.spreedly.com/v1/payment_methods.json",
"certificates": "https://core.spreedly.com/v1/certificates.json",
"transactions": "https://core.spreedly.com/v1/transactions.json",
"environments": "https://core.spreedly.com/v1/environments.json",
}
class SpreedlyLoader(BaseLoader):
"""Load from `Spreedly` API."""
def __init__(self, access_token: str, resource: str) -> None:
"""Initialize with an access token and a resource.
Args:
access_token: The access token.
resource: The resource.
"""
self.access_token = access_token
self.resource = resource
self.headers = {
"Authorization": f"Bearer {self.access_token}",
"Accept": "application/json",
}
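    # A minimal usage sketch (the access token below is a placeholder, and
    # "gateways" is one of the keys in SPREEDLY_ENDPOINTS above):
    #
    #     loader = SpreedlyLoader(access_token="<your-token>", resource="gateways")
    #     docs = loader.load()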
def _make_request(self, url: str) -> List[Document]:
request = urllib.request.Request(url, headers=self.headers)
with urllib.request.urlopen(request) as response:
json_data = json.loads(response.read().decode())
text = stringify_dict(json_data)
metadata = {"source": url}
return [Document(page_content=text, metadata=metadata)]
def _get_resource(self) -> List[Document]:
endpoint = SPREEDLY_ENDPOINTS.get(self.resource)
if endpoint is None:
return []
return self._make_request(endpoint)
def load(self) -> List[Document]:
return self._get_resource()
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~retrievers~pubmed.py | from typing import List
from libs.core.langchain_core.callbacks import CallbackManagerForRetrieverRun
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.retrievers import BaseRetriever
from langchain_community.utilities.pubmed import PubMedAPIWrapper
class PubMedRetriever(BaseRetriever, PubMedAPIWrapper):
"""`PubMed API` retriever.
    It wraps PubMedAPIWrapper.load_docs() behind get_relevant_documents().
It uses all PubMedAPIWrapper arguments without any change.
"""
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
return self.load_docs(query=query)
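    # A minimal usage sketch (the query text is illustrative):
    #
    #     retriever = PubMedRetriever()
    #     docs = retriever.get_relevant_documents("mRNA vaccine efficacy")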
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~rwkv.py | """RWKV models.
Based on https://github.com/saharNooby/rwkv.cpp/blob/master/rwkv/chat_with_bot.py
https://github.com/BlinkDL/ChatRWKV/blob/main/v2/chat.py
"""
from typing import Any, Dict, List, Mapping, Optional, Set
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra, root_validator
from langchain_community.llms.utils import enforce_stop_tokens
class RWKV(LLM, BaseModel):
"""RWKV language models.
To use, you should have the ``rwkv`` python package installed, the
pre-trained model file, and the model's config information.
Example:
.. code-block:: python
from langchain_community.llms import RWKV
model = RWKV(model="./models/rwkv-3b-fp16.bin", strategy="cpu fp32")
# Simplest invocation
response = model("Once upon a time, ")
"""
model: str
"""Path to the pre-trained RWKV model file."""
tokens_path: str
"""Path to the RWKV tokens file."""
strategy: str = "cpu fp32"
"""Token context window."""
rwkv_verbose: bool = True
"""Print debug information."""
temperature: float = 1.0
"""The temperature to use for sampling."""
top_p: float = 0.5
"""The top-p value to use for sampling."""
penalty_alpha_frequency: float = 0.4
"""Positive values penalize new tokens based on their existing frequency
in the text so far, decreasing the model's likelihood to repeat the same
line verbatim.."""
penalty_alpha_presence: float = 0.4
"""Positive values penalize new tokens based on whether they appear
in the text so far, increasing the model's likelihood to talk about
new topics.."""
CHUNK_LEN: int = 256
"""Batch size for prompt processing."""
max_tokens_per_generation: int = 256
"""Maximum number of tokens to generate."""
client: Any = None #: :meta private:
tokenizer: Any = None #: :meta private:
pipeline: Any = None #: :meta private:
model_tokens: Any = None #: :meta private:
model_state: Any = None #: :meta private:
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {
"verbose": self.verbose,
"top_p": self.top_p,
"temperature": self.temperature,
"penalty_alpha_frequency": self.penalty_alpha_frequency,
"penalty_alpha_presence": self.penalty_alpha_presence,
"CHUNK_LEN": self.CHUNK_LEN,
"max_tokens_per_generation": self.max_tokens_per_generation,
}
@staticmethod
def _rwkv_param_names() -> Set[str]:
"""Get the identifying parameters."""
return {
"verbose",
}
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in the environment."""
try:
import tokenizers
except ImportError:
raise ImportError(
"Could not import tokenizers python package. "
"Please install it with `pip install tokenizers`."
)
try:
from rwkv.model import RWKV as RWKVMODEL
from rwkv.utils import PIPELINE
values["tokenizer"] = tokenizers.Tokenizer.from_file(values["tokens_path"])
rwkv_keys = cls._rwkv_param_names()
model_kwargs = {k: v for k, v in values.items() if k in rwkv_keys}
model_kwargs["verbose"] = values["rwkv_verbose"]
values["client"] = RWKVMODEL(
values["model"], strategy=values["strategy"], **model_kwargs
)
values["pipeline"] = PIPELINE(values["client"], values["tokens_path"])
except ImportError:
raise ImportError(
"Could not import rwkv python package. "
"Please install it with `pip install rwkv`."
)
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model,
**self._default_params,
**{k: v for k, v in self.__dict__.items() if k in RWKV._rwkv_param_names()},
}
@property
def _llm_type(self) -> str:
"""Return the type of llm."""
return "rwkv"
def run_rnn(self, _tokens: List[str], newline_adj: int = 0) -> Any:
AVOID_REPEAT_TOKENS = []
AVOID_REPEAT = ",:?!"
for i in AVOID_REPEAT:
dd = self.pipeline.encode(i)
assert len(dd) == 1
AVOID_REPEAT_TOKENS += dd
tokens = [int(x) for x in _tokens]
self.model_tokens += tokens
out: Any = None
while len(tokens) > 0:
out, self.model_state = self.client.forward(
tokens[: self.CHUNK_LEN], self.model_state
)
tokens = tokens[self.CHUNK_LEN :]
END_OF_LINE = 187
out[END_OF_LINE] += newline_adj # adjust \n probability
if self.model_tokens[-1] in AVOID_REPEAT_TOKENS:
out[self.model_tokens[-1]] = -999999999
return out
def rwkv_generate(self, prompt: str) -> str:
self.model_state = None
self.model_tokens = []
logits = self.run_rnn(self.tokenizer.encode(prompt).ids)
begin = len(self.model_tokens)
out_last = begin
occurrence: Dict = {}
decoded = ""
for i in range(self.max_tokens_per_generation):
for n in occurrence:
logits[n] -= (
self.penalty_alpha_presence
+ occurrence[n] * self.penalty_alpha_frequency
)
token = self.pipeline.sample_logits(
logits, temperature=self.temperature, top_p=self.top_p
)
END_OF_TEXT = 0
if token == END_OF_TEXT:
break
if token not in occurrence:
occurrence[token] = 1
else:
occurrence[token] += 1
logits = self.run_rnn([token])
xxx = self.tokenizer.decode(self.model_tokens[out_last:])
if "\ufffd" not in xxx: # avoid utf-8 display issues
decoded += xxx
out_last = begin + i + 1
if i >= self.max_tokens_per_generation - 100:
break
return decoded
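    # Note on the sampling loop above: before each token is sampled, its logit is
    # reduced by penalty_alpha_presence + count * penalty_alpha_frequency for every
    # token already generated. With the defaults (0.4 and 0.4), a token emitted
    # twice so far is penalized by 0.4 + 2 * 0.4 = 1.2 on the next step.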
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
r"""RWKV generation
Args:
prompt: The prompt to pass into the model.
stop: A list of strings to stop generation when encountered.
Returns:
The string generated by the model.
Example:
.. code-block:: python
prompt = "Once upon a time, "
response = model(prompt, n_predict=55)
"""
text = self.rwkv_generate(prompt)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
| [] |
2024-01-10 | mth93/langchain | libs~langchain~tests~integration_tests~memory~test_neo4j.py | import json
from libs.core.langchain_core.messages import message_to_dict
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import Neo4jChatMessageHistory
def test_memory_with_message_store() -> None:
"""Test the memory with a message store."""
    # setup Neo4j as a message store
message_history = Neo4jChatMessageHistory(session_id="test-session")
memory = ConversationBufferMemory(
memory_key="baz", chat_memory=message_history, return_messages=True
)
# add some messages
memory.chat_memory.add_ai_message("This is me, the AI")
memory.chat_memory.add_user_message("This is me, the human")
# get the message history from the memory store and turn it into a json
messages = memory.chat_memory.messages
messages_json = json.dumps([message_to_dict(msg) for msg in messages])
assert "This is me, the AI" in messages_json
assert "This is me, the human" in messages_json
    # remove the record from Neo4j, so the next test run won't pick it up
memory.chat_memory.clear()
assert memory.chat_memory.messages == []
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~parsers~generic.py | """Code for generic / auxiliary parsers.
This module contains some logic to help assemble more sophisticated parsers.
"""
from typing import Iterator, Mapping, Optional
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseBlobParser
from langchain_community.document_loaders.blob_loaders.schema import Blob
class MimeTypeBasedParser(BaseBlobParser):
"""Parser that uses `mime`-types to parse a blob.
This parser is useful for simple pipelines where the mime-type is sufficient
to determine how to parse a blob.
To use, configure handlers based on mime-types and pass them to the initializer.
Example:
.. code-block:: python
from langchain_community.document_loaders.parsers.generic import MimeTypeBasedParser
parser = MimeTypeBasedParser(
handlers={
"application/pdf": ...,
},
fallback_parser=...,
)
""" # noqa: E501
def __init__(
self,
handlers: Mapping[str, BaseBlobParser],
*,
fallback_parser: Optional[BaseBlobParser] = None,
) -> None:
"""Define a parser that uses mime-types to determine how to parse a blob.
Args:
handlers: A mapping from mime-types to functions that take a blob, parse it
and return a document.
fallback_parser: A fallback_parser parser to use if the mime-type is not
found in the handlers. If provided, this parser will be
used to parse blobs with all mime-types not found in
the handlers.
If not provided, a ValueError will be raised if the
mime-type is not found in the handlers.
"""
self.handlers = handlers
self.fallback_parser = fallback_parser
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Load documents from a blob."""
mimetype = blob.mimetype
if mimetype is None:
raise ValueError(f"{blob} does not have a mimetype.")
if mimetype in self.handlers:
handler = self.handlers[mimetype]
yield from handler.lazy_parse(blob)
else:
if self.fallback_parser is not None:
yield from self.fallback_parser.lazy_parse(blob)
else:
raise ValueError(f"Unsupported mime type: {mimetype}")
| [] |
2024-01-10 | mth93/langchain | libs~experimental~langchain_experimental~llms~ollama_functions.py | import json
from typing import Any, Dict, List, Optional
from langchain.chat_models.ollama import ChatOllama
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models import BaseChatModel
from libs.core.langchain_core.messages import AIMessage, BaseMessage
from libs.core.langchain_core.outputs import ChatGeneration, ChatResult
from libs.core.langchain_core.prompts import SystemMessagePromptTemplate
from langchain_experimental.pydantic_v1 import root_validator
DEFAULT_SYSTEM_TEMPLATE = """You have access to the following tools:
{tools}
You must always select one of the above tools and respond with only a JSON object matching the following schema:
{{
"tool": <name of the selected tool>,
"tool_input": <parameters for the selected tool, matching the tool's JSON schema>
}}
""" # noqa: E501
DEFAULT_RESPONSE_FUNCTION = {
"name": "__conversational_response",
"description": (
"Respond conversationally if no other tools should be called for a given query."
),
"parameters": {
"type": "object",
"properties": {
"response": {
"type": "string",
"description": "Conversational response to the user.",
},
},
"required": ["response"],
},
}
class OllamaFunctions(BaseChatModel):
llm: ChatOllama
tool_system_prompt_template: str
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
values["llm"] = values.get("llm") or ChatOllama(**values, format="json")
values["tool_system_prompt_template"] = (
values.get("tool_system_prompt_template") or DEFAULT_SYSTEM_TEMPLATE
)
return values
@property
def model(self) -> BaseChatModel:
"""For backwards compatibility."""
return self.llm
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
functions = kwargs.get("functions", [])
if "function_call" in kwargs:
functions = [
fn for fn in functions if fn["name"] == kwargs["function_call"]["name"]
]
if not functions:
raise ValueError(
'If "function_call" is specified, you must also pass a matching \
function in "functions".'
)
del kwargs["function_call"]
elif not functions:
functions.append(DEFAULT_RESPONSE_FUNCTION)
system_message_prompt_template = SystemMessagePromptTemplate.from_template(
self.tool_system_prompt_template
)
system_message = system_message_prompt_template.format(
tools=json.dumps(functions, indent=2)
)
if "functions" in kwargs:
del kwargs["functions"]
response_message = self.llm.predict_messages(
[system_message] + messages, stop=stop, callbacks=run_manager, **kwargs
)
chat_generation_content = response_message.content
if not isinstance(chat_generation_content, str):
raise ValueError("OllamaFunctions does not support non-string output.")
try:
parsed_chat_result = json.loads(chat_generation_content)
except json.JSONDecodeError:
raise ValueError(
f'"{self.llm.model}" did not respond with valid JSON. Please try again.'
)
called_tool_name = parsed_chat_result["tool"]
called_tool_arguments = parsed_chat_result["tool_input"]
called_tool = next(
(fn for fn in functions if fn["name"] == called_tool_name), None
)
if called_tool is None:
raise ValueError(
f"Failed to parse a function call from {self.llm.model} \
output: {chat_generation_content}"
)
if called_tool["name"] == DEFAULT_RESPONSE_FUNCTION["name"]:
return ChatResult(
generations=[
ChatGeneration(
message=AIMessage(
content=called_tool_arguments["response"],
)
)
]
)
response_message_with_functions = AIMessage(
content="",
additional_kwargs={
"function_call": {
"name": called_tool_name,
"arguments": json.dumps(called_tool_arguments)
if called_tool_arguments
else "",
},
},
)
return ChatResult(
generations=[ChatGeneration(message=response_message_with_functions)]
)
@property
def _llm_type(self) -> str:
return "ollama_functions"
| [
"You have access to the following tools:\n\n{tools}\n\nYou must always select one of the above tools and respond with only a JSON object matching the following schema:\n\n{{\n \"tool\": <name of the selected tool>,\n \"tool_input\": <parameters for the selected tool, matching the tool's JSON schema>\n}}\n",
"response"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~vectorstores~momento_vector_index.py | import logging
from typing import (
TYPE_CHECKING,
Any,
Iterable,
List,
Optional,
Tuple,
Type,
TypeVar,
cast,
)
from uuid import uuid4
import numpy as np
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.utils import get_from_env
from libs.core.langchain_core.vectorstores import VectorStore
from langchain_community.vectorstores.utils import (
DistanceStrategy,
maximal_marginal_relevance,
)
VST = TypeVar("VST", bound="VectorStore")
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from momento import PreviewVectorIndexClient
class MomentoVectorIndex(VectorStore):
"""`Momento Vector Index` (MVI) vector store.
Momento Vector Index is a serverless vector index that can be used to store and
search vectors. To use you should have the ``momento`` python package installed.
Example:
.. code-block:: python
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import MomentoVectorIndex
from momento import (
CredentialProvider,
PreviewVectorIndexClient,
VectorIndexConfigurations,
)
vectorstore = MomentoVectorIndex(
embedding=OpenAIEmbeddings(),
client=PreviewVectorIndexClient(
VectorIndexConfigurations.Default.latest(),
credential_provider=CredentialProvider.from_environment_variable(
"MOMENTO_API_KEY"
),
),
index_name="my-index",
)
"""
def __init__(
self,
embedding: Embeddings,
client: "PreviewVectorIndexClient",
index_name: str = "default",
distance_strategy: DistanceStrategy = DistanceStrategy.COSINE,
text_field: str = "text",
ensure_index_exists: bool = True,
**kwargs: Any,
):
"""Initialize a Vector Store backed by Momento Vector Index.
Args:
embedding (Embeddings): The embedding function to use.
configuration (VectorIndexConfiguration): The configuration to initialize
the Vector Index with.
credential_provider (CredentialProvider): The credential provider to
authenticate the Vector Index with.
index_name (str, optional): The name of the index to store the documents in.
Defaults to "default".
distance_strategy (DistanceStrategy, optional): The distance strategy to
use. If you select DistanceStrategy.EUCLIDEAN_DISTANCE, Momento uses
the squared Euclidean distance. Defaults to DistanceStrategy.COSINE.
text_field (str, optional): The name of the metadata field to store the
original text in. Defaults to "text".
ensure_index_exists (bool, optional): Whether to ensure that the index
exists before adding documents to it. Defaults to True.
"""
try:
from momento import PreviewVectorIndexClient
except ImportError:
raise ImportError(
"Could not import momento python package. "
"Please install it with `pip install momento`."
)
self._client: PreviewVectorIndexClient = client
self._embedding = embedding
self.index_name = index_name
self.__validate_distance_strategy(distance_strategy)
self.distance_strategy = distance_strategy
self.text_field = text_field
self._ensure_index_exists = ensure_index_exists
@staticmethod
def __validate_distance_strategy(distance_strategy: DistanceStrategy) -> None:
if distance_strategy not in [
DistanceStrategy.COSINE,
DistanceStrategy.MAX_INNER_PRODUCT,
            DistanceStrategy.EUCLIDEAN_DISTANCE,
]:
raise ValueError(f"Distance strategy {distance_strategy} not implemented.")
@property
def embeddings(self) -> Embeddings:
return self._embedding
def _create_index_if_not_exists(self, num_dimensions: int) -> bool:
"""Create index if it does not exist."""
from momento.requests.vector_index import SimilarityMetric
from momento.responses.vector_index import CreateIndex
similarity_metric = None
if self.distance_strategy == DistanceStrategy.COSINE:
similarity_metric = SimilarityMetric.COSINE_SIMILARITY
elif self.distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
similarity_metric = SimilarityMetric.INNER_PRODUCT
elif self.distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE:
similarity_metric = SimilarityMetric.EUCLIDEAN_SIMILARITY
else:
logger.error(f"Distance strategy {self.distance_strategy} not implemented.")
raise ValueError(
f"Distance strategy {self.distance_strategy} not implemented."
)
response = self._client.create_index(
self.index_name, num_dimensions, similarity_metric
)
if isinstance(response, CreateIndex.Success):
return True
elif isinstance(response, CreateIndex.IndexAlreadyExists):
return False
elif isinstance(response, CreateIndex.Error):
logger.error(f"Error creating index: {response.inner_exception}")
raise response.inner_exception
else:
logger.error(f"Unexpected response: {response}")
raise Exception(f"Unexpected response: {response}")
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts (Iterable[str]): Iterable of strings to add to the vectorstore.
metadatas (Optional[List[dict]]): Optional list of metadatas associated with
the texts.
kwargs (Any): Other optional parameters. Specifically:
- ids (List[str], optional): List of ids to use for the texts.
Defaults to None, in which case uuids are generated.
Returns:
List[str]: List of ids from adding the texts into the vectorstore.
"""
from momento.requests.vector_index import Item
from momento.responses.vector_index import UpsertItemBatch
texts = list(texts)
if len(texts) == 0:
return []
if metadatas is not None:
for metadata, text in zip(metadatas, texts):
metadata[self.text_field] = text
else:
metadatas = [{self.text_field: text} for text in texts]
try:
embeddings = self._embedding.embed_documents(texts)
except NotImplementedError:
embeddings = [self._embedding.embed_query(x) for x in texts]
# Create index if it does not exist.
# We assume that if it does exist, then it was created with the desired number
# of dimensions and similarity metric.
if self._ensure_index_exists:
self._create_index_if_not_exists(len(embeddings[0]))
if "ids" in kwargs:
ids = kwargs["ids"]
if len(ids) != len(embeddings):
raise ValueError("Number of ids must match number of texts")
else:
ids = [str(uuid4()) for _ in range(len(embeddings))]
batch_size = 128
for i in range(0, len(embeddings), batch_size):
start = i
end = min(i + batch_size, len(embeddings))
items = [
Item(id=id, vector=vector, metadata=metadata)
for id, vector, metadata in zip(
ids[start:end],
embeddings[start:end],
metadatas[start:end],
)
]
response = self._client.upsert_item_batch(self.index_name, items)
if isinstance(response, UpsertItemBatch.Success):
pass
elif isinstance(response, UpsertItemBatch.Error):
raise response.inner_exception
else:
raise Exception(f"Unexpected response: {response}")
return ids
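    # Items are upserted in batches of 128; for example, 300 texts result in three
    # upsert calls of sizes 128, 128, and 44.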
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]:
"""Delete by vector ID.
Args:
ids (List[str]): List of ids to delete.
kwargs (Any): Other optional parameters (unused)
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
"""
from momento.responses.vector_index import DeleteItemBatch
if ids is None:
return True
response = self._client.delete_item_batch(self.index_name, ids)
return isinstance(response, DeleteItemBatch.Success)
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Search for similar documents to the query string.
Args:
query (str): The query string to search for.
k (int, optional): The number of results to return. Defaults to 4.
Returns:
List[Document]: A list of documents that are similar to the query.
"""
res = self.similarity_search_with_score(query=query, k=k, **kwargs)
return [doc for doc, _ in res]
def similarity_search_with_score(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Search for similar documents to the query string.
Args:
query (str): The query string to search for.
k (int, optional): The number of results to return. Defaults to 4.
kwargs (Any): Vector Store specific search parameters. The following are
forwarded to the Momento Vector Index:
- top_k (int, optional): The number of results to return.
Returns:
List[Tuple[Document, float]]: A list of tuples of the form
(Document, score).
"""
embedding = self._embedding.embed_query(query)
results = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, **kwargs
)
return results
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Search for similar documents to the query vector.
Args:
embedding (List[float]): The query vector to search for.
k (int, optional): The number of results to return. Defaults to 4.
kwargs (Any): Vector Store specific search parameters. The following are
forwarded to the Momento Vector Index:
- top_k (int, optional): The number of results to return.
Returns:
List[Tuple[Document, float]]: A list of tuples of the form
(Document, score).
"""
from momento.requests.vector_index import ALL_METADATA
from momento.responses.vector_index import Search
if "top_k" in kwargs:
            k = kwargs["top_k"]
filter_expression = kwargs.get("filter_expression", None)
response = self._client.search(
self.index_name,
embedding,
top_k=k,
metadata_fields=ALL_METADATA,
filter_expression=filter_expression,
)
if not isinstance(response, Search.Success):
return []
results = []
for hit in response.hits:
text = cast(str, hit.metadata.pop(self.text_field))
doc = Document(page_content=text, metadata=hit.metadata)
pair = (doc, hit.score)
results.append(pair)
return results
def similarity_search_by_vector(
self, embedding: List[float], k: int = 4, **kwargs: Any
) -> List[Document]:
"""Search for similar documents to the query vector.
Args:
embedding (List[float]): The query vector to search for.
k (int, optional): The number of results to return. Defaults to 4.
Returns:
List[Document]: A list of documents that are similar to the query.
"""
results = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, **kwargs
)
return [doc for doc, _ in results]
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
from momento.requests.vector_index import ALL_METADATA
from momento.responses.vector_index import SearchAndFetchVectors
filter_expression = kwargs.get("filter_expression", None)
response = self._client.search_and_fetch_vectors(
self.index_name,
embedding,
top_k=fetch_k,
metadata_fields=ALL_METADATA,
filter_expression=filter_expression,
)
if isinstance(response, SearchAndFetchVectors.Success):
pass
elif isinstance(response, SearchAndFetchVectors.Error):
logger.error(f"Error searching and fetching vectors: {response}")
return []
else:
logger.error(f"Unexpected response: {response}")
raise Exception(f"Unexpected response: {response}")
mmr_selected = maximal_marginal_relevance(
query_embedding=np.array([embedding], dtype=np.float32),
embedding_list=[hit.vector for hit in response.hits],
lambda_mult=lambda_mult,
k=k,
)
selected = [response.hits[i].metadata for i in mmr_selected]
return [
Document(page_content=metadata.pop(self.text_field, ""), metadata=metadata) # type: ignore # noqa: E501
for metadata in selected
]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
embedding = self._embedding.embed_query(query)
return self.max_marginal_relevance_search_by_vector(
embedding, k, fetch_k, lambda_mult, **kwargs
)
@classmethod
def from_texts(
cls: Type[VST],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> VST:
"""Return the Vector Store initialized from texts and embeddings.
Args:
cls (Type[VST]): The Vector Store class to use to initialize
the Vector Store.
texts (List[str]): The texts to initialize the Vector Store with.
embedding (Embeddings): The embedding function to use.
metadatas (Optional[List[dict]], optional): The metadata associated with
the texts. Defaults to None.
kwargs (Any): Vector Store specific parameters. The following are forwarded
to the Vector Store constructor and required:
- index_name (str, optional): The name of the index to store the documents
in. Defaults to "default".
- text_field (str, optional): The name of the metadata field to store the
original text in. Defaults to "text".
- distance_strategy (DistanceStrategy, optional): The distance strategy to
use. Defaults to DistanceStrategy.COSINE. If you select
DistanceStrategy.EUCLIDEAN_DISTANCE, Momento uses the squared
Euclidean distance.
- ensure_index_exists (bool, optional): Whether to ensure that the index
exists before adding documents to it. Defaults to True.
            Additionally, you can pass in either a client or an API key:
- client (PreviewVectorIndexClient): The Momento Vector Index client to use.
            - api_key (Optional[str]): The API key to use to authenticate with the
                Vector Index. Defaults to None. If None, the API key is read from
                the environment variable `MOMENTO_API_KEY`.
Returns:
VST: Momento Vector Index vector store initialized from texts and
embeddings.
"""
from momento import (
CredentialProvider,
PreviewVectorIndexClient,
VectorIndexConfigurations,
)
if "client" in kwargs:
client = kwargs.pop("client")
else:
supplied_api_key = kwargs.pop("api_key", None)
api_key = supplied_api_key or get_from_env("api_key", "MOMENTO_API_KEY")
client = PreviewVectorIndexClient(
configuration=VectorIndexConfigurations.Default.latest(),
credential_provider=CredentialProvider.from_string(api_key),
)
vector_db = cls(embedding=embedding, client=client, **kwargs) # type: ignore
vector_db.add_texts(texts=texts, metadatas=metadatas, **kwargs)
return vector_db
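# Illustrative usage sketch for the MomentoVectorIndex vector store defined above.
# It assumes the MOMENTO_API_KEY environment variable is set and that the `momento`
# and `openai` packages are installed; OpenAIEmbeddings is just one possible
# Embeddings implementation, and the index name and texts are placeholders.
if __name__ == "__main__":
    from langchain_community.embeddings import OpenAIEmbeddings

    vector_db = MomentoVectorIndex.from_texts(
        texts=["Momento is a serverless cache.", "It also offers a vector index."],
        embedding=OpenAIEmbeddings(),
        index_name="langchain-demo",
    )
    for doc, score in vector_db.similarity_search_with_score("vector index", k=1):
        print(score, doc.page_content)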
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~predictionguard.py | import logging
from typing import Any, Dict, List, Optional
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.pydantic_v1 import Extra, root_validator
from libs.core.langchain_core.utils import get_from_dict_or_env
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
class PredictionGuard(LLM):
"""Prediction Guard large language models.
To use, you should have the ``predictionguard`` python package installed, and the
environment variable ``PREDICTIONGUARD_TOKEN`` set with your access token, or pass
it as a named parameter to the constructor. To use Prediction Guard's API along
with OpenAI models, set the environment variable ``OPENAI_API_KEY`` with your
OpenAI API key as well.
Example:
.. code-block:: python
pgllm = PredictionGuard(model="MPT-7B-Instruct",
token="my-access-token",
output={
"type": "boolean"
})
"""
client: Any #: :meta private:
model: Optional[str] = "MPT-7B-Instruct"
"""Model name to use."""
output: Optional[Dict[str, Any]] = None
"""The output type or structure for controlling the LLM output."""
max_tokens: int = 256
"""Denotes the number of tokens to predict per generation."""
temperature: float = 0.75
"""A non-negative float that tunes the degree of randomness in generation."""
token: Optional[str] = None
"""Your Prediction Guard access token."""
stop: Optional[List[str]] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the access token and python package exists in environment."""
token = get_from_dict_or_env(values, "token", "PREDICTIONGUARD_TOKEN")
try:
import predictionguard as pg
values["client"] = pg.Client(token=token)
except ImportError:
raise ImportError(
"Could not import predictionguard python package. "
"Please install it with `pip install predictionguard`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling the Prediction Guard API."""
return {
"max_tokens": self.max_tokens,
"temperature": self.temperature,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "predictionguard"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Prediction Guard's model API.
Args:
prompt: The prompt to pass into the model.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = pgllm("Tell me a joke.")
"""
import predictionguard as pg
params = self._default_params
if self.stop is not None and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop is not None:
params["stop_sequences"] = self.stop
else:
params["stop_sequences"] = stop
response = pg.Completion.create(
model=self.model,
prompt=prompt,
output=self.output,
temperature=params["temperature"],
max_tokens=params["max_tokens"],
**kwargs,
)
text = response["choices"][0]["text"]
# If stop tokens are provided, Prediction Guard's endpoint returns them.
# In order to make this consistent with other endpoints, we strip them.
if stop is not None or self.stop is not None:
text = enforce_stop_tokens(text, params["stop_sequences"])
return text
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~unit_tests~chat_models~test_baichuan.py | from typing import cast
import pytest
from libs.core.langchain_core.messages import (
AIMessage,
AIMessageChunk,
ChatMessage,
FunctionMessage,
HumanMessage,
HumanMessageChunk,
SystemMessage,
)
from libs.core.langchain_core.pydantic_v1 import SecretStr
from pytest import CaptureFixture, MonkeyPatch
from langchain_community.chat_models.baichuan import (
ChatBaichuan,
_convert_delta_to_message_chunk,
_convert_dict_to_message,
_convert_message_to_dict,
_signature,
)
def test__convert_message_to_dict_human() -> None:
message = HumanMessage(content="foo")
result = _convert_message_to_dict(message)
expected_output = {"role": "user", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_ai() -> None:
message = AIMessage(content="foo")
result = _convert_message_to_dict(message)
expected_output = {"role": "assistant", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_system() -> None:
message = SystemMessage(content="foo")
with pytest.raises(TypeError) as e:
_convert_message_to_dict(message)
assert "Got unknown type" in str(e)
def test__convert_message_to_dict_function() -> None:
message = FunctionMessage(name="foo", content="bar")
with pytest.raises(TypeError) as e:
_convert_message_to_dict(message)
assert "Got unknown type" in str(e)
def test__convert_dict_to_message_human() -> None:
message_dict = {"role": "user", "content": "foo"}
result = _convert_dict_to_message(message_dict)
expected_output = HumanMessage(content="foo")
assert result == expected_output
def test__convert_dict_to_message_ai() -> None:
message_dict = {"role": "assistant", "content": "foo"}
result = _convert_dict_to_message(message_dict)
expected_output = AIMessage(content="foo")
assert result == expected_output
def test__convert_dict_to_message_other_role() -> None:
message_dict = {"role": "system", "content": "foo"}
result = _convert_dict_to_message(message_dict)
expected_output = ChatMessage(role="system", content="foo")
assert result == expected_output
def test__convert_delta_to_message_assistant() -> None:
delta = {"role": "assistant", "content": "foo"}
result = _convert_delta_to_message_chunk(delta, AIMessageChunk)
expected_output = AIMessageChunk(content="foo")
assert result == expected_output
def test__convert_delta_to_message_human() -> None:
delta = {"role": "user", "content": "foo"}
result = _convert_delta_to_message_chunk(delta, HumanMessageChunk)
expected_output = HumanMessageChunk(content="foo")
assert result == expected_output
def test__signature() -> None:
secret_key = SecretStr("YOUR_SECRET_KEY")
result = _signature(
secret_key=secret_key,
payload={
"model": "Baichuan2-53B",
"messages": [{"role": "user", "content": "Hi"}],
},
timestamp=1697734335,
)
# The signature was generated by the demo provided by Baichuan.
# https://platform.baichuan-ai.com/docs/api#4
expected_output = "24a50b2db1648e25a244c67c5ab57d3f"
assert result == expected_output
def test_baichuan_key_masked_when_passed_from_env(
monkeypatch: MonkeyPatch, capsys: CaptureFixture
) -> None:
"""Test initialization with an API key provided via an env variable"""
monkeypatch.setenv("BAICHUAN_API_KEY", "test-api-key")
monkeypatch.setenv("BAICHUAN_SECRET_KEY", "test-secret-key")
chat = ChatBaichuan()
print(chat.baichuan_api_key, end="")
captured = capsys.readouterr()
assert captured.out == "**********"
print(chat.baichuan_secret_key, end="")
captured = capsys.readouterr()
assert captured.out == "**********"
def test_baichuan_key_masked_when_passed_via_constructor(
capsys: CaptureFixture,
) -> None:
"""Test initialization with an API key provided via the initializer"""
chat = ChatBaichuan(
baichuan_api_key="test-api-key", baichuan_secret_key="test-secret-key"
)
print(chat.baichuan_api_key, end="")
captured = capsys.readouterr()
assert captured.out == "**********"
print(chat.baichuan_secret_key, end="")
captured = capsys.readouterr()
assert captured.out == "**********"
def test_uses_actual_secret_value_from_secret_str() -> None:
"""Test that actual secret is retrieved using `.get_secret_value()`."""
chat = ChatBaichuan(
baichuan_api_key="test-api-key", baichuan_secret_key="test-secret-key"
)
assert cast(SecretStr, chat.baichuan_api_key).get_secret_value() == "test-api-key"
assert (
cast(SecretStr, chat.baichuan_secret_key).get_secret_value()
== "test-secret-key"
)
| [
"bar",
"foo",
"Hi"
] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~chains~router~multi_retrieval_qa.py | """Use a single chain to route an input to one of multiple retrieval qa chains."""
from __future__ import annotations
from typing import Any, Dict, List, Mapping, Optional
from libs.core.langchain_core.language_models import BaseLanguageModel
from libs.core.langchain_core.prompts import PromptTemplate
from libs.core.langchain_core.retrievers import BaseRetriever
from langchain.chains import ConversationChain
from langchain.chains.base import Chain
from langchain.chains.conversation.prompt import DEFAULT_TEMPLATE
from langchain.chains.retrieval_qa.base import BaseRetrievalQA, RetrievalQA
from langchain.chains.router.base import MultiRouteChain
from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser
from langchain.chains.router.multi_retrieval_prompt import (
MULTI_RETRIEVAL_ROUTER_TEMPLATE,
)
from langchain.chat_models import ChatOpenAI
class MultiRetrievalQAChain(MultiRouteChain):
"""A multi-route chain that uses an LLM router chain to choose amongst retrieval
qa chains."""
router_chain: LLMRouterChain
"""Chain for deciding a destination chain and the input to it."""
destination_chains: Mapping[str, BaseRetrievalQA]
"""Map of name to candidate chains that inputs can be routed to."""
default_chain: Chain
"""Default chain to use when router doesn't map input to one of the destinations."""
@property
def output_keys(self) -> List[str]:
return ["result"]
@classmethod
def from_retrievers(
cls,
llm: BaseLanguageModel,
retriever_infos: List[Dict[str, Any]],
default_retriever: Optional[BaseRetriever] = None,
default_prompt: Optional[PromptTemplate] = None,
default_chain: Optional[Chain] = None,
**kwargs: Any,
) -> MultiRetrievalQAChain:
if default_prompt and not default_retriever:
raise ValueError(
"`default_retriever` must be specified if `default_prompt` is "
"provided. Received only `default_prompt`."
)
destinations = [f"{r['name']}: {r['description']}" for r in retriever_infos]
destinations_str = "\n".join(destinations)
router_template = MULTI_RETRIEVAL_ROUTER_TEMPLATE.format(
destinations=destinations_str
)
router_prompt = PromptTemplate(
template=router_template,
input_variables=["input"],
output_parser=RouterOutputParser(next_inputs_inner_key="query"),
)
router_chain = LLMRouterChain.from_llm(llm, router_prompt)
destination_chains = {}
for r_info in retriever_infos:
prompt = r_info.get("prompt")
retriever = r_info["retriever"]
chain = RetrievalQA.from_llm(llm, prompt=prompt, retriever=retriever)
name = r_info["name"]
destination_chains[name] = chain
if default_chain:
_default_chain = default_chain
elif default_retriever:
_default_chain = RetrievalQA.from_llm(
llm, prompt=default_prompt, retriever=default_retriever
)
else:
prompt_template = DEFAULT_TEMPLATE.replace("input", "query")
prompt = PromptTemplate(
template=prompt_template, input_variables=["history", "query"]
)
_default_chain = ConversationChain(
llm=ChatOpenAI(), prompt=prompt, input_key="query", output_key="result"
)
return cls(
router_chain=router_chain,
destination_chains=destination_chains,
default_chain=_default_chain,
**kwargs,
)
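# Illustrative usage sketch: routes a question between two small retrievers. It
# assumes an OpenAI API key in the environment and the `rank_bm25` package installed;
# the texts, names, and descriptions are placeholders.
if __name__ == "__main__":
    from langchain_community.retrievers import BM25Retriever

    physics = BM25Retriever.from_texts(["Force equals mass times acceleration."])
    history = BM25Retriever.from_texts(["The French Revolution began in 1789."])
    chain = MultiRetrievalQAChain.from_retrievers(
        llm=ChatOpenAI(temperature=0),
        retriever_infos=[
            {
                "name": "physics",
                "description": "Good for questions about physics",
                "retriever": physics,
            },
            {
                "name": "history",
                "description": "Good for questions about history",
                "retriever": history,
            },
        ],
    )
    print(chain.run("In what year did the French Revolution begin?"))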
| [
"input"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~callbacks~context_callback.py | """Callback handler for Context AI"""
import os
from typing import Any, Dict, List
from uuid import UUID
from libs.core.langchain_core.callbacks import BaseCallbackHandler
from libs.core.langchain_core.messages import BaseMessage
from libs.core.langchain_core.outputs import LLMResult
def import_context() -> Any:
"""Import the `getcontext` package."""
try:
import getcontext # noqa: F401
from getcontext.generated.models import (
Conversation,
Message,
MessageRole,
Rating,
)
from getcontext.token import Credential # noqa: F401
except ImportError:
raise ImportError(
"To use the context callback manager you need to have the "
"`getcontext` python package installed (version >=0.3.0). "
"Please install it with `pip install --upgrade python-context`"
)
return getcontext, Credential, Conversation, Message, MessageRole, Rating
class ContextCallbackHandler(BaseCallbackHandler):
"""Callback Handler that records transcripts to the Context service.
(https://context.ai).
Keyword Args:
token (optional): The token with which to authenticate requests to Context.
Visit https://with.context.ai/settings to generate a token.
If not provided, the value of the `CONTEXT_TOKEN` environment
variable will be used.
Raises:
        ImportError: if the `getcontext` python package is not installed.
Chat Example:
        >>> from langchain_community.chat_models import ChatOpenAI
>>> from langchain_community.callbacks import ContextCallbackHandler
>>> context_callback = ContextCallbackHandler(
... token="<CONTEXT_TOKEN_HERE>",
... )
>>> chat = ChatOpenAI(
... temperature=0,
... headers={"user_id": "123"},
... callbacks=[context_callback],
... openai_api_key="API_KEY_HERE",
... )
>>> messages = [
... SystemMessage(content="You translate English to French."),
... HumanMessage(content="I love programming with LangChain."),
... ]
>>> chat(messages)
Chain Example:
>>> from langchain.chains import LLMChain
>>> from langchain_community.chat_models import ChatOpenAI
>>> from langchain_community.callbacks import ContextCallbackHandler
>>> context_callback = ContextCallbackHandler(
... token="<CONTEXT_TOKEN_HERE>",
... )
>>> human_message_prompt = HumanMessagePromptTemplate(
... prompt=PromptTemplate(
... template="What is a good name for a company that makes {product}?",
... input_variables=["product"],
... ),
... )
>>> chat_prompt_template = ChatPromptTemplate.from_messages(
... [human_message_prompt]
... )
>>> callback = ContextCallbackHandler(token)
>>> # Note: the same callback object must be shared between the
... LLM and the chain.
>>> chat = ChatOpenAI(temperature=0.9, callbacks=[callback])
>>> chain = LLMChain(
... llm=chat,
... prompt=chat_prompt_template,
... callbacks=[callback]
... )
>>> chain.run("colorful socks")
"""
def __init__(self, token: str = "", verbose: bool = False, **kwargs: Any) -> None:
(
self.context,
self.credential,
self.conversation_model,
self.message_model,
self.message_role_model,
self.rating_model,
) = import_context()
token = token or os.environ.get("CONTEXT_TOKEN") or ""
self.client = self.context.ContextAPI(credential=self.credential(token))
self.chain_run_id = None
self.llm_model = None
self.messages: List[Any] = []
self.metadata: Dict[str, str] = {}
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
*,
run_id: UUID,
**kwargs: Any,
) -> Any:
"""Run when the chat model is started."""
llm_model = kwargs.get("invocation_params", {}).get("model", None)
if llm_model is not None:
self.metadata["model"] = llm_model
if len(messages) == 0:
return
for message in messages[0]:
role = self.message_role_model.SYSTEM
if message.type == "human":
role = self.message_role_model.USER
elif message.type == "system":
role = self.message_role_model.SYSTEM
elif message.type == "ai":
role = self.message_role_model.ASSISTANT
self.messages.append(
self.message_model(
message=message.content,
role=role,
)
)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends."""
if len(response.generations) == 0 or len(response.generations[0]) == 0:
return
if not self.chain_run_id:
generation = response.generations[0][0]
self.messages.append(
self.message_model(
message=generation.text,
role=self.message_role_model.ASSISTANT,
)
)
self._log_conversation()
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Run when chain starts."""
self.chain_run_id = kwargs.get("run_id", None)
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends."""
self.messages.append(
self.message_model(
message=outputs["text"],
role=self.message_role_model.ASSISTANT,
)
)
self._log_conversation()
self.chain_run_id = None
def _log_conversation(self) -> None:
"""Log the conversation to the context API."""
if len(self.messages) == 0:
return
self.client.log.conversation_upsert(
body={
"conversation": self.conversation_model(
messages=self.messages,
metadata=self.metadata,
)
}
)
self.messages = []
self.metadata = {}
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~acreom.py | import re
from pathlib import Path
from typing import Iterator, List
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class AcreomLoader(BaseLoader):
"""Load `acreom` vault from a directory."""
FRONT_MATTER_REGEX = re.compile(r"^---\n(.*?)\n---\n", re.MULTILINE | re.DOTALL)
"""Regex to match front matter metadata in markdown files."""
def __init__(
self, path: str, encoding: str = "UTF-8", collect_metadata: bool = True
):
"""Initialize the loader."""
self.file_path = path
"""Path to the directory containing the markdown files."""
self.encoding = encoding
"""Encoding to use when reading the files."""
self.collect_metadata = collect_metadata
"""Whether to collect metadata from the front matter."""
def _parse_front_matter(self, content: str) -> dict:
"""Parse front matter metadata from the content and return it as a dict."""
if not self.collect_metadata:
return {}
match = self.FRONT_MATTER_REGEX.search(content)
front_matter = {}
if match:
lines = match.group(1).split("\n")
for line in lines:
if ":" in line:
key, value = line.split(":", 1)
front_matter[key.strip()] = value.strip()
else:
# Skip lines without a colon
continue
return front_matter
def _remove_front_matter(self, content: str) -> str:
"""Remove front matter metadata from the given content."""
if not self.collect_metadata:
return content
return self.FRONT_MATTER_REGEX.sub("", content)
def _process_acreom_content(self, content: str) -> str:
        # Remove acreom-specific elements from the content that do not
        # contribute to the context of the current document.
content = re.sub(r"\s*-\s\[\s\]\s.*|\s*\[\s\]\s.*", "", content) # rm tasks
content = re.sub(r"#", "", content) # rm hashtags
content = re.sub(r"\[\[.*?\]\]", "", content) # rm doclinks
return content
def lazy_load(self) -> Iterator[Document]:
ps = list(Path(self.file_path).glob("**/*.md"))
for p in ps:
with open(p, encoding=self.encoding) as f:
text = f.read()
front_matter = self._parse_front_matter(text)
text = self._remove_front_matter(text)
text = self._process_acreom_content(text)
metadata = {
"source": str(p.name),
"path": str(p),
**front_matter,
}
yield Document(page_content=text, metadata=metadata)
def load(self) -> List[Document]:
return list(self.lazy_load())
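# Illustrative usage sketch; "path/to/acreom/vault" is a placeholder for a local
# acreom vault directory containing markdown files.
if __name__ == "__main__":
    loader = AcreomLoader("path/to/acreom/vault", collect_metadata=True)
    for doc in loader.lazy_load():
        print(doc.metadata.get("source"), len(doc.page_content))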
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~arcee.py | from typing import Any, Dict, List, Optional, Union, cast
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.pydantic_v1 import Extra, SecretStr, root_validator
from libs.core.langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from langchain_community.utilities.arcee import ArceeWrapper, DALMFilter
class Arcee(LLM):
"""Arcee's Domain Adapted Language Models (DALMs).
To use, set the ``ARCEE_API_KEY`` environment variable with your Arcee API key,
or pass ``arcee_api_key`` as a named parameter.
Example:
.. code-block:: python
from langchain_community.llms import Arcee
arcee = Arcee(
model="DALM-PubMed",
arcee_api_key="ARCEE-API-KEY"
)
response = arcee("AI-driven music therapy")
"""
_client: Optional[ArceeWrapper] = None #: :meta private:
"""Arcee _client."""
arcee_api_key: Union[SecretStr, str, None] = None
"""Arcee API Key"""
model: str
"""Arcee DALM name"""
arcee_api_url: str = "https://api.arcee.ai"
"""Arcee API URL"""
arcee_api_version: str = "v2"
"""Arcee API Version"""
arcee_app_url: str = "https://app.arcee.ai"
"""Arcee App URL"""
model_id: str = ""
"""Arcee Model ID"""
model_kwargs: Optional[Dict[str, Any]] = None
"""Keyword arguments to pass to the model."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
underscore_attrs_are_private = True
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "arcee"
def __init__(self, **data: Any) -> None:
"""Initializes private fields."""
super().__init__(**data)
api_key = cast(SecretStr, self.arcee_api_key)
self._client = ArceeWrapper(
arcee_api_key=api_key,
arcee_api_url=self.arcee_api_url,
arcee_api_version=self.arcee_api_version,
model_kwargs=self.model_kwargs,
model_name=self.model,
)
@root_validator(pre=False)
def validate_environments(cls, values: Dict) -> Dict:
"""Validate Arcee environment variables."""
# validate env vars
values["arcee_api_key"] = convert_to_secret_str(
get_from_dict_or_env(
values,
"arcee_api_key",
"ARCEE_API_KEY",
)
)
values["arcee_api_url"] = get_from_dict_or_env(
values,
"arcee_api_url",
"ARCEE_API_URL",
)
values["arcee_app_url"] = get_from_dict_or_env(
values,
"arcee_app_url",
"ARCEE_APP_URL",
)
values["arcee_api_version"] = get_from_dict_or_env(
values,
"arcee_api_version",
"ARCEE_API_VERSION",
)
# validate model kwargs
if values.get("model_kwargs"):
kw = values["model_kwargs"]
# validate size
if kw.get("size") is not None:
if not kw.get("size") >= 0:
raise ValueError("`size` must be positive")
# validate filters
if kw.get("filters") is not None:
if not isinstance(kw.get("filters"), List):
raise ValueError("`filters` must be a list")
for f in kw.get("filters"):
DALMFilter(**f)
return values
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Generate text from Arcee DALM.
Args:
prompt: Prompt to generate text from.
size: The max number of context results to retrieve.
Defaults to 3. (Can be less if filters are provided).
filters: Filters to apply to the context dataset.
"""
try:
if not self._client:
raise ValueError("Client is not initialized.")
return self._client.generate(prompt=prompt, **kwargs)
except Exception as e:
raise Exception(f"Failed to generate text: {e}") from e
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~openlm.py | from typing import Any, Dict
from libs.core.langchain_core.pydantic_v1 import root_validator
from langchain_community.llms.openai import BaseOpenAI
class OpenLM(BaseOpenAI):
"""OpenLM models."""
@classmethod
def is_lc_serializable(cls) -> bool:
return False
@property
def _invocation_params(self) -> Dict[str, Any]:
return {**{"model": self.model_name}, **super()._invocation_params}
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
try:
import openlm
values["client"] = openlm.Completion
except ImportError:
raise ImportError(
"Could not import openlm python package. "
"Please install it with `pip install openlm`."
)
if values["streaming"]:
raise ValueError("Streaming not supported with openlm")
return values
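# Illustrative usage sketch, assuming the `openlm` package is installed and
# credentials for the chosen upstream provider (e.g. OPENAI_API_KEY) are set;
# the model name below is only an example of what openlm can route to.
if __name__ == "__main__":
    llm = OpenLM(model="text-davinci-003")
    print(llm.invoke("Say hello in French."))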
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~url.py | """Loader that uses unstructured to load HTML files."""
import logging
from typing import Any, List
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class UnstructuredURLLoader(BaseLoader):
"""Load files from remote URLs using `Unstructured`.
Use the unstructured partition function to detect the MIME type
and route the file to the appropriate partitioner.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain_community.document_loaders import UnstructuredURLLoader
loader = UnstructuredURLLoader(
urls=["<url-1>", "<url-2>"], mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition
"""
def __init__(
self,
urls: List[str],
continue_on_failure: bool = True,
mode: str = "single",
show_progress_bar: bool = False,
**unstructured_kwargs: Any,
):
"""Initialize with file path."""
try:
import unstructured # noqa:F401
from unstructured.__version__ import __version__ as __unstructured_version__
self.__version = __unstructured_version__
except ImportError:
raise ImportError(
"unstructured package not found, please install it with "
"`pip install unstructured`"
)
self._validate_mode(mode)
self.mode = mode
headers = unstructured_kwargs.pop("headers", {})
if len(headers.keys()) != 0:
warn_about_headers = False
if self.__is_non_html_available():
warn_about_headers = not self.__is_headers_available_for_non_html()
else:
warn_about_headers = not self.__is_headers_available_for_html()
if warn_about_headers:
logger.warning(
"You are using an old version of unstructured. "
"The headers parameter is ignored"
)
self.urls = urls
self.continue_on_failure = continue_on_failure
self.headers = headers
self.unstructured_kwargs = unstructured_kwargs
self.show_progress_bar = show_progress_bar
def _validate_mode(self, mode: str) -> None:
_valid_modes = {"single", "elements"}
if mode not in _valid_modes:
raise ValueError(
f"Got {mode} for `mode`, but should be one of `{_valid_modes}`"
)
def __is_headers_available_for_html(self) -> bool:
_unstructured_version = self.__version.split("-")[0]
unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")])
return unstructured_version >= (0, 5, 7)
def __is_headers_available_for_non_html(self) -> bool:
_unstructured_version = self.__version.split("-")[0]
unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")])
return unstructured_version >= (0, 5, 13)
def __is_non_html_available(self) -> bool:
_unstructured_version = self.__version.split("-")[0]
unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")])
return unstructured_version >= (0, 5, 12)
def load(self) -> List[Document]:
"""Load file."""
from unstructured.partition.auto import partition
from unstructured.partition.html import partition_html
docs: List[Document] = list()
if self.show_progress_bar:
try:
from tqdm import tqdm
except ImportError as e:
raise ImportError(
"Package tqdm must be installed if show_progress_bar=True. "
"Please install with 'pip install tqdm' or set "
"show_progress_bar=False."
) from e
urls = tqdm(self.urls)
else:
urls = self.urls
for url in urls:
try:
if self.__is_non_html_available():
if self.__is_headers_available_for_non_html():
elements = partition(
url=url, headers=self.headers, **self.unstructured_kwargs
)
else:
elements = partition(url=url, **self.unstructured_kwargs)
else:
if self.__is_headers_available_for_html():
elements = partition_html(
url=url, headers=self.headers, **self.unstructured_kwargs
)
else:
elements = partition_html(url=url, **self.unstructured_kwargs)
except Exception as e:
if self.continue_on_failure:
logger.error(f"Error fetching or processing {url}, exception: {e}")
continue
else:
raise e
if self.mode == "single":
text = "\n\n".join([str(el) for el in elements])
metadata = {"source": url}
docs.append(Document(page_content=text, metadata=metadata))
elif self.mode == "elements":
for element in elements:
metadata = element.metadata.to_dict()
metadata["category"] = element.category
docs.append(Document(page_content=str(element), metadata=metadata))
return docs
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~gpt4all.py | from functools import partial
from typing import Any, Dict, List, Mapping, Optional, Set
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.pydantic_v1 import Extra, Field, root_validator
from langchain_community.llms.utils import enforce_stop_tokens
class GPT4All(LLM):
"""GPT4All language models.
To use, you should have the ``gpt4all`` python package installed, the
pre-trained model file, and the model's config information.
Example:
.. code-block:: python
from langchain_community.llms import GPT4All
model = GPT4All(model="./models/gpt4all-model.bin", n_threads=8)
# Simplest invocation
response = model("Once upon a time, ")
"""
model: str
"""Path to the pre-trained GPT4All model file."""
backend: Optional[str] = Field(None, alias="backend")
max_tokens: int = Field(200, alias="max_tokens")
"""Token context window."""
n_parts: int = Field(-1, alias="n_parts")
"""Number of parts to split the model into.
If -1, the number of parts is automatically determined."""
seed: int = Field(0, alias="seed")
"""Seed. If -1, a random seed is used."""
f16_kv: bool = Field(False, alias="f16_kv")
"""Use half-precision for key/value cache."""
logits_all: bool = Field(False, alias="logits_all")
"""Return logits for all tokens, not just the last token."""
vocab_only: bool = Field(False, alias="vocab_only")
"""Only load the vocabulary, no weights."""
use_mlock: bool = Field(False, alias="use_mlock")
"""Force system to keep model in RAM."""
embedding: bool = Field(False, alias="embedding")
"""Use embedding mode only."""
n_threads: Optional[int] = Field(4, alias="n_threads")
"""Number of threads to use."""
n_predict: Optional[int] = 256
"""The maximum number of tokens to generate."""
temp: Optional[float] = 0.7
"""The temperature to use for sampling."""
top_p: Optional[float] = 0.1
"""The top-p value to use for sampling."""
top_k: Optional[int] = 40
"""The top-k value to use for sampling."""
echo: Optional[bool] = False
"""Whether to echo the prompt."""
stop: Optional[List[str]] = []
"""A list of strings to stop generation when encountered."""
repeat_last_n: Optional[int] = 64
"Last n tokens to penalize"
repeat_penalty: Optional[float] = 1.18
"""The penalty to apply to repeated tokens."""
n_batch: int = Field(8, alias="n_batch")
"""Batch size for prompt processing."""
streaming: bool = False
"""Whether to stream the results or not."""
allow_download: bool = False
"""If model does not exist in ~/.cache/gpt4all/, download it."""
device: Optional[str] = Field("cpu", alias="device")
"""Device name: cpu, gpu, nvidia, intel, amd or DeviceName."""
client: Any = None #: :meta private:
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@staticmethod
def _model_param_names() -> Set[str]:
return {
"max_tokens",
"n_predict",
"top_k",
"top_p",
"temp",
"n_batch",
"repeat_penalty",
"repeat_last_n",
}
def _default_params(self) -> Dict[str, Any]:
return {
"max_tokens": self.max_tokens,
"n_predict": self.n_predict,
"top_k": self.top_k,
"top_p": self.top_p,
"temp": self.temp,
"n_batch": self.n_batch,
"repeat_penalty": self.repeat_penalty,
"repeat_last_n": self.repeat_last_n,
}
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in the environment."""
try:
from gpt4all import GPT4All as GPT4AllModel
except ImportError:
raise ImportError(
"Could not import gpt4all python package. "
"Please install it with `pip install gpt4all`."
)
full_path = values["model"]
model_path, delimiter, model_name = full_path.rpartition("/")
model_path += delimiter
values["client"] = GPT4AllModel(
model_name,
model_path=model_path or None,
model_type=values["backend"],
allow_download=values["allow_download"],
device=values["device"],
)
if values["n_threads"] is not None:
# set n_threads
values["client"].model.set_thread_count(values["n_threads"])
try:
values["backend"] = values["client"].model_type
except AttributeError:
# The below is for compatibility with GPT4All Python bindings <= 0.2.3.
values["backend"] = values["client"].model.model_type
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model,
**self._default_params(),
**{
k: v for k, v in self.__dict__.items() if k in self._model_param_names()
},
}
@property
def _llm_type(self) -> str:
"""Return the type of llm."""
return "gpt4all"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
r"""Call out to GPT4All's generate method.
Args:
prompt: The prompt to pass into the model.
stop: A list of strings to stop generation when encountered.
Returns:
The string generated by the model.
Example:
.. code-block:: python
prompt = "Once upon a time, "
response = model(prompt, n_predict=55)
"""
text_callback = None
if run_manager:
text_callback = partial(run_manager.on_llm_new_token, verbose=self.verbose)
text = ""
params = {**self._default_params(), **kwargs}
for token in self.client.generate(prompt, **params):
if text_callback:
text_callback(token)
text += token
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~embeddings~bedrock.py | import asyncio
import json
import os
from typing import Any, Dict, List, Optional
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra, root_validator
from libs.core.langchain_core.runnables.config import run_in_executor
class BedrockEmbeddings(BaseModel, Embeddings):
"""Bedrock embedding models.
To authenticate, the AWS client uses the following methods to
automatically load credentials:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific credential profile should be used, you must pass
the name of the profile from the ~/.aws/credentials file that is to be used.
Make sure the credentials / roles used have the required policies to
access the Bedrock service.
"""
"""
Example:
.. code-block:: python
                from langchain_community.embeddings import BedrockEmbeddings
                region_name = "us-east-1"
credentials_profile_name = "default"
model_id = "amazon.titan-embed-text-v1"
be = BedrockEmbeddings(
credentials_profile_name=credentials_profile_name,
region_name=region_name,
model_id=model_id
)
"""
client: Any #: :meta private:
"""Bedrock client."""
region_name: Optional[str] = None
"""The aws region e.g., `us-west-2`. Fallsback to AWS_DEFAULT_REGION env variable
or region specified in ~/.aws/config in case it is not provided here.
"""
credentials_profile_name: Optional[str] = None
"""The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
has either access keys or role information specified.
If not specified, the default credential profile or, if on an EC2 instance,
credentials from IMDS will be used.
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
"""
model_id: str = "amazon.titan-embed-text-v1"
"""Id of the model to call, e.g., amazon.titan-embed-text-v1, this is
equivalent to the modelId property in the list-foundation-models api"""
model_kwargs: Optional[Dict] = None
"""Keyword arguments to pass to the model."""
endpoint_url: Optional[str] = None
"""Needed if you don't want to default to us-east-1 endpoint"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that AWS credentials to and python package exists in environment."""
if values["client"] is not None:
return values
try:
import boto3
if values["credentials_profile_name"] is not None:
session = boto3.Session(profile_name=values["credentials_profile_name"])
else:
# use default credentials
session = boto3.Session()
client_params = {}
if values["region_name"]:
client_params["region_name"] = values["region_name"]
if values["endpoint_url"]:
client_params["endpoint_url"] = values["endpoint_url"]
values["client"] = session.client("bedrock-runtime", **client_params)
except ImportError:
raise ModuleNotFoundError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
"profile name are valid."
) from e
return values
def _embedding_func(self, text: str) -> List[float]:
"""Call out to Bedrock embedding endpoint."""
# replace newlines, which can negatively affect performance.
text = text.replace(os.linesep, " ")
# format input body for provider
provider = self.model_id.split(".")[0]
_model_kwargs = self.model_kwargs or {}
input_body = {**_model_kwargs}
if provider == "cohere":
if "input_type" not in input_body.keys():
input_body["input_type"] = "search_document"
input_body["texts"] = [text]
else:
# includes common provider == "amazon"
input_body["inputText"] = text
body = json.dumps(input_body)
try:
# invoke bedrock API
response = self.client.invoke_model(
body=body,
modelId=self.model_id,
accept="application/json",
contentType="application/json",
)
# format output based on provider
response_body = json.loads(response.get("body").read())
if provider == "cohere":
return response_body.get("embeddings")[0]
else:
# includes common provider == "amazon"
return response_body.get("embedding")
except Exception as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a Bedrock model.
Args:
texts: The list of texts to embed
Returns:
List of embeddings, one for each text.
"""
results = []
for text in texts:
response = self._embedding_func(text)
results.append(response)
return results
def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a Bedrock model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self._embedding_func(text)
async def aembed_query(self, text: str) -> List[float]:
"""Asynchronous compute query embeddings using a Bedrock model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return await run_in_executor(None, self.embed_query, text)
async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
"""Asynchronous compute doc embeddings using a Bedrock model.
Args:
texts: The list of texts to embed
Returns:
List of embeddings, one for each text.
"""
result = await asyncio.gather(*[self.aembed_query(text) for text in texts])
return list(result)
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~unit_tests~retrievers~test_bm25.py | import pytest
from libs.core.langchain_core.documents import Document
from langchain_community.retrievers.bm25 import BM25Retriever
@pytest.mark.requires("rank_bm25")
def test_from_texts() -> None:
input_texts = ["I have a pen.", "Do you have a pen?", "I have a bag."]
bm25_retriever = BM25Retriever.from_texts(texts=input_texts)
assert len(bm25_retriever.docs) == 3
assert bm25_retriever.vectorizer.doc_len == [4, 5, 4]
@pytest.mark.requires("rank_bm25")
def test_from_texts_with_bm25_params() -> None:
input_texts = ["I have a pen.", "Do you have a pen?", "I have a bag."]
bm25_retriever = BM25Retriever.from_texts(
texts=input_texts, bm25_params={"epsilon": 10}
)
    # the custom epsilon value should be passed through to the BM25 vectorizer
assert bm25_retriever.vectorizer.epsilon == 10
@pytest.mark.requires("rank_bm25")
def test_from_documents() -> None:
input_docs = [
Document(page_content="I have a pen."),
Document(page_content="Do you have a pen?"),
Document(page_content="I have a bag."),
]
bm25_retriever = BM25Retriever.from_documents(documents=input_docs)
assert len(bm25_retriever.docs) == 3
assert bm25_retriever.vectorizer.doc_len == [4, 5, 4]
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~college_confidential.py | from typing import List
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.web_base import WebBaseLoader
class CollegeConfidentialLoader(WebBaseLoader):
"""Load `College Confidential` webpages."""
def load(self) -> List[Document]:
"""Load webpages as Documents."""
soup = self.scrape()
text = soup.select_one("main[class='skin-handler']").text
metadata = {"source": self.web_path}
return [Document(page_content=text, metadata=metadata)]
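# Illustrative usage sketch; the URL is a placeholder for a College Confidential page.
if __name__ == "__main__":
    loader = CollegeConfidentialLoader(
        "https://www.collegeconfidential.com/colleges/brown-university/"
    )
    docs = loader.load()
    print(docs[0].page_content[:200])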
| [] |