date_collected | repo_name | file_name | file_contents | prompts |
---|---|---|---|---|
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~chat_models~test_qianfan_endpoint.py | """Test Baidu Qianfan Chat Endpoint."""
from typing import Any
from libs.core.langchain_core.callbacks import CallbackManager
from libs.core.langchain_core.messages import (
AIMessage,
BaseMessage,
FunctionMessage,
HumanMessage,
)
from libs.core.langchain_core.outputs import ChatGeneration, LLMResult
from libs.core.langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_community.chat_models.baidu_qianfan_endpoint import QianfanChatEndpoint
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
_FUNCTIONS: Any = [
{
"name": "format_person_info",
"description": (
"Output formatter. Should always be used to format your response to the"
" user."
),
"parameters": {
"title": "Person",
"description": "Identifying information about a person.",
"type": "object",
"properties": {
"name": {
"title": "Name",
"description": "The person's name",
"type": "string",
},
"age": {
"title": "Age",
"description": "The person's age",
"type": "integer",
},
"fav_food": {
"title": "Fav Food",
"description": "The person's favorite food",
"type": "string",
},
},
"required": ["name", "age"],
},
},
{
"name": "get_current_temperature",
"description": ("Used to get the location's temperature."),
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "city name",
},
"unit": {
"type": "string",
"enum": ["centigrade", "Fahrenheit"],
},
},
"required": ["location", "unit"],
},
"responses": {
"type": "object",
"properties": {
"temperature": {
"type": "integer",
"description": "city temperature",
},
"unit": {
"type": "string",
"enum": ["centigrade", "Fahrenheit"],
},
},
},
},
]
def test_default_call() -> None:
"""Test default model(`ERNIE-Bot`) call."""
chat = QianfanChatEndpoint()
response = chat(messages=[HumanMessage(content="Hello")])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_model() -> None:
"""Test model kwarg works."""
chat = QianfanChatEndpoint(model="BLOOMZ-7B")
response = chat(messages=[HumanMessage(content="Hello")])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_model_param() -> None:
"""Test model params works."""
chat = QianfanChatEndpoint()
response = chat(model="BLOOMZ-7B", messages=[HumanMessage(content="Hello")])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_endpoint() -> None:
"""Test user custom model deployments like some open source models."""
chat = QianfanChatEndpoint(endpoint="qianfan_bloomz_7b_compressed")
response = chat(messages=[HumanMessage(content="Hello")])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_endpoint_param() -> None:
"""Test user custom model deployments like some open source models."""
chat = QianfanChatEndpoint()
response = chat(
messages=[
HumanMessage(endpoint="qianfan_bloomz_7b_compressed", content="Hello")
]
)
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_multiple_history() -> None:
"""Tests multiple history works."""
chat = QianfanChatEndpoint()
response = chat(
messages=[
HumanMessage(content="Hello."),
AIMessage(content="Hello!"),
HumanMessage(content="How are you doing?"),
]
)
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_stream() -> None:
"""Test that stream works."""
chat = QianfanChatEndpoint(streaming=True)
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
response = chat(
messages=[
HumanMessage(content="Hello."),
AIMessage(content="Hello!"),
HumanMessage(content="Who are you?"),
],
stream=True,
callbacks=callback_manager,
)
assert callback_handler.llm_streams > 0
assert isinstance(response.content, str)
def test_multiple_messages() -> None:
"""Tests multiple messages works."""
chat = QianfanChatEndpoint()
message = HumanMessage(content="Hi, how are you.")
response = chat.generate([[message], [message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 2
for generations in response.generations:
assert len(generations) == 1
for generation in generations:
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.text, str)
assert generation.text == generation.message.content
def test_functions_call_thoughts() -> None:
chat = QianfanChatEndpoint(model="ERNIE-Bot")
prompt_tmpl = "Use the given functions to answer following question: {input}"
prompt_msgs = [
HumanMessagePromptTemplate.from_template(prompt_tmpl),
]
prompt = ChatPromptTemplate(messages=prompt_msgs)
chain = prompt | chat.bind(functions=_FUNCTIONS)
message = HumanMessage(content="What's the temperature in Shanghai today?")
response = chain.batch([{"input": message}])
assert isinstance(response[0], AIMessage)
assert "function_call" in response[0].additional_kwargs
def test_functions_call() -> None:
chat = QianfanChatEndpoint(model="ERNIE-Bot")
prompt = ChatPromptTemplate(
messages=[
HumanMessage(content="What's the temperature in Shanghai today?"),
AIMessage(
content="",
additional_kwargs={
"function_call": {
"name": "get_current_temperature",
"thoughts": "i will use get_current_temperature "
"to resolve the questions",
"arguments": '{"location":"Shanghai","unit":"centigrade"}',
}
},
),
FunctionMessage(
name="get_current_weather",
content='{"temperature": "25", \
"unit": "摄氏度", "description": "晴朗"}',
),
]
)
chain = prompt | chat.bind(functions=_FUNCTIONS)
resp = chain.invoke({})
assert isinstance(resp, AIMessage)
| [
"function_call",
"Who are you?",
"get_current_weather",
"{\"temperature\": \"25\", \"unit\": \"摄氏度\", \"description\": \"晴朗\"}",
"Use the given functions to answer following question: {input}",
"get_current_temperature",
"to resolve the questions",
"name",
"Hello.",
"Hello!",
"How are you doing?",
"{\"location\":\"Shanghai\",\"unit\":\"centigrade\"}",
"i will use get_current_temperature ",
"Hi, how are you.",
"What's the temperature in Shanghai today?",
"arguments",
"Hello"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~deepinfra.py | import json
from typing import Any, AsyncIterator, Dict, Iterator, List, Mapping, Optional
import aiohttp
from libs.core.langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.outputs import GenerationChunk
from libs.core.langchain_core.pydantic_v1 import Extra, root_validator
from libs.core.langchain_core.utils import get_from_dict_or_env
from langchain_community.utilities.requests import Requests
DEFAULT_MODEL_ID = "google/flan-t5-xl"
class DeepInfra(LLM):
"""DeepInfra models.
To use, you should have the environment variable ``DEEPINFRA_API_TOKEN``
set with your API token, or pass it as a named parameter to the
constructor.
Only supports `text-generation` and `text2text-generation` for now.
Example:
.. code-block:: python
from langchain_community.llms import DeepInfra
di = DeepInfra(model_id="google/flan-t5-xl",
deepinfra_api_token="my-api-key")
"""
model_id: str = DEFAULT_MODEL_ID
model_kwargs: Optional[Dict] = None
deepinfra_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
deepinfra_api_token = get_from_dict_or_env(
values, "deepinfra_api_token", "DEEPINFRA_API_TOKEN"
)
values["deepinfra_api_token"] = deepinfra_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_id": self.model_id},
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "deepinfra"
def _url(self) -> str:
return f"https://api.deepinfra.com/v1/inference/{self.model_id}"
def _headers(self) -> Dict:
return {
"Authorization": f"bearer {self.deepinfra_api_token}",
"Content-Type": "application/json",
}
def _body(self, prompt: str, kwargs: Any) -> Dict:
model_kwargs = self.model_kwargs or {}
model_kwargs = {**model_kwargs, **kwargs}
return {
"input": prompt,
**model_kwargs,
}
def _handle_status(self, code: int, text: Any) -> None:
if code >= 500:
raise Exception(f"DeepInfra Server: Error {code}")
elif code >= 400:
raise ValueError(f"DeepInfra received an invalid payload: {text}")
elif code != 200:
raise Exception(
f"DeepInfra returned an unexpected response with status "
f"{code}: {text}"
)
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to DeepInfra's inference API endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = di("Tell me a joke.")
"""
request = Requests(headers=self._headers())
response = request.post(url=self._url(), data=self._body(prompt, kwargs))
self._handle_status(response.status_code, response.text)
data = response.json()
return data["results"][0]["generated_text"]
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
request = Requests(headers=self._headers())
async with request.apost(
url=self._url(), data=self._body(prompt, kwargs)
) as response:
self._handle_status(response.status, response.text)
data = await response.json()
return data["results"][0]["generated_text"]
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
request = Requests(headers=self._headers())
response = request.post(
url=self._url(), data=self._body(prompt, {**kwargs, "stream": True})
)
self._handle_status(response.status_code, response.text)
for line in _parse_stream(response.iter_lines()):
chunk = _handle_sse_line(line)
if chunk:
yield chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text)
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
request = Requests(headers=self._headers())
async with request.apost(
url=self._url(), data=self._body(prompt, {**kwargs, "stream": True})
) as response:
self._handle_status(response.status, response.text)
async for line in _parse_stream_async(response.content):
chunk = _handle_sse_line(line)
if chunk:
yield chunk
if run_manager:
await run_manager.on_llm_new_token(chunk.text)
def _parse_stream(rbody: Iterator[bytes]) -> Iterator[str]:
for line in rbody:
_line = _parse_stream_helper(line)
if _line is not None:
yield _line
async def _parse_stream_async(rbody: aiohttp.StreamReader) -> AsyncIterator[str]:
async for line in rbody:
_line = _parse_stream_helper(line)
if _line is not None:
yield _line
def _parse_stream_helper(line: bytes) -> Optional[str]:
if line and line.startswith(b"data:"):
if line.startswith(b"data: "):
# An SSE event may be valid when it contains whitespace
line = line[len(b"data: ") :]
else:
line = line[len(b"data:") :]
if line.strip() == b"[DONE]":
# return here will cause GeneratorExit exception in urllib3
# and it will close http connection with TCP Reset
return None
else:
return line.decode("utf-8")
return None
def _handle_sse_line(line: str) -> Optional[GenerationChunk]:
try:
obj = json.loads(line)
return GenerationChunk(
text=obj.get("token", {}).get("text"),
)
except Exception:
return None
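if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: a minimal end-to-end
    # use of the DeepInfra class above. It assumes the DEEPINFRA_API_TOKEN
    # environment variable is set and that the default model accepts a plain
    # text prompt.
    di = DeepInfra(model_id=DEFAULT_MODEL_ID)
    print(di.invoke("Tell me a joke."))
    # Streaming goes through _stream() and yields text chunks as they arrive.
    for chunk in di.stream("Tell me a joke."):
        print(chunk, end="", flush=True)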
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~apify_dataset.py | from typing import Any, Callable, Dict, List
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.pydantic_v1 import BaseModel, root_validator
from langchain_community.document_loaders.base import BaseLoader
class ApifyDatasetLoader(BaseLoader, BaseModel):
"""Load datasets from `Apify` web scraping, crawling, and data extraction platform.
For details, see https://docs.apify.com/platform/integrations/langchain
Example:
.. code-block:: python
from langchain_community.document_loaders import ApifyDatasetLoader
from libs.core.langchain_core.documents import Document
loader = ApifyDatasetLoader(
dataset_id="YOUR-DATASET-ID",
dataset_mapping_function=lambda dataset_item: Document(
page_content=dataset_item["text"], metadata={"source": dataset_item["url"]}
),
)
documents = loader.load()
""" # noqa: E501
apify_client: Any
"""An instance of the ApifyClient class from the apify-client Python package."""
dataset_id: str
"""The ID of the dataset on the Apify platform."""
dataset_mapping_function: Callable[[Dict], Document]
"""A custom function that takes a single dictionary (an Apify dataset item)
and converts it to an instance of the Document class."""
def __init__(
self, dataset_id: str, dataset_mapping_function: Callable[[Dict], Document]
):
"""Initialize the loader with an Apify dataset ID and a mapping function.
Args:
dataset_id (str): The ID of the dataset on the Apify platform.
dataset_mapping_function (Callable): A function that takes a single
dictionary (an Apify dataset item) and converts it to an instance
of the Document class.
"""
super().__init__(
dataset_id=dataset_id, dataset_mapping_function=dataset_mapping_function
)
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate environment.
Args:
values: The values to validate.
"""
try:
from apify_client import ApifyClient
values["apify_client"] = ApifyClient()
except ImportError:
raise ImportError(
"Could not import apify-client Python package. "
"Please install it with `pip install apify-client`."
)
return values
def load(self) -> List[Document]:
"""Load documents."""
dataset_items = (
self.apify_client.dataset(self.dataset_id).list_items(clean=True).items
)
return list(map(self.dataset_mapping_function, dataset_items))
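if __name__ == "__main__":
    # Illustrative sketch, not part of the original module. The dataset ID below
    # is a placeholder, and an APIFY_API_TOKEN environment variable (read by
    # ApifyClient) is assumed to be configured.
    loader = ApifyDatasetLoader(
        dataset_id="YOUR-DATASET-ID",
        dataset_mapping_function=lambda item: Document(
            page_content=item["text"], metadata={"source": item["url"]}
        ),
    )
    print(len(loader.load()))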
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~unit_tests~llms~test_ai21.py | """Test AI21 llm"""
from typing import cast
from libs.core.langchain_core.pydantic_v1 import SecretStr
from pytest import CaptureFixture, MonkeyPatch
from langchain_community.llms.ai21 import AI21
def test_api_key_is_secret_string() -> None:
llm = AI21(ai21_api_key="secret-api-key")
assert isinstance(llm.ai21_api_key, SecretStr)
def test_api_key_masked_when_passed_from_env(
monkeypatch: MonkeyPatch, capsys: CaptureFixture
) -> None:
"""Test initialization with an API key provided via an env variable"""
monkeypatch.setenv("AI21_API_KEY", "secret-api-key")
llm = AI21()
print(llm.ai21_api_key, end="")
captured = capsys.readouterr()
assert captured.out == "**********"
def test_api_key_masked_when_passed_via_constructor(
capsys: CaptureFixture,
) -> None:
"""Test initialization with an API key provided via the initializer"""
llm = AI21(ai21_api_key="secret-api-key")
print(llm.ai21_api_key, end="")
captured = capsys.readouterr()
assert captured.out == "**********"
def test_uses_actual_secret_value_from_secretstr() -> None:
"""Test that actual secret is retrieved using `.get_secret_value()`."""
llm = AI21(ai21_api_key="secret-api-key")
assert cast(SecretStr, llm.ai21_api_key).get_secret_value() == "secret-api-key"
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~arxiv.py | from typing import Any, List, Optional
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.utilities.arxiv import ArxivAPIWrapper
class ArxivLoader(BaseLoader):
"""Load a query result from `Arxiv`.
The loader converts the original PDF format into the text.
Args:
Supports all arguments of `ArxivAPIWrapper`.
"""
def __init__(
self, query: str, doc_content_chars_max: Optional[int] = None, **kwargs: Any
):
self.query = query
self.client = ArxivAPIWrapper(
doc_content_chars_max=doc_content_chars_max, **kwargs
)
def load(self) -> List[Document]:
return self.client.load(self.query)
def get_summaries_as_docs(self) -> List[Document]:
return self.client.get_summaries_as_docs(self.query)
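if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: loading papers for a
    # hypothetical query, kept small via load_max_docs (forwarded to
    # ArxivAPIWrapper). Requires the `arxiv` package.
    loader = ArxivLoader(query="large language models", load_max_docs=2)
    docs = loader.load()
    summaries = loader.get_summaries_as_docs()
    print(len(docs), len(summaries))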
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~wikipedia.py | from typing import List, Optional
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.utilities.wikipedia import WikipediaAPIWrapper
class WikipediaLoader(BaseLoader):
"""Load from `Wikipedia`.
The hard limit on the length of the query is 300 for now.
Each wiki page represents one Document.
"""
def __init__(
self,
query: str,
lang: str = "en",
load_max_docs: Optional[int] = 25,
load_all_available_meta: Optional[bool] = False,
doc_content_chars_max: Optional[int] = 4000,
):
"""
Initializes a new instance of the WikipediaLoader class.
Args:
query (str): The query string to search on Wikipedia.
lang (str, optional): The language code for the Wikipedia language edition.
Defaults to "en".
load_max_docs (int, optional): The maximum number of documents to load.
Defaults to 25.
load_all_available_meta (bool, optional): Indicates whether to load all
available metadata for each document. Defaults to False.
doc_content_chars_max (int, optional): The maximum number of characters
for the document content. Defaults to 4000.
"""
self.query = query
self.lang = lang
self.load_max_docs = load_max_docs
self.load_all_available_meta = load_all_available_meta
self.doc_content_chars_max = doc_content_chars_max
def load(self) -> List[Document]:
"""
Loads the query result from Wikipedia into a list of Documents.
Returns:
List[Document]: A list of Document objects representing the loaded
Wikipedia pages.
"""
client = WikipediaAPIWrapper(
lang=self.lang,
top_k_results=self.load_max_docs,
load_all_available_meta=self.load_all_available_meta,
doc_content_chars_max=self.doc_content_chars_max,
)
docs = client.load(self.query)
return docs
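if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: a hypothetical query
    # kept small via load_max_docs. Requires the `wikipedia` package used by
    # WikipediaAPIWrapper.
    loader = WikipediaLoader(query="LangChain", load_max_docs=2)
    for doc in loader.load():
        print(doc.metadata.get("title"), len(doc.page_content))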
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~chains~example_generator.py | from typing import List
from libs.core.langchain_core.language_models import BaseLanguageModel
from libs.core.langchain_core.prompts.few_shot import FewShotPromptTemplate
from libs.core.langchain_core.prompts.prompt import PromptTemplate
from langchain.chains.llm import LLMChain
TEST_GEN_TEMPLATE_SUFFIX = "Add another example."
def generate_example(
examples: List[dict], llm: BaseLanguageModel, prompt_template: PromptTemplate
) -> str:
"""Return another example given a list of examples for a prompt."""
prompt = FewShotPromptTemplate(
examples=examples,
suffix=TEST_GEN_TEMPLATE_SUFFIX,
input_variables=[],
example_prompt=prompt_template,
)
chain = LLMChain(llm=llm, prompt=prompt)
return chain.predict()
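if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: calling
    # generate_example with a fake in-memory LLM so the snippet runs without
    # credentials. The prompt template and examples are assumptions for
    # demonstration only.
    from langchain_community.llms.fake import FakeListLLM

    example_prompt = PromptTemplate.from_template("Input: {word}\nOutput: {antonym}")
    examples = [
        {"word": "hot", "antonym": "cold"},
        {"word": "tall", "antonym": "short"},
    ]
    fake_llm = FakeListLLM(responses=["Input: fast\nOutput: slow"])
    print(generate_example(examples, fake_llm, example_prompt))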
| [
"Add another example."
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~sagemaker_endpoint.py | """Sagemaker InvokeEndpoint API."""
import io
import json
from abc import abstractmethod
from typing import Any, Dict, Generic, Iterator, List, Mapping, Optional, TypeVar, Union
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.pydantic_v1 import Extra, root_validator
from langchain_community.llms.utils import enforce_stop_tokens
INPUT_TYPE = TypeVar("INPUT_TYPE", bound=Union[str, List[str]])
OUTPUT_TYPE = TypeVar("OUTPUT_TYPE", bound=Union[str, List[List[float]], Iterator])
class LineIterator:
"""
A helper class for parsing the byte stream input.
The output of the model will be in the following format:
b'{"outputs": [" a"]}\n'
b'{"outputs": [" challenging"]}\n'
b'{"outputs": [" problem"]}\n'
...
While usually each PayloadPart event from the event stream will
contain a byte array with a full json, this is not guaranteed
and some of the json objects may be split across PayloadPart events.
For example:
{'PayloadPart': {'Bytes': b'{"outputs": '}}
{'PayloadPart': {'Bytes': b'[" problem"]}\n'}}
This class accounts for this by concatenating bytes written via the 'write' function
and then exposing complete lines from the buffer (with the trailing '\n'
stripped) as the iterator is consumed.
It maintains the position of the last read position to ensure
that previous bytes are not exposed again.
For more details see:
https://aws.amazon.com/blogs/machine-learning/elevating-the-generative-ai-experience-introducing-streaming-support-in-amazon-sagemaker-hosting/
"""
def __init__(self, stream: Any) -> None:
self.byte_iterator = iter(stream)
self.buffer = io.BytesIO()
self.read_pos = 0
def __iter__(self) -> "LineIterator":
return self
def __next__(self) -> Any:
while True:
self.buffer.seek(self.read_pos)
line = self.buffer.readline()
if line and line[-1] == ord("\n"):
self.read_pos += len(line)
return line[:-1]
try:
chunk = next(self.byte_iterator)
except StopIteration:
if self.read_pos < self.buffer.getbuffer().nbytes:
continue
raise
if "PayloadPart" not in chunk:
# Unknown Event Type
continue
self.buffer.seek(0, io.SEEK_END)
self.buffer.write(chunk["PayloadPart"]["Bytes"])
class ContentHandlerBase(Generic[INPUT_TYPE, OUTPUT_TYPE]):
"""A handler class to transform input from LLM to a
format that SageMaker endpoint expects.
Similarly, the class handles transforming output from the
SageMaker endpoint to a format that LLM class expects.
"""
"""
Example:
.. code-block:: python
class ContentHandler(ContentHandlerBase):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
input_str = json.dumps({"prompt": prompt, **model_kwargs})
return input_str.encode('utf-8')
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
return response_json[0]["generated_text"]
"""
content_type: Optional[str] = "text/plain"
"""The MIME type of the input data passed to endpoint"""
accepts: Optional[str] = "text/plain"
"""The MIME type of the response data returned from endpoint"""
@abstractmethod
def transform_input(self, prompt: INPUT_TYPE, model_kwargs: Dict) -> bytes:
"""Transforms the input to a format that model can accept
as the request Body. Should return bytes or seekable file
like object in the format specified in the content_type
request header.
"""
@abstractmethod
def transform_output(self, output: bytes) -> OUTPUT_TYPE:
"""Transforms the output from the model to string that
the LLM class expects.
"""
class LLMContentHandler(ContentHandlerBase[str, str]):
"""Content handler for LLM class."""
class SagemakerEndpoint(LLM):
"""Sagemaker Inference Endpoint models.
To use, you must supply the endpoint name from your deployed
Sagemaker model & the region where it is deployed.
To authenticate, the AWS client uses the following methods to
automatically load credentials:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific credential profile should be used, you must pass
the name of the profile from the ~/.aws/credentials file that is to be used.
Make sure the credentials / roles used have the required policies to
access the Sagemaker endpoint.
See: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
"""
"""
Args:
region_name: The aws region e.g., `us-west-2`.
Falls back to the AWS_DEFAULT_REGION env variable
or region specified in ~/.aws/config.
credentials_profile_name: The name of the profile in the ~/.aws/credentials
or ~/.aws/config files, which has either access keys or role information
specified. If not specified, the default credential profile or, if on an
EC2 instance, credentials from IMDS will be used.
client: boto3 client for Sagemaker Endpoint
content_handler: Implementation for model specific LLMContentHandler
Example:
.. code-block:: python
from langchain_community.llms import SagemakerEndpoint
endpoint_name = (
"my-endpoint-name"
)
region_name = (
"us-west-2"
)
credentials_profile_name = (
"default"
)
se = SagemakerEndpoint(
endpoint_name=endpoint_name,
region_name=region_name,
credentials_profile_name=credentials_profile_name
)
#Use with boto3 client
client = boto3.client(
"sagemaker-runtime",
region_name=region_name
)
se = SagemakerEndpoint(
endpoint_name=endpoint_name,
client=client
)
"""
client: Any = None
"""Boto3 client for sagemaker runtime"""
endpoint_name: str = ""
"""The name of the endpoint from the deployed Sagemaker model.
Must be unique within an AWS Region."""
region_name: str = ""
"""The aws region where the Sagemaker model is deployed, eg. `us-west-2`."""
credentials_profile_name: Optional[str] = None
"""The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
has either access keys or role information specified.
If not specified, the default credential profile or, if on an EC2 instance,
credentials from IMDS will be used.
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
"""
content_handler: LLMContentHandler
"""The content handler class that provides an input and
output transform functions to handle formats between LLM
and the endpoint.
"""
streaming: bool = False
"""Whether to stream the results."""
"""
Example:
.. code-block:: python
from langchain_community.llms.sagemaker_endpoint import LLMContentHandler
class ContentHandler(LLMContentHandler):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
input_str = json.dumps({"prompt": prompt, **model_kwargs})
return input_str.encode('utf-8')
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
return response_json[0]["generated_text"]
"""
model_kwargs: Optional[Dict] = None
"""Keyword arguments to pass to the model."""
endpoint_kwargs: Optional[Dict] = None
"""Optional attributes passed to the invoke_endpoint
function. See `boto3`_ docs for more info.
.. _boto3: <https://boto3.amazonaws.com/v1/documentation/api/latest/index.html>
"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Dont do anything if client provided externally"""
if values.get("client") is not None:
return values
"""Validate that AWS credentials to and python package exists in environment."""
try:
import boto3
try:
if values["credentials_profile_name"] is not None:
session = boto3.Session(
profile_name=values["credentials_profile_name"]
)
else:
# use default credentials
session = boto3.Session()
values["client"] = session.client(
"sagemaker-runtime", region_name=values["region_name"]
)
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
"profile name are valid."
) from e
except ImportError:
raise ImportError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_name": self.endpoint_name},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "sagemaker_endpoint"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Sagemaker inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = se("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
_model_kwargs = {**_model_kwargs, **kwargs}
_endpoint_kwargs = self.endpoint_kwargs or {}
body = self.content_handler.transform_input(prompt, _model_kwargs)
content_type = self.content_handler.content_type
accepts = self.content_handler.accepts
if self.streaming and run_manager:
try:
resp = self.client.invoke_endpoint_with_response_stream(
EndpointName=self.endpoint_name,
Body=body,
ContentType=self.content_handler.content_type,
**_endpoint_kwargs,
)
iterator = LineIterator(resp["Body"])
current_completion: str = ""
for line in iterator:
resp = json.loads(line)
resp_output = resp.get("outputs")[0]
if stop is not None:
# Uses same approach as below
resp_output = enforce_stop_tokens(resp_output, stop)
current_completion += resp_output
run_manager.on_llm_new_token(resp_output)
return current_completion
except Exception as e:
raise ValueError(f"Error raised by streaming inference endpoint: {e}")
else:
try:
response = self.client.invoke_endpoint(
EndpointName=self.endpoint_name,
Body=body,
ContentType=content_type,
Accept=accepts,
**_endpoint_kwargs,
)
except Exception as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
text = self.content_handler.transform_output(response["Body"])
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to the sagemaker endpoint.
text = enforce_stop_tokens(text, stop)
return text
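if __name__ == "__main__":
    # Illustrative sketch, not part of the original module.
    #
    # 1) LineIterator reassembles JSON lines that arrive split across
    #    PayloadPart events, as described in its docstring.
    fake_stream = [
        {"PayloadPart": {"Bytes": b'{"outputs": '}},
        {"PayloadPart": {"Bytes": b'[" problem"]}\n'}},
    ]
    assert [json.loads(line) for line in LineIterator(fake_stream)] == [
        {"outputs": [" problem"]}
    ]

    # 2) A minimal content handler. The JSON shape ("prompt" in, a list of
    #    objects with "generated_text" out) is an assumption; real endpoints
    #    define their own request/response formats.
    class DemoContentHandler(LLMContentHandler):
        content_type = "application/json"
        accepts = "application/json"

        def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
            return json.dumps({"prompt": prompt, **model_kwargs}).encode("utf-8")

        def transform_output(self, output: bytes) -> str:
            response_json = json.loads(output.read().decode("utf-8"))
            return response_json[0]["generated_text"]

    # Wiring it up requires a deployed endpoint and AWS credentials:
    # se = SagemakerEndpoint(
    #     endpoint_name="my-endpoint-name",
    #     region_name="us-west-2",
    #     content_handler=DemoContentHandler(),
    # )
    # print(se.invoke("Tell me a joke."))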
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~retrievers~arxiv.py | from typing import List
from libs.core.langchain_core.callbacks import CallbackManagerForRetrieverRun
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.retrievers import BaseRetriever
from langchain_community.utilities.arxiv import ArxivAPIWrapper
class ArxivRetriever(BaseRetriever, ArxivAPIWrapper):
"""`Arxiv` retriever.
It wraps load() to get_relevant_documents().
It uses all ArxivAPIWrapper arguments without any change.
"""
get_full_documents: bool = False
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
if self.get_full_documents:
return self.load(query=query)
else:
return self.get_summaries_as_docs(query)
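if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: summaries-only
    # retrieval for a hypothetical query. Requires the `arxiv` package used by
    # ArxivAPIWrapper.
    retriever = ArxivRetriever(load_max_docs=2)
    for doc in retriever.get_relevant_documents("retrieval augmented generation"):
        print(doc.metadata)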
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~retrievers~bedrock.py | from typing import Any, Dict, List, Optional
from libs.core.langchain_core.callbacks import CallbackManagerForRetrieverRun
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.pydantic_v1 import BaseModel, root_validator
from libs.core.langchain_core.retrievers import BaseRetriever
class VectorSearchConfig(BaseModel, extra="allow"): # type: ignore[call-arg]
"""Configuration for vector search."""
numberOfResults: int = 4
class RetrievalConfig(BaseModel, extra="allow"): # type: ignore[call-arg]
"""Configuration for retrieval."""
vectorSearchConfiguration: VectorSearchConfig
class AmazonKnowledgeBasesRetriever(BaseRetriever):
"""`Amazon Bedrock Knowledge Bases` retrieval.
See https://aws.amazon.com/bedrock/knowledge-bases for more info.
Args:
knowledge_base_id: Knowledge Base ID.
region_name: The aws region e.g., `us-west-2`.
Fallback to AWS_DEFAULT_REGION env variable or region specified in
~/.aws/config.
credentials_profile_name: The name of the profile in the ~/.aws/credentials
or ~/.aws/config files, which has either access keys or role information
specified. If not specified, the default credential profile or, if on an
EC2 instance, credentials from IMDS will be used.
client: boto3 client for bedrock agent runtime.
retrieval_config: Configuration for retrieval.
Example:
.. code-block:: python
from langchain_community.retrievers import AmazonKnowledgeBasesRetriever
retriever = AmazonKnowledgeBasesRetriever(
knowledge_base_id="<knowledge-base-id>",
retrieval_config={
"vectorSearchConfiguration": {
"numberOfResults": 4
}
},
)
"""
knowledge_base_id: str
region_name: Optional[str] = None
credentials_profile_name: Optional[str] = None
endpoint_url: Optional[str] = None
client: Any
retrieval_config: RetrievalConfig
@root_validator(pre=True)
def create_client(cls, values: Dict[str, Any]) -> Dict[str, Any]:
if values.get("client") is not None:
return values
try:
import boto3
from botocore.client import Config
from botocore.exceptions import UnknownServiceError
if values.get("credentials_profile_name"):
session = boto3.Session(profile_name=values["credentials_profile_name"])
else:
# use default credentials
session = boto3.Session()
client_params = {
"config": Config(
connect_timeout=120, read_timeout=120, retries={"max_attempts": 0}
)
}
if values.get("region_name"):
client_params["region_name"] = values["region_name"]
if values.get("endpoint_url"):
client_params["endpoint_url"] = values["endpoint_url"]
values["client"] = session.client("bedrock-agent-runtime", **client_params)
return values
except ImportError:
raise ModuleNotFoundError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
except UnknownServiceError as e:
raise ModuleNotFoundError(
"Ensure that you have installed the latest boto3 package "
"that contains the API for `bedrock-runtime-agent`."
) from e
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
"profile name are valid."
) from e
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
response = self.client.retrieve(
retrievalQuery={"text": query.strip()},
knowledgeBaseId=self.knowledge_base_id,
retrievalConfiguration=self.retrieval_config.dict(),
)
results = response["retrievalResults"]
documents = []
for result in results:
documents.append(
Document(
page_content=result["content"]["text"],
metadata={
"location": result["location"],
"score": result["score"] if "score" in result else 0,
},
)
)
return documents
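if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: the knowledge base ID
    # is a placeholder, and valid AWS credentials with access to Bedrock
    # Knowledge Bases are assumed.
    retriever = AmazonKnowledgeBasesRetriever(
        knowledge_base_id="<knowledge-base-id>",
        retrieval_config={"vectorSearchConfiguration": {"numberOfResults": 4}},
    )
    for doc in retriever.get_relevant_documents("What is Amazon Bedrock?"):
        print(doc.metadata["score"], doc.page_content[:80])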
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~agents~output_parsers~xml.py | from typing import Union
from libs.core.langchain_core.agents import AgentAction, AgentFinish
from langchain.agents import AgentOutputParser
class XMLAgentOutputParser(AgentOutputParser):
"""Parses tool invocations and final answers in XML format.
Expects output to be in one of two formats.
If the output signals that an action should be taken,
should be in the below format. This will result in an AgentAction
being returned.
```
<tool>search</tool>
<tool_input>what is 2 + 2</tool_input>
```
If the output signals that a final answer should be given,
should be in the below format. This will result in an AgentFinish
being returned.
```
<final_answer>Foo</final_answer>
```
"""
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
if "</tool>" in text:
tool, tool_input = text.split("</tool>")
_tool = tool.split("<tool>")[1]
_tool_input = tool_input.split("<tool_input>")[1]
if "</tool_input>" in _tool_input:
_tool_input = _tool_input.split("</tool_input>")[0]
return AgentAction(tool=_tool, tool_input=_tool_input, log=text)
elif "<final_answer>" in text:
_, answer = text.split("<final_answer>")
if "</final_answer>" in answer:
answer = answer.split("</final_answer>")[0]
return AgentFinish(return_values={"output": answer}, log=text)
else:
raise ValueError
def get_format_instructions(self) -> str:
raise NotImplementedError
@property
def _type(self) -> str:
return "xml-agent"
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~url_playwright.py | """Loader that uses Playwright to load a page, then uses unstructured to load the html.
"""
import logging
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, List, Optional
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
if TYPE_CHECKING:
from playwright.async_api import Browser as AsyncBrowser
from playwright.async_api import Page as AsyncPage
from playwright.async_api import Response as AsyncResponse
from playwright.sync_api import Browser, Page, Response
logger = logging.getLogger(__name__)
class PlaywrightEvaluator(ABC):
"""Abstract base class for all evaluators.
Each evaluator should take a page, a browser instance, and a response
object, process the page as necessary, and return the resulting text.
"""
@abstractmethod
def evaluate(self, page: "Page", browser: "Browser", response: "Response") -> str:
"""Synchronously process the page and return the resulting text.
Args:
page: The page to process.
browser: The browser instance.
response: The response from page.goto().
Returns:
text: The text content of the page.
"""
pass
@abstractmethod
async def evaluate_async(
self, page: "AsyncPage", browser: "AsyncBrowser", response: "AsyncResponse"
) -> str:
"""Asynchronously process the page and return the resulting text.
Args:
page: The page to process.
browser: The browser instance.
response: The response from page.goto().
Returns:
text: The text content of the page.
"""
pass
class UnstructuredHtmlEvaluator(PlaywrightEvaluator):
"""Evaluates the page HTML content using the `unstructured` library."""
def __init__(self, remove_selectors: Optional[List[str]] = None):
"""Initialize UnstructuredHtmlEvaluator."""
try:
import unstructured # noqa:F401
except ImportError:
raise ImportError(
"unstructured package not found, please install it with "
"`pip install unstructured`"
)
self.remove_selectors = remove_selectors
def evaluate(self, page: "Page", browser: "Browser", response: "Response") -> str:
"""Synchronously process the HTML content of the page."""
from unstructured.partition.html import partition_html
for selector in self.remove_selectors or []:
elements = page.locator(selector).all()
for element in elements:
if element.is_visible():
element.evaluate("element => element.remove()")
page_source = page.content()
elements = partition_html(text=page_source)
return "\n\n".join([str(el) for el in elements])
async def evaluate_async(
self, page: "AsyncPage", browser: "AsyncBrowser", response: "AsyncResponse"
) -> str:
"""Asynchronously process the HTML content of the page."""
from unstructured.partition.html import partition_html
for selector in self.remove_selectors or []:
elements = await page.locator(selector).all()
for element in elements:
if await element.is_visible():
await element.evaluate("element => element.remove()")
page_source = await page.content()
elements = partition_html(text=page_source)
return "\n\n".join([str(el) for el in elements])
class PlaywrightURLLoader(BaseLoader):
"""Load `HTML` pages with `Playwright` and parse with `Unstructured`.
This is useful for loading pages that require javascript to render.
Attributes:
urls (List[str]): List of URLs to load.
continue_on_failure (bool): If True, continue loading other URLs on failure.
headless (bool): If True, the browser will run in headless mode.
"""
def __init__(
self,
urls: List[str],
continue_on_failure: bool = True,
headless: bool = True,
remove_selectors: Optional[List[str]] = None,
evaluator: Optional[PlaywrightEvaluator] = None,
):
"""Load a list of URLs using Playwright."""
try:
import playwright # noqa:F401
except ImportError:
raise ImportError(
"playwright package not found, please install it with "
"`pip install playwright`"
)
self.urls = urls
self.continue_on_failure = continue_on_failure
self.headless = headless
if remove_selectors and evaluator:
raise ValueError(
"`remove_selectors` and `evaluator` cannot be both not None"
)
# Use the provided evaluator, if any, otherwise, use the default.
self.evaluator = evaluator or UnstructuredHtmlEvaluator(remove_selectors)
def load(self) -> List[Document]:
"""Load the specified URLs using Playwright and create Document instances.
Returns:
List[Document]: A list of Document instances with loaded content.
"""
from playwright.sync_api import sync_playwright
docs: List[Document] = list()
with sync_playwright() as p:
browser = p.chromium.launch(headless=self.headless)
for url in self.urls:
try:
page = browser.new_page()
response = page.goto(url)
if response is None:
raise ValueError(f"page.goto() returned None for url {url}")
text = self.evaluator.evaluate(page, browser, response)
metadata = {"source": url}
docs.append(Document(page_content=text, metadata=metadata))
except Exception as e:
if self.continue_on_failure:
logger.error(
f"Error fetching or processing {url}, exception: {e}"
)
else:
raise e
browser.close()
return docs
async def aload(self) -> List[Document]:
"""Load the specified URLs with Playwright and create Documents asynchronously.
Use this function when in a jupyter notebook environment.
Returns:
List[Document]: A list of Document instances with loaded content.
"""
from playwright.async_api import async_playwright
docs: List[Document] = list()
async with async_playwright() as p:
browser = await p.chromium.launch(headless=self.headless)
for url in self.urls:
try:
page = await browser.new_page()
response = await page.goto(url)
if response is None:
raise ValueError(f"page.goto() returned None for url {url}")
text = await self.evaluator.evaluate_async(page, browser, response)
metadata = {"source": url}
docs.append(Document(page_content=text, metadata=metadata))
except Exception as e:
if self.continue_on_failure:
logger.error(
f"Error fetching or processing {url}, exception: {e}"
)
else:
raise e
await browser.close()
return docs
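if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: the URL is a
    # placeholder, and both `playwright` (with installed browsers) and
    # `unstructured` are assumed to be available.
    loader = PlaywrightURLLoader(
        urls=["https://example.com"],
        remove_selectors=["header", "footer"],
    )
    for doc in loader.load():
        print(doc.metadata["source"], len(doc.page_content))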
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~vectorstores~dingo.py | from __future__ import annotations
import logging
import uuid
from typing import Any, Iterable, List, Optional, Tuple
import numpy as np
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.vectorstores import VectorStore
from langchain_community.vectorstores.utils import maximal_marginal_relevance
logger = logging.getLogger(__name__)
class Dingo(VectorStore):
"""`Dingo` vector store.
To use, you should have the ``dingodb`` python package installed.
Example:
.. code-block:: python
from langchain_community.vectorstores import Dingo
from langchain_community.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
dingo = Dingo(embeddings, "text")
"""
def __init__(
self,
embedding: Embeddings,
text_key: str,
*,
client: Any = None,
index_name: Optional[str] = None,
dimension: int = 1024,
host: Optional[List[str]] = None,
user: str = "root",
password: str = "123123",
self_id: bool = False,
):
"""Initialize with Dingo client."""
try:
import dingodb
except ImportError:
raise ImportError(
"Could not import dingo python package. "
"Please install it with `pip install dingodb."
)
host = host if host is not None else ["172.20.31.10:13000"]
# collection
if client is not None:
dingo_client = client
else:
try:
# connect to dingo db
dingo_client = dingodb.DingoDB(user, password, host)
except ValueError as e:
raise ValueError(f"Dingo failed to connect: {e}")
self._text_key = text_key
self._client = dingo_client
if (
index_name is not None
and index_name not in dingo_client.get_index()
and index_name.upper() not in dingo_client.get_index()
):
if self_id is True:
dingo_client.create_index(
index_name, dimension=dimension, auto_id=False
)
else:
dingo_client.create_index(index_name, dimension=dimension)
self._index_name = index_name
self._embedding = embedding
@property
def embeddings(self) -> Optional[Embeddings]:
return self._embedding
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
text_key: str = "text",
batch_size: int = 500,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids to associate with the texts.
Returns:
List of ids from adding the texts into the vectorstore.
"""
# Embed and create the documents
ids = ids or [str(uuid.uuid1().int)[:13] for _ in texts]
metadatas_list = []
texts = list(texts)
embeds = self._embedding.embed_documents(texts)
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
metadata[self._text_key] = text
metadatas_list.append(metadata)
# upsert to Dingo
for i in range(0, len(list(texts)), batch_size):
j = i + batch_size
add_res = self._client.vector_add(
self._index_name, metadatas_list[i:j], embeds[i:j], ids[i:j]
)
if not add_res:
raise Exception("vector add fail")
return ids
def similarity_search(
self,
query: str,
k: int = 4,
search_params: Optional[dict] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Document]:
"""Return Dingo documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_params: Dictionary of argument(s) to filter on metadata
Returns:
List of Documents most similar to the query and score for each
"""
docs_and_scores = self.similarity_search_with_score(
query, k=k, search_params=search_params
)
return [doc for doc, _ in docs_and_scores]
def similarity_search_with_score(
self,
query: str,
k: int = 4,
search_params: Optional[dict] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return Dingo documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_params: Dictionary of argument(s) to filter on metadata
Returns:
List of Documents most similar to the query and score for each
"""
docs = []
query_obj = self._embedding.embed_query(query)
results = self._client.vector_search(
self._index_name, xq=query_obj, top_k=k, search_params=search_params
)
if not results:
return []
for res in results[0]["vectorWithDistances"]:
metadatas = res["scalarData"]
id = res["id"]
score = res["distance"]
text = metadatas[self._text_key]["fields"][0]["data"]
metadata = {"id": id, "text": text, "score": score}
for meta_key in metadatas.keys():
metadata[meta_key] = metadatas[meta_key]["fields"][0]["data"]
docs.append((Document(page_content=text, metadata=metadata), score))
return docs
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
search_params: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
results = self._client.vector_search(
self._index_name, [embedding], search_params=search_params, top_k=k
)
mmr_selected = maximal_marginal_relevance(
np.array([embedding], dtype=np.float32),
[
item["vector"]["floatValues"]
for item in results[0]["vectorWithDistances"]
],
k=k,
lambda_mult=lambda_mult,
)
selected = []
for i in mmr_selected:
meta_data = {}
for k, v in results[0]["vectorWithDistances"][i]["scalarData"].items():
meta_data.update({str(k): v["fields"][0]["data"]})
selected.append(meta_data)
return [
Document(page_content=metadata.pop(self._text_key), metadata=metadata)
for metadata in selected
]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
search_params: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
embedding = self._embedding.embed_query(query)
return self.max_marginal_relevance_search_by_vector(
embedding, k, fetch_k, lambda_mult, search_params
)
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
text_key: str = "text",
index_name: Optional[str] = None,
dimension: int = 1024,
client: Any = None,
host: List[str] = ["172.20.31.10:13000"],
user: str = "root",
password: str = "123123",
batch_size: int = 500,
**kwargs: Any,
) -> Dingo:
"""Construct Dingo wrapper from raw documents.
This is a user friendly interface that:
1. Embeds documents.
2. Adds the documents to a provided Dingo index
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain_community.vectorstores import Dingo
from langchain_community.embeddings import OpenAIEmbeddings
import dingodb
embeddings = OpenAIEmbeddings()
dingo = Dingo.from_texts(
texts,
embeddings,
index_name="langchain-demo"
)
"""
try:
import dingodb
except ImportError:
raise ImportError(
"Could not import dingo python package. "
"Please install it with `pip install dingodb`."
)
if client is not None:
dingo_client = client
else:
try:
# connect to dingo db
dingo_client = dingodb.DingoDB(user, password, host)
except ValueError as e:
raise ValueError(f"Dingo failed to connect: {e}")
if kwargs is not None and kwargs.get("self_id") is True:
if (
index_name is not None
and index_name not in dingo_client.get_index()
and index_name.upper() not in dingo_client.get_index()
):
dingo_client.create_index(
index_name, dimension=dimension, auto_id=False
)
else:
if (
index_name is not None
and index_name not in dingo_client.get_index()
and index_name.upper() not in dingo_client.get_index()
):
dingo_client.create_index(index_name, dimension=dimension)
# Embed and create the documents
ids = ids or [str(uuid.uuid1().int)[:13] for _ in texts]
metadatas_list = []
texts = list(texts)
embeds = embedding.embed_documents(texts)
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
metadata[text_key] = text
metadatas_list.append(metadata)
# upsert to Dingo
for i in range(0, len(list(texts)), batch_size):
j = i + batch_size
add_res = dingo_client.vector_add(
index_name, metadatas_list[i:j], embeds[i:j], ids[i:j]
)
if not add_res:
raise Exception("vector add fail")
return cls(embedding, text_key, client=dingo_client, index_name=index_name)
def delete(
self,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> Any:
"""Delete by vector IDs or filter.
Args:
ids: List of ids to delete.
"""
if ids is None:
raise ValueError("No ids provided to delete.")
return self._client.vector_delete(self._index_name, ids=ids)
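if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: building an index
    # from raw texts with deterministic fake embeddings. A reachable DingoDB
    # host and the index name below are assumptions for demonstration only.
    from langchain_community.embeddings.fake import FakeEmbeddings

    store = Dingo.from_texts(
        texts=["foo", "bar", "baz"],
        embedding=FakeEmbeddings(size=1024),
        index_name="langchain-demo",
        host=["127.0.0.1:13000"],
    )
    print(store.similarity_search("foo", k=1))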
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~callbacks~wandb_callback.py | import json
import tempfile
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Union
from libs.core.langchain_core.agents import AgentAction, AgentFinish
from libs.core.langchain_core.callbacks import BaseCallbackHandler
from libs.core.langchain_core.outputs import LLMResult
from langchain_community.callbacks.utils import (
BaseMetadataCallbackHandler,
flatten_dict,
hash_string,
import_pandas,
import_spacy,
import_textstat,
)
def import_wandb() -> Any:
"""Import the wandb python package and raise an error if it is not installed."""
try:
import wandb # noqa: F401
except ImportError:
raise ImportError(
"To use the wandb callback manager you need to have the `wandb` python "
"package installed. Please install it with `pip install wandb`"
)
return wandb
def load_json_to_dict(json_path: Union[str, Path]) -> dict:
"""Load json file to a dictionary.
Parameters:
json_path (str): The path to the json file.
Returns:
(dict): The dictionary representation of the json file.
"""
with open(json_path, "r") as f:
data = json.load(f)
return data
def analyze_text(
text: str,
complexity_metrics: bool = True,
visualize: bool = True,
nlp: Any = None,
output_dir: Optional[Union[str, Path]] = None,
) -> dict:
"""Analyze text using textstat and spacy.
Parameters:
text (str): The text to analyze.
complexity_metrics (bool): Whether to compute complexity metrics.
visualize (bool): Whether to visualize the text.
nlp (spacy.lang): The spacy language model to use for visualization.
output_dir (str): The directory to save the visualization files to.
Returns:
(dict): A dictionary containing the complexity metrics and visualization
files serialized in a wandb.Html element.
"""
resp = {}
textstat = import_textstat()
wandb = import_wandb()
spacy = import_spacy()
if complexity_metrics:
text_complexity_metrics = {
"flesch_reading_ease": textstat.flesch_reading_ease(text),
"flesch_kincaid_grade": textstat.flesch_kincaid_grade(text),
"smog_index": textstat.smog_index(text),
"coleman_liau_index": textstat.coleman_liau_index(text),
"automated_readability_index": textstat.automated_readability_index(text),
"dale_chall_readability_score": textstat.dale_chall_readability_score(text),
"difficult_words": textstat.difficult_words(text),
"linsear_write_formula": textstat.linsear_write_formula(text),
"gunning_fog": textstat.gunning_fog(text),
"text_standard": textstat.text_standard(text),
"fernandez_huerta": textstat.fernandez_huerta(text),
"szigriszt_pazos": textstat.szigriszt_pazos(text),
"gutierrez_polini": textstat.gutierrez_polini(text),
"crawford": textstat.crawford(text),
"gulpease_index": textstat.gulpease_index(text),
"osman": textstat.osman(text),
}
resp.update(text_complexity_metrics)
if visualize and nlp and output_dir is not None:
doc = nlp(text)
dep_out = spacy.displacy.render( # type: ignore
doc, style="dep", jupyter=False, page=True
)
dep_output_path = Path(output_dir, hash_string(f"dep-{text}") + ".html")
dep_output_path.open("w", encoding="utf-8").write(dep_out)
ent_out = spacy.displacy.render( # type: ignore
doc, style="ent", jupyter=False, page=True
)
ent_output_path = Path(output_dir, hash_string(f"ent-{text}") + ".html")
ent_output_path.open("w", encoding="utf-8").write(ent_out)
text_visualizations = {
"dependency_tree": wandb.Html(str(dep_output_path)),
"entities": wandb.Html(str(ent_output_path)),
}
resp.update(text_visualizations)
return resp
def construct_html_from_prompt_and_generation(prompt: str, generation: str) -> Any:
"""Construct an html element from a prompt and a generation.
Parameters:
prompt (str): The prompt.
generation (str): The generation.
Returns:
(wandb.Html): The html element."""
wandb = import_wandb()
formatted_prompt = prompt.replace("\n", "<br>")
formatted_generation = generation.replace("\n", "<br>")
return wandb.Html(
f"""
<p style="color:black;">{formatted_prompt}:</p>
<blockquote>
<p style="color:green;">
{formatted_generation}
</p>
</blockquote>
""",
inject=False,
)
class WandbCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
"""Callback Handler that logs to Weights and Biases.
Parameters:
job_type (str): The type of job.
project (str): The project to log to.
entity (str): The entity to log to.
tags (list): The tags to log.
group (str): The group to log to.
name (str): The name of the run.
notes (str): The notes to log.
visualize (bool): Whether to visualize the run.
complexity_metrics (bool): Whether to log complexity metrics.
stream_logs (bool): Whether to stream callback actions to W&B
This handler will utilize the associated callback method called and formats
the input of each callback function with metadata regarding the state of LLM run,
and adds the response to the list of records for both the {method}_records and
action_records. It then logs the response using the run.log() method to Weights and Biases.
"""
def __init__(
self,
job_type: Optional[str] = None,
project: Optional[str] = "langchain_callback_demo",
entity: Optional[str] = None,
tags: Optional[Sequence] = None,
group: Optional[str] = None,
name: Optional[str] = None,
notes: Optional[str] = None,
visualize: bool = False,
complexity_metrics: bool = False,
stream_logs: bool = False,
) -> None:
"""Initialize callback handler."""
wandb = import_wandb()
import_pandas()
import_textstat()
spacy = import_spacy()
super().__init__()
self.job_type = job_type
self.project = project
self.entity = entity
self.tags = tags
self.group = group
self.name = name
self.notes = notes
self.visualize = visualize
self.complexity_metrics = complexity_metrics
self.stream_logs = stream_logs
self.temp_dir = tempfile.TemporaryDirectory()
self.run: wandb.sdk.wandb_run.Run = wandb.init( # type: ignore
job_type=self.job_type,
project=self.project,
entity=self.entity,
tags=self.tags,
group=self.group,
name=self.name,
notes=self.notes,
)
warning = (
"DEPRECATION: The `WandbCallbackHandler` will soon be deprecated in favor "
"of the `WandbTracer`. Please update your code to use the `WandbTracer` "
"instead."
)
wandb.termwarn(
warning,
repeat=False,
)
self.callback_columns: list = []
self.action_records: list = []
self.complexity_metrics = complexity_metrics
self.visualize = visualize
self.nlp = spacy.load("en_core_web_sm")
def _init_resp(self) -> Dict:
return {k: None for k in self.callback_columns}
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Run when LLM starts."""
self.step += 1
self.llm_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update({"action": "on_llm_start"})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
for prompt in prompts:
prompt_resp = deepcopy(resp)
prompt_resp["prompts"] = prompt
self.on_llm_start_records.append(prompt_resp)
self.action_records.append(prompt_resp)
if self.stream_logs:
self.run.log(prompt_resp)
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run when LLM generates a new token."""
self.step += 1
self.llm_streams += 1
resp = self._init_resp()
resp.update({"action": "on_llm_new_token", "token": token})
resp.update(self.get_custom_callback_meta())
self.on_llm_token_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.run.log(resp)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
self.step += 1
self.llm_ends += 1
self.ends += 1
resp = self._init_resp()
resp.update({"action": "on_llm_end"})
resp.update(flatten_dict(response.llm_output or {}))
resp.update(self.get_custom_callback_meta())
for generations in response.generations:
for generation in generations:
generation_resp = deepcopy(resp)
generation_resp.update(flatten_dict(generation.dict()))
generation_resp.update(
analyze_text(
generation.text,
complexity_metrics=self.complexity_metrics,
visualize=self.visualize,
nlp=self.nlp,
output_dir=self.temp_dir.name,
)
)
self.on_llm_end_records.append(generation_resp)
self.action_records.append(generation_resp)
if self.stream_logs:
self.run.log(generation_resp)
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when LLM errors."""
self.step += 1
self.errors += 1
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Run when chain starts running."""
self.step += 1
self.chain_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update({"action": "on_chain_start"})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
chain_input = inputs["input"]
if isinstance(chain_input, str):
input_resp = deepcopy(resp)
input_resp["input"] = chain_input
self.on_chain_start_records.append(input_resp)
self.action_records.append(input_resp)
if self.stream_logs:
self.run.log(input_resp)
elif isinstance(chain_input, list):
for inp in chain_input:
input_resp = deepcopy(resp)
input_resp.update(inp)
self.on_chain_start_records.append(input_resp)
self.action_records.append(input_resp)
if self.stream_logs:
self.run.log(input_resp)
else:
raise ValueError("Unexpected data format provided!")
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
self.step += 1
self.chain_ends += 1
self.ends += 1
resp = self._init_resp()
resp.update({"action": "on_chain_end", "outputs": outputs["output"]})
resp.update(self.get_custom_callback_meta())
self.on_chain_end_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.run.log(resp)
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when chain errors."""
self.step += 1
self.errors += 1
def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> None:
"""Run when tool starts running."""
self.step += 1
self.tool_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update({"action": "on_tool_start", "input_str": input_str})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
self.on_tool_start_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.run.log(resp)
def on_tool_end(self, output: str, **kwargs: Any) -> None:
"""Run when tool ends running."""
self.step += 1
self.tool_ends += 1
self.ends += 1
resp = self._init_resp()
resp.update({"action": "on_tool_end", "output": output})
resp.update(self.get_custom_callback_meta())
self.on_tool_end_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.run.log(resp)
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when tool errors."""
self.step += 1
self.errors += 1
def on_text(self, text: str, **kwargs: Any) -> None:
"""
Run when agent is ending.
"""
self.step += 1
self.text_ctr += 1
resp = self._init_resp()
resp.update({"action": "on_text", "text": text})
resp.update(self.get_custom_callback_meta())
self.on_text_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.run.log(resp)
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run when agent ends running."""
self.step += 1
self.agent_ends += 1
self.ends += 1
resp = self._init_resp()
resp.update(
{
"action": "on_agent_finish",
"output": finish.return_values["output"],
"log": finish.log,
}
)
resp.update(self.get_custom_callback_meta())
self.on_agent_finish_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.run.log(resp)
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action."""
self.step += 1
self.tool_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update(
{
"action": "on_agent_action",
"tool": action.tool,
"tool_input": action.tool_input,
"log": action.log,
}
)
resp.update(self.get_custom_callback_meta())
self.on_agent_action_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.run.log(resp)
def _create_session_analysis_df(self) -> Any:
"""Create a dataframe with all the information from the session."""
pd = import_pandas()
on_llm_start_records_df = pd.DataFrame(self.on_llm_start_records)
on_llm_end_records_df = pd.DataFrame(self.on_llm_end_records)
llm_input_prompts_df = (
on_llm_start_records_df[["step", "prompts", "name"]]
.dropna(axis=1)
.rename({"step": "prompt_step"}, axis=1)
)
complexity_metrics_columns = []
visualizations_columns = []
if self.complexity_metrics:
complexity_metrics_columns = [
"flesch_reading_ease",
"flesch_kincaid_grade",
"smog_index",
"coleman_liau_index",
"automated_readability_index",
"dale_chall_readability_score",
"difficult_words",
"linsear_write_formula",
"gunning_fog",
"text_standard",
"fernandez_huerta",
"szigriszt_pazos",
"gutierrez_polini",
"crawford",
"gulpease_index",
"osman",
]
if self.visualize:
visualizations_columns = ["dependency_tree", "entities"]
llm_outputs_df = (
on_llm_end_records_df[
[
"step",
"text",
"token_usage_total_tokens",
"token_usage_prompt_tokens",
"token_usage_completion_tokens",
]
+ complexity_metrics_columns
+ visualizations_columns
]
.dropna(axis=1)
.rename({"step": "output_step", "text": "output"}, axis=1)
)
session_analysis_df = pd.concat([llm_input_prompts_df, llm_outputs_df], axis=1)
session_analysis_df["chat_html"] = session_analysis_df[
["prompts", "output"]
].apply(
lambda row: construct_html_from_prompt_and_generation(
row["prompts"], row["output"]
),
axis=1,
)
return session_analysis_df
def flush_tracker(
self,
langchain_asset: Any = None,
reset: bool = True,
finish: bool = False,
job_type: Optional[str] = None,
project: Optional[str] = None,
entity: Optional[str] = None,
tags: Optional[Sequence] = None,
group: Optional[str] = None,
name: Optional[str] = None,
notes: Optional[str] = None,
visualize: Optional[bool] = None,
complexity_metrics: Optional[bool] = None,
) -> None:
"""Flush the tracker and reset the session.
Args:
langchain_asset: The langchain asset to save.
reset: Whether to reset the session.
finish: Whether to finish the run.
job_type: The job type.
project: The project.
entity: The entity.
tags: The tags.
group: The group.
name: The name.
notes: The notes.
visualize: Whether to visualize.
complexity_metrics: Whether to compute complexity metrics.
Returns:
None
"""
pd = import_pandas()
wandb = import_wandb()
action_records_table = wandb.Table(dataframe=pd.DataFrame(self.action_records))
session_analysis_table = wandb.Table(
dataframe=self._create_session_analysis_df()
)
self.run.log(
{
"action_records": action_records_table,
"session_analysis": session_analysis_table,
}
)
if langchain_asset:
langchain_asset_path = Path(self.temp_dir.name, "model.json")
model_artifact = wandb.Artifact(name="model", type="model")
model_artifact.add(action_records_table, name="action_records")
model_artifact.add(session_analysis_table, name="session_analysis")
try:
langchain_asset.save(langchain_asset_path)
model_artifact.add_file(str(langchain_asset_path))
model_artifact.metadata = load_json_to_dict(langchain_asset_path)
except ValueError:
langchain_asset.save_agent(langchain_asset_path)
model_artifact.add_file(str(langchain_asset_path))
model_artifact.metadata = load_json_to_dict(langchain_asset_path)
except NotImplementedError as e:
print("Could not save model.")
print(repr(e))
pass
self.run.log_artifact(model_artifact)
if finish or reset:
self.run.finish()
self.temp_dir.cleanup()
self.reset_callback_meta()
if reset:
self.__init__( # type: ignore
job_type=job_type if job_type else self.job_type,
project=project if project else self.project,
entity=entity if entity else self.entity,
tags=tags if tags else self.tags,
group=group if group else self.group,
name=name if name else self.name,
notes=notes if notes else self.notes,
visualize=visualize if visualize else self.visualize,
complexity_metrics=complexity_metrics
if complexity_metrics
else self.complexity_metrics,
)
| [
"\n",
"prompt_step",
"name"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~retrievers~zilliz.py | import warnings
from typing import Any, Dict, List, Optional
from libs.core.langchain_core.callbacks import CallbackManagerForRetrieverRun
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.pydantic_v1 import root_validator
from libs.core.langchain_core.retrievers import BaseRetriever
from langchain_community.vectorstores.zilliz import Zilliz
# TODO: Update to ZillizClient + Hybrid Search when available
class ZillizRetriever(BaseRetriever):
"""`Zilliz API` retriever."""
embedding_function: Embeddings
"""The underlying embedding function from which documents will be retrieved."""
collection_name: str = "LangChainCollection"
"""The name of the collection in Zilliz."""
connection_args: Optional[Dict[str, Any]] = None
"""The connection arguments for the Zilliz client."""
consistency_level: str = "Session"
"""The consistency level for the Zilliz client."""
search_params: Optional[dict] = None
"""The search parameters for the Zilliz client."""
store: Zilliz
"""The underlying Zilliz store."""
retriever: BaseRetriever
"""The underlying retriever."""
@root_validator(pre=True)
def create_client(cls, values: dict) -> dict:
values["store"] = Zilliz(
values["embedding_function"],
values["collection_name"],
values["connection_args"],
values["consistency_level"],
)
values["retriever"] = values["store"].as_retriever(
search_kwargs={"param": values["search_params"]}
)
return values
def add_texts(
self, texts: List[str], metadatas: Optional[List[dict]] = None
) -> None:
"""Add text to the Zilliz store
Args:
texts (List[str]): The text
metadatas (List[dict]): Metadata dicts, must line up with existing store
"""
self.store.add_texts(texts, metadatas)
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
**kwargs: Any,
) -> List[Document]:
return self.retriever.get_relevant_documents(
query, run_manager=run_manager.get_child(), **kwargs
)
def ZillizRetreiver(*args: Any, **kwargs: Any) -> ZillizRetriever:
"""Deprecated ZillizRetreiver.
Please use ZillizRetriever ('i' before 'e') instead.
Args:
*args:
**kwargs:
Returns:
ZillizRetriever
"""
warnings.warn(
"ZillizRetreiver will be deprecated in the future. "
"Please use ZillizRetriever ('i' before 'e') instead.",
DeprecationWarning,
)
return ZillizRetriever(*args, **kwargs)
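# Illustrative usage sketch (assumptions: a reachable Zilliz deployment and an
# embeddings implementation; the connection values below are placeholders):
#
#     from langchain_community.embeddings import OpenAIEmbeddings
#
#     retriever = ZillizRetriever(
#         embedding_function=OpenAIEmbeddings(),
#         collection_name="LangChainCollection",
#         connection_args={"uri": "<your-zilliz-uri>", "token": "<your-zilliz-token>"},
#         consistency_level="Session",
#         search_params=None,
#     )
#     docs = retriever.get_relevant_documents("What is Zilliz?")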
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~baidu_qianfan_endpoint.py | from __future__ import annotations
import logging
from typing import (
Any,
AsyncIterator,
Dict,
Iterator,
List,
Optional,
)
from libs.core.langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.outputs import GenerationChunk
from libs.core.langchain_core.pydantic_v1 import Field, root_validator
from libs.core.langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
logger = logging.getLogger(__name__)
class QianfanLLMEndpoint(LLM):
"""Baidu Qianfan hosted open source or customized models.
    To use, you should have the ``qianfan`` python package installed, and
    the environment variables ``qianfan_ak`` and ``qianfan_sk`` set with
    your API Key and Secret Key.
    ak and sk are required parameters, which you can obtain from
    https://cloud.baidu.com/product/wenxinworkshop
Example:
.. code-block:: python
from langchain_community.llms import QianfanLLMEndpoint
qianfan_model = QianfanLLMEndpoint(model="ERNIE-Bot",
endpoint="your_endpoint", qianfan_ak="your_ak", qianfan_sk="your_sk")
"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
client: Any
qianfan_ak: Optional[str] = None
qianfan_sk: Optional[str] = None
streaming: Optional[bool] = False
"""Whether to stream the results or not."""
model: str = "ERNIE-Bot-turbo"
"""Model name.
    You can find the available models at https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu.
    Each preset model maps to an endpoint.
    `model` is ignored if `endpoint` is set.
"""
endpoint: Optional[str] = None
"""Endpoint of the Qianfan LLM, required if custom model used."""
request_timeout: Optional[int] = 60
"""request timeout for chat http requests"""
top_p: Optional[float] = 0.8
temperature: Optional[float] = 0.95
penalty_score: Optional[float] = 1
"""Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo.
In the case of other model, passing these params will not affect the result.
"""
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
values["qianfan_ak"] = convert_to_secret_str(
get_from_dict_or_env(
values,
"qianfan_ak",
"QIANFAN_AK",
default="",
)
)
values["qianfan_sk"] = convert_to_secret_str(
get_from_dict_or_env(
values,
"qianfan_sk",
"QIANFAN_SK",
default="",
)
)
params = {
"model": values["model"],
}
if values["qianfan_ak"].get_secret_value() != "":
params["ak"] = values["qianfan_ak"].get_secret_value()
if values["qianfan_sk"].get_secret_value() != "":
params["sk"] = values["qianfan_sk"].get_secret_value()
if values["endpoint"] is not None and values["endpoint"] != "":
params["endpoint"] = values["endpoint"]
try:
import qianfan
values["client"] = qianfan.Completion(**params)
except ImportError:
raise ImportError(
"qianfan package not found, please install it with "
"`pip install qianfan`"
)
return values
@property
def _identifying_params(self) -> Dict[str, Any]:
return {
**{"endpoint": self.endpoint, "model": self.model},
**super()._identifying_params,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "baidu-qianfan-endpoint"
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Qianfan API."""
normal_params = {
"model": self.model,
"endpoint": self.endpoint,
"stream": self.streaming,
"request_timeout": self.request_timeout,
"top_p": self.top_p,
"temperature": self.temperature,
"penalty_score": self.penalty_score,
}
return {**normal_params, **self.model_kwargs}
def _convert_prompt_msg_params(
self,
prompt: str,
**kwargs: Any,
) -> dict:
if "streaming" in kwargs:
kwargs["stream"] = kwargs.pop("streaming")
return {
**{"prompt": prompt, "model": self.model},
**self._default_params,
**kwargs,
}
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to an qianfan models endpoint for each generation with a prompt.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = qianfan_model("Tell me a joke.")
"""
if self.streaming:
completion = ""
for chunk in self._stream(prompt, stop, run_manager, **kwargs):
completion += chunk.text
return completion
params = self._convert_prompt_msg_params(prompt, **kwargs)
response_payload = self.client.do(**params)
return response_payload["result"]
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
if self.streaming:
completion = ""
async for chunk in self._astream(prompt, stop, run_manager, **kwargs):
completion += chunk.text
return completion
params = self._convert_prompt_msg_params(prompt, **kwargs)
response_payload = await self.client.ado(**params)
return response_payload["result"]
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
params = self._convert_prompt_msg_params(prompt, **{**kwargs, "stream": True})
for res in self.client.do(**params):
if res:
chunk = GenerationChunk(text=res["result"])
yield chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text)
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
params = self._convert_prompt_msg_params(prompt, **{**kwargs, "stream": True})
async for res in await self.client.ado(**params):
if res:
chunk = GenerationChunk(text=res["result"])
yield chunk
if run_manager:
await run_manager.on_llm_new_token(chunk.text)
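# Illustrative streaming sketch (assumes the QIANFAN_AK and QIANFAN_SK environment
# variables are set; the prompt text is arbitrary):
#
#     llm = QianfanLLMEndpoint(streaming=True)
#     for chunk in llm.stream("Write a short poem about the sea"):
#         print(chunk, end="", flush=True)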
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~chatglm.py | import logging
from typing import Any, List, Mapping, Optional
import requests
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.llms import LLM
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
class ChatGLM(LLM):
"""ChatGLM LLM service.
Example:
.. code-block:: python
from langchain_community.llms import ChatGLM
endpoint_url = (
"http://127.0.0.1:8000"
)
ChatGLM_llm = ChatGLM(
endpoint_url=endpoint_url
)
"""
endpoint_url: str = "http://127.0.0.1:8000/"
"""Endpoint URL to use."""
model_kwargs: Optional[dict] = None
"""Keyword arguments to pass to the model."""
max_token: int = 20000
"""Max token allowed to pass to the model."""
temperature: float = 0.1
"""LLM model temperature from 0 to 10."""
history: List[List] = []
"""History of the conversation"""
top_p: float = 0.7
"""Top P for nucleus sampling from 0 to 1"""
with_history: bool = False
"""Whether to use history or not"""
@property
def _llm_type(self) -> str:
return "chat_glm"
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url},
**{"model_kwargs": _model_kwargs},
}
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to a ChatGLM LLM inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = chatglm_llm("Who are you?")
"""
_model_kwargs = self.model_kwargs or {}
        # HTTP headers for the request
headers = {"Content-Type": "application/json"}
payload = {
"prompt": prompt,
"temperature": self.temperature,
"history": self.history,
"max_length": self.max_token,
"top_p": self.top_p,
}
payload.update(_model_kwargs)
payload.update(kwargs)
logger.debug(f"ChatGLM payload: {payload}")
# call api
try:
response = requests.post(self.endpoint_url, headers=headers, json=payload)
except requests.exceptions.RequestException as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
logger.debug(f"ChatGLM response: {response}")
if response.status_code != 200:
raise ValueError(f"Failed with response: {response}")
try:
parsed_response = response.json()
            # Check if the response content exists
if isinstance(parsed_response, dict):
content_keys = "response"
if content_keys in parsed_response:
text = parsed_response[content_keys]
else:
raise ValueError(f"No content in response : {parsed_response}")
else:
raise ValueError(f"Unexpected response type: {parsed_response}")
except requests.exceptions.JSONDecodeError as e:
raise ValueError(
f"Error raised during decoding response from inference endpoint: {e}."
f"\nResponse: {response.text}"
)
if stop is not None:
text = enforce_stop_tokens(text, stop)
if self.with_history:
self.history = self.history + [[None, parsed_response["response"]]]
return text
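# Illustrative multi-turn sketch (assumes a ChatGLM API server is running locally
# on port 8000, as in the class docstring above):
#
#     llm = ChatGLM(endpoint_url="http://127.0.0.1:8000", with_history=True)
#     llm("What is the capital of France?")
#     llm("How large is its population?")  # the previous response is kept in `history`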
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~tools~edenai~text_moderation.py | from __future__ import annotations
import logging
from typing import Optional
from libs.core.langchain_core.callbacks import CallbackManagerForToolRun
from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool
logger = logging.getLogger(__name__)
class EdenAiTextModerationTool(EdenaiTool):
"""Tool that queries the Eden AI Explicit text detection.
    For API reference, check the Eden AI documentation:
    https://docs.edenai.co/reference/image_explicit_content_create.
    To use, you should have
    the environment variable ``EDENAI_API_KEY`` set with your API token.
    You can find your token here: https://app.edenai.run/admin/account/settings
    """
name = "edenai_explicit_content_detection_text"
description = (
"A wrapper around edenai Services explicit content detection for text. "
"""Useful for when you have to scan text for offensive,
sexually explicit or suggestive content,
it checks also if there is any content of self-harm,
violence, racist or hate speech."""
"""the structure of the output is :
'the type of the explicit content : the likelihood of it being explicit'
the likelihood is a number
between 1 and 5, 1 being the lowest and 5 the highest.
something is explicit if the likelihood is equal or higher than 3.
for example :
nsfw_likelihood: 1
this is not explicit.
for example :
nsfw_likelihood: 3
this is explicit.
"""
"Input should be a string."
)
language: str
feature: str = "text"
subfeature: str = "moderation"
def _parse_response(self, response: list) -> str:
formatted_result = []
for result in response:
if "nsfw_likelihood" in result.keys():
formatted_result.append(
"nsfw_likelihood: " + str(result["nsfw_likelihood"])
)
for label, likelihood in zip(result["label"], result["likelihood"]):
formatted_result.append(f'"{label}": {str(likelihood)}')
return "\n".join(formatted_result)
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
query_params = {"text": query, "language": self.language}
return self._call_eden_ai(query_params)
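# Illustrative usage sketch (assumes EDENAI_API_KEY is set; the provider name is an
# assumption and is configured on the EdenaiTool base class):
#
#     tool = EdenAiTextModerationTool(providers=["openai"], language="en")
#     print(tool.run("this is a harmless example sentence"))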
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~agents~output_parsers~react_single_input.py | import re
from typing import Union
from libs.core.langchain_core.agents import AgentAction, AgentFinish
from libs.core.langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_ACTION = "Final Answer:"
MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE = (
"Invalid Format: Missing 'Action:' after 'Thought:"
)
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE = (
"Invalid Format: Missing 'Action Input:' after 'Action:'"
)
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = (
"Parsing LLM output produced both a final answer and a parse-able action:"
)
class ReActSingleInputOutputParser(AgentOutputParser):
"""Parses ReAct-style LLM calls that have a single tool input.
Expects output to be in one of two formats.
If the output signals that an action should be taken,
should be in the below format. This will result in an AgentAction
being returned.
```
Thought: agent thought here
Action: search
Action Input: what is the temperature in SF?
```
If the output signals that a final answer should be given,
should be in the below format. This will result in an AgentFinish
being returned.
```
Thought: agent thought here
Final Answer: The temperature is 100 degrees
```
"""
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
includes_answer = FINAL_ANSWER_ACTION in text
regex = (
r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
)
action_match = re.search(regex, text, re.DOTALL)
if action_match:
if includes_answer:
raise OutputParserException(
f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}"
)
action = action_match.group(1).strip()
action_input = action_match.group(2)
tool_input = action_input.strip(" ")
tool_input = tool_input.strip('"')
return AgentAction(action, tool_input, text)
elif includes_answer:
return AgentFinish(
{"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text
)
if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):
raise OutputParserException(
f"Could not parse LLM output: `{text}`",
observation=MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE,
llm_output=text,
send_to_llm=True,
)
elif not re.search(
r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL
):
raise OutputParserException(
f"Could not parse LLM output: `{text}`",
observation=MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
llm_output=text,
send_to_llm=True,
)
else:
raise OutputParserException(f"Could not parse LLM output: `{text}`")
@property
def _type(self) -> str:
return "react-single-input"
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~vectorstores~test_scann.py | """Test ScaNN functionality."""
import datetime
import tempfile
import numpy as np
import pytest
from libs.core.langchain_core.documents import Document
from langchain_community.docstore.in_memory import InMemoryDocstore
from langchain_community.vectorstores.scann import (
ScaNN,
dependable_scann_import,
normalize,
)
from langchain_community.vectorstores.utils import DistanceStrategy
from tests.integration_tests.vectorstores.fake_embeddings import (
ConsistentFakeEmbeddings,
FakeEmbeddings,
)
def test_scann() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docsearch = ScaNN.from_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore(
{
index_to_id[0]: Document(page_content="foo"),
index_to_id[1]: Document(page_content="bar"),
index_to_id[2]: Document(page_content="baz"),
}
)
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_scann_vector_mips_l2() -> None:
"""Test vector similarity with MIPS and L2."""
texts = ["foo", "bar", "baz"]
euclidean_search = ScaNN.from_texts(texts, FakeEmbeddings())
output = euclidean_search.similarity_search_with_score("foo", k=1)
expected_euclidean = [(Document(page_content="foo", metadata={}), 0.0)]
assert output == expected_euclidean
mips_search = ScaNN.from_texts(
texts,
FakeEmbeddings(),
distance_strategy=DistanceStrategy.MAX_INNER_PRODUCT,
normalize_L2=True,
)
output = mips_search.similarity_search_with_score("foo", k=1)
expected_mips = [(Document(page_content="foo", metadata={}), 1.0)]
assert output == expected_mips
def test_scann_with_config() -> None:
"""Test ScaNN with approximate search config."""
texts = [str(i) for i in range(10000)]
# Create a config with dimension = 10, k = 10.
# Tree: search 10 leaves in a search tree of 100 leaves.
# Quantization: uses 16-centroid quantizer every 2 dimension.
# Reordering: reorder top 100 results.
scann_config = (
dependable_scann_import()
.scann_ops_pybind.builder(np.zeros(shape=(0, 10)), 10, "squared_l2")
.tree(num_leaves=100, num_leaves_to_search=10)
.score_ah(2)
.reorder(100)
.create_config()
)
mips_search = ScaNN.from_texts(
texts,
ConsistentFakeEmbeddings(),
scann_config=scann_config,
distance_strategy=DistanceStrategy.MAX_INNER_PRODUCT,
normalize_L2=True,
)
output = mips_search.similarity_search_with_score("42", k=1)
expected = [(Document(page_content="42", metadata={}), 0.0)]
assert output == expected
def test_scann_vector_sim() -> None:
"""Test vector similarity."""
texts = ["foo", "bar", "baz"]
docsearch = ScaNN.from_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore(
{
index_to_id[0]: Document(page_content="foo"),
index_to_id[1]: Document(page_content="bar"),
index_to_id[2]: Document(page_content="baz"),
}
)
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
query_vec = FakeEmbeddings().embed_query(text="foo")
output = docsearch.similarity_search_by_vector(query_vec, k=1)
assert output == [Document(page_content="foo")]
def test_scann_vector_sim_with_score_threshold() -> None:
"""Test vector similarity."""
texts = ["foo", "bar", "baz"]
docsearch = ScaNN.from_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore(
{
index_to_id[0]: Document(page_content="foo"),
index_to_id[1]: Document(page_content="bar"),
index_to_id[2]: Document(page_content="baz"),
}
)
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
query_vec = FakeEmbeddings().embed_query(text="foo")
output = docsearch.similarity_search_by_vector(query_vec, k=2, score_threshold=0.2)
assert output == [Document(page_content="foo")]
def test_similarity_search_with_score_by_vector() -> None:
"""Test vector similarity with score by vector."""
texts = ["foo", "bar", "baz"]
docsearch = ScaNN.from_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore(
{
index_to_id[0]: Document(page_content="foo"),
index_to_id[1]: Document(page_content="bar"),
index_to_id[2]: Document(page_content="baz"),
}
)
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
query_vec = FakeEmbeddings().embed_query(text="foo")
output = docsearch.similarity_search_with_score_by_vector(query_vec, k=1)
assert len(output) == 1
assert output[0][0] == Document(page_content="foo")
def test_similarity_search_with_score_by_vector_with_score_threshold() -> None:
"""Test vector similarity with score by vector."""
texts = ["foo", "bar", "baz"]
docsearch = ScaNN.from_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore(
{
index_to_id[0]: Document(page_content="foo"),
index_to_id[1]: Document(page_content="bar"),
index_to_id[2]: Document(page_content="baz"),
}
)
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
query_vec = FakeEmbeddings().embed_query(text="foo")
output = docsearch.similarity_search_with_score_by_vector(
query_vec,
k=2,
score_threshold=0.2,
)
assert len(output) == 1
assert output[0][0] == Document(page_content="foo")
assert output[0][1] < 0.2
def test_scann_with_metadatas() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = ScaNN.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
expected_docstore = InMemoryDocstore(
{
docsearch.index_to_docstore_id[0]: Document(
page_content="foo", metadata={"page": 0}
),
docsearch.index_to_docstore_id[1]: Document(
page_content="bar", metadata={"page": 1}
),
docsearch.index_to_docstore_id[2]: Document(
page_content="baz", metadata={"page": 2}
),
}
)
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"page": 0})]
def test_scann_with_metadatas_and_filter() -> None:
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = ScaNN.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
expected_docstore = InMemoryDocstore(
{
docsearch.index_to_docstore_id[0]: Document(
page_content="foo", metadata={"page": 0}
),
docsearch.index_to_docstore_id[1]: Document(
page_content="bar", metadata={"page": 1}
),
docsearch.index_to_docstore_id[2]: Document(
page_content="baz", metadata={"page": 2}
),
}
)
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
output = docsearch.similarity_search("foo", k=1, filter={"page": 1})
assert output == [Document(page_content="bar", metadata={"page": 1})]
def test_scann_with_metadatas_and_list_filter() -> None:
texts = ["foo", "bar", "baz", "foo", "qux"]
metadatas = [{"page": i} if i <= 3 else {"page": 3} for i in range(len(texts))]
docsearch = ScaNN.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
expected_docstore = InMemoryDocstore(
{
docsearch.index_to_docstore_id[0]: Document(
page_content="foo", metadata={"page": 0}
),
docsearch.index_to_docstore_id[1]: Document(
page_content="bar", metadata={"page": 1}
),
docsearch.index_to_docstore_id[2]: Document(
page_content="baz", metadata={"page": 2}
),
docsearch.index_to_docstore_id[3]: Document(
page_content="foo", metadata={"page": 3}
),
docsearch.index_to_docstore_id[4]: Document(
page_content="qux", metadata={"page": 3}
),
}
)
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
output = docsearch.similarity_search("foor", k=1, filter={"page": [0, 1, 2]})
assert output == [Document(page_content="foo", metadata={"page": 0})]
def test_scann_search_not_found() -> None:
"""Test what happens when document is not found."""
texts = ["foo", "bar", "baz"]
docsearch = ScaNN.from_texts(texts, FakeEmbeddings())
# Get rid of the docstore to purposefully induce errors.
docsearch.docstore = InMemoryDocstore({})
with pytest.raises(ValueError):
docsearch.similarity_search("foo")
def test_scann_local_save_load() -> None:
"""Test end to end serialization."""
texts = ["foo", "bar", "baz"]
docsearch = ScaNN.from_texts(texts, FakeEmbeddings())
temp_timestamp = datetime.datetime.utcnow().strftime("%Y%m%d-%H%M%S")
with tempfile.TemporaryDirectory(suffix="_" + temp_timestamp + "/") as temp_folder:
docsearch.save_local(temp_folder)
new_docsearch = ScaNN.load_local(temp_folder, FakeEmbeddings())
assert new_docsearch.index is not None
def test_scann_normalize_l2() -> None:
"""Test normalize L2."""
texts = ["foo", "bar", "baz"]
emb = np.array(FakeEmbeddings().embed_documents(texts))
# Test norm is 1.
np.testing.assert_allclose(1, np.linalg.norm(normalize(emb), axis=-1))
# Test that there is no NaN after normalization.
np.testing.assert_array_equal(False, np.isnan(normalize(np.zeros(10))))
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~tools~openapi~utils~api_models.py | """Pydantic models for parsing an OpenAPI spec."""
from __future__ import annotations
import logging
from enum import Enum
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Sequence,
Tuple,
Type,
Union,
)
from libs.core.langchain_core.pydantic_v1 import BaseModel, Field
from langchain_community.tools.openapi.utils.openapi_utils import HTTPVerb, OpenAPISpec
logger = logging.getLogger(__name__)
PRIMITIVE_TYPES = {
"integer": int,
"number": float,
"string": str,
"boolean": bool,
"array": List,
"object": Dict,
"null": None,
}
# See https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#parameterIn
# for more info.
class APIPropertyLocation(Enum):
"""The location of the property."""
QUERY = "query"
PATH = "path"
HEADER = "header"
COOKIE = "cookie" # Not yet supported
@classmethod
def from_str(cls, location: str) -> "APIPropertyLocation":
"""Parse an APIPropertyLocation."""
try:
return cls(location)
except ValueError:
raise ValueError(
f"Invalid APIPropertyLocation. Valid values are {cls.__members__}"
)
_SUPPORTED_MEDIA_TYPES = ("application/json",)
SUPPORTED_LOCATIONS = {
APIPropertyLocation.QUERY,
APIPropertyLocation.PATH,
}
INVALID_LOCATION_TEMPL = (
'Unsupported APIPropertyLocation "{location}"'
" for parameter {name}. "
+ f"Valid values are {[loc.value for loc in SUPPORTED_LOCATIONS]}"
)
SCHEMA_TYPE = Union[str, Type, tuple, None, Enum]
class APIPropertyBase(BaseModel):
"""Base model for an API property."""
# The name of the parameter is required and is case-sensitive.
# If "in" is "path", the "name" field must correspond to a template expression
# within the path field in the Paths Object.
# If "in" is "header" and the "name" field is "Accept", "Content-Type",
# or "Authorization", the parameter definition is ignored.
# For all other cases, the "name" corresponds to the parameter
# name used by the "in" property.
name: str = Field(alias="name")
"""The name of the property."""
required: bool = Field(alias="required")
"""Whether the property is required."""
type: SCHEMA_TYPE = Field(alias="type")
"""The type of the property.
Either a primitive type, a component/parameter type,
or an array or 'object' (dict) of the above."""
default: Optional[Any] = Field(alias="default", default=None)
"""The default value of the property."""
description: Optional[str] = Field(alias="description", default=None)
"""The description of the property."""
if TYPE_CHECKING:
from openapi_pydantic import (
MediaType,
Parameter,
RequestBody,
Schema,
)
class APIProperty(APIPropertyBase):
"""A model for a property in the query, path, header, or cookie params."""
location: APIPropertyLocation = Field(alias="location")
"""The path/how it's being passed to the endpoint."""
@staticmethod
def _cast_schema_list_type(
schema: Schema,
) -> Optional[Union[str, Tuple[str, ...]]]:
type_ = schema.type
if not isinstance(type_, list):
return type_
else:
return tuple(type_)
@staticmethod
def _get_schema_type_for_enum(parameter: Parameter, schema: Schema) -> Enum:
"""Get the schema type when the parameter is an enum."""
param_name = f"{parameter.name}Enum"
return Enum(param_name, {str(v): v for v in schema.enum})
@staticmethod
def _get_schema_type_for_array(
schema: Schema,
) -> Optional[Union[str, Tuple[str, ...]]]:
from openapi_pydantic import (
Reference,
Schema,
)
items = schema.items
if isinstance(items, Schema):
schema_type = APIProperty._cast_schema_list_type(items)
elif isinstance(items, Reference):
ref_name = items.ref.split("/")[-1]
            schema_type = ref_name  # TODO: Add ref definitions to make this valid
else:
raise ValueError(f"Unsupported array items: {items}")
if isinstance(schema_type, str):
# TODO: recurse
schema_type = (schema_type,)
return schema_type
@staticmethod
def _get_schema_type(parameter: Parameter, schema: Optional[Schema]) -> SCHEMA_TYPE:
if schema is None:
return None
schema_type: SCHEMA_TYPE = APIProperty._cast_schema_list_type(schema)
if schema_type == "array":
schema_type = APIProperty._get_schema_type_for_array(schema)
elif schema_type == "object":
# TODO: Resolve array and object types to components.
raise NotImplementedError("Objects not yet supported")
elif schema_type in PRIMITIVE_TYPES:
if schema.enum:
schema_type = APIProperty._get_schema_type_for_enum(parameter, schema)
else:
# Directly use the primitive type
pass
else:
raise NotImplementedError(f"Unsupported type: {schema_type}")
return schema_type
@staticmethod
def _validate_location(location: APIPropertyLocation, name: str) -> None:
if location not in SUPPORTED_LOCATIONS:
raise NotImplementedError(
INVALID_LOCATION_TEMPL.format(location=location, name=name)
)
@staticmethod
def _validate_content(content: Optional[Dict[str, MediaType]]) -> None:
if content:
raise ValueError(
"API Properties with media content not supported. "
"Media content only supported within APIRequestBodyProperty's"
)
@staticmethod
def _get_schema(parameter: Parameter, spec: OpenAPISpec) -> Optional[Schema]:
from openapi_pydantic import (
Reference,
Schema,
)
schema = parameter.param_schema
if isinstance(schema, Reference):
schema = spec.get_referenced_schema(schema)
elif schema is None:
return None
elif not isinstance(schema, Schema):
raise ValueError(f"Error dereferencing schema: {schema}")
return schema
@staticmethod
def is_supported_location(location: str) -> bool:
"""Return whether the provided location is supported."""
try:
return APIPropertyLocation.from_str(location) in SUPPORTED_LOCATIONS
except ValueError:
return False
@classmethod
def from_parameter(cls, parameter: Parameter, spec: OpenAPISpec) -> "APIProperty":
"""Instantiate from an OpenAPI Parameter."""
location = APIPropertyLocation.from_str(parameter.param_in)
cls._validate_location(
location,
parameter.name,
)
cls._validate_content(parameter.content)
schema = cls._get_schema(parameter, spec)
schema_type = cls._get_schema_type(parameter, schema)
default_val = schema.default if schema is not None else None
return cls(
name=parameter.name,
location=location,
default=default_val,
description=parameter.description,
required=parameter.required,
type=schema_type,
)
class APIRequestBodyProperty(APIPropertyBase):
"""A model for a request body property."""
properties: List["APIRequestBodyProperty"] = Field(alias="properties")
"""The sub-properties of the property."""
# This is useful for handling nested property cycles.
# We can define separate types in that case.
references_used: List[str] = Field(alias="references_used")
"""The references used by the property."""
@classmethod
def _process_object_schema(
cls, schema: Schema, spec: OpenAPISpec, references_used: List[str]
) -> Tuple[Union[str, List[str], None], List["APIRequestBodyProperty"]]:
from openapi_pydantic import (
Reference,
)
properties = []
required_props = schema.required or []
if schema.properties is None:
raise ValueError(
f"No properties found when processing object schema: {schema}"
)
for prop_name, prop_schema in schema.properties.items():
if isinstance(prop_schema, Reference):
ref_name = prop_schema.ref.split("/")[-1]
if ref_name not in references_used:
references_used.append(ref_name)
prop_schema = spec.get_referenced_schema(prop_schema)
else:
continue
properties.append(
cls.from_schema(
schema=prop_schema,
name=prop_name,
required=prop_name in required_props,
spec=spec,
references_used=references_used,
)
)
return schema.type, properties
@classmethod
def _process_array_schema(
cls,
schema: Schema,
name: str,
spec: OpenAPISpec,
references_used: List[str],
) -> str:
from openapi_pydantic import Reference, Schema
items = schema.items
if items is not None:
if isinstance(items, Reference):
ref_name = items.ref.split("/")[-1]
if ref_name not in references_used:
references_used.append(ref_name)
items = spec.get_referenced_schema(items)
else:
pass
return f"Array<{ref_name}>"
else:
pass
if isinstance(items, Schema):
array_type = cls.from_schema(
schema=items,
name=f"{name}Item",
required=True, # TODO: Add required
spec=spec,
references_used=references_used,
)
return f"Array<{array_type.type}>"
return "array"
@classmethod
def from_schema(
cls,
schema: Schema,
name: str,
required: bool,
spec: OpenAPISpec,
references_used: Optional[List[str]] = None,
) -> "APIRequestBodyProperty":
"""Recursively populate from an OpenAPI Schema."""
if references_used is None:
references_used = []
schema_type = schema.type
properties: List[APIRequestBodyProperty] = []
if schema_type == "object" and schema.properties:
schema_type, properties = cls._process_object_schema(
schema, spec, references_used
)
elif schema_type == "array":
schema_type = cls._process_array_schema(schema, name, spec, references_used)
elif schema_type in PRIMITIVE_TYPES:
# Use the primitive type directly
pass
elif schema_type is None:
            # No typing specified/parsed. Will map to 'any'.
pass
else:
raise ValueError(f"Unsupported type: {schema_type}")
return cls(
name=name,
required=required,
type=schema_type,
default=schema.default,
description=schema.description,
properties=properties,
references_used=references_used,
)
# class APIRequestBodyProperty(APIPropertyBase):
class APIRequestBody(BaseModel):
"""A model for a request body."""
description: Optional[str] = Field(alias="description")
"""The description of the request body."""
properties: List[APIRequestBodyProperty] = Field(alias="properties")
# E.g., application/json - we only support JSON at the moment.
media_type: str = Field(alias="media_type")
"""The media type of the request body."""
@classmethod
def _process_supported_media_type(
cls,
media_type_obj: MediaType,
spec: OpenAPISpec,
) -> List[APIRequestBodyProperty]:
"""Process the media type of the request body."""
from openapi_pydantic import Reference
references_used = []
schema = media_type_obj.media_type_schema
if isinstance(schema, Reference):
references_used.append(schema.ref.split("/")[-1])
schema = spec.get_referenced_schema(schema)
if schema is None:
raise ValueError(
f"Could not resolve schema for media type: {media_type_obj}"
)
api_request_body_properties = []
required_properties = schema.required or []
if schema.type == "object" and schema.properties:
for prop_name, prop_schema in schema.properties.items():
if isinstance(prop_schema, Reference):
prop_schema = spec.get_referenced_schema(prop_schema)
api_request_body_properties.append(
APIRequestBodyProperty.from_schema(
schema=prop_schema,
name=prop_name,
required=prop_name in required_properties,
spec=spec,
)
)
else:
api_request_body_properties.append(
APIRequestBodyProperty(
name="body",
required=True,
type=schema.type,
default=schema.default,
description=schema.description,
properties=[],
references_used=references_used,
)
)
return api_request_body_properties
@classmethod
def from_request_body(
cls, request_body: RequestBody, spec: OpenAPISpec
) -> "APIRequestBody":
"""Instantiate from an OpenAPI RequestBody."""
properties = []
for media_type, media_type_obj in request_body.content.items():
if media_type not in _SUPPORTED_MEDIA_TYPES:
continue
api_request_body_properties = cls._process_supported_media_type(
media_type_obj,
spec,
)
properties.extend(api_request_body_properties)
return cls(
description=request_body.description,
properties=properties,
media_type=media_type,
)
# class APIRequestBodyProperty(APIPropertyBase):
# class APIRequestBody(BaseModel):
class APIOperation(BaseModel):
"""A model for a single API operation."""
operation_id: str = Field(alias="operation_id")
"""The unique identifier of the operation."""
description: Optional[str] = Field(alias="description")
"""The description of the operation."""
base_url: str = Field(alias="base_url")
"""The base URL of the operation."""
path: str = Field(alias="path")
"""The path of the operation."""
method: HTTPVerb = Field(alias="method")
"""The HTTP method of the operation."""
properties: Sequence[APIProperty] = Field(alias="properties")
# TODO: Add parse in used components to be able to specify what type of
# referenced object it is.
# """The properties of the operation."""
# components: Dict[str, BaseModel] = Field(alias="components")
request_body: Optional[APIRequestBody] = Field(alias="request_body")
"""The request body of the operation."""
@staticmethod
def _get_properties_from_parameters(
parameters: List[Parameter], spec: OpenAPISpec
) -> List[APIProperty]:
"""Get the properties of the operation."""
properties = []
for param in parameters:
if APIProperty.is_supported_location(param.param_in):
properties.append(APIProperty.from_parameter(param, spec))
elif param.required:
raise ValueError(
INVALID_LOCATION_TEMPL.format(
location=param.param_in, name=param.name
)
)
else:
logger.warning(
INVALID_LOCATION_TEMPL.format(
location=param.param_in, name=param.name
)
+ " Ignoring optional parameter"
)
pass
return properties
@classmethod
def from_openapi_url(
cls,
spec_url: str,
path: str,
method: str,
) -> "APIOperation":
"""Create an APIOperation from an OpenAPI URL."""
spec = OpenAPISpec.from_url(spec_url)
return cls.from_openapi_spec(spec, path, method)
@classmethod
def from_openapi_spec(
cls,
spec: OpenAPISpec,
path: str,
method: str,
) -> "APIOperation":
"""Create an APIOperation from an OpenAPI spec."""
operation = spec.get_operation(path, method)
parameters = spec.get_parameters_for_operation(operation)
properties = cls._get_properties_from_parameters(parameters, spec)
operation_id = OpenAPISpec.get_cleaned_operation_id(operation, path, method)
request_body = spec.get_request_body_for_operation(operation)
api_request_body = (
APIRequestBody.from_request_body(request_body, spec)
if request_body is not None
else None
)
description = operation.description or operation.summary
if not description and spec.paths is not None:
description = spec.paths[path].description or spec.paths[path].summary
return cls(
operation_id=operation_id,
description=description or "",
base_url=spec.base_url,
path=path,
method=method,
properties=properties,
request_body=api_request_body,
)
@staticmethod
def ts_type_from_python(type_: SCHEMA_TYPE) -> str:
if type_ is None:
# TODO: Handle Nones better. These often result when
# parsing specs that are < v3
return "any"
elif isinstance(type_, str):
return {
"str": "string",
"integer": "number",
"float": "number",
"date-time": "string",
}.get(type_, type_)
elif isinstance(type_, tuple):
return f"Array<{APIOperation.ts_type_from_python(type_[0])}>"
elif isinstance(type_, type) and issubclass(type_, Enum):
return " | ".join([f"'{e.value}'" for e in type_])
else:
return str(type_)
def _format_nested_properties(
self, properties: List[APIRequestBodyProperty], indent: int = 2
) -> str:
"""Format nested properties."""
formatted_props = []
for prop in properties:
prop_name = prop.name
prop_type = self.ts_type_from_python(prop.type)
prop_required = "" if prop.required else "?"
prop_desc = f"/* {prop.description} */" if prop.description else ""
if prop.properties:
nested_props = self._format_nested_properties(
prop.properties, indent + 2
)
prop_type = f"{{\n{nested_props}\n{' ' * indent}}}"
formatted_props.append(
f"{prop_desc}\n{' ' * indent}{prop_name}"
f"{prop_required}: {prop_type},"
)
return "\n".join(formatted_props)
def to_typescript(self) -> str:
"""Get typescript string representation of the operation."""
operation_name = self.operation_id
params = []
if self.request_body:
formatted_request_body_props = self._format_nested_properties(
self.request_body.properties
)
params.append(formatted_request_body_props)
for prop in self.properties:
prop_name = prop.name
prop_type = self.ts_type_from_python(prop.type)
prop_required = "" if prop.required else "?"
prop_desc = f"/* {prop.description} */" if prop.description else ""
params.append(f"{prop_desc}\n\t\t{prop_name}{prop_required}: {prop_type},")
formatted_params = "\n".join(params).strip()
description_str = f"/* {self.description} */" if self.description else ""
typescript_definition = f"""
{description_str}
type {operation_name} = (_: {{
{formatted_params}
}}) => any;
"""
return typescript_definition.strip()
@property
def query_params(self) -> List[str]:
return [
property.name
for property in self.properties
if property.location == APIPropertyLocation.QUERY
]
@property
def path_params(self) -> List[str]:
return [
property.name
for property in self.properties
if property.location == APIPropertyLocation.PATH
]
@property
def body_params(self) -> List[str]:
if self.request_body is None:
return []
return [prop.name for prop in self.request_body.properties]
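# Illustrative usage sketch (the spec URL, path and method below are placeholders
# for a reachable OpenAPI 3.x spec):
#
#     operation = APIOperation.from_openapi_url(
#         "https://example.com/openapi.json", "/pets", "get"
#     )
#     print(operation.to_typescript())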
| [] |
2024-01-10 | mth93/langchain | templates~cohere-librarian~cohere_librarian~blurb_matcher.py | import csv
from langchain.chains.question_answering import load_qa_chain
from langchain.embeddings import CohereEmbeddings
from langchain.prompts import PromptTemplate
from langchain.vectorstores import Chroma
from .chat import chat
csv_file = open("data/books_with_blurbs.csv", "r")
csv_reader = csv.reader(csv_file)
csv_data = list(csv_reader)
parsed_data = [
{
"id": x[0],
"title": x[1],
"author": x[2],
"year": x[3],
"publisher": x[4],
"blurb": x[5],
}
for x in csv_data
]
parsed_data[1]
embeddings = CohereEmbeddings()
docsearch = Chroma.from_texts(
[x["title"] for x in parsed_data], embeddings, metadatas=parsed_data
).as_retriever()
prompt_template = """
{context}
Use the book recommendations to suggest books for the user to read.
Only use the titles of the books, do not make up titles. Format the response as
a bulleted list prefixed by a relevant message.
User: {message}"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "message"]
)
book_rec_chain = {
"input_documents": lambda x: docsearch.get_relevant_documents(x["message"]),
"message": lambda x: x["message"],
} | load_qa_chain(chat, chain_type="stuff", prompt=PROMPT)
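# Illustrative invocation sketch (assumes COHERE_API_KEY is set and the CSV above exists;
# the StuffDocumentsChain built by load_qa_chain exposes its answer as "output_text"):
#
#     result = book_rec_chain.invoke({"message": "I enjoy epic fantasy novels"})
#     print(result["output_text"])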
| [
"context",
"\n{context}\n\nUse the book reccommendations to suggest books for the user to read.\nOnly use the titles of the books, do not make up titles. Format the response as\na bulleted list prefixed by a relevant message.\n\nUser: {message}"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~async_html.py | import asyncio
import logging
import warnings
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Dict, Iterator, List, Optional, Union, cast
import aiohttp
import requests
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
default_header_template = {
"User-Agent": "",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*"
";q=0.8",
"Accept-Language": "en-US,en;q=0.5",
"Referer": "https://www.google.com/",
"DNT": "1",
"Connection": "keep-alive",
"Upgrade-Insecure-Requests": "1",
}
def _build_metadata(soup: Any, url: str) -> dict:
"""Build metadata from BeautifulSoup output."""
metadata = {"source": url}
if title := soup.find("title"):
metadata["title"] = title.get_text()
if description := soup.find("meta", attrs={"name": "description"}):
metadata["description"] = description.get("content", "No description found.")
if html := soup.find("html"):
metadata["language"] = html.get("lang", "No language found.")
return metadata
class AsyncHtmlLoader(BaseLoader):
"""Load `HTML` asynchronously."""
def __init__(
self,
web_path: Union[str, List[str]],
header_template: Optional[dict] = None,
verify_ssl: Optional[bool] = True,
proxies: Optional[dict] = None,
autoset_encoding: bool = True,
encoding: Optional[str] = None,
default_parser: str = "html.parser",
requests_per_second: int = 2,
requests_kwargs: Optional[Dict[str, Any]] = None,
raise_for_status: bool = False,
ignore_load_errors: bool = False,
):
"""Initialize with a webpage path."""
        # TODO: Deprecate web_path in favor of web_paths, and remove this.
        # It is left like this because a number of loaders expect single URLs.
if isinstance(web_path, str):
self.web_paths = [web_path]
elif isinstance(web_path, List):
self.web_paths = web_path
headers = header_template or default_header_template
if not headers.get("User-Agent"):
try:
from fake_useragent import UserAgent
headers["User-Agent"] = UserAgent().random
except ImportError:
logger.info(
"fake_useragent not found, using default user agent."
"To get a realistic header for requests, "
"`pip install fake_useragent`."
)
self.session = requests.Session()
self.session.headers = dict(headers)
self.session.verify = verify_ssl
if proxies:
self.session.proxies.update(proxies)
self.requests_per_second = requests_per_second
self.default_parser = default_parser
self.requests_kwargs = requests_kwargs or {}
self.raise_for_status = raise_for_status
self.autoset_encoding = autoset_encoding
self.encoding = encoding
self.ignore_load_errors = ignore_load_errors
def _fetch_valid_connection_docs(self, url: str) -> Any:
if self.ignore_load_errors:
try:
return self.session.get(url, **self.requests_kwargs)
except Exception as e:
warnings.warn(str(e))
return None
return self.session.get(url, **self.requests_kwargs)
@staticmethod
def _check_parser(parser: str) -> None:
"""Check that parser is valid for bs4."""
valid_parsers = ["html.parser", "lxml", "xml", "lxml-xml", "html5lib"]
if parser not in valid_parsers:
raise ValueError(
"`parser` must be one of " + ", ".join(valid_parsers) + "."
)
def _scrape(
self,
url: str,
parser: Union[str, None] = None,
bs_kwargs: Optional[dict] = None,
) -> Any:
from bs4 import BeautifulSoup
if parser is None:
if url.endswith(".xml"):
parser = "xml"
else:
parser = self.default_parser
self._check_parser(parser)
html_doc = self._fetch_valid_connection_docs(url)
if not getattr(html_doc, "ok", False):
return None
if self.raise_for_status:
html_doc.raise_for_status()
if self.encoding is not None:
html_doc.encoding = self.encoding
elif self.autoset_encoding:
html_doc.encoding = html_doc.apparent_encoding
return BeautifulSoup(html_doc.text, parser, **(bs_kwargs or {}))
async def _fetch(
self, url: str, retries: int = 3, cooldown: int = 2, backoff: float = 1.5
) -> str:
async with aiohttp.ClientSession() as session:
for i in range(retries):
try:
async with session.get(
url,
headers=self.session.headers,
ssl=None if self.session.verify else False,
) as response:
try:
text = await response.text()
except UnicodeDecodeError:
logger.error(f"Failed to decode content from {url}")
text = ""
return text
except aiohttp.ClientConnectionError as e:
if i == retries - 1 and self.ignore_load_errors:
logger.warning(f"Error fetching {url} after {retries} retries.")
return ""
elif i == retries - 1:
raise
else:
logger.warning(
f"Error fetching {url} with attempt "
f"{i + 1}/{retries}: {e}. Retrying..."
)
await asyncio.sleep(cooldown * backoff**i)
raise ValueError("retry count exceeded")
async def _fetch_with_rate_limit(
self, url: str, semaphore: asyncio.Semaphore
) -> str:
async with semaphore:
return await self._fetch(url)
async def fetch_all(self, urls: List[str]) -> Any:
"""Fetch all urls concurrently with rate limiting."""
semaphore = asyncio.Semaphore(self.requests_per_second)
tasks = []
for url in urls:
task = asyncio.ensure_future(self._fetch_with_rate_limit(url, semaphore))
tasks.append(task)
try:
from tqdm.asyncio import tqdm_asyncio
return await tqdm_asyncio.gather(
*tasks, desc="Fetching pages", ascii=True, mininterval=1
)
except ImportError:
warnings.warn("For better logging of progress, `pip install tqdm`")
return await asyncio.gather(*tasks)
def lazy_load(self) -> Iterator[Document]:
"""Lazy load text from the url(s) in web_path."""
for doc in self.load():
yield doc
def load(self) -> List[Document]:
"""Load text from the url(s) in web_path."""
try:
# Raises RuntimeError if there is no current event loop.
asyncio.get_running_loop()
# If there is a current event loop, we need to run the async code
# in a separate loop, in a separate thread.
with ThreadPoolExecutor(max_workers=1) as executor:
future = executor.submit(asyncio.run, self.fetch_all(self.web_paths))
results = future.result()
except RuntimeError:
results = asyncio.run(self.fetch_all(self.web_paths))
docs = []
for i, text in enumerate(cast(List[str], results)):
soup = self._scrape(self.web_paths[i])
if not soup:
continue
metadata = _build_metadata(soup, self.web_paths[i])
docs.append(Document(page_content=text, metadata=metadata))
return docs
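# Usage sketch for AsyncHtmlLoader: the URLs below are placeholders, and running it
# assumes network access plus the optional `bs4` (and, for a progress bar, `tqdm`)
# dependencies; it only exercises the public `load()` API defined above.
if __name__ == "__main__":
    loader = AsyncHtmlLoader(
        ["https://example.com", "https://www.example.org"],
        requests_per_second=2,
        ignore_load_errors=True,
    )
    docs = loader.load()
    for doc in docs:
        # Each Document carries the raw HTML plus metadata built by _build_metadata.
        print(doc.metadata.get("source"), doc.metadata.get("title"), len(doc.page_content))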
| [
"{'User-Agent': '', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8', 'Accept-Language': 'en-US,en;q=0.5', 'Referer': 'https://www.google.com/', 'DNT': '1', 'Connection': 'keep-alive', 'Upgrade-Insecure-Requests': '1'}"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~embeddings~cloudflare_workersai.py | from typing import Any, Dict, List
import requests
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra
DEFAULT_MODEL_NAME = "@cf/baai/bge-base-en-v1.5"
class CloudflareWorkersAIEmbeddings(BaseModel, Embeddings):
"""Cloudflare Workers AI embedding model.
To use, you need to provide an API token and
account ID to access Cloudflare Workers AI.
Example:
.. code-block:: python
from langchain_community.embeddings import CloudflareWorkersAIEmbeddings
account_id = "my_account_id"
api_token = "my_secret_api_token"
model_name = "@cf/baai/bge-small-en-v1.5"
cf = CloudflareWorkersAIEmbeddings(
account_id=account_id,
api_token=api_token,
model_name=model_name
)
"""
api_base_url: str = "https://api.cloudflare.com/client/v4/accounts"
account_id: str
api_token: str
model_name: str = DEFAULT_MODEL_NAME
batch_size: int = 50
strip_new_lines: bool = True
headers: Dict[str, str] = {"Authorization": "Bearer "}
def __init__(self, **kwargs: Any):
"""Initialize the Cloudflare Workers AI client."""
super().__init__(**kwargs)
self.headers = {"Authorization": f"Bearer {self.api_token}"}
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using Cloudflare Workers AI.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
if self.strip_new_lines:
texts = [text.replace("\n", " ") for text in texts]
batches = [
texts[i : i + self.batch_size]
for i in range(0, len(texts), self.batch_size)
]
embeddings = []
for batch in batches:
response = requests.post(
f"{self.api_base_url}/{self.account_id}/ai/run/{self.model_name}",
headers=self.headers,
json={"text": batch},
)
embeddings.extend(response.json()["result"]["data"])
return embeddings
def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using Cloudflare Workers AI.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
text = text.replace("\n", " ") if self.strip_new_lines else text
response = requests.post(
f"{self.api_base_url}/{self.account_id}/ai/run/{self.model_name}",
headers=self.headers,
json={"text": [text]},
)
return response.json()["result"]["data"][0]
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~octoai_endpoint.py | from typing import Any, Dict, List, Mapping, Optional
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.pydantic_v1 import Extra, root_validator
from libs.core.langchain_core.utils import get_from_dict_or_env
from langchain_community.llms.utils import enforce_stop_tokens
class OctoAIEndpoint(LLM):
"""OctoAI LLM Endpoints.
OctoAIEndpoint is a class to interact with OctoAI
Compute Service large language model endpoints.
To use, you should have the ``octoai`` python package installed, and the
environment variable ``OCTOAI_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain_community.llms.octoai_endpoint import OctoAIEndpoint
OctoAIEndpoint(
octoai_api_token="octoai-api-key",
endpoint_url="https://mpt-7b-demo-f1kzsig6xes9.octoai.run/generate",
model_kwargs={
"max_new_tokens": 200,
"temperature": 0.75,
"top_p": 0.95,
"repetition_penalty": 1,
"seed": None,
"stop": [],
},
)
from langchain_community.llms.octoai_endpoint import OctoAIEndpoint
OctoAIEndpoint(
octoai_api_token="octoai-api-key",
endpoint_url="https://llama-2-7b-chat-demo-kk0powt97tmb.octoai.run/v1/chat/completions",
model_kwargs={
"model": "llama-2-7b-chat",
"messages": [
{
"role": "system",
"content": "Below is an instruction that describes a task.
Write a response that completes the request."
}
],
"stream": False,
"max_tokens": 256
}
)
"""
endpoint_url: Optional[str] = None
"""Endpoint URL to use."""
model_kwargs: Optional[dict] = None
"""Keyword arguments to pass to the model."""
octoai_api_token: Optional[str] = None
"""OCTOAI API Token"""
streaming: bool = False
"""Whether to generate a stream of tokens asynchronously"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator(allow_reuse=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
octoai_api_token = get_from_dict_or_env(
values, "octoai_api_token", "OCTOAI_API_TOKEN"
)
values["endpoint_url"] = get_from_dict_or_env(
values, "endpoint_url", "ENDPOINT_URL"
)
values["octoai_api_token"] = octoai_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "octoai_endpoint"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to OctoAI's inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
"""
_model_kwargs = self.model_kwargs or {}
try:
# Initialize the OctoAI client
from octoai import client
octoai_client = client.Client(token=self.octoai_api_token)
if "model" in _model_kwargs:
parameter_payload = _model_kwargs
parameter_payload["messages"].append(
{"role": "user", "content": prompt}
)
# Send the request using the OctoAI client
output = octoai_client.infer(self.endpoint_url, parameter_payload)
text = output.get("choices")[0].get("message").get("content")
else:
# Prepare the payload JSON
parameter_payload = {"inputs": prompt, "parameters": _model_kwargs}
# Send the request using the OctoAI client
resp_json = octoai_client.infer(self.endpoint_url, parameter_payload)
text = resp_json["generated_text"]
except Exception as e:
# Handle any errors raised by the inference endpoint
raise ValueError(f"Error raised by the inference endpoint: {e}") from e
if stop is not None:
# Apply stop tokens when making calls to OctoAI
text = enforce_stop_tokens(text, stop)
return text
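# Usage sketch for OctoAIEndpoint: the token and endpoint URL are placeholders for a
# real OctoAI deployment; the prompt is routed through the standard LLM interface,
# which ends up in the `_call` method above.
if __name__ == "__main__":
    llm = OctoAIEndpoint(
        octoai_api_token="octoai-api-key",  # placeholder
        endpoint_url="https://mpt-7b-demo-f1kzsig6xes9.octoai.run/generate",  # placeholder
        model_kwargs={"max_new_tokens": 128, "temperature": 0.75, "top_p": 0.95},
    )
    print(llm.invoke("Write a haiku about inference endpoints."))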
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~tools~playwright~click.py | from __future__ import annotations
from typing import Optional, Type
from libs.core.langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from libs.core.langchain_core.pydantic_v1 import BaseModel, Field
from langchain_community.tools.playwright.base import BaseBrowserTool
from langchain_community.tools.playwright.utils import (
aget_current_page,
get_current_page,
)
class ClickToolInput(BaseModel):
"""Input for ClickTool."""
selector: str = Field(..., description="CSS selector for the element to click")
class ClickTool(BaseBrowserTool):
"""Tool for clicking on an element with the given CSS selector."""
name: str = "click_element"
description: str = "Click on an element with the given CSS selector"
args_schema: Type[BaseModel] = ClickToolInput
visible_only: bool = True
"""Whether to consider only visible elements."""
playwright_strict: bool = False
"""Whether to employ Playwright's strict mode when clicking on elements."""
playwright_timeout: float = 1_000
"""Timeout (in ms) for Playwright to wait for element to be ready."""
def _selector_effective(self, selector: str) -> str:
if not self.visible_only:
return selector
return f"{selector} >> visible=1"
def _run(
self,
selector: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.sync_browser is None:
raise ValueError(f"Synchronous browser not provided to {self.name}")
page = get_current_page(self.sync_browser)
# Navigate to the desired webpage before using this tool
selector_effective = self._selector_effective(selector=selector)
from playwright.sync_api import TimeoutError as PlaywrightTimeoutError
try:
page.click(
selector_effective,
strict=self.playwright_strict,
timeout=self.playwright_timeout,
)
except PlaywrightTimeoutError:
return f"Unable to click on element '{selector}'"
return f"Clicked element '{selector}'"
async def _arun(
self,
selector: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.async_browser is None:
raise ValueError(f"Asynchronous browser not provided to {self.name}")
page = await aget_current_page(self.async_browser)
# Navigate to the desired webpage before using this tool
selector_effective = self._selector_effective(selector=selector)
from playwright.async_api import TimeoutError as PlaywrightTimeoutError
try:
await page.click(
selector_effective,
strict=self.playwright_strict,
timeout=self.playwright_timeout,
)
except PlaywrightTimeoutError:
return f"Unable to click on element '{selector}'"
return f"Clicked element '{selector}'"
| [
"Click on an element with the given CSS selector"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~vectorstores~pgvector.py | from __future__ import annotations
import contextlib
import enum
import logging
import uuid
from typing import (
Any,
Callable,
Dict,
Generator,
Iterable,
List,
Optional,
Tuple,
Type,
)
import numpy as np
import sqlalchemy
from sqlalchemy import delete
from sqlalchemy.dialects.postgresql import JSON, UUID
from sqlalchemy.orm import Session, relationship
try:
from sqlalchemy.orm import declarative_base
except ImportError:
from sqlalchemy.ext.declarative import declarative_base
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.runnables.config import run_in_executor
from libs.core.langchain_core.utils import get_from_dict_or_env
from libs.core.langchain_core.vectorstores import VectorStore
from langchain_community.vectorstores.utils import maximal_marginal_relevance
class DistanceStrategy(str, enum.Enum):
"""Enumerator of the Distance strategies."""
EUCLIDEAN = "l2"
COSINE = "cosine"
MAX_INNER_PRODUCT = "inner"
DEFAULT_DISTANCE_STRATEGY = DistanceStrategy.COSINE
Base = declarative_base() # type: Any
_LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain"
class BaseModel(Base):
"""Base model for the SQL stores."""
__abstract__ = True
uuid = sqlalchemy.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
_classes: Any = None
def _get_embedding_collection_store() -> Any:
global _classes
if _classes is not None:
return _classes
from pgvector.sqlalchemy import Vector
class CollectionStore(BaseModel):
"""Collection store."""
__tablename__ = "langchain_pg_collection"
name = sqlalchemy.Column(sqlalchemy.String)
cmetadata = sqlalchemy.Column(JSON)
embeddings = relationship(
"EmbeddingStore",
back_populates="collection",
passive_deletes=True,
)
@classmethod
def get_by_name(
cls, session: Session, name: str
) -> Optional["CollectionStore"]:
return session.query(cls).filter(cls.name == name).first() # type: ignore
@classmethod
def get_or_create(
cls,
session: Session,
name: str,
cmetadata: Optional[dict] = None,
) -> Tuple["CollectionStore", bool]:
"""
Get or create a collection.
Returns [Collection, bool] where the bool is True if the collection was created.
""" # noqa: E501
created = False
collection = cls.get_by_name(session, name)
if collection:
return collection, created
collection = cls(name=name, cmetadata=cmetadata)
session.add(collection)
session.commit()
created = True
return collection, created
class EmbeddingStore(BaseModel):
"""Embedding store."""
__tablename__ = "langchain_pg_embedding"
collection_id = sqlalchemy.Column(
UUID(as_uuid=True),
sqlalchemy.ForeignKey(
f"{CollectionStore.__tablename__}.uuid",
ondelete="CASCADE",
),
)
collection = relationship(CollectionStore, back_populates="embeddings")
embedding: Vector = sqlalchemy.Column(Vector(None))
document = sqlalchemy.Column(sqlalchemy.String, nullable=True)
cmetadata = sqlalchemy.Column(JSON, nullable=True)
# custom_id : any user defined id
custom_id = sqlalchemy.Column(sqlalchemy.String, nullable=True)
_classes = (EmbeddingStore, CollectionStore)
return _classes
def _results_to_docs(docs_and_scores: Any) -> List[Document]:
"""Return docs from docs and scores."""
return [doc for doc, _ in docs_and_scores]
class PGVector(VectorStore):
"""`Postgres`/`PGVector` vector store.
To use, you should have the ``pgvector`` python package installed.
Args:
connection_string: Postgres connection string.
embedding_function: Any embedding function implementing
`langchain.embeddings.base.Embeddings` interface.
collection_name: The name of the collection to use. (default: langchain)
NOTE: This is not the name of the table, but the name of the collection.
The tables will be created when initializing the store (if not exists)
So, make sure the user has the right permissions to create tables.
distance_strategy: The distance strategy to use. (default: COSINE)
pre_delete_collection: If True, will delete the collection if it exists.
(default: False). Useful for testing.
engine_args: SQLAlchemy's create engine arguments.
Example:
.. code-block:: python
from langchain_community.vectorstores import PGVector
from langchain_community.embeddings.openai import OpenAIEmbeddings
CONNECTION_STRING = "postgresql+psycopg2://hwc@localhost:5432/test3"
COLLECTION_NAME = "state_of_the_union_test"
embeddings = OpenAIEmbeddings()
vectorestore = PGVector.from_documents(
embedding=embeddings,
documents=docs,
collection_name=COLLECTION_NAME,
connection_string=CONNECTION_STRING,
)
"""
def __init__(
self,
connection_string: str,
embedding_function: Embeddings,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
collection_metadata: Optional[dict] = None,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
pre_delete_collection: bool = False,
logger: Optional[logging.Logger] = None,
relevance_score_fn: Optional[Callable[[float], float]] = None,
*,
connection: Optional[sqlalchemy.engine.Connection] = None,
engine_args: Optional[dict[str, Any]] = None,
) -> None:
self.connection_string = connection_string
self.embedding_function = embedding_function
self.collection_name = collection_name
self.collection_metadata = collection_metadata
self._distance_strategy = distance_strategy
self.pre_delete_collection = pre_delete_collection
self.logger = logger or logging.getLogger(__name__)
self.override_relevance_score_fn = relevance_score_fn
self.engine_args = engine_args or {}
self._bind = connection if connection else self._create_engine()
self.__post_init__()
def __post_init__(
self,
) -> None:
"""Initialize the store."""
self.create_vector_extension()
EmbeddingStore, CollectionStore = _get_embedding_collection_store()
self.CollectionStore = CollectionStore
self.EmbeddingStore = EmbeddingStore
self.create_tables_if_not_exists()
self.create_collection()
def __del__(self) -> None:
if isinstance(self._bind, sqlalchemy.engine.Connection):
self._bind.close()
@property
def embeddings(self) -> Embeddings:
return self.embedding_function
def _create_engine(self) -> sqlalchemy.engine.Engine:
return sqlalchemy.create_engine(url=self.connection_string, **self.engine_args)
def create_vector_extension(self) -> None:
try:
with Session(self._bind) as session:
# The advisor lock fixes issue arising from concurrent
# creation of the vector extension.
# https://github.com/langchain-ai/langchain/issues/12933
# For more information see:
# https://www.postgresql.org/docs/16/explicit-locking.html#ADVISORY-LOCKS
statement = sqlalchemy.text(
"BEGIN;"
"SELECT pg_advisory_xact_lock(1573678846307946496);"
"CREATE EXTENSION IF NOT EXISTS vector;"
"COMMIT;"
)
session.execute(statement)
session.commit()
except Exception as e:
raise Exception(f"Failed to create vector extension: {e}") from e
def create_tables_if_not_exists(self) -> None:
with Session(self._bind) as session, session.begin():
Base.metadata.create_all(session.get_bind())
def drop_tables(self) -> None:
with Session(self._bind) as session, session.begin():
Base.metadata.drop_all(session.get_bind())
def create_collection(self) -> None:
if self.pre_delete_collection:
self.delete_collection()
with Session(self._bind) as session:
self.CollectionStore.get_or_create(
session, self.collection_name, cmetadata=self.collection_metadata
)
def delete_collection(self) -> None:
self.logger.debug("Trying to delete collection")
with Session(self._bind) as session:
collection = self.get_collection(session)
if not collection:
self.logger.warning("Collection not found")
return
session.delete(collection)
session.commit()
@contextlib.contextmanager
def _make_session(self) -> Generator[Session, None, None]:
"""Create a context manager for the session, bind to _conn string."""
yield Session(self._bind)
def delete(
self,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> None:
"""Delete vectors by ids or uuids.
Args:
ids: List of ids to delete.
"""
with Session(self._bind) as session:
if ids is not None:
self.logger.debug(
"Trying to delete vectors by ids (represented by the model "
"using the custom ids field)"
)
stmt = delete(self.EmbeddingStore).where(
self.EmbeddingStore.custom_id.in_(ids)
)
session.execute(stmt)
session.commit()
def get_collection(self, session: Session) -> Any:
return self.CollectionStore.get_by_name(session, self.collection_name)
@classmethod
def __from(
cls,
texts: List[str],
embeddings: List[List[float]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
connection_string: Optional[str] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGVector:
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
if not metadatas:
metadatas = [{} for _ in texts]
if connection_string is None:
connection_string = cls.get_connection_string(kwargs)
store = cls(
connection_string=connection_string,
collection_name=collection_name,
embedding_function=embedding,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
**kwargs,
)
store.add_embeddings(
texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs
)
return store
def add_embeddings(
self,
texts: Iterable[str],
embeddings: List[List[float]],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Add embeddings to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
embeddings: List of list of embedding vectors.
metadatas: List of metadatas associated with the texts.
kwargs: vectorstore specific parameters
"""
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
if not metadatas:
metadatas = [{} for _ in texts]
with Session(self._bind) as session:
collection = self.get_collection(session)
if not collection:
raise ValueError("Collection not found")
for text, metadata, embedding, id in zip(texts, metadatas, embeddings, ids):
embedding_store = self.EmbeddingStore(
embedding=embedding,
document=text,
cmetadata=metadata,
custom_id=id,
collection_id=collection.uuid,
)
session.add(embedding_store)
session.commit()
return ids
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
embeddings = self.embedding_function.embed_documents(list(texts))
return self.add_embeddings(
texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs
)
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Run similarity search with PGVector with distance.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query.
"""
embedding = self.embedding_function.embed_query(text=query)
return self.similarity_search_by_vector(
embedding=embedding,
k=k,
filter=filter,
)
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[dict] = None,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query and score for each.
"""
embedding = self.embedding_function.embed_query(query)
docs = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, filter=filter
)
return docs
@property
def distance_strategy(self) -> Any:
if self._distance_strategy == DistanceStrategy.EUCLIDEAN:
return self.EmbeddingStore.embedding.l2_distance
elif self._distance_strategy == DistanceStrategy.COSINE:
return self.EmbeddingStore.embedding.cosine_distance
elif self._distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
return self.EmbeddingStore.embedding.max_inner_product
else:
raise ValueError(
f"Got unexpected value for distance: {self._distance_strategy}. "
f"Should be one of {', '.join([ds.value for ds in DistanceStrategy])}."
)
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[dict] = None,
) -> List[Tuple[Document, float]]:
results = self.__query_collection(embedding=embedding, k=k, filter=filter)
return self._results_to_docs_and_scores(results)
def _results_to_docs_and_scores(self, results: Any) -> List[Tuple[Document, float]]:
"""Return docs and scores from results."""
docs = [
(
Document(
page_content=result.EmbeddingStore.document,
metadata=result.EmbeddingStore.cmetadata,
),
result.distance if self.embedding_function is not None else None,
)
for result in results
]
return docs
def __query_collection(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Dict[str, str]] = None,
) -> List[Any]:
"""Query the collection."""
with Session(self._bind) as session:
collection = self.get_collection(session)
if not collection:
raise ValueError("Collection not found")
filter_by = self.EmbeddingStore.collection_id == collection.uuid
if filter is not None:
filter_clauses = []
IN, NIN = "in", "nin"
for key, value in filter.items():
if isinstance(value, dict):
value_case_insensitive = {
k.lower(): v for k, v in value.items()
}
if IN in map(str.lower, value):
filter_by_metadata = self.EmbeddingStore.cmetadata[
key
].astext.in_(value_case_insensitive[IN])
elif NIN in map(str.lower, value):
filter_by_metadata = self.EmbeddingStore.cmetadata[
key
].astext.not_in(value_case_insensitive[NIN])
else:
filter_by_metadata = None
if filter_by_metadata is not None:
filter_clauses.append(filter_by_metadata)
else:
filter_by_metadata = self.EmbeddingStore.cmetadata[
key
].astext == str(value)
filter_clauses.append(filter_by_metadata)
filter_by = sqlalchemy.and_(filter_by, *filter_clauses)
_type = self.EmbeddingStore
results: List[Any] = (
session.query(
self.EmbeddingStore,
self.distance_strategy(embedding).label("distance"), # type: ignore
)
.filter(filter_by)
.order_by(sqlalchemy.asc("distance"))
.join(
self.CollectionStore,
self.EmbeddingStore.collection_id == self.CollectionStore.uuid,
)
.limit(k)
.all()
)
return results
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query vector.
"""
docs_and_scores = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, filter=filter
)
return _results_to_docs(docs_and_scores)
@classmethod
def from_texts(
cls: Type[PGVector],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGVector:
"""
Return VectorStore initialized from texts and embeddings.
        Postgres connection string is required.
        Either pass it as a parameter
        or set the PGVECTOR_CONNECTION_STRING environment variable.
"""
embeddings = embedding.embed_documents(list(texts))
return cls.__from(
texts,
embeddings,
embedding,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
**kwargs,
)
@classmethod
def from_embeddings(
cls,
text_embeddings: List[Tuple[str, List[float]]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGVector:
"""Construct PGVector wrapper from raw documents and pre-
generated embeddings.
Return VectorStore initialized from documents and embeddings.
        Postgres connection string is required.
        Either pass it as a parameter
        or set the PGVECTOR_CONNECTION_STRING environment variable.
Example:
.. code-block:: python
from langchain_community.vectorstores import PGVector
from langchain_community.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
text_embeddings = embeddings.embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
                vectorstore = PGVector.from_embeddings(text_embedding_pairs, embeddings)
"""
texts = [t[0] for t in text_embeddings]
embeddings = [t[1] for t in text_embeddings]
return cls.__from(
texts,
embeddings,
embedding,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
**kwargs,
)
@classmethod
def from_existing_index(
cls: Type[PGVector],
embedding: Embeddings,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGVector:
"""
        Get an instance of an existing PGVector store. This method will
        return the instance of the store without inserting any new
        embeddings.
"""
connection_string = cls.get_connection_string(kwargs)
store = cls(
connection_string=connection_string,
collection_name=collection_name,
embedding_function=embedding,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
)
return store
@classmethod
def get_connection_string(cls, kwargs: Dict[str, Any]) -> str:
connection_string: str = get_from_dict_or_env(
data=kwargs,
key="connection_string",
env_key="PGVECTOR_CONNECTION_STRING",
)
if not connection_string:
raise ValueError(
"Postgres connection string is required"
"Either pass it as a parameter"
"or set the PGVECTOR_CONNECTION_STRING environment variable."
)
return connection_string
@classmethod
def from_documents(
cls: Type[PGVector],
documents: List[Document],
embedding: Embeddings,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGVector:
"""
Return VectorStore initialized from documents and embeddings.
        Postgres connection string is required.
        Either pass it as a parameter
        or set the PGVECTOR_CONNECTION_STRING environment variable.
"""
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
connection_string = cls.get_connection_string(kwargs)
kwargs["connection_string"] = connection_string
return cls.from_texts(
texts=texts,
pre_delete_collection=pre_delete_collection,
embedding=embedding,
distance_strategy=distance_strategy,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
**kwargs,
)
@classmethod
def connection_string_from_db_params(
cls,
driver: str,
host: str,
port: int,
database: str,
user: str,
password: str,
) -> str:
"""Return connection string from database parameters."""
return f"postgresql+{driver}://{user}:{password}@{host}:{port}/{database}"
def _select_relevance_score_fn(self) -> Callable[[float], float]:
"""
The 'correct' relevance function
may differ depending on a few things, including:
- the distance / similarity metric used by the VectorStore
- the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
- embedding dimensionality
- etc.
"""
if self.override_relevance_score_fn is not None:
return self.override_relevance_score_fn
# Default strategy is to rely on distance strategy provided
# in vectorstore constructor
if self._distance_strategy == DistanceStrategy.COSINE:
return self._cosine_relevance_score_fn
elif self._distance_strategy == DistanceStrategy.EUCLIDEAN:
return self._euclidean_relevance_score_fn
elif self._distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
return self._max_inner_product_relevance_score_fn
else:
raise ValueError(
"No supported normalization function"
f" for distance_strategy of {self._distance_strategy}."
"Consider providing relevance_score_fn to PGVector constructor."
)
def max_marginal_relevance_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs selected using the maximal marginal relevance with score
to embedding vector.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult (float): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of Documents selected by maximal marginal
relevance to the query and score for each.
"""
results = self.__query_collection(embedding=embedding, k=fetch_k, filter=filter)
embedding_list = [result.EmbeddingStore.embedding for result in results]
mmr_selected = maximal_marginal_relevance(
np.array(embedding, dtype=np.float32),
embedding_list,
k=k,
lambda_mult=lambda_mult,
)
candidates = self._results_to_docs_and_scores(results)
return [r for i, r in enumerate(candidates) if i in mmr_selected]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query (str): Text to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult (float): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Document]: List of Documents selected by maximal marginal relevance.
"""
embedding = self.embedding_function.embed_query(query)
return self.max_marginal_relevance_search_by_vector(
embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
**kwargs,
)
def max_marginal_relevance_search_with_score(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs selected using the maximal marginal relevance with score.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query (str): Text to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult (float): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of Documents selected by maximal marginal
relevance to the query and score for each.
"""
embedding = self.embedding_function.embed_query(query)
docs = self.max_marginal_relevance_search_with_score_by_vector(
embedding=embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
**kwargs,
)
return docs
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance
to embedding vector.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
            embedding (List[float]): Embedding to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult (float): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Document]: List of Documents selected by maximal marginal relevance.
"""
docs_and_scores = self.max_marginal_relevance_search_with_score_by_vector(
embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
**kwargs,
)
return _results_to_docs(docs_and_scores)
async def amax_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance."""
# This is a temporary workaround to make the similarity search
# asynchronous. The proper solution is to make the similarity search
# asynchronous in the vector store implementations.
return await run_in_executor(
None,
self.max_marginal_relevance_search_by_vector,
embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
**kwargs,
)
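# Usage sketch for PGVector: the connection string and collection name are placeholders
# and a running Postgres instance with the pgvector extension is assumed; FakeEmbeddings
# stands in for a real embedding model. The filter shows the "in" operator handled in
# __query_collection (plain values are matched with equality).
if __name__ == "__main__":
    from langchain_community.embeddings import FakeEmbeddings
    store = PGVector.from_texts(
        texts=["cats purr", "dogs bark", "parrots talk"],
        embedding=FakeEmbeddings(size=1352),
        metadatas=[{"species": "cat"}, {"species": "dog"}, {"species": "parrot"}],
        collection_name="demo_collection",
        connection_string="postgresql+psycopg2://user:pass@localhost:5432/demo",  # placeholder
    )
    docs = store.similarity_search(
        "a talking pet", k=2, filter={"species": {"in": ["parrot", "cat"]}}
    )
    print([d.page_content for d in docs])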
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~callbacks~clearml_callback.py | from __future__ import annotations
import tempfile
from copy import deepcopy
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Sequence
from libs.core.langchain_core.agents import AgentAction, AgentFinish
from libs.core.langchain_core.callbacks import BaseCallbackHandler
from libs.core.langchain_core.outputs import LLMResult
from langchain_community.callbacks.utils import (
BaseMetadataCallbackHandler,
flatten_dict,
hash_string,
import_pandas,
import_spacy,
import_textstat,
load_json,
)
if TYPE_CHECKING:
import pandas as pd
def import_clearml() -> Any:
"""Import the clearml python package and raise an error if it is not installed."""
try:
import clearml # noqa: F401
except ImportError:
raise ImportError(
"To use the clearml callback manager you need to have the `clearml` python "
"package installed. Please install it with `pip install clearml`"
)
return clearml
class ClearMLCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
"""Callback Handler that logs to ClearML.
Parameters:
        task_type (str): The type of clearml task such as "inference", "testing" or "qc"
project_name (str): The clearml project name
tags (list): Tags to add to the task
task_name (str): Name of the clearml task
visualize (bool): Whether to visualize the run.
complexity_metrics (bool): Whether to log complexity metrics
stream_logs (bool): Whether to stream callback actions to ClearML
    This handler uses the associated callback method to format the input of each
    callback function with metadata about the state of the LLM run, appends the
    response to the list of records for both the {method}_records and action,
    and then logs the response to the ClearML console.
"""
def __init__(
self,
task_type: Optional[str] = "inference",
project_name: Optional[str] = "langchain_callback_demo",
tags: Optional[Sequence] = None,
task_name: Optional[str] = None,
visualize: bool = False,
complexity_metrics: bool = False,
stream_logs: bool = False,
) -> None:
"""Initialize callback handler."""
clearml = import_clearml()
spacy = import_spacy()
super().__init__()
self.task_type = task_type
self.project_name = project_name
self.tags = tags
self.task_name = task_name
self.visualize = visualize
self.complexity_metrics = complexity_metrics
self.stream_logs = stream_logs
self.temp_dir = tempfile.TemporaryDirectory()
# Check if ClearML task already exists (e.g. in pipeline)
if clearml.Task.current_task():
self.task = clearml.Task.current_task()
else:
self.task = clearml.Task.init( # type: ignore
task_type=self.task_type,
project_name=self.project_name,
tags=self.tags,
task_name=self.task_name,
output_uri=True,
)
self.logger = self.task.get_logger()
warning = (
"The clearml callback is currently in beta and is subject to change "
"based on updates to `langchain`. Please report any issues to "
"https://github.com/allegroai/clearml/issues with the tag `langchain`."
)
self.logger.report_text(warning, level=30, print_console=True)
self.callback_columns: list = []
self.action_records: list = []
self.complexity_metrics = complexity_metrics
self.visualize = visualize
self.nlp = spacy.load("en_core_web_sm")
def _init_resp(self) -> Dict:
return {k: None for k in self.callback_columns}
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Run when LLM starts."""
self.step += 1
self.llm_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update({"action": "on_llm_start"})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
for prompt in prompts:
prompt_resp = deepcopy(resp)
prompt_resp["prompts"] = prompt
self.on_llm_start_records.append(prompt_resp)
self.action_records.append(prompt_resp)
if self.stream_logs:
self.logger.report_text(prompt_resp)
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run when LLM generates a new token."""
self.step += 1
self.llm_streams += 1
resp = self._init_resp()
resp.update({"action": "on_llm_new_token", "token": token})
resp.update(self.get_custom_callback_meta())
self.on_llm_token_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.logger.report_text(resp)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
self.step += 1
self.llm_ends += 1
self.ends += 1
resp = self._init_resp()
resp.update({"action": "on_llm_end"})
resp.update(flatten_dict(response.llm_output or {}))
resp.update(self.get_custom_callback_meta())
for generations in response.generations:
for generation in generations:
generation_resp = deepcopy(resp)
generation_resp.update(flatten_dict(generation.dict()))
generation_resp.update(self.analyze_text(generation.text))
self.on_llm_end_records.append(generation_resp)
self.action_records.append(generation_resp)
if self.stream_logs:
self.logger.report_text(generation_resp)
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when LLM errors."""
self.step += 1
self.errors += 1
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Run when chain starts running."""
self.step += 1
self.chain_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update({"action": "on_chain_start"})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
chain_input = inputs.get("input", inputs.get("human_input"))
if isinstance(chain_input, str):
input_resp = deepcopy(resp)
input_resp["input"] = chain_input
self.on_chain_start_records.append(input_resp)
self.action_records.append(input_resp)
if self.stream_logs:
self.logger.report_text(input_resp)
elif isinstance(chain_input, list):
for inp in chain_input:
input_resp = deepcopy(resp)
input_resp.update(inp)
self.on_chain_start_records.append(input_resp)
self.action_records.append(input_resp)
if self.stream_logs:
self.logger.report_text(input_resp)
else:
raise ValueError("Unexpected data format provided!")
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
self.step += 1
self.chain_ends += 1
self.ends += 1
resp = self._init_resp()
resp.update(
{
"action": "on_chain_end",
"outputs": outputs.get("output", outputs.get("text")),
}
)
resp.update(self.get_custom_callback_meta())
self.on_chain_end_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.logger.report_text(resp)
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when chain errors."""
self.step += 1
self.errors += 1
def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> None:
"""Run when tool starts running."""
self.step += 1
self.tool_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update({"action": "on_tool_start", "input_str": input_str})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
self.on_tool_start_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.logger.report_text(resp)
def on_tool_end(self, output: str, **kwargs: Any) -> None:
"""Run when tool ends running."""
self.step += 1
self.tool_ends += 1
self.ends += 1
resp = self._init_resp()
resp.update({"action": "on_tool_end", "output": output})
resp.update(self.get_custom_callback_meta())
self.on_tool_end_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.logger.report_text(resp)
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when tool errors."""
self.step += 1
self.errors += 1
def on_text(self, text: str, **kwargs: Any) -> None:
"""
Run when agent is ending.
"""
self.step += 1
self.text_ctr += 1
resp = self._init_resp()
resp.update({"action": "on_text", "text": text})
resp.update(self.get_custom_callback_meta())
self.on_text_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.logger.report_text(resp)
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run when agent ends running."""
self.step += 1
self.agent_ends += 1
self.ends += 1
resp = self._init_resp()
resp.update(
{
"action": "on_agent_finish",
"output": finish.return_values["output"],
"log": finish.log,
}
)
resp.update(self.get_custom_callback_meta())
self.on_agent_finish_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.logger.report_text(resp)
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action."""
self.step += 1
self.tool_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update(
{
"action": "on_agent_action",
"tool": action.tool,
"tool_input": action.tool_input,
"log": action.log,
}
)
resp.update(self.get_custom_callback_meta())
self.on_agent_action_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.logger.report_text(resp)
def analyze_text(self, text: str) -> dict:
"""Analyze text using textstat and spacy.
Parameters:
text (str): The text to analyze.
Returns:
(dict): A dictionary containing the complexity metrics.
"""
resp = {}
textstat = import_textstat()
spacy = import_spacy()
if self.complexity_metrics:
text_complexity_metrics = {
"flesch_reading_ease": textstat.flesch_reading_ease(text),
"flesch_kincaid_grade": textstat.flesch_kincaid_grade(text),
"smog_index": textstat.smog_index(text),
"coleman_liau_index": textstat.coleman_liau_index(text),
"automated_readability_index": textstat.automated_readability_index(
text
),
"dale_chall_readability_score": textstat.dale_chall_readability_score(
text
),
"difficult_words": textstat.difficult_words(text),
"linsear_write_formula": textstat.linsear_write_formula(text),
"gunning_fog": textstat.gunning_fog(text),
"text_standard": textstat.text_standard(text),
"fernandez_huerta": textstat.fernandez_huerta(text),
"szigriszt_pazos": textstat.szigriszt_pazos(text),
"gutierrez_polini": textstat.gutierrez_polini(text),
"crawford": textstat.crawford(text),
"gulpease_index": textstat.gulpease_index(text),
"osman": textstat.osman(text),
}
resp.update(text_complexity_metrics)
if self.visualize and self.nlp and self.temp_dir.name is not None:
doc = self.nlp(text)
dep_out = spacy.displacy.render( # type: ignore
doc, style="dep", jupyter=False, page=True
)
dep_output_path = Path(
self.temp_dir.name, hash_string(f"dep-{text}") + ".html"
)
dep_output_path.open("w", encoding="utf-8").write(dep_out)
ent_out = spacy.displacy.render( # type: ignore
doc, style="ent", jupyter=False, page=True
)
ent_output_path = Path(
self.temp_dir.name, hash_string(f"ent-{text}") + ".html"
)
ent_output_path.open("w", encoding="utf-8").write(ent_out)
self.logger.report_media(
"Dependencies Plot", text, local_path=dep_output_path
)
self.logger.report_media("Entities Plot", text, local_path=ent_output_path)
return resp
@staticmethod
def _build_llm_df(
base_df: pd.DataFrame, base_df_fields: Sequence, rename_map: Mapping
) -> pd.DataFrame:
base_df_fields = [field for field in base_df_fields if field in base_df]
rename_map = {
map_entry_k: map_entry_v
for map_entry_k, map_entry_v in rename_map.items()
if map_entry_k in base_df_fields
}
llm_df = base_df[base_df_fields].dropna(axis=1)
if rename_map:
llm_df = llm_df.rename(rename_map, axis=1)
return llm_df
def _create_session_analysis_df(self) -> Any:
"""Create a dataframe with all the information from the session."""
pd = import_pandas()
on_llm_end_records_df = pd.DataFrame(self.on_llm_end_records)
llm_input_prompts_df = ClearMLCallbackHandler._build_llm_df(
base_df=on_llm_end_records_df,
base_df_fields=["step", "prompts"]
+ (["name"] if "name" in on_llm_end_records_df else ["id"]),
rename_map={"step": "prompt_step"},
)
complexity_metrics_columns = []
visualizations_columns: List = []
if self.complexity_metrics:
complexity_metrics_columns = [
"flesch_reading_ease",
"flesch_kincaid_grade",
"smog_index",
"coleman_liau_index",
"automated_readability_index",
"dale_chall_readability_score",
"difficult_words",
"linsear_write_formula",
"gunning_fog",
"text_standard",
"fernandez_huerta",
"szigriszt_pazos",
"gutierrez_polini",
"crawford",
"gulpease_index",
"osman",
]
llm_outputs_df = ClearMLCallbackHandler._build_llm_df(
on_llm_end_records_df,
[
"step",
"text",
"token_usage_total_tokens",
"token_usage_prompt_tokens",
"token_usage_completion_tokens",
]
+ complexity_metrics_columns
+ visualizations_columns,
{"step": "output_step", "text": "output"},
)
session_analysis_df = pd.concat([llm_input_prompts_df, llm_outputs_df], axis=1)
return session_analysis_df
def flush_tracker(
self,
name: Optional[str] = None,
langchain_asset: Any = None,
finish: bool = False,
) -> None:
"""Flush the tracker and setup the session.
Everything after this will be a new table.
Args:
name: Name of the performed session so far so it is identifiable
langchain_asset: The langchain asset to save.
finish: Whether to finish the run.
Returns:
None
"""
pd = import_pandas()
clearml = import_clearml()
# Log the action records
self.logger.report_table(
"Action Records", name, table_plot=pd.DataFrame(self.action_records)
)
# Session analysis
session_analysis_df = self._create_session_analysis_df()
self.logger.report_table(
"Session Analysis", name, table_plot=session_analysis_df
)
if self.stream_logs:
self.logger.report_text(
{
"action_records": pd.DataFrame(self.action_records),
"session_analysis": session_analysis_df,
}
)
if langchain_asset:
langchain_asset_path = Path(self.temp_dir.name, "model.json")
try:
langchain_asset.save(langchain_asset_path)
# Create output model and connect it to the task
output_model = clearml.OutputModel(
task=self.task, config_text=load_json(langchain_asset_path)
)
output_model.update_weights(
weights_filename=str(langchain_asset_path),
auto_delete_file=False,
target_filename=name,
)
except ValueError:
langchain_asset.save_agent(langchain_asset_path)
output_model = clearml.OutputModel(
task=self.task, config_text=load_json(langchain_asset_path)
)
output_model.update_weights(
weights_filename=str(langchain_asset_path),
auto_delete_file=False,
target_filename=name,
)
except NotImplementedError as e:
print("Could not save model.")
print(repr(e))
pass
# Cleanup after adding everything to ClearML
self.task.flush(wait_for_uploads=True)
self.temp_dir.cleanup()
self.temp_dir = tempfile.TemporaryDirectory()
self.reset_callback_meta()
if finish:
self.task.close()
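# Usage sketch for ClearMLCallbackHandler: assumes a configured ClearML environment
# (clearml.conf or CLEARML_* variables), an OpenAI API key, and the optional textstat /
# spacy (`en_core_web_sm`) dependencies pulled in by the constructor above.
if __name__ == "__main__":
    from langchain_community.llms import OpenAI
    handler = ClearMLCallbackHandler(
        task_type="inference",
        project_name="langchain_callback_demo",
        tags=["demo"],
        complexity_metrics=True,
    )
    llm = OpenAI(temperature=0, callbacks=[handler])
    llm.invoke("Tell me a short joke about databases.")
    # Flush so the action records and session analysis tables land in the ClearML task.
    handler.flush_tracker(name="demo-run", langchain_asset=llm, finish=True)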
| [
"name",
"prompt_step"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~ai21.py | from typing import Any, Dict, List, Optional, cast
import requests
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra, SecretStr, root_validator
from libs.core.langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
class AI21PenaltyData(BaseModel):
"""Parameters for AI21 penalty data."""
scale: int = 0
applyToWhitespaces: bool = True
applyToPunctuations: bool = True
applyToNumbers: bool = True
applyToStopwords: bool = True
applyToEmojis: bool = True
class AI21(LLM):
"""AI21 large language models.
To use, you should have the environment variable ``AI21_API_KEY``
set with your API key or pass it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain_community.llms import AI21
ai21 = AI21(ai21_api_key="my-api-key", model="j2-jumbo-instruct")
"""
model: str = "j2-jumbo-instruct"
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
maxTokens: int = 256
"""The maximum number of tokens to generate in the completion."""
minTokens: int = 0
"""The minimum number of tokens to generate in the completion."""
topP: float = 1.0
"""Total probability mass of tokens to consider at each step."""
presencePenalty: AI21PenaltyData = AI21PenaltyData()
"""Penalizes repeated tokens."""
countPenalty: AI21PenaltyData = AI21PenaltyData()
"""Penalizes repeated tokens according to count."""
frequencyPenalty: AI21PenaltyData = AI21PenaltyData()
"""Penalizes repeated tokens according to frequency."""
numResults: int = 1
"""How many completions to generate for each prompt."""
logitBias: Optional[Dict[str, float]] = None
"""Adjust the probability of specific tokens being generated."""
ai21_api_key: Optional[SecretStr] = None
stop: Optional[List[str]] = None
base_url: Optional[str] = None
"""Base url to use, if None decides based on model name."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
ai21_api_key = convert_to_secret_str(
get_from_dict_or_env(values, "ai21_api_key", "AI21_API_KEY")
)
values["ai21_api_key"] = ai21_api_key
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling AI21 API."""
return {
"temperature": self.temperature,
"maxTokens": self.maxTokens,
"minTokens": self.minTokens,
"topP": self.topP,
"presencePenalty": self.presencePenalty.dict(),
"countPenalty": self.countPenalty.dict(),
"frequencyPenalty": self.frequencyPenalty.dict(),
"numResults": self.numResults,
"logitBias": self.logitBias,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "ai21"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to AI21's complete endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = ai21("Tell me a joke.")
"""
if self.stop is not None and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop is not None:
stop = self.stop
elif stop is None:
stop = []
if self.base_url is not None:
base_url = self.base_url
else:
if self.model in ("j1-grande-instruct",):
base_url = "https://api.ai21.com/studio/v1/experimental"
else:
base_url = "https://api.ai21.com/studio/v1"
params = {**self._default_params, **kwargs}
self.ai21_api_key = cast(SecretStr, self.ai21_api_key)
response = requests.post(
url=f"{base_url}/{self.model}/complete",
headers={"Authorization": f"Bearer {self.ai21_api_key.get_secret_value()}"},
json={"prompt": prompt, "stopSequences": stop, **params},
)
if response.status_code != 200:
optional_detail = response.json().get("error")
raise ValueError(
f"AI21 /complete call failed with status code {response.status_code}."
f" Details: {optional_detail}"
)
response_json = response.json()
return response_json["completions"][0]["data"]["text"]
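# A minimal usage sketch (not part of the original module). It assumes a valid
# AI21 key in the AI21_API_KEY environment variable, network access, and that
# AI21PenaltyData also exposes the `scale` field from its full definition
# (only the applyTo* toggles appear in this excerpt). Guarded so nothing runs on import.
if __name__ == "__main__":
    # Penalize repeated tokens by count, but leave whitespace untouched.
    count_penalty = AI21PenaltyData(scale=1, applyToWhitespaces=False)
    ai21 = AI21(model="j2-jumbo-instruct", temperature=0.7, countPenalty=count_penalty)
    print(ai21("Tell me a joke."))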
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~embeddings~modelscope_hub.py | from typing import Any, List, Optional
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra
class ModelScopeEmbeddings(BaseModel, Embeddings):
"""ModelScopeHub embedding models.
To use, you should have the ``modelscope`` python package installed.
Example:
.. code-block:: python
from langchain_community.embeddings import ModelScopeEmbeddings
model_id = "damo/nlp_corom_sentence-embedding_english-base"
embed = ModelScopeEmbeddings(model_id=model_id, model_revision="v1.0.0")
"""
embed: Any
model_id: str = "damo/nlp_corom_sentence-embedding_english-base"
"""Model name to use."""
model_revision: Optional[str] = None
def __init__(self, **kwargs: Any):
"""Initialize the modelscope"""
super().__init__(**kwargs)
try:
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
except ImportError as e:
raise ImportError(
"Could not import some python packages."
"Please install it with `pip install modelscope`."
) from e
self.embed = pipeline(
Tasks.sentence_embedding,
model=self.model_id,
model_revision=self.model_revision,
)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a modelscope embedding model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
texts = list(map(lambda x: x.replace("\n", " "), texts))
inputs = {"source_sentence": texts}
embeddings = self.embed(input=inputs)["text_embedding"]
return embeddings.tolist()
def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a modelscope embedding model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
text = text.replace("\n", " ")
inputs = {"source_sentence": [text]}
embedding = self.embed(input=inputs)["text_embedding"][0]
return embedding.tolist()
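# A minimal usage sketch (not part of the original module). It assumes the
# `modelscope` package and the default English CoROM model can be resolved
# locally, and simply exercises embed_documents / embed_query defined above.
# Guarded so nothing runs on import.
if __name__ == "__main__":
    embedder = ModelScopeEmbeddings(
        model_id="damo/nlp_corom_sentence-embedding_english-base"
    )
    doc_vectors = embedder.embed_documents(["hello world", "goodbye world"])
    query_vector = embedder.embed_query("hello")
    print(len(doc_vectors), len(query_vector))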
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~writer.py | from typing import Any, Dict, List, Mapping, Optional
import requests
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.pydantic_v1 import Extra, root_validator
from libs.core.langchain_core.utils import get_from_dict_or_env
from langchain_community.llms.utils import enforce_stop_tokens
class Writer(LLM):
"""Writer large language models.
    To use, you should have the environment variables ``WRITER_API_KEY`` and
``WRITER_ORG_ID`` set with your API key and organization ID respectively.
Example:
.. code-block:: python
from langchain_community.llms import Writer
writer = Writer(model_id="palmyra-base")
"""
writer_org_id: Optional[str] = None
"""Writer organization ID."""
model_id: str = "palmyra-instruct"
"""Model name to use."""
min_tokens: Optional[int] = None
"""Minimum number of tokens to generate."""
max_tokens: Optional[int] = None
"""Maximum number of tokens to generate."""
temperature: Optional[float] = None
"""What sampling temperature to use."""
top_p: Optional[float] = None
"""Total probability mass of tokens to consider at each step."""
stop: Optional[List[str]] = None
"""Sequences when completion generation will stop."""
presence_penalty: Optional[float] = None
"""Penalizes repeated tokens regardless of frequency."""
repetition_penalty: Optional[float] = None
"""Penalizes repeated tokens according to frequency."""
best_of: Optional[int] = None
"""Generates this many completions server-side and returns the "best"."""
logprobs: bool = False
"""Whether to return log probabilities."""
n: Optional[int] = None
"""How many completions to generate."""
writer_api_key: Optional[str] = None
"""Writer API key."""
base_url: Optional[str] = None
"""Base url to use, if None decides based on model name."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and organization id exist in environment."""
writer_api_key = get_from_dict_or_env(
values, "writer_api_key", "WRITER_API_KEY"
)
values["writer_api_key"] = writer_api_key
writer_org_id = get_from_dict_or_env(values, "writer_org_id", "WRITER_ORG_ID")
values["writer_org_id"] = writer_org_id
return values
@property
def _default_params(self) -> Mapping[str, Any]:
"""Get the default parameters for calling Writer API."""
return {
"minTokens": self.min_tokens,
"maxTokens": self.max_tokens,
"temperature": self.temperature,
"topP": self.top_p,
"stop": self.stop,
"presencePenalty": self.presence_penalty,
"repetitionPenalty": self.repetition_penalty,
"bestOf": self.best_of,
"logprobs": self.logprobs,
"n": self.n,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_id": self.model_id, "writer_org_id": self.writer_org_id},
**self._default_params,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "writer"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Writer's completions endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
                writer = Writer()
                response = writer("Tell me a joke.")
"""
if self.base_url is not None:
base_url = self.base_url
else:
base_url = (
"https://enterprise-api.writer.com/llm"
f"/organization/{self.writer_org_id}"
f"/model/{self.model_id}/completions"
)
params = {**self._default_params, **kwargs}
response = requests.post(
url=base_url,
headers={
"Authorization": f"{self.writer_api_key}",
"Content-Type": "application/json",
"Accept": "application/json",
},
json={"prompt": prompt, **params},
)
text = response.text
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
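# A minimal usage sketch (not part of the original module). It assumes
# WRITER_API_KEY and WRITER_ORG_ID are set in the environment and that the
# "palmyra-base" model is available to that organization. Note that _call
# above returns the raw response body, so the printed value is the JSON text
# exactly as the Writer API delivered it. Guarded so nothing runs on import.
if __name__ == "__main__":
    writer = Writer(model_id="palmyra-base", max_tokens=64)
    print(writer("Write a one-sentence tagline for a note-taking app."))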
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~callbacks~sagemaker_callback.py | import json
import os
import shutil
import tempfile
from copy import deepcopy
from typing import Any, Dict, List, Optional
from libs.core.langchain_core.agents import AgentAction, AgentFinish
from libs.core.langchain_core.callbacks import BaseCallbackHandler
from libs.core.langchain_core.outputs import LLMResult
from langchain_community.callbacks.utils import (
flatten_dict,
)
def save_json(data: dict, file_path: str) -> None:
"""Save dict to local file path.
Parameters:
data (dict): The dictionary to be saved.
file_path (str): Local file path.
"""
with open(file_path, "w") as outfile:
json.dump(data, outfile)
class SageMakerCallbackHandler(BaseCallbackHandler):
"""Callback Handler that logs prompt artifacts and metrics to SageMaker Experiments.
Parameters:
run (sagemaker.experiments.run.Run): Run object where the experiment is logged.
"""
def __init__(self, run: Any) -> None:
"""Initialize callback handler."""
super().__init__()
self.run = run
self.metrics = {
"step": 0,
"starts": 0,
"ends": 0,
"errors": 0,
"text_ctr": 0,
"chain_starts": 0,
"chain_ends": 0,
"llm_starts": 0,
"llm_ends": 0,
"llm_streams": 0,
"tool_starts": 0,
"tool_ends": 0,
"agent_ends": 0,
}
# Create a temporary directory
self.temp_dir = tempfile.mkdtemp()
def _reset(self) -> None:
for k, v in self.metrics.items():
self.metrics[k] = 0
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Run when LLM starts."""
self.metrics["step"] += 1
self.metrics["llm_starts"] += 1
self.metrics["starts"] += 1
llm_starts = self.metrics["llm_starts"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_llm_start"})
resp.update(flatten_dict(serialized))
resp.update(self.metrics)
for idx, prompt in enumerate(prompts):
prompt_resp = deepcopy(resp)
prompt_resp["prompt"] = prompt
self.jsonf(
prompt_resp,
self.temp_dir,
f"llm_start_{llm_starts}_prompt_{idx}",
)
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run when LLM generates a new token."""
self.metrics["step"] += 1
self.metrics["llm_streams"] += 1
llm_streams = self.metrics["llm_streams"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_llm_new_token", "token": token})
resp.update(self.metrics)
self.jsonf(resp, self.temp_dir, f"llm_new_tokens_{llm_streams}")
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
self.metrics["step"] += 1
self.metrics["llm_ends"] += 1
self.metrics["ends"] += 1
llm_ends = self.metrics["llm_ends"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_llm_end"})
resp.update(flatten_dict(response.llm_output or {}))
resp.update(self.metrics)
for generations in response.generations:
for idx, generation in enumerate(generations):
generation_resp = deepcopy(resp)
generation_resp.update(flatten_dict(generation.dict()))
self.jsonf(
                    generation_resp,
self.temp_dir,
f"llm_end_{llm_ends}_generation_{idx}",
)
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when LLM errors."""
self.metrics["step"] += 1
self.metrics["errors"] += 1
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Run when chain starts running."""
self.metrics["step"] += 1
self.metrics["chain_starts"] += 1
self.metrics["starts"] += 1
chain_starts = self.metrics["chain_starts"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_chain_start"})
resp.update(flatten_dict(serialized))
resp.update(self.metrics)
chain_input = ",".join([f"{k}={v}" for k, v in inputs.items()])
input_resp = deepcopy(resp)
input_resp["inputs"] = chain_input
self.jsonf(input_resp, self.temp_dir, f"chain_start_{chain_starts}")
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
self.metrics["step"] += 1
self.metrics["chain_ends"] += 1
self.metrics["ends"] += 1
chain_ends = self.metrics["chain_ends"]
resp: Dict[str, Any] = {}
chain_output = ",".join([f"{k}={v}" for k, v in outputs.items()])
resp.update({"action": "on_chain_end", "outputs": chain_output})
resp.update(self.metrics)
self.jsonf(resp, self.temp_dir, f"chain_end_{chain_ends}")
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when chain errors."""
self.metrics["step"] += 1
self.metrics["errors"] += 1
def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> None:
"""Run when tool starts running."""
self.metrics["step"] += 1
self.metrics["tool_starts"] += 1
self.metrics["starts"] += 1
tool_starts = self.metrics["tool_starts"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_tool_start", "input_str": input_str})
resp.update(flatten_dict(serialized))
resp.update(self.metrics)
self.jsonf(resp, self.temp_dir, f"tool_start_{tool_starts}")
def on_tool_end(self, output: str, **kwargs: Any) -> None:
"""Run when tool ends running."""
self.metrics["step"] += 1
self.metrics["tool_ends"] += 1
self.metrics["ends"] += 1
tool_ends = self.metrics["tool_ends"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_tool_end", "output": output})
resp.update(self.metrics)
self.jsonf(resp, self.temp_dir, f"tool_end_{tool_ends}")
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when tool errors."""
self.metrics["step"] += 1
self.metrics["errors"] += 1
def on_text(self, text: str, **kwargs: Any) -> None:
"""
Run when agent is ending.
"""
self.metrics["step"] += 1
self.metrics["text_ctr"] += 1
text_ctr = self.metrics["text_ctr"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_text", "text": text})
resp.update(self.metrics)
self.jsonf(resp, self.temp_dir, f"on_text_{text_ctr}")
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run when agent ends running."""
self.metrics["step"] += 1
self.metrics["agent_ends"] += 1
self.metrics["ends"] += 1
agent_ends = self.metrics["agent_ends"]
resp: Dict[str, Any] = {}
resp.update(
{
"action": "on_agent_finish",
"output": finish.return_values["output"],
"log": finish.log,
}
)
resp.update(self.metrics)
self.jsonf(resp, self.temp_dir, f"agent_finish_{agent_ends}")
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action."""
self.metrics["step"] += 1
self.metrics["tool_starts"] += 1
self.metrics["starts"] += 1
tool_starts = self.metrics["tool_starts"]
resp: Dict[str, Any] = {}
resp.update(
{
"action": "on_agent_action",
"tool": action.tool,
"tool_input": action.tool_input,
"log": action.log,
}
)
resp.update(self.metrics)
self.jsonf(resp, self.temp_dir, f"agent_action_{tool_starts}")
def jsonf(
self,
data: Dict[str, Any],
data_dir: str,
filename: str,
is_output: Optional[bool] = True,
) -> None:
"""To log the input data as json file artifact."""
file_path = os.path.join(data_dir, f"{filename}.json")
save_json(data, file_path)
self.run.log_file(file_path, name=filename, is_output=is_output)
def flush_tracker(self) -> None:
"""Reset the steps and delete the temporary local directory."""
self._reset()
shutil.rmtree(self.temp_dir)
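# A minimal smoke-test sketch (not part of the original module). In real use the
# handler is constructed with a sagemaker.experiments.run.Run; the stand-in
# below only implements log_file, so the logging flow above can be exercised
# without AWS credentials. Guarded so nothing runs on import.
if __name__ == "__main__":
    class _FakeRun:
        """Stand-in for sagemaker.experiments.run.Run, used only for this sketch."""
        def log_file(self, file_path: str, name: str, is_output: bool = True) -> None:
            print(f"would upload {file_path} as artifact {name}")
    handler = SageMakerCallbackHandler(_FakeRun())
    handler.on_llm_start({"name": "fake-llm"}, ["Hello, world"])
    handler.on_llm_new_token("Hi")
    handler.flush_tracker()  # resets counters and removes the temp directory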
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~embeddings~ollama.py | import logging
from typing import Any, Dict, List, Mapping, Optional
import requests
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra
logger = logging.getLogger(__name__)
class OllamaEmbeddings(BaseModel, Embeddings):
"""Ollama locally runs large language models.
To use, follow the instructions at https://ollama.ai/.
Example:
.. code-block:: python
from langchain_community.embeddings import OllamaEmbeddings
ollama_emb = OllamaEmbeddings(
model="llama:7b",
)
r1 = ollama_emb.embed_documents(
[
"Alpha is the first letter of Greek alphabet",
"Beta is the second letter of Greek alphabet",
]
)
r2 = ollama_emb.embed_query(
"What is the second letter of Greek alphabet"
)
"""
base_url: str = "http://localhost:11434"
"""Base url the model is hosted under."""
model: str = "llama2"
"""Model name to use."""
embed_instruction: str = "passage: "
"""Instruction used to embed documents."""
query_instruction: str = "query: "
"""Instruction used to embed the query."""
mirostat: Optional[int] = None
"""Enable Mirostat sampling for controlling perplexity.
(default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)"""
mirostat_eta: Optional[float] = None
"""Influences how quickly the algorithm responds to feedback
from the generated text. A lower learning rate will result in
slower adjustments, while a higher learning rate will make
the algorithm more responsive. (Default: 0.1)"""
mirostat_tau: Optional[float] = None
"""Controls the balance between coherence and diversity
of the output. A lower value will result in more focused and
coherent text. (Default: 5.0)"""
num_ctx: Optional[int] = None
"""Sets the size of the context window used to generate the
next token. (Default: 2048) """
num_gpu: Optional[int] = None
"""The number of GPUs to use. On macOS it defaults to 1 to
enable metal support, 0 to disable."""
num_thread: Optional[int] = None
"""Sets the number of threads to use during computation.
By default, Ollama will detect this for optimal performance.
It is recommended to set this value to the number of physical
CPU cores your system has (as opposed to the logical number of cores)."""
repeat_last_n: Optional[int] = None
"""Sets how far back for the model to look back to prevent
repetition. (Default: 64, 0 = disabled, -1 = num_ctx)"""
repeat_penalty: Optional[float] = None
"""Sets how strongly to penalize repetitions. A higher value (e.g., 1.5)
will penalize repetitions more strongly, while a lower value (e.g., 0.9)
will be more lenient. (Default: 1.1)"""
temperature: Optional[float] = None
"""The temperature of the model. Increasing the temperature will
make the model answer more creatively. (Default: 0.8)"""
stop: Optional[List[str]] = None
"""Sets the stop tokens to use."""
tfs_z: Optional[float] = None
"""Tail free sampling is used to reduce the impact of less probable
tokens from the output. A higher value (e.g., 2.0) will reduce the
impact more, while a value of 1.0 disables this setting. (default: 1)"""
top_k: Optional[int] = None
"""Reduces the probability of generating nonsense. A higher value (e.g. 100)
will give more diverse answers, while a lower value (e.g. 10)
will be more conservative. (Default: 40)"""
    top_p: Optional[float] = None
"""Works together with top-k. A higher value (e.g., 0.95) will lead
to more diverse text, while a lower value (e.g., 0.5) will
generate more focused and conservative text. (Default: 0.9)"""
show_progress: bool = False
"""Whether to show a tqdm progress bar. Must have `tqdm` installed."""
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Ollama."""
return {
"model": self.model,
"options": {
"mirostat": self.mirostat,
"mirostat_eta": self.mirostat_eta,
"mirostat_tau": self.mirostat_tau,
"num_ctx": self.num_ctx,
"num_gpu": self.num_gpu,
"num_thread": self.num_thread,
"repeat_last_n": self.repeat_last_n,
"repeat_penalty": self.repeat_penalty,
"temperature": self.temperature,
"stop": self.stop,
"tfs_z": self.tfs_z,
"top_k": self.top_k,
"top_p": self.top_p,
},
}
model_kwargs: Optional[dict] = None
"""Other model keyword args"""
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _process_emb_response(self, input: str) -> List[float]:
"""Process a response from the API.
Args:
response: The response from the API.
Returns:
The response as a dictionary.
"""
headers = {
"Content-Type": "application/json",
}
try:
res = requests.post(
f"{self.base_url}/api/embeddings",
headers=headers,
json={"model": self.model, "prompt": input, **self._default_params},
)
except requests.exceptions.RequestException as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
if res.status_code != 200:
raise ValueError(
"Error raised by inference API HTTP code: %s, %s"
% (res.status_code, res.text)
)
try:
t = res.json()
return t["embedding"]
except requests.exceptions.JSONDecodeError as e:
raise ValueError(
f"Error raised by inference API: {e}.\nResponse: {res.text}"
)
def _embed(self, input: List[str]) -> List[List[float]]:
if self.show_progress:
try:
from tqdm import tqdm
iter_ = tqdm(input, desc="OllamaEmbeddings")
except ImportError:
logger.warning(
"Unable to show progress bar because tqdm could not be imported. "
"Please install with `pip install tqdm`."
)
iter_ = input
else:
iter_ = input
return [self._process_emb_response(prompt) for prompt in iter_]
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed documents using an Ollama deployed embedding model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
instruction_pairs = [f"{self.embed_instruction}{text}" for text in texts]
embeddings = self._embed(instruction_pairs)
return embeddings
def embed_query(self, text: str) -> List[float]:
"""Embed a query using a Ollama deployed embedding model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
instruction_pair = f"{self.query_instruction}{text}"
embedding = self._embed([instruction_pair])[0]
return embedding
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~vectorstores~test_hologres.py | """Test Hologres functionality."""
import os
from typing import List
from libs.core.langchain_core.documents import Document
from langchain_community.vectorstores.hologres import Hologres
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
CONNECTION_STRING = Hologres.connection_string_from_db_params(
host=os.environ.get("TEST_HOLOGRES_HOST", "localhost"),
port=int(os.environ.get("TEST_HOLOGRES_PORT", "80")),
database=os.environ.get("TEST_HOLOGRES_DATABASE", "postgres"),
user=os.environ.get("TEST_HOLOGRES_USER", "postgres"),
password=os.environ.get("TEST_HOLOGRES_PASSWORD", "postgres"),
)
ADA_TOKEN_COUNT = 1536
class FakeEmbeddingsWithAdaDimension(FakeEmbeddings):
"""Fake embeddings functionality for testing."""
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Return simple embeddings."""
return [
[float(1.0)] * (ADA_TOKEN_COUNT - 1) + [float(i)] for i in range(len(texts))
]
def embed_query(self, text: str) -> List[float]:
"""Return simple embeddings."""
return [float(1.0)] * (ADA_TOKEN_COUNT - 1) + [float(0.0)]
def test_hologres() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docsearch = Hologres.from_texts(
texts=texts,
table_name="test_table",
embedding=FakeEmbeddingsWithAdaDimension(),
connection_string=CONNECTION_STRING,
pre_delete_table=True,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_hologres_embeddings() -> None:
"""Test end to end construction with embeddings and search."""
texts = ["foo", "bar", "baz"]
text_embeddings = FakeEmbeddingsWithAdaDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
docsearch = Hologres.from_embeddings(
text_embeddings=text_embedding_pairs,
table_name="test_table",
embedding=FakeEmbeddingsWithAdaDimension(),
connection_string=CONNECTION_STRING,
pre_delete_table=True,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_hologres_with_metadatas() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = Hologres.from_texts(
texts=texts,
table_name="test_table",
embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas,
connection_string=CONNECTION_STRING,
pre_delete_table=True,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"page": "0"})]
def test_hologres_with_metadatas_with_scores() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = Hologres.from_texts(
texts=texts,
table_name="test_table",
embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas,
connection_string=CONNECTION_STRING,
pre_delete_table=True,
)
output = docsearch.similarity_search_with_score("foo", k=1)
assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
def test_hologres_with_filter_match() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = Hologres.from_texts(
texts=texts,
table_name="test_table_filter",
embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas,
connection_string=CONNECTION_STRING,
pre_delete_table=True,
)
output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "0"})
assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
def test_hologres_with_filter_distant_match() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = Hologres.from_texts(
texts=texts,
table_name="test_table_filter",
embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas,
connection_string=CONNECTION_STRING,
pre_delete_table=True,
)
output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "2"})
assert output == [(Document(page_content="baz", metadata={"page": "2"}), 4.0)]
def test_hologres_with_filter_no_match() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = Hologres.from_texts(
texts=texts,
table_name="test_table_filter",
embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas,
connection_string=CONNECTION_STRING,
pre_delete_table=True,
)
output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "5"})
assert output == []
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~vectorstores~milvus.py | from __future__ import annotations
import logging
from typing import Any, Iterable, List, Optional, Tuple, Union
from uuid import uuid4
import numpy as np
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.vectorstores import VectorStore
from langchain_community.vectorstores.utils import maximal_marginal_relevance
logger = logging.getLogger(__name__)
DEFAULT_MILVUS_CONNECTION = {
"host": "localhost",
"port": "19530",
"user": "",
"password": "",
"secure": False,
}
class Milvus(VectorStore):
"""`Milvus` vector store.
You need to install `pymilvus` and run Milvus.
See the following documentation for how to run a Milvus instance:
https://milvus.io/docs/install_standalone-docker.md
If looking for a hosted Milvus, take a look at this documentation:
https://zilliz.com/cloud and make use of the Zilliz vectorstore found in
this project.
IF USING L2/IP metric, IT IS HIGHLY SUGGESTED TO NORMALIZE YOUR DATA.
Args:
embedding_function (Embeddings): Function used to embed the text.
collection_name (str): Which Milvus collection to use. Defaults to
"LangChainCollection".
connection_args (Optional[dict[str, any]]): The connection args used for
            this class come in the form of a dict.
consistency_level (str): The consistency level to use for a collection.
Defaults to "Session".
index_params (Optional[dict]): Which index params to use. Defaults to
HNSW/AUTOINDEX depending on service.
search_params (Optional[dict]): Which search params to use. Defaults to
default of index.
drop_old (Optional[bool]): Whether to drop the current collection. Defaults
to False.
primary_field (str): Name of the primary key field. Defaults to "pk".
text_field (str): Name of the text field. Defaults to "text".
vector_field (str): Name of the vector field. Defaults to "vector".
    The connection args used for this class come in the form of a dict;
    here are a few of the options:
address (str): The actual address of Milvus
instance. Example address: "localhost:19530"
uri (str): The uri of Milvus instance. Example uri:
"http://randomwebsite:19530",
"tcp:foobarsite:19530",
"https://ok.s3.south.com:19530".
host (str): The host of Milvus instance. Default at "localhost",
PyMilvus will fill in the default host if only port is provided.
port (str/int): The port of Milvus instance. Default at 19530, PyMilvus
will fill in the default port if only host is provided.
user (str): Use which user to connect to Milvus instance. If user and
password are provided, we will add related header in every RPC call.
password (str): Required when user is provided. The password
corresponding to the user.
secure (bool): Default is false. If set to true, tls will be enabled.
        client_key_path (str): If using TLS two-way authentication, the path
            to the client.key file.
        client_pem_path (str): If using TLS two-way authentication, the path
            to the client.pem file.
        ca_pem_path (str): If using TLS two-way authentication, the path to
            the ca.pem file.
        server_pem_path (str): If using TLS one-way authentication, the path
            to the server.pem file.
        server_name (str): If using TLS, the common name of the server.
Example:
.. code-block:: python
from langchain_community.vectorstores import Milvus
from langchain_community.embeddings import OpenAIEmbeddings
embedding = OpenAIEmbeddings()
# Connect to a milvus instance on localhost
milvus_store = Milvus(
embedding_function = Embeddings,
collection_name = "LangChainCollection",
drop_old = True,
)
Raises:
ValueError: If the pymilvus python package is not installed.
"""
def __init__(
self,
embedding_function: Embeddings,
collection_name: str = "LangChainCollection",
connection_args: Optional[dict[str, Any]] = None,
consistency_level: str = "Session",
index_params: Optional[dict] = None,
search_params: Optional[dict] = None,
drop_old: Optional[bool] = False,
*,
primary_field: str = "pk",
text_field: str = "text",
vector_field: str = "vector",
):
"""Initialize the Milvus vector store."""
try:
from pymilvus import Collection, utility
except ImportError:
raise ValueError(
"Could not import pymilvus python package. "
"Please install it with `pip install pymilvus`."
)
# Default search params when one is not provided.
self.default_search_params = {
"IVF_FLAT": {"metric_type": "L2", "params": {"nprobe": 10}},
"IVF_SQ8": {"metric_type": "L2", "params": {"nprobe": 10}},
"IVF_PQ": {"metric_type": "L2", "params": {"nprobe": 10}},
"HNSW": {"metric_type": "L2", "params": {"ef": 10}},
"RHNSW_FLAT": {"metric_type": "L2", "params": {"ef": 10}},
"RHNSW_SQ": {"metric_type": "L2", "params": {"ef": 10}},
"RHNSW_PQ": {"metric_type": "L2", "params": {"ef": 10}},
"IVF_HNSW": {"metric_type": "L2", "params": {"nprobe": 10, "ef": 10}},
"ANNOY": {"metric_type": "L2", "params": {"search_k": 10}},
"AUTOINDEX": {"metric_type": "L2", "params": {}},
}
self.embedding_func = embedding_function
self.collection_name = collection_name
self.index_params = index_params
self.search_params = search_params
self.consistency_level = consistency_level
        # In order for a collection to be compatible, pk needs to be auto-id and int
self._primary_field = primary_field
# In order for compatibility, the text field will need to be called "text"
self._text_field = text_field
# In order for compatibility, the vector field needs to be called "vector"
self._vector_field = vector_field
self.fields: list[str] = []
# Create the connection to the server
if connection_args is None:
connection_args = DEFAULT_MILVUS_CONNECTION
self.alias = self._create_connection_alias(connection_args)
self.col: Optional[Collection] = None
# Grab the existing collection if it exists
if utility.has_collection(self.collection_name, using=self.alias):
self.col = Collection(
self.collection_name,
using=self.alias,
)
# If need to drop old, drop it
if drop_old and isinstance(self.col, Collection):
self.col.drop()
self.col = None
# Initialize the vector store
self._init()
@property
def embeddings(self) -> Embeddings:
return self.embedding_func
def _create_connection_alias(self, connection_args: dict) -> str:
"""Create the connection to the Milvus server."""
from pymilvus import MilvusException, connections
# Grab the connection arguments that are used for checking existing connection
host: str = connection_args.get("host", None)
port: Union[str, int] = connection_args.get("port", None)
address: str = connection_args.get("address", None)
uri: str = connection_args.get("uri", None)
user = connection_args.get("user", None)
# Order of use is host/port, uri, address
if host is not None and port is not None:
given_address = str(host) + ":" + str(port)
elif uri is not None:
given_address = uri.split("https://")[1]
elif address is not None:
given_address = address
else:
given_address = None
logger.debug("Missing standard address type for reuse attempt")
# User defaults to empty string when getting connection info
if user is not None:
tmp_user = user
else:
tmp_user = ""
# If a valid address was given, then check if a connection exists
if given_address is not None:
for con in connections.list_connections():
addr = connections.get_connection_addr(con[0])
if (
con[1]
and ("address" in addr)
and (addr["address"] == given_address)
and ("user" in addr)
and (addr["user"] == tmp_user)
):
logger.debug("Using previous connection: %s", con[0])
return con[0]
# Generate a new connection if one doesn't exist
alias = uuid4().hex
try:
connections.connect(alias=alias, **connection_args)
logger.debug("Created new connection using: %s", alias)
return alias
except MilvusException as e:
logger.error("Failed to create new connection using: %s", alias)
raise e
def _init(
self, embeddings: Optional[list] = None, metadatas: Optional[list[dict]] = None
) -> None:
if embeddings is not None:
self._create_collection(embeddings, metadatas)
self._extract_fields()
self._create_index()
self._create_search_params()
self._load()
def _create_collection(
self, embeddings: list, metadatas: Optional[list[dict]] = None
) -> None:
from pymilvus import (
Collection,
CollectionSchema,
DataType,
FieldSchema,
MilvusException,
)
from pymilvus.orm.types import infer_dtype_bydata
# Determine embedding dim
dim = len(embeddings[0])
fields = []
# Determine metadata schema
if metadatas:
# Create FieldSchema for each entry in metadata.
for key, value in metadatas[0].items():
# Infer the corresponding datatype of the metadata
dtype = infer_dtype_bydata(value)
# Datatype isn't compatible
if dtype == DataType.UNKNOWN or dtype == DataType.NONE:
logger.error(
"Failure to create collection, unrecognized dtype for key: %s",
key,
)
raise ValueError(f"Unrecognized datatype for {key}.")
                # Datatype is a string/varchar equivalent
elif dtype == DataType.VARCHAR:
fields.append(FieldSchema(key, DataType.VARCHAR, max_length=65_535))
else:
fields.append(FieldSchema(key, dtype))
# Create the text field
fields.append(
FieldSchema(self._text_field, DataType.VARCHAR, max_length=65_535)
)
# Create the primary key field
fields.append(
FieldSchema(
self._primary_field, DataType.INT64, is_primary=True, auto_id=True
)
)
# Create the vector field, supports binary or float vectors
fields.append(
FieldSchema(self._vector_field, infer_dtype_bydata(embeddings[0]), dim=dim)
)
# Create the schema for the collection
schema = CollectionSchema(fields)
# Create the collection
try:
self.col = Collection(
name=self.collection_name,
schema=schema,
consistency_level=self.consistency_level,
using=self.alias,
)
except MilvusException as e:
logger.error(
"Failed to create collection: %s error: %s", self.collection_name, e
)
raise e
def _extract_fields(self) -> None:
"""Grab the existing fields from the Collection"""
from pymilvus import Collection
if isinstance(self.col, Collection):
schema = self.col.schema
for x in schema.fields:
self.fields.append(x.name)
# Since primary field is auto-id, no need to track it
self.fields.remove(self._primary_field)
def _get_index(self) -> Optional[dict[str, Any]]:
"""Return the vector index information if it exists"""
from pymilvus import Collection
if isinstance(self.col, Collection):
for x in self.col.indexes:
if x.field_name == self._vector_field:
return x.to_dict()
return None
def _create_index(self) -> None:
"""Create a index on the collection"""
from pymilvus import Collection, MilvusException
if isinstance(self.col, Collection) and self._get_index() is None:
try:
# If no index params, use a default HNSW based one
if self.index_params is None:
self.index_params = {
"metric_type": "L2",
"index_type": "HNSW",
"params": {"M": 8, "efConstruction": 64},
}
try:
self.col.create_index(
self._vector_field,
index_params=self.index_params,
using=self.alias,
)
# If default did not work, most likely on Zilliz Cloud
except MilvusException:
# Use AUTOINDEX based index
self.index_params = {
"metric_type": "L2",
"index_type": "AUTOINDEX",
"params": {},
}
self.col.create_index(
self._vector_field,
index_params=self.index_params,
using=self.alias,
)
logger.debug(
"Successfully created an index on collection: %s",
self.collection_name,
)
except MilvusException as e:
logger.error(
"Failed to create an index on collection: %s", self.collection_name
)
raise e
def _create_search_params(self) -> None:
"""Generate search params based on the current index type"""
from pymilvus import Collection
if isinstance(self.col, Collection) and self.search_params is None:
index = self._get_index()
if index is not None:
index_type: str = index["index_param"]["index_type"]
metric_type: str = index["index_param"]["metric_type"]
self.search_params = self.default_search_params[index_type]
self.search_params["metric_type"] = metric_type
def _load(self) -> None:
"""Load the collection if available."""
from pymilvus import Collection
if isinstance(self.col, Collection) and self._get_index() is not None:
self.col.load()
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
timeout: Optional[int] = None,
batch_size: int = 1000,
**kwargs: Any,
) -> List[str]:
"""Insert text data into Milvus.
        Inserting data when the collection has not been made yet will result
        in creating a new Collection. The data of the first entity decides
        the schema of the new collection: the dim is extracted from the first
        embedding and the columns are decided by the first metadata dict.
        Metadata keys will need to be present for all inserted values. At
        the moment there is no None equivalent in Milvus.
Args:
texts (Iterable[str]): The texts to embed, it is assumed
that they all fit in memory.
metadatas (Optional[List[dict]]): Metadata dicts attached to each of
the texts. Defaults to None.
timeout (Optional[int]): Timeout for each batch insert. Defaults
to None.
batch_size (int, optional): Batch size to use for insertion.
Defaults to 1000.
Raises:
MilvusException: Failure to add texts
Returns:
List[str]: The resulting keys for each inserted element.
"""
from pymilvus import Collection, MilvusException
texts = list(texts)
try:
embeddings = self.embedding_func.embed_documents(texts)
except NotImplementedError:
embeddings = [self.embedding_func.embed_query(x) for x in texts]
if len(embeddings) == 0:
logger.debug("Nothing to insert, skipping.")
return []
# If the collection hasn't been initialized yet, perform all steps to do so
if not isinstance(self.col, Collection):
self._init(embeddings, metadatas)
# Dict to hold all insert columns
insert_dict: dict[str, list] = {
self._text_field: texts,
self._vector_field: embeddings,
}
# Collect the metadata into the insert dict.
if metadatas is not None:
for d in metadatas:
for key, value in d.items():
if key in self.fields:
insert_dict.setdefault(key, []).append(value)
# Total insert count
vectors: list = insert_dict[self._vector_field]
total_count = len(vectors)
pks: list[str] = []
assert isinstance(self.col, Collection)
for i in range(0, total_count, batch_size):
# Grab end index
end = min(i + batch_size, total_count)
# Convert dict to list of lists batch for insertion
insert_list = [insert_dict[x][i:end] for x in self.fields]
# Insert into the collection.
try:
res: Collection
res = self.col.insert(insert_list, timeout=timeout, **kwargs)
pks.extend(res.primary_keys)
except MilvusException as e:
logger.error(
"Failed to insert batch starting at entity: %s/%s", i, total_count
)
raise e
return pks
def similarity_search(
self,
query: str,
k: int = 4,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a similarity search against the query string.
Args:
query (str): The text to search.
k (int, optional): How many results to return. Defaults to 4.
param (dict, optional): The search params for the index type.
Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
timeout (int, optional): How long to wait before timeout error.
Defaults to None.
kwargs: Collection.search() keyword arguments.
Returns:
List[Document]: Document results for search.
"""
if self.col is None:
logger.debug("No existing collection to search.")
return []
res = self.similarity_search_with_score(
query=query, k=k, param=param, expr=expr, timeout=timeout, **kwargs
)
return [doc for doc, _ in res]
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a similarity search against the query string.
Args:
embedding (List[float]): The embedding vector to search.
k (int, optional): How many results to return. Defaults to 4.
param (dict, optional): The search params for the index type.
Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
timeout (int, optional): How long to wait before timeout error.
Defaults to None.
kwargs: Collection.search() keyword arguments.
Returns:
List[Document]: Document results for search.
"""
if self.col is None:
logger.debug("No existing collection to search.")
return []
res = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, param=param, expr=expr, timeout=timeout, **kwargs
)
return [doc for doc, _ in res]
def similarity_search_with_score(
self,
query: str,
k: int = 4,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Perform a search on a query string and return results with score.
For more information about the search parameters, take a look at the pymilvus
documentation found here:
https://milvus.io/api-reference/pymilvus/v2.2.6/Collection/search().md
Args:
query (str): The text being searched.
k (int, optional): The amount of results to return. Defaults to 4.
param (dict): The search params for the specified index.
Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
timeout (int, optional): How long to wait before timeout error.
Defaults to None.
kwargs: Collection.search() keyword arguments.
Returns:
            List[Tuple[Document, float]]: Result doc and score.
"""
if self.col is None:
logger.debug("No existing collection to search.")
return []
# Embed the query text.
embedding = self.embedding_func.embed_query(query)
res = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, param=param, expr=expr, timeout=timeout, **kwargs
)
return res
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Perform a search on a query string and return results with score.
For more information about the search parameters, take a look at the pymilvus
documentation found here:
https://milvus.io/api-reference/pymilvus/v2.2.6/Collection/search().md
Args:
embedding (List[float]): The embedding vector being searched.
k (int, optional): The amount of results to return. Defaults to 4.
param (dict): The search params for the specified index.
Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
timeout (int, optional): How long to wait before timeout error.
Defaults to None.
kwargs: Collection.search() keyword arguments.
Returns:
List[Tuple[Document, float]]: Result doc and score.
"""
if self.col is None:
logger.debug("No existing collection to search.")
return []
if param is None:
param = self.search_params
# Determine result metadata fields.
output_fields = self.fields[:]
output_fields.remove(self._vector_field)
# Perform the search.
res = self.col.search(
data=[embedding],
anns_field=self._vector_field,
param=param,
limit=k,
expr=expr,
output_fields=output_fields,
timeout=timeout,
**kwargs,
)
# Organize results.
ret = []
for result in res[0]:
meta = {x: result.entity.get(x) for x in output_fields}
doc = Document(page_content=meta.pop(self._text_field), metadata=meta)
pair = (doc, result.score)
ret.append(pair)
return ret
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a search and return results that are reordered by MMR.
Args:
query (str): The text being searched.
k (int, optional): How many results to give. Defaults to 4.
fetch_k (int, optional): Total results to select k from.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5
param (dict, optional): The search params for the specified index.
Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
timeout (int, optional): How long to wait before timeout error.
Defaults to None.
kwargs: Collection.search() keyword arguments.
Returns:
List[Document]: Document results for search.
"""
if self.col is None:
logger.debug("No existing collection to search.")
return []
embedding = self.embedding_func.embed_query(query)
return self.max_marginal_relevance_search_by_vector(
embedding=embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
param=param,
expr=expr,
timeout=timeout,
**kwargs,
)
def max_marginal_relevance_search_by_vector(
self,
embedding: list[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a search and return results that are reordered by MMR.
Args:
            embedding (List[float]): The embedding vector being searched.
k (int, optional): How many results to give. Defaults to 4.
fetch_k (int, optional): Total results to select k from.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5
param (dict, optional): The search params for the specified index.
Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
timeout (int, optional): How long to wait before timeout error.
Defaults to None.
kwargs: Collection.search() keyword arguments.
Returns:
List[Document]: Document results for search.
"""
if self.col is None:
logger.debug("No existing collection to search.")
return []
if param is None:
param = self.search_params
# Determine result metadata fields.
output_fields = self.fields[:]
output_fields.remove(self._vector_field)
# Perform the search.
res = self.col.search(
data=[embedding],
anns_field=self._vector_field,
param=param,
limit=fetch_k,
expr=expr,
output_fields=output_fields,
timeout=timeout,
**kwargs,
)
# Organize results.
ids = []
documents = []
scores = []
for result in res[0]:
meta = {x: result.entity.get(x) for x in output_fields}
doc = Document(page_content=meta.pop(self._text_field), metadata=meta)
documents.append(doc)
scores.append(result.score)
ids.append(result.id)
vectors = self.col.query(
expr=f"{self._primary_field} in {ids}",
output_fields=[self._primary_field, self._vector_field],
timeout=timeout,
)
# Reorganize the results from query to match search order.
vectors = {x[self._primary_field]: x[self._vector_field] for x in vectors}
ordered_result_embeddings = [vectors[x] for x in ids]
# Get the new order of results.
new_ordering = maximal_marginal_relevance(
np.array(embedding), ordered_result_embeddings, k=k, lambda_mult=lambda_mult
)
# Reorder the values and return.
ret = []
for x in new_ordering:
# Function can return -1 index
if x == -1:
break
else:
ret.append(documents[x])
return ret
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
collection_name: str = "LangChainCollection",
connection_args: dict[str, Any] = DEFAULT_MILVUS_CONNECTION,
consistency_level: str = "Session",
index_params: Optional[dict] = None,
search_params: Optional[dict] = None,
drop_old: bool = False,
**kwargs: Any,
) -> Milvus:
"""Create a Milvus collection, indexes it with HNSW, and insert data.
Args:
texts (List[str]): Text data.
embedding (Embeddings): Embedding function.
metadatas (Optional[List[dict]]): Metadata for each text if it exists.
Defaults to None.
collection_name (str, optional): Collection name to use. Defaults to
"LangChainCollection".
connection_args (dict[str, Any], optional): Connection args to use. Defaults
to DEFAULT_MILVUS_CONNECTION.
consistency_level (str, optional): Which consistency level to use. Defaults
to "Session".
index_params (Optional[dict], optional): Which index_params to use. Defaults
to None.
search_params (Optional[dict], optional): Which search params to use.
Defaults to None.
drop_old (Optional[bool], optional): Whether to drop the collection with
that name if it exists. Defaults to False.
Returns:
Milvus: Milvus Vector Store
"""
vector_db = cls(
embedding_function=embedding,
collection_name=collection_name,
connection_args=connection_args,
consistency_level=consistency_level,
index_params=index_params,
search_params=search_params,
drop_old=drop_old,
**kwargs,
)
vector_db.add_texts(texts=texts, metadatas=metadatas)
return vector_db
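# A minimal end-to-end sketch (not part of the original module). It assumes a
# Milvus instance is reachable at the DEFAULT_MILVUS_CONNECTION address and that
# FakeEmbeddings is importable from langchain_community.embeddings (a stand-in
# embedder so the flow can be read without an embedding provider). Guarded so
# nothing runs on import.
if __name__ == "__main__":
    from langchain_community.embeddings import FakeEmbeddings
    store = Milvus.from_texts(
        texts=["foo", "bar", "baz"],
        embedding=FakeEmbeddings(size=8),
        metadatas=[{"page": i} for i in range(3)],
        collection_name="LangChainExample",
        drop_old=True,
    )
    # Scored similarity search, then a diversity-reordered (MMR) search.
    for doc, score in store.similarity_search_with_score("foo", k=2):
        print(doc.page_content, score)
    print(store.max_marginal_relevance_search("foo", k=2, fetch_k=3))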
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~chains~loading.py | """Functionality for loading chains."""
import json
from pathlib import Path
from typing import Any, Union
import yaml
from libs.core.langchain_core.prompts.loading import (
_load_output_parser,
load_prompt,
load_prompt_from_config,
)
from langchain.chains import ReduceDocumentsChain
from langchain.chains.api.base import APIChain
from langchain.chains.base import Chain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain
from langchain.chains.combine_documents.refine import RefineDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.graph_qa.cypher import GraphCypherQAChain
from langchain.chains.hyde.base import HypotheticalDocumentEmbedder
from langchain.chains.llm import LLMChain
from langchain.chains.llm_checker.base import LLMCheckerChain
from langchain.chains.llm_math.base import LLMMathChain
from langchain.chains.llm_requests import LLMRequestsChain
from langchain.chains.qa_with_sources.base import QAWithSourcesChain
from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain
from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain
from langchain.chains.retrieval_qa.base import RetrievalQA, VectorDBQA
from langchain.llms.loading import load_llm, load_llm_from_config
from langchain.utilities.loading import try_load_from_hub
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/chains/"
def _load_llm_chain(config: dict, **kwargs: Any) -> LLMChain:
"""Load LLM chain from config dict."""
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
else:
raise ValueError("One of `prompt` or `prompt_path` must be present.")
_load_output_parser(config)
return LLMChain(llm=llm, prompt=prompt, **config)
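# An illustrative config shape (an assumption drawn from the branches above, not
# an official schema): an llm_chain config, as consumed via load_chain_from_config,
# may inline the "llm" and "prompt" sub-configs or reference files through
# "llm_path" / "prompt_path". The concrete "_type" values below are examples.
#
#     {
#         "_type": "llm_chain",
#         "llm": {"_type": "openai", "temperature": 0.0},
#         "prompt": {
#             "_type": "prompt",
#             "input_variables": ["question"],
#             "template": "Answer briefly: {question}",
#         },
#     }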
def _load_hyde_chain(config: dict, **kwargs: Any) -> HypotheticalDocumentEmbedder:
"""Load hypothetical document embedder chain from config dict."""
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
if "embeddings" in kwargs:
embeddings = kwargs.pop("embeddings")
else:
raise ValueError("`embeddings` must be present.")
return HypotheticalDocumentEmbedder(
llm_chain=llm_chain, base_embeddings=embeddings, **config
)
def _load_stuff_documents_chain(config: dict, **kwargs: Any) -> StuffDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
if not isinstance(llm_chain, LLMChain):
raise ValueError(f"Expected LLMChain, got {llm_chain}")
if "document_prompt" in config:
prompt_config = config.pop("document_prompt")
document_prompt = load_prompt_from_config(prompt_config)
elif "document_prompt_path" in config:
document_prompt = load_prompt(config.pop("document_prompt_path"))
else:
raise ValueError(
"One of `document_prompt` or `document_prompt_path` must be present."
)
return StuffDocumentsChain(
llm_chain=llm_chain, document_prompt=document_prompt, **config
)
def _load_map_reduce_documents_chain(
config: dict, **kwargs: Any
) -> MapReduceDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
if not isinstance(llm_chain, LLMChain):
raise ValueError(f"Expected LLMChain, got {llm_chain}")
if "reduce_documents_chain" in config:
reduce_documents_chain = load_chain_from_config(
config.pop("reduce_documents_chain")
)
elif "reduce_documents_chain_path" in config:
reduce_documents_chain = load_chain(config.pop("reduce_documents_chain_path"))
else:
reduce_documents_chain = _load_reduce_documents_chain(config)
return MapReduceDocumentsChain(
llm_chain=llm_chain,
reduce_documents_chain=reduce_documents_chain,
**config,
)
def _load_reduce_documents_chain(config: dict, **kwargs: Any) -> ReduceDocumentsChain:
combine_documents_chain = None
collapse_documents_chain = None
if "combine_documents_chain" in config:
combine_document_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_document_chain_config)
elif "combine_document_chain" in config:
combine_document_chain_config = config.pop("combine_document_chain")
combine_documents_chain = load_chain_from_config(combine_document_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
elif "combine_document_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_document_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
if "collapse_documents_chain" in config:
collapse_document_chain_config = config.pop("collapse_documents_chain")
if collapse_document_chain_config is None:
collapse_documents_chain = None
else:
collapse_documents_chain = load_chain_from_config(
collapse_document_chain_config
)
elif "collapse_documents_chain_path" in config:
collapse_documents_chain = load_chain(
config.pop("collapse_documents_chain_path")
)
elif "collapse_document_chain" in config:
collapse_document_chain_config = config.pop("collapse_document_chain")
if collapse_document_chain_config is None:
collapse_documents_chain = None
else:
collapse_documents_chain = load_chain_from_config(
collapse_document_chain_config
)
elif "collapse_document_chain_path" in config:
collapse_documents_chain = load_chain(
config.pop("collapse_document_chain_path")
)
return ReduceDocumentsChain(
combine_documents_chain=combine_documents_chain,
collapse_documents_chain=collapse_documents_chain,
**config,
)
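# An illustrative nesting sketch (an assumption drawn from the branches above,
# not an official schema): a reduce_documents_chain config wraps a combine
# chain and may optionally name a collapse chain; the legacy
# "combine_document_chain" / "collapse_document_chain" spellings are also
# accepted by the loader above.
#
#     {
#         "_type": "reduce_documents_chain",
#         "combine_documents_chain": {"_type": "stuff_documents_chain", ...},
#         "collapse_documents_chain": None,
#     }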
def _load_llm_bash_chain(config: dict, **kwargs: Any) -> Any:
from langchain_experimental.llm_bash.base import LLMBashChain
llm_chain = None
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
# llm attribute is deprecated in favor of llm_chain, here to support old configs
elif "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
# llm_path attribute is deprecated in favor of llm_chain_path,
    # it's here to support old configs
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
if llm_chain:
return LLMBashChain(llm_chain=llm_chain, prompt=prompt, **config)
else:
return LLMBashChain(llm=llm, prompt=prompt, **config)
def _load_llm_checker_chain(config: dict, **kwargs: Any) -> LLMCheckerChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "create_draft_answer_prompt" in config:
create_draft_answer_prompt_config = config.pop("create_draft_answer_prompt")
create_draft_answer_prompt = load_prompt_from_config(
create_draft_answer_prompt_config
)
elif "create_draft_answer_prompt_path" in config:
create_draft_answer_prompt = load_prompt(
config.pop("create_draft_answer_prompt_path")
)
if "list_assertions_prompt" in config:
list_assertions_prompt_config = config.pop("list_assertions_prompt")
list_assertions_prompt = load_prompt_from_config(list_assertions_prompt_config)
elif "list_assertions_prompt_path" in config:
list_assertions_prompt = load_prompt(config.pop("list_assertions_prompt_path"))
if "check_assertions_prompt" in config:
check_assertions_prompt_config = config.pop("check_assertions_prompt")
check_assertions_prompt = load_prompt_from_config(
check_assertions_prompt_config
)
elif "check_assertions_prompt_path" in config:
check_assertions_prompt = load_prompt(
config.pop("check_assertions_prompt_path")
)
if "revised_answer_prompt" in config:
revised_answer_prompt_config = config.pop("revised_answer_prompt")
revised_answer_prompt = load_prompt_from_config(revised_answer_prompt_config)
elif "revised_answer_prompt_path" in config:
revised_answer_prompt = load_prompt(config.pop("revised_answer_prompt_path"))
return LLMCheckerChain(
llm=llm,
create_draft_answer_prompt=create_draft_answer_prompt,
list_assertions_prompt=list_assertions_prompt,
check_assertions_prompt=check_assertions_prompt,
revised_answer_prompt=revised_answer_prompt,
**config,
)
def _load_llm_math_chain(config: dict, **kwargs: Any) -> LLMMathChain:
llm_chain = None
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
# llm attribute is deprecated in favor of llm_chain, here to support old configs
elif "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
# llm_path attribute is deprecated in favor of llm_chain_path,
    # it's kept here to support old configs
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
if llm_chain:
return LLMMathChain(llm_chain=llm_chain, prompt=prompt, **config)
else:
return LLMMathChain(llm=llm, prompt=prompt, **config)
def _load_map_rerank_documents_chain(
config: dict, **kwargs: Any
) -> MapRerankDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
return MapRerankDocumentsChain(llm_chain=llm_chain, **config)
def _load_pal_chain(config: dict, **kwargs: Any) -> Any:
from langchain_experimental.pal_chain import PALChain
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
return PALChain(llm_chain=llm_chain, **config)
def _load_refine_documents_chain(config: dict, **kwargs: Any) -> RefineDocumentsChain:
if "initial_llm_chain" in config:
initial_llm_chain_config = config.pop("initial_llm_chain")
initial_llm_chain = load_chain_from_config(initial_llm_chain_config)
elif "initial_llm_chain_path" in config:
initial_llm_chain = load_chain(config.pop("initial_llm_chain_path"))
else:
raise ValueError(
"One of `initial_llm_chain` or `initial_llm_chain_path` must be present."
)
if "refine_llm_chain" in config:
refine_llm_chain_config = config.pop("refine_llm_chain")
refine_llm_chain = load_chain_from_config(refine_llm_chain_config)
elif "refine_llm_chain_path" in config:
refine_llm_chain = load_chain(config.pop("refine_llm_chain_path"))
else:
raise ValueError(
"One of `refine_llm_chain` or `refine_llm_chain_path` must be present."
)
if "document_prompt" in config:
prompt_config = config.pop("document_prompt")
document_prompt = load_prompt_from_config(prompt_config)
elif "document_prompt_path" in config:
document_prompt = load_prompt(config.pop("document_prompt_path"))
return RefineDocumentsChain(
initial_llm_chain=initial_llm_chain,
refine_llm_chain=refine_llm_chain,
document_prompt=document_prompt,
**config,
)
def _load_qa_with_sources_chain(config: dict, **kwargs: Any) -> QAWithSourcesChain:
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return QAWithSourcesChain(combine_documents_chain=combine_documents_chain, **config)
def _load_sql_database_chain(config: dict, **kwargs: Any) -> Any:
from langchain_experimental.sql import SQLDatabaseChain
if "database" in kwargs:
database = kwargs.pop("database")
else:
raise ValueError("`database` must be present.")
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
chain = load_chain_from_config(llm_chain_config)
return SQLDatabaseChain(llm_chain=chain, database=database, **config)
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
else:
prompt = None
return SQLDatabaseChain.from_llm(llm, database, prompt=prompt, **config)
def _load_vector_db_qa_with_sources_chain(
config: dict, **kwargs: Any
) -> VectorDBQAWithSourcesChain:
if "vectorstore" in kwargs:
vectorstore = kwargs.pop("vectorstore")
else:
raise ValueError("`vectorstore` must be present.")
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return VectorDBQAWithSourcesChain(
combine_documents_chain=combine_documents_chain,
vectorstore=vectorstore,
**config,
)
def _load_retrieval_qa(config: dict, **kwargs: Any) -> RetrievalQA:
if "retriever" in kwargs:
retriever = kwargs.pop("retriever")
else:
raise ValueError("`retriever` must be present.")
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return RetrievalQA(
combine_documents_chain=combine_documents_chain,
retriever=retriever,
**config,
)
def _load_retrieval_qa_with_sources_chain(
config: dict, **kwargs: Any
) -> RetrievalQAWithSourcesChain:
if "retriever" in kwargs:
retriever = kwargs.pop("retriever")
else:
raise ValueError("`retriever` must be present.")
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return RetrievalQAWithSourcesChain(
combine_documents_chain=combine_documents_chain,
retriever=retriever,
**config,
)
def _load_vector_db_qa(config: dict, **kwargs: Any) -> VectorDBQA:
if "vectorstore" in kwargs:
vectorstore = kwargs.pop("vectorstore")
else:
raise ValueError("`vectorstore` must be present.")
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return VectorDBQA(
combine_documents_chain=combine_documents_chain,
vectorstore=vectorstore,
**config,
)
def _load_graph_cypher_chain(config: dict, **kwargs: Any) -> GraphCypherQAChain:
if "graph" in kwargs:
graph = kwargs.pop("graph")
else:
raise ValueError("`graph` must be present.")
if "cypher_generation_chain" in config:
cypher_generation_chain_config = config.pop("cypher_generation_chain")
cypher_generation_chain = load_chain_from_config(cypher_generation_chain_config)
else:
raise ValueError("`cypher_generation_chain` must be present.")
if "qa_chain" in config:
qa_chain_config = config.pop("qa_chain")
qa_chain = load_chain_from_config(qa_chain_config)
else:
raise ValueError("`qa_chain` must be present.")
return GraphCypherQAChain(
graph=graph,
cypher_generation_chain=cypher_generation_chain,
qa_chain=qa_chain,
**config,
)
def _load_api_chain(config: dict, **kwargs: Any) -> APIChain:
if "api_request_chain" in config:
api_request_chain_config = config.pop("api_request_chain")
api_request_chain = load_chain_from_config(api_request_chain_config)
elif "api_request_chain_path" in config:
api_request_chain = load_chain(config.pop("api_request_chain_path"))
else:
raise ValueError(
"One of `api_request_chain` or `api_request_chain_path` must be present."
)
if "api_answer_chain" in config:
api_answer_chain_config = config.pop("api_answer_chain")
api_answer_chain = load_chain_from_config(api_answer_chain_config)
elif "api_answer_chain_path" in config:
api_answer_chain = load_chain(config.pop("api_answer_chain_path"))
else:
raise ValueError(
"One of `api_answer_chain` or `api_answer_chain_path` must be present."
)
if "requests_wrapper" in kwargs:
requests_wrapper = kwargs.pop("requests_wrapper")
else:
raise ValueError("`requests_wrapper` must be present.")
return APIChain(
api_request_chain=api_request_chain,
api_answer_chain=api_answer_chain,
requests_wrapper=requests_wrapper,
**config,
)
def _load_llm_requests_chain(config: dict, **kwargs: Any) -> LLMRequestsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
if "requests_wrapper" in kwargs:
requests_wrapper = kwargs.pop("requests_wrapper")
return LLMRequestsChain(
llm_chain=llm_chain, requests_wrapper=requests_wrapper, **config
)
else:
return LLMRequestsChain(llm_chain=llm_chain, **config)
type_to_loader_dict = {
"api_chain": _load_api_chain,
"hyde_chain": _load_hyde_chain,
"llm_chain": _load_llm_chain,
"llm_bash_chain": _load_llm_bash_chain,
"llm_checker_chain": _load_llm_checker_chain,
"llm_math_chain": _load_llm_math_chain,
"llm_requests_chain": _load_llm_requests_chain,
"pal_chain": _load_pal_chain,
"qa_with_sources_chain": _load_qa_with_sources_chain,
"stuff_documents_chain": _load_stuff_documents_chain,
"map_reduce_documents_chain": _load_map_reduce_documents_chain,
"reduce_documents_chain": _load_reduce_documents_chain,
"map_rerank_documents_chain": _load_map_rerank_documents_chain,
"refine_documents_chain": _load_refine_documents_chain,
"sql_database_chain": _load_sql_database_chain,
"vector_db_qa_with_sources_chain": _load_vector_db_qa_with_sources_chain,
"vector_db_qa": _load_vector_db_qa,
"retrieval_qa": _load_retrieval_qa,
"retrieval_qa_with_sources_chain": _load_retrieval_qa_with_sources_chain,
"graph_cypher_chain": _load_graph_cypher_chain,
}
def load_chain_from_config(config: dict, **kwargs: Any) -> Chain:
"""Load chain from Config Dict."""
if "_type" not in config:
raise ValueError("Must specify a chain Type in config")
config_type = config.pop("_type")
if config_type not in type_to_loader_dict:
raise ValueError(f"Loading {config_type} chain not supported")
chain_loader = type_to_loader_dict[config_type]
return chain_loader(config, **kwargs)
def load_chain(path: Union[str, Path], **kwargs: Any) -> Chain:
"""Unified method for loading a chain from LangChainHub or local fs."""
if hub_result := try_load_from_hub(
path, _load_chain_from_file, "chains", {"json", "yaml"}, **kwargs
):
return hub_result
else:
return _load_chain_from_file(path, **kwargs)
def _load_chain_from_file(file: Union[str, Path], **kwargs: Any) -> Chain:
"""Load chain from file."""
# Convert file to Path object.
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
# Load from either json or yaml.
if file_path.suffix == ".json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix == ".yaml":
with open(file_path, "r") as f:
config = yaml.safe_load(f)
else:
raise ValueError("File type must be json or yaml")
# Override default 'verbose' and 'memory' for the chain
if "verbose" in kwargs:
config["verbose"] = kwargs.pop("verbose")
if "memory" in kwargs:
config["memory"] = kwargs.pop("memory")
# Load the chain from the config now.
return load_chain_from_config(config, **kwargs)
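# Example usage (a hedged sketch, not part of the original module): load a chain
# from a local config file. "llm_chain.yaml" is a hypothetical path; the file must
# contain a `_type` key matching one of the keys in `type_to_loader_dict` above.
if __name__ == "__main__":
    example_chain = load_chain("llm_chain.yaml", verbose=True)
    print(type(example_chain).__name__)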
| [
"list_assertions_prompt",
"create_draft_answer_prompt",
"revised_answer_prompt",
"revised_answer_prompt_path",
"None",
"document_prompt",
"create_draft_answer_prompt_path",
"check_assertions_prompt",
"document_prompt_path",
"prompt_path",
"check_assertions_prompt_path",
"list_assertions_prompt_path"
] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~chains~graph_qa~hugegraph.py | """Question answering over a graph."""
from __future__ import annotations
from typing import Any, Dict, List, Optional
from libs.core.langchain_core.language_models import BaseLanguageModel
from libs.core.langchain_core.prompts import BasePromptTemplate
from libs.core.langchain_core.pydantic_v1 import Field
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.graph_qa.prompts import (
CYPHER_QA_PROMPT,
GREMLIN_GENERATION_PROMPT,
)
from langchain.chains.llm import LLMChain
from langchain.graphs.hugegraph import HugeGraph
class HugeGraphQAChain(Chain):
"""Chain for question-answering against a graph by generating gremlin statements.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
    code may attempt commands that delete or mutate data if prompted to do so,
    or read sensitive data if such data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
graph: HugeGraph = Field(exclude=True)
gremlin_generation_chain: LLMChain
qa_chain: LLMChain
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
@property
def input_keys(self) -> List[str]:
"""Input keys.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Output keys.
:meta private:
"""
_output_keys = [self.output_key]
return _output_keys
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
*,
qa_prompt: BasePromptTemplate = CYPHER_QA_PROMPT,
gremlin_prompt: BasePromptTemplate = GREMLIN_GENERATION_PROMPT,
**kwargs: Any,
) -> HugeGraphQAChain:
"""Initialize from LLM."""
qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
gremlin_generation_chain = LLMChain(llm=llm, prompt=gremlin_prompt)
return cls(
qa_chain=qa_chain,
gremlin_generation_chain=gremlin_generation_chain,
**kwargs,
)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
"""Generate gremlin statement, use it to look up in db and answer question."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
question = inputs[self.input_key]
generated_gremlin = self.gremlin_generation_chain.run(
{"question": question, "schema": self.graph.get_schema}, callbacks=callbacks
)
_run_manager.on_text("Generated gremlin:", end="\n", verbose=self.verbose)
_run_manager.on_text(
generated_gremlin, color="green", end="\n", verbose=self.verbose
)
context = self.graph.query(generated_gremlin)
_run_manager.on_text("Full Context:", end="\n", verbose=self.verbose)
_run_manager.on_text(
str(context), color="green", end="\n", verbose=self.verbose
)
result = self.qa_chain(
{"question": question, "context": context},
callbacks=callbacks,
)
return {self.output_key: result[self.qa_chain.output_key]}
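# Example usage (a minimal, hedged sketch; the connection values and the chat model
# below are placeholders and are not defined by this module):
#
#     from langchain_community.chat_models import ChatOpenAI
#
#     graph = HugeGraph(username="admin", password="xxx", address="localhost")
#     chain = HugeGraphQAChain.from_llm(ChatOpenAI(temperature=0), graph=graph)
#     chain.run("Which movies did Al Pacino act in?")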
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~embeddings~minimax.py | from __future__ import annotations
import logging
from typing import Any, Callable, Dict, List, Optional
import requests
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra, SecretStr, root_validator
from libs.core.langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from tenacity import (
before_sleep_log,
retry,
stop_after_attempt,
wait_exponential,
)
logger = logging.getLogger(__name__)
def _create_retry_decorator() -> Callable[[Any], Any]:
"""Returns a tenacity retry decorator."""
multiplier = 1
min_seconds = 1
max_seconds = 4
max_retries = 6
return retry(
reraise=True,
stop=stop_after_attempt(max_retries),
wait=wait_exponential(multiplier=multiplier, min=min_seconds, max=max_seconds),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def embed_with_retry(embeddings: MiniMaxEmbeddings, *args: Any, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator()
@retry_decorator
def _embed_with_retry(*args: Any, **kwargs: Any) -> Any:
return embeddings.embed(*args, **kwargs)
return _embed_with_retry(*args, **kwargs)
class MiniMaxEmbeddings(BaseModel, Embeddings):
"""MiniMax's embedding service.
To use, you should have the environment variable ``MINIMAX_GROUP_ID`` and
``MINIMAX_API_KEY`` set with your API token, or pass it as a named parameter to
the constructor.
Example:
.. code-block:: python
from langchain_community.embeddings import MiniMaxEmbeddings
embeddings = MiniMaxEmbeddings()
query_text = "This is a test query."
query_result = embeddings.embed_query(query_text)
document_text = "This is a test document."
document_result = embeddings.embed_documents([document_text])
"""
endpoint_url: str = "https://api.minimax.chat/v1/embeddings"
"""Endpoint URL to use."""
model: str = "embo-01"
"""Embeddings model name to use."""
embed_type_db: str = "db"
"""For embed_documents"""
embed_type_query: str = "query"
"""For embed_query"""
minimax_group_id: Optional[str] = None
"""Group ID for MiniMax API."""
minimax_api_key: Optional[SecretStr] = None
"""API Key for MiniMax API."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that group id and api key exists in environment."""
minimax_group_id = get_from_dict_or_env(
values, "minimax_group_id", "MINIMAX_GROUP_ID"
)
minimax_api_key = convert_to_secret_str(
get_from_dict_or_env(values, "minimax_api_key", "MINIMAX_API_KEY")
)
values["minimax_group_id"] = minimax_group_id
values["minimax_api_key"] = minimax_api_key
return values
def embed(
self,
texts: List[str],
embed_type: str,
) -> List[List[float]]:
payload = {
"model": self.model,
"type": embed_type,
"texts": texts,
}
# HTTP headers for authorization
headers = {
"Authorization": f"Bearer {self.minimax_api_key.get_secret_value()}",
"Content-Type": "application/json",
}
params = {
"GroupId": self.minimax_group_id,
}
# send request
response = requests.post(
self.endpoint_url, params=params, headers=headers, json=payload
)
parsed_response = response.json()
# check for errors
if parsed_response["base_resp"]["status_code"] != 0:
raise ValueError(
f"MiniMax API returned an error: {parsed_response['base_resp']}"
)
embeddings = parsed_response["vectors"]
return embeddings
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed documents using a MiniMax embedding endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = embed_with_retry(self, texts=texts, embed_type=self.embed_type_db)
return embeddings
def embed_query(self, text: str) -> List[float]:
"""Embed a query using a MiniMax embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
embeddings = embed_with_retry(
self, texts=[text], embed_type=self.embed_type_query
)
return embeddings[0]
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~retrievers~contextual_compression.py | from typing import Any, List
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.retrievers import BaseRetriever
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain.retrievers.document_compressors.base import (
BaseDocumentCompressor,
)
class ContextualCompressionRetriever(BaseRetriever):
"""Retriever that wraps a base retriever and compresses the results."""
base_compressor: BaseDocumentCompressor
"""Compressor for compressing retrieved documents."""
base_retriever: BaseRetriever
"""Base Retriever to use for getting relevant documents."""
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
**kwargs: Any,
) -> List[Document]:
"""Get documents relevant for a query.
Args:
query: string to find relevant documents for
Returns:
Sequence of relevant documents
"""
docs = self.base_retriever.get_relevant_documents(
query, callbacks=run_manager.get_child(), **kwargs
)
if docs:
compressed_docs = self.base_compressor.compress_documents(
docs, query, callbacks=run_manager.get_child()
)
return list(compressed_docs)
else:
return []
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
**kwargs: Any,
) -> List[Document]:
"""Get documents relevant for a query.
Args:
query: string to find relevant documents for
Returns:
List of relevant documents
"""
docs = await self.base_retriever.aget_relevant_documents(
query, callbacks=run_manager.get_child(), **kwargs
)
if docs:
compressed_docs = await self.base_compressor.acompress_documents(
docs, query, callbacks=run_manager.get_child()
)
return list(compressed_docs)
else:
return []
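# Example usage (a hedged sketch; `my_compressor` and `my_retriever` are placeholders
# for any BaseDocumentCompressor / BaseRetriever pair, e.g. an LLMChainExtractor over
# a vector store retriever):
#
#     compression_retriever = ContextualCompressionRetriever(
#         base_compressor=my_compressor,
#         base_retriever=my_retriever,
#     )
#     docs = compression_retriever.get_relevant_documents("What did the author say?")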
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~callbacks~labelstudio_callback.py | import os
import warnings
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, Union
from uuid import UUID
from libs.core.langchain_core.agents import AgentAction, AgentFinish
from libs.core.langchain_core.callbacks import BaseCallbackHandler
from libs.core.langchain_core.messages import BaseMessage, ChatMessage
from libs.core.langchain_core.outputs import Generation, LLMResult
class LabelStudioMode(Enum):
"""Label Studio mode enumerator."""
PROMPT = "prompt"
CHAT = "chat"
def get_default_label_configs(
mode: Union[str, LabelStudioMode],
) -> Tuple[str, LabelStudioMode]:
"""Get default Label Studio configs for the given mode.
Parameters:
mode: Label Studio mode ("prompt" or "chat")
Returns: Tuple of Label Studio config and mode
"""
_default_label_configs = {
LabelStudioMode.PROMPT.value: """
<View>
<Style>
.prompt-box {
background-color: white;
border-radius: 10px;
box-shadow: 0px 4px 6px rgba(0, 0, 0, 0.1);
padding: 20px;
}
</Style>
<View className="root">
<View className="prompt-box">
<Text name="prompt" value="$prompt"/>
</View>
<TextArea name="response" toName="prompt"
maxSubmissions="1" editable="true"
required="true"/>
</View>
<Header value="Rate the response:"/>
<Rating name="rating" toName="prompt"/>
</View>""",
LabelStudioMode.CHAT.value: """
<View>
<View className="root">
<Paragraphs name="dialogue"
value="$prompt"
layout="dialogue"
textKey="content"
nameKey="role"
granularity="sentence"/>
<Header value="Final response:"/>
<TextArea name="response" toName="dialogue"
maxSubmissions="1" editable="true"
required="true"/>
</View>
<Header value="Rate the response:"/>
<Rating name="rating" toName="dialogue"/>
</View>""",
}
if isinstance(mode, str):
mode = LabelStudioMode(mode)
return _default_label_configs[mode.value], mode
class LabelStudioCallbackHandler(BaseCallbackHandler):
"""Label Studio callback handler.
Provides the ability to send predictions to Label Studio
for human evaluation, feedback and annotation.
Parameters:
api_key: Label Studio API key
url: Label Studio URL
project_id: Label Studio project ID
project_name: Label Studio project name
project_config: Label Studio project config (XML)
mode: Label Studio mode ("prompt" or "chat")
Examples:
>>> from langchain_community.llms import OpenAI
>>> from langchain_community.callbacks import LabelStudioCallbackHandler
>>> handler = LabelStudioCallbackHandler(
... api_key='<your_key_here>',
... url='http://localhost:8080',
... project_name='LangChain-%Y-%m-%d',
... mode='prompt'
... )
>>> llm = OpenAI(callbacks=[handler])
>>> llm.predict('Tell me a story about a dog.')
"""
DEFAULT_PROJECT_NAME: str = "LangChain-%Y-%m-%d"
def __init__(
self,
api_key: Optional[str] = None,
url: Optional[str] = None,
project_id: Optional[int] = None,
project_name: str = DEFAULT_PROJECT_NAME,
project_config: Optional[str] = None,
mode: Union[str, LabelStudioMode] = LabelStudioMode.PROMPT,
):
super().__init__()
# Import LabelStudio SDK
try:
import label_studio_sdk as ls
except ImportError:
raise ImportError(
f"You're using {self.__class__.__name__} in your code,"
f" but you don't have the LabelStudio SDK "
f"Python package installed or upgraded to the latest version. "
f"Please run `pip install -U label-studio-sdk`"
f" before using this callback."
)
# Check if Label Studio API key is provided
if not api_key:
if os.getenv("LABEL_STUDIO_API_KEY"):
api_key = str(os.getenv("LABEL_STUDIO_API_KEY"))
else:
raise ValueError(
f"You're using {self.__class__.__name__} in your code,"
f" Label Studio API key is not provided. "
f"Please provide Label Studio API key: "
f"go to the Label Studio instance, navigate to "
f"Account & Settings -> Access Token and copy the key. "
f"Use the key as a parameter for the callback: "
f"{self.__class__.__name__}"
f"(label_studio_api_key='<your_key_here>', ...) or "
f"set the environment variable LABEL_STUDIO_API_KEY=<your_key_here>"
)
self.api_key = api_key
if not url:
if os.getenv("LABEL_STUDIO_URL"):
url = os.getenv("LABEL_STUDIO_URL")
else:
warnings.warn(
f"Label Studio URL is not provided, "
f"using default URL: {ls.LABEL_STUDIO_DEFAULT_URL}"
f"If you want to provide your own URL, use the parameter: "
f"{self.__class__.__name__}"
f"(label_studio_url='<your_url_here>', ...) "
f"or set the environment variable LABEL_STUDIO_URL=<your_url_here>"
)
url = ls.LABEL_STUDIO_DEFAULT_URL
self.url = url
# Maps run_id to prompts
self.payload: Dict[str, Dict] = {}
self.ls_client = ls.Client(url=self.url, api_key=self.api_key)
self.project_name = project_name
if project_config:
self.project_config = project_config
self.mode = None
else:
self.project_config, self.mode = get_default_label_configs(mode)
self.project_id = project_id or os.getenv("LABEL_STUDIO_PROJECT_ID")
if self.project_id is not None:
self.ls_project = self.ls_client.get_project(int(self.project_id))
else:
project_title = datetime.today().strftime(self.project_name)
existing_projects = self.ls_client.get_projects(title=project_title)
if existing_projects:
self.ls_project = existing_projects[0]
self.project_id = self.ls_project.id
else:
self.ls_project = self.ls_client.create_project(
title=project_title, label_config=self.project_config
)
self.project_id = self.ls_project.id
self.parsed_label_config = self.ls_project.parsed_label_config
# Find the first TextArea tag
# "from_name", "to_name", "value" will be used to create predictions
self.from_name, self.to_name, self.value, self.input_type = (
None,
None,
None,
None,
)
for tag_name, tag_info in self.parsed_label_config.items():
if tag_info["type"] == "TextArea":
self.from_name = tag_name
self.to_name = tag_info["to_name"][0]
self.value = tag_info["inputs"][0]["value"]
self.input_type = tag_info["inputs"][0]["type"]
break
if not self.from_name:
error_message = (
f'Label Studio project "{self.project_name}" '
f"does not have a TextArea tag. "
f"Please add a TextArea tag to the project."
)
if self.mode == LabelStudioMode.PROMPT:
error_message += (
"\nHINT: go to project Settings -> "
"Labeling Interface -> Browse Templates"
' and select "Generative AI -> '
'Supervised Language Model Fine-tuning" template.'
)
else:
error_message += (
"\nHINT: go to project Settings -> "
"Labeling Interface -> Browse Templates"
" and check available templates under "
'"Generative AI" section.'
)
raise ValueError(error_message)
def add_prompts_generations(
self, run_id: str, generations: List[List[Generation]]
) -> None:
# Create tasks in Label Studio
tasks = []
prompts = self.payload[run_id]["prompts"]
model_version = (
self.payload[run_id]["kwargs"]
.get("invocation_params", {})
.get("model_name")
)
for prompt, generation in zip(prompts, generations):
tasks.append(
{
"data": {
self.value: prompt,
"run_id": run_id,
},
"predictions": [
{
"result": [
{
"from_name": self.from_name,
"to_name": self.to_name,
"type": "textarea",
"value": {"text": [g.text for g in generation]},
}
],
"model_version": model_version,
}
],
}
)
self.ls_project.import_tasks(tasks)
def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
**kwargs: Any,
) -> None:
"""Save the prompts in memory when an LLM starts."""
if self.input_type != "Text":
raise ValueError(
f'\nLabel Studio project "{self.project_name}" '
f"has an input type <{self.input_type}>. "
                f'To make it work with the mode="prompt", '
f"the input type should be <Text>.\n"
f"Read more here https://labelstud.io/tags/text"
)
run_id = str(kwargs["run_id"])
self.payload[run_id] = {"prompts": prompts, "kwargs": kwargs}
def _get_message_role(self, message: BaseMessage) -> str:
"""Get the role of the message."""
if isinstance(message, ChatMessage):
return message.role
else:
return message.__class__.__name__
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> Any:
"""Save the prompts in memory when an LLM starts."""
if self.input_type != "Paragraphs":
raise ValueError(
f'\nLabel Studio project "{self.project_name}" '
f"has an input type <{self.input_type}>. "
f'To make it work with the mode="chat", '
f"the input type should be <Paragraphs>.\n"
f"Read more here https://labelstud.io/tags/paragraphs"
)
prompts = []
for message_list in messages:
dialog = []
for message in message_list:
dialog.append(
{
"role": self._get_message_role(message),
"content": message.content,
}
)
prompts.append(dialog)
self.payload[str(run_id)] = {
"prompts": prompts,
"tags": tags,
"metadata": metadata,
"run_id": run_id,
"parent_run_id": parent_run_id,
"kwargs": kwargs,
}
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Do nothing when a new token is generated."""
pass
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Create a new Label Studio task for each prompt and generation."""
run_id = str(kwargs["run_id"])
# Submit results to Label Studio
self.add_prompts_generations(run_id, response.generations)
# Pop current run from `self.runs`
self.payload.pop(run_id)
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing when LLM outputs an error."""
pass
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
pass
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
pass
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing when LLM chain outputs an error."""
pass
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
**kwargs: Any,
) -> None:
"""Do nothing when tool starts."""
pass
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Do nothing when agent takes a specific action."""
pass
def on_tool_end(
self,
output: str,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""Do nothing when tool ends."""
pass
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing when tool outputs an error."""
pass
def on_text(self, text: str, **kwargs: Any) -> None:
"""Do nothing"""
pass
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Do nothing"""
pass
| [
"[]",
"prompt"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~vectorstores~surrealdb.py | import asyncio
from typing import (
Any,
Iterable,
List,
Optional,
Tuple,
)
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.vectorstores import VectorStore
class SurrealDBStore(VectorStore):
"""
SurrealDB as Vector Store.
To use, you should have the ``surrealdb`` python package installed.
Args:
embedding_function: Embedding function to use.
dburl: SurrealDB connection url
ns: surrealdb namespace for the vector store. (default: "langchain")
db: surrealdb database for the vector store. (default: "database")
collection: surrealdb collection for the vector store.
(default: "documents")
(optional) db_user and db_pass: surrealdb credentials
Example:
.. code-block:: python
from langchain.vectorstores.surrealdb import SurrealDBStore
from langchain.embeddings import HuggingFaceEmbeddings
embedding_function = HuggingFaceEmbeddings()
dburl = "ws://localhost:8000/rpc"
ns = "langchain"
db = "docstore"
collection = "documents"
db_user = "root"
db_pass = "root"
            sdb = SurrealDBStore.from_texts(
                        texts=texts,
                        embedding=embedding_function,
                        dburl=dburl,
                        ns=ns, db=db, collection=collection,
                        db_user=db_user, db_pass=db_pass)
"""
def __init__(
self,
embedding_function: Embeddings,
**kwargs: Any,
) -> None:
from surrealdb import Surreal
self.collection = kwargs.pop("collection", "documents")
self.ns = kwargs.pop("ns", "langchain")
self.db = kwargs.pop("db", "database")
self.dburl = kwargs.pop("dburl", "ws://localhost:8000/rpc")
self.embedding_function = embedding_function
self.sdb = Surreal(self.dburl)
self.kwargs = kwargs
async def initialize(self) -> None:
"""
Initialize connection to surrealdb database
and authenticate if credentials are provided
"""
await self.sdb.connect(self.dburl)
if "db_user" in self.kwargs and "db_pass" in self.kwargs:
user = self.kwargs.get("db_user")
password = self.kwargs.get("db_pass")
await self.sdb.signin({"user": user, "pass": password})
await self.sdb.use(self.ns, self.db)
@property
def embeddings(self) -> Optional[Embeddings]:
return (
self.embedding_function
if isinstance(self.embedding_function, Embeddings)
else None
)
async def aadd_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Add list of text along with embeddings to the vector store asynchronously
Args:
texts (Iterable[str]): collection of text to add to the database
Returns:
List of ids for the newly inserted documents
"""
embeddings = self.embedding_function.embed_documents(list(texts))
ids = []
for idx, text in enumerate(texts):
data = {"text": text, "embedding": embeddings[idx]}
if metadatas is not None and idx < len(metadatas):
data["metadata"] = metadatas[idx]
record = await self.sdb.create(
self.collection,
data,
)
ids.append(record[0]["id"])
return ids
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Add list of text along with embeddings to the vector store
Args:
texts (Iterable[str]): collection of text to add to the database
Returns:
List of ids for the newly inserted documents
"""
async def _add_texts(
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
await self.initialize()
return await self.aadd_texts(texts, metadatas, **kwargs)
return asyncio.run(_add_texts(texts, metadatas, **kwargs))
async def adelete(
self,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> Optional[bool]:
"""Delete by document ID asynchronously.
Args:
ids: List of ids to delete.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
Optional[bool]: True if deletion is successful,
False otherwise.
"""
if ids is None:
await self.sdb.delete(self.collection)
return True
else:
if isinstance(ids, str):
await self.sdb.delete(ids)
return True
else:
if isinstance(ids, list) and len(ids) > 0:
_ = [await self.sdb.delete(id) for id in ids]
return True
return False
def delete(
self,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> Optional[bool]:
"""Delete by document ID.
Args:
ids: List of ids to delete.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
Optional[bool]: True if deletion is successful,
False otherwise.
"""
async def _delete(ids: Optional[List[str]], **kwargs: Any) -> Optional[bool]:
await self.initialize()
return await self.adelete(ids=ids, **kwargs)
return asyncio.run(_delete(ids, **kwargs))
async def _asimilarity_search_by_vector_with_score(
self, embedding: List[float], k: int = 4, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""Run similarity search for query embedding asynchronously
and return documents and scores
Args:
embedding (List[float]): Query embedding.
k (int): Number of results to return. Defaults to 4.
Returns:
List of Documents most similar along with scores
"""
args = {
"collection": self.collection,
"embedding": embedding,
"k": k,
"score_threshold": kwargs.get("score_threshold", 0),
}
query = """select id, text, metadata,
vector::similarity::cosine(embedding,{embedding}) as similarity
from {collection}
where vector::similarity::cosine(embedding,{embedding}) >= {score_threshold}
order by similarity desc LIMIT {k}
""".format(**args)
results = await self.sdb.query(query)
if len(results) == 0:
return []
return [
(
Document(
page_content=result["text"],
metadata={"id": result["id"], **result["metadata"]},
),
result["similarity"],
)
for result in results[0]["result"]
]
async def asimilarity_search_with_relevance_scores(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""Run similarity search asynchronously and return relevance scores
Args:
query (str): Query
k (int): Number of results to return. Defaults to 4.
Returns:
List of Documents most similar along with relevance scores
"""
query_embedding = self.embedding_function.embed_query(query)
return [
(document, similarity)
for document, similarity in (
await self._asimilarity_search_by_vector_with_score(
query_embedding, k, **kwargs
)
)
]
def similarity_search_with_relevance_scores(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""Run similarity search synchronously and return relevance scores
Args:
query (str): Query
k (int): Number of results to return. Defaults to 4.
Returns:
List of Documents most similar along with relevance scores
"""
async def _similarity_search_with_relevance_scores() -> (
List[Tuple[Document, float]]
):
await self.initialize()
return await self.asimilarity_search_with_relevance_scores(
query, k, **kwargs
)
return asyncio.run(_similarity_search_with_relevance_scores())
async def asimilarity_search_with_score(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""Run similarity search asynchronously and return distance scores
Args:
query (str): Query
k (int): Number of results to return. Defaults to 4.
Returns:
List of Documents most similar along with relevance distance scores
"""
query_embedding = self.embedding_function.embed_query(query)
return [
(document, similarity)
for document, similarity in (
await self._asimilarity_search_by_vector_with_score(
query_embedding, k, **kwargs
)
)
]
def similarity_search_with_score(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""Run similarity search synchronously and return distance scores
Args:
query (str): Query
k (int): Number of results to return. Defaults to 4.
Returns:
List of Documents most similar along with relevance distance scores
"""
async def _similarity_search_with_score() -> List[Tuple[Document, float]]:
await self.initialize()
return await self.asimilarity_search_with_score(query, k, **kwargs)
return asyncio.run(_similarity_search_with_score())
async def asimilarity_search_by_vector(
self, embedding: List[float], k: int = 4, **kwargs: Any
) -> List[Document]:
"""Run similarity search on query embedding asynchronously
Args:
embedding (List[float]): Query embedding
k (int): Number of results to return. Defaults to 4.
Returns:
List of Documents most similar to the query
"""
return [
document
for document, _ in await self._asimilarity_search_by_vector_with_score(
embedding, k, **kwargs
)
]
def similarity_search_by_vector(
self, embedding: List[float], k: int = 4, **kwargs: Any
) -> List[Document]:
"""Run similarity search on query embedding
Args:
embedding (List[float]): Query embedding
k (int): Number of results to return. Defaults to 4.
Returns:
List of Documents most similar to the query
"""
async def _similarity_search_by_vector() -> List[Document]:
await self.initialize()
return await self.asimilarity_search_by_vector(embedding, k, **kwargs)
return asyncio.run(_similarity_search_by_vector())
async def asimilarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Run similarity search on query asynchronously
Args:
query (str): Query
k (int): Number of results to return. Defaults to 4.
Returns:
List of Documents most similar to the query
"""
query_embedding = self.embedding_function.embed_query(query)
return await self.asimilarity_search_by_vector(query_embedding, k, **kwargs)
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Run similarity search on query
Args:
query (str): Query
k (int): Number of results to return. Defaults to 4.
Returns:
List of Documents most similar to the query
"""
async def _similarity_search() -> List[Document]:
await self.initialize()
return await self.asimilarity_search(query, k, **kwargs)
return asyncio.run(_similarity_search())
@classmethod
async def afrom_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> "SurrealDBStore":
"""Create SurrealDBStore from list of text asynchronously
Args:
texts (List[str]): list of text to vectorize and store
embedding (Optional[Embeddings]): Embedding function.
dburl (str): SurrealDB connection url
(default: "ws://localhost:8000/rpc")
ns (str): surrealdb namespace for the vector store.
(default: "langchain")
db (str): surrealdb database for the vector store.
(default: "database")
collection (str): surrealdb collection for the vector store.
(default: "documents")
(optional) db_user and db_pass: surrealdb credentials
Returns:
SurrealDBStore object initialized and ready for use."""
sdb = cls(embedding, **kwargs)
await sdb.initialize()
await sdb.aadd_texts(texts, metadatas, **kwargs)
return sdb
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> "SurrealDBStore":
"""Create SurrealDBStore from list of text
Args:
texts (List[str]): list of text to vectorize and store
embedding (Optional[Embeddings]): Embedding function.
dburl (str): SurrealDB connection url
ns (str): surrealdb namespace for the vector store.
(default: "langchain")
db (str): surrealdb database for the vector store.
(default: "database")
collection (str): surrealdb collection for the vector store.
(default: "documents")
(optional) db_user and db_pass: surrealdb credentials
Returns:
SurrealDBStore object initialized and ready for use."""
sdb = asyncio.run(cls.afrom_texts(texts, embedding, metadatas, **kwargs))
return sdb
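# Example query usage (a hedged sketch; assumes a SurrealDB server is reachable at
# the default ws://localhost:8000/rpc and `sdb` was built as in the class docstring):
#
#     results = sdb.similarity_search_with_relevance_scores("some query", k=2)
#     for doc, score in results:
#         print(score, doc.page_content)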
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~chat_models~mlflow.py | import logging
from typing import Any, Dict, List, Mapping, Optional
from urllib.parse import urlparse
from libs.core.langchain_core.callbacks import (
CallbackManagerForLLMRun,
)
from libs.core.langchain_core.language_models import BaseChatModel
from libs.core.langchain_core.messages import (
AIMessage,
BaseMessage,
ChatMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
from libs.core.langchain_core.outputs import ChatGeneration, ChatResult
from libs.core.langchain_core.pydantic_v1 import (
Field,
PrivateAttr,
)
logger = logging.getLogger(__name__)
class ChatMlflow(BaseChatModel):
"""`MLflow` chat models API.
To use, you should have the `mlflow[genai]` python package installed.
For more information, see https://mlflow.org/docs/latest/llms/deployments/server.html.
Example:
.. code-block:: python
from langchain_community.chat_models import ChatMlflow
chat = ChatMlflow(
target_uri="http://localhost:5000",
endpoint="chat",
                temperature=0.1,
)
"""
endpoint: str
"""The endpoint to use."""
target_uri: str
"""The target URI to use."""
temperature: float = 0.0
"""The sampling temperature."""
n: int = 1
"""The number of completion choices to generate."""
stop: Optional[List[str]] = None
"""The stop sequence."""
max_tokens: Optional[int] = None
"""The maximum number of tokens to generate."""
extra_params: dict = Field(default_factory=dict)
"""Any extra parameters to pass to the endpoint."""
_client: Any = PrivateAttr()
def __init__(self, **kwargs: Any):
super().__init__(**kwargs)
self._validate_uri()
try:
from mlflow.deployments import get_deploy_client
self._client = get_deploy_client(self.target_uri)
except ImportError as e:
raise ImportError(
"Failed to create the client. "
f"Please run `pip install mlflow{self._mlflow_extras}` to install "
"required dependencies."
) from e
@property
def _mlflow_extras(self) -> str:
return "[genai]"
def _validate_uri(self) -> None:
if self.target_uri == "databricks":
return
allowed = ["http", "https", "databricks"]
if urlparse(self.target_uri).scheme not in allowed:
raise ValueError(
f"Invalid target URI: {self.target_uri}. "
f"The scheme must be one of {allowed}."
)
@property
def _default_params(self) -> Dict[str, Any]:
params: Dict[str, Any] = {
"target_uri": self.target_uri,
"endpoint": self.endpoint,
"temperature": self.temperature,
"n": self.n,
"stop": self.stop,
"max_tokens": self.max_tokens,
"extra_params": self.extra_params,
}
return params
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
message_dicts = [
ChatMlflow._convert_message_to_dict(message) for message in messages
]
data: Dict[str, Any] = {
"messages": message_dicts,
"temperature": self.temperature,
"n": self.n,
**self.extra_params,
**kwargs,
}
if stop := self.stop or stop:
data["stop"] = stop
if self.max_tokens is not None:
data["max_tokens"] = self.max_tokens
resp = self._client.predict(endpoint=self.endpoint, inputs=data)
return ChatMlflow._create_chat_result(resp)
@property
def _identifying_params(self) -> Dict[str, Any]:
return self._default_params
def _get_invocation_params(
self, stop: Optional[List[str]] = None, **kwargs: Any
) -> Dict[str, Any]:
"""Get the parameters used to invoke the model FOR THE CALLBACKS."""
return {
**self._default_params,
**super()._get_invocation_params(stop=stop, **kwargs),
}
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "mlflow-chat"
@staticmethod
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
role = _dict["role"]
content = _dict["content"]
if role == "user":
return HumanMessage(content=content)
elif role == "assistant":
return AIMessage(content=content)
elif role == "system":
return SystemMessage(content=content)
else:
return ChatMessage(content=content, role=role)
@staticmethod
def _raise_functions_not_supported() -> None:
raise ValueError(
"Function messages are not supported by Databricks. Please"
" create a feature request at https://github.com/mlflow/mlflow/issues."
)
@staticmethod
def _convert_message_to_dict(message: BaseMessage) -> dict:
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, FunctionMessage):
raise ValueError(
"Function messages are not supported by Databricks. Please"
" create a feature request at https://github.com/mlflow/mlflow/issues."
)
else:
raise ValueError(f"Got unknown message type: {message}")
if "function_call" in message.additional_kwargs:
ChatMlflow._raise_functions_not_supported()
if message.additional_kwargs:
logger.warning(
"Additional message arguments are unsupported by Databricks"
" and will be ignored: %s",
message.additional_kwargs,
)
return message_dict
@staticmethod
def _create_chat_result(response: Mapping[str, Any]) -> ChatResult:
generations = []
for choice in response["choices"]:
message = ChatMlflow._convert_dict_to_message(choice["message"])
usage = choice.get("usage", {})
gen = ChatGeneration(
message=message,
generation_info=usage,
)
generations.append(gen)
usage = response.get("usage", {})
return ChatResult(generations=generations, llm_output=usage)
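# Example call (a hedged sketch; assumes an MLflow Deployments Server with a chat
# endpoint is running locally, as in the class docstring example):
#
#     from libs.core.langchain_core.messages import HumanMessage
#
#     chat = ChatMlflow(target_uri="http://localhost:5000", endpoint="chat")
#     chat.invoke([HumanMessage(content="Hello!")])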
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~document_loaders~test_polars_dataframe.py | from __future__ import annotations
from typing import TYPE_CHECKING
import pytest
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders import PolarsDataFrameLoader
if TYPE_CHECKING:
import polars as pl
@pytest.fixture
def sample_data_frame() -> pl.DataFrame:
import polars as pl
data = {
"text": ["Hello", "World"],
"author": ["Alice", "Bob"],
"date": ["2022-01-01", "2022-01-02"],
}
return pl.DataFrame(data)
def test_load_returns_list_of_documents(sample_data_frame: pl.DataFrame) -> None:
loader = PolarsDataFrameLoader(sample_data_frame)
docs = loader.load()
assert isinstance(docs, list)
assert all(isinstance(doc, Document) for doc in docs)
assert len(docs) == 2
def test_load_converts_dataframe_columns_to_document_metadata(
sample_data_frame: pl.DataFrame,
) -> None:
loader = PolarsDataFrameLoader(sample_data_frame)
docs = loader.load()
for i, doc in enumerate(docs):
df: pl.DataFrame = sample_data_frame[i]
assert df is not None
assert doc.metadata["author"] == df.select("author").item()
assert doc.metadata["date"] == df.select("date").item()
def test_load_uses_page_content_column_to_create_document_text(
sample_data_frame: pl.DataFrame,
) -> None:
sample_data_frame = sample_data_frame.rename(mapping={"text": "dummy_test_column"})
loader = PolarsDataFrameLoader(
sample_data_frame, page_content_column="dummy_test_column"
)
docs = loader.load()
assert docs[0].page_content == "Hello"
assert docs[1].page_content == "World"
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~gitbook.py | from typing import Any, List, Optional
from urllib.parse import urljoin, urlparse
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.web_base import WebBaseLoader
class GitbookLoader(WebBaseLoader):
"""Load `GitBook` data.
1. load from either a single page, or
2. load all (relative) paths in the navbar.
"""
def __init__(
self,
web_page: str,
load_all_paths: bool = False,
base_url: Optional[str] = None,
content_selector: str = "main",
continue_on_failure: bool = False,
):
"""Initialize with web page and whether to load all paths.
Args:
web_page: The web page to load or the starting point from where
relative paths are discovered.
load_all_paths: If set to True, all relative paths in the navbar
are loaded instead of only `web_page`.
base_url: If `load_all_paths` is True, the relative paths are
appended to this base url. Defaults to `web_page`.
content_selector: The CSS selector for the content to load.
Defaults to "main".
continue_on_failure: whether to continue loading the sitemap if an error
occurs loading a url, emitting a warning instead of raising an
exception. Setting this to True makes the loader more robust, but also
may result in missing data. Default: False
"""
self.base_url = base_url or web_page
if self.base_url.endswith("/"):
self.base_url = self.base_url[:-1]
if load_all_paths:
# set web_path to the sitemap if we want to crawl all paths
web_page = f"{self.base_url}/sitemap.xml"
super().__init__(web_paths=(web_page,), continue_on_failure=continue_on_failure)
self.load_all_paths = load_all_paths
self.content_selector = content_selector
def load(self) -> List[Document]:
"""Fetch text from one single GitBook page."""
if self.load_all_paths:
soup_info = self.scrape()
relative_paths = self._get_paths(soup_info)
urls = [urljoin(self.base_url, path) for path in relative_paths]
soup_infos = self.scrape_all(urls)
_documents = [
self._get_document(soup_info, url)
for soup_info, url in zip(soup_infos, urls)
]
else:
soup_info = self.scrape()
_documents = [self._get_document(soup_info, self.web_path)]
documents = [d for d in _documents if d]
return documents
def _get_document(
self, soup: Any, custom_url: Optional[str] = None
) -> Optional[Document]:
"""Fetch content from page and return Document."""
page_content_raw = soup.find(self.content_selector)
if not page_content_raw:
return None
content = page_content_raw.get_text(separator="\n").strip()
title_if_exists = page_content_raw.find("h1")
title = title_if_exists.text if title_if_exists else ""
metadata = {"source": custom_url or self.web_path, "title": title}
return Document(page_content=content, metadata=metadata)
def _get_paths(self, soup: Any) -> List[str]:
"""Fetch all relative paths in the navbar."""
return [urlparse(loc.text).path for loc in soup.find_all("loc")]
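# Example usage (a hedged sketch, not part of the original module): crawl every page
# listed in a GitBook sitemap. The URL below is a placeholder docs site.
if __name__ == "__main__":
    example_loader = GitbookLoader(
        "https://docs.gitbook.com", load_all_paths=True, continue_on_failure=True
    )
    for doc in example_loader.load():
        print(doc.metadata["source"], doc.metadata["title"])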
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~vectorstores~test_hippo.py | """Test Hippo functionality."""
from typing import List, Optional
from libs.core.langchain_core.documents import Document
from langchain_community.vectorstores.hippo import Hippo
from tests.integration_tests.vectorstores.fake_embeddings import (
FakeEmbeddings,
fake_texts,
)
def _hippo_from_texts(
metadatas: Optional[List[dict]] = None, drop: bool = True
) -> Hippo:
return Hippo.from_texts(
fake_texts,
FakeEmbeddings(),
metadatas=metadatas,
table_name="langchain_test",
connection_args={"host": "127.0.0.1", "port": 7788},
drop_old=drop,
)
def test_hippo_add_extra() -> None:
"""Test end to end construction and MRR search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = _hippo_from_texts(metadatas=metadatas)
docsearch.add_texts(texts, metadatas)
output = docsearch.similarity_search("foo", k=1)
print(output)
assert len(output) == 1
def test_hippo() -> None:
docsearch = _hippo_from_texts()
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_hippo_with_score() -> None:
"""Test end to end construction and search with scores and IDs."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = _hippo_from_texts(metadatas=metadatas)
output = docsearch.similarity_search_with_score("foo", k=3)
docs = [o[0] for o in output]
scores = [o[1] for o in output]
assert docs == [
Document(page_content="foo", metadata={"page": "0"}),
Document(page_content="bar", metadata={"page": "1"}),
Document(page_content="baz", metadata={"page": "2"}),
]
assert scores[0] < scores[1] < scores[2]
# if __name__ == "__main__":
# test_hippo()
# test_hippo_with_score()
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~facebook_chat.py | import datetime
import json
from pathlib import Path
from typing import List
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
def concatenate_rows(row: dict) -> str:
"""Combine message information in a readable format ready to be used.
Args:
row: dictionary containing message information.
"""
sender = row["sender_name"]
text = row["content"]
date = datetime.datetime.fromtimestamp(row["timestamp_ms"] / 1000).strftime(
"%Y-%m-%d %H:%M:%S"
)
return f"{sender} on {date}: {text}\n\n"
class FacebookChatLoader(BaseLoader):
"""Load `Facebook Chat` messages directory dump."""
def __init__(self, path: str):
"""Initialize with a path."""
self.file_path = path
def load(self) -> List[Document]:
"""Load documents."""
p = Path(self.file_path)
with open(p, encoding="utf8") as f:
d = json.load(f)
text = "".join(
concatenate_rows(message)
for message in d["messages"]
if message.get("content") and isinstance(message["content"], str)
)
metadata = {"source": str(p)}
return [Document(page_content=text, metadata=metadata)]
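# Usage sketch: loading a Facebook Messenger export. The path is hypothetical;
# point it at one of the message_*.json files produced by Facebook's
# "Download Your Information" export.
if __name__ == "__main__":
    loader = FacebookChatLoader(path="./messages/inbox/friend_chat/message_1.json")
    docs = loader.load()
    # Each export file becomes one Document whose text is the chronological
    # concatenation produced by concatenate_rows().
    print(docs[0].metadata["source"], len(docs[0].page_content))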
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~callbacks~llmonitor_callback.py | import importlib.metadata
import logging
import os
import traceback
import warnings
from contextvars import ContextVar
from typing import Any, Dict, List, Union, cast
from uuid import UUID
import requests
from libs.core.langchain_core.agents import AgentAction, AgentFinish
from libs.core.langchain_core.callbacks import BaseCallbackHandler
from libs.core.langchain_core.messages import BaseMessage
from libs.core.langchain_core.outputs import LLMResult
from packaging.version import parse
logger = logging.getLogger(__name__)
DEFAULT_API_URL = "https://app.llmonitor.com"
user_ctx = ContextVar[Union[str, None]]("user_ctx", default=None)
user_props_ctx = ContextVar[Union[str, None]]("user_props_ctx", default=None)
PARAMS_TO_CAPTURE = [
"temperature",
"top_p",
"top_k",
"stop",
"presence_penalty",
"frequence_penalty",
"seed",
"function_call",
"functions",
"tools",
"tool_choice",
"response_format",
"max_tokens",
"logit_bias",
]
class UserContextManager:
"""Context manager for LLMonitor user context."""
def __init__(self, user_id: str, user_props: Any = None) -> None:
user_ctx.set(user_id)
user_props_ctx.set(user_props)
def __enter__(self) -> Any:
pass
def __exit__(self, exc_type: Any, exc_value: Any, exc_tb: Any) -> Any:
user_ctx.set(None)
user_props_ctx.set(None)
def identify(user_id: str, user_props: Any = None) -> UserContextManager:
"""Builds an LLMonitor UserContextManager
Parameters:
- `user_id`: The user id.
- `user_props`: The user properties.
Returns:
A context manager that sets the user context.
"""
return UserContextManager(user_id, user_props)
def _serialize(obj: Any) -> Union[Dict[str, Any], List[Any], Any]:
if hasattr(obj, "to_json"):
return obj.to_json()
if isinstance(obj, dict):
return {key: _serialize(value) for key, value in obj.items()}
if isinstance(obj, list):
return [_serialize(element) for element in obj]
return obj
def _parse_input(raw_input: Any) -> Any:
if not raw_input:
return None
# if it's an array of 1, just parse the first element
if isinstance(raw_input, list) and len(raw_input) == 1:
return _parse_input(raw_input[0])
if not isinstance(raw_input, dict):
return _serialize(raw_input)
input_value = raw_input.get("input")
inputs_value = raw_input.get("inputs")
question_value = raw_input.get("question")
query_value = raw_input.get("query")
if input_value:
return input_value
if inputs_value:
return inputs_value
if question_value:
return question_value
if query_value:
return query_value
return _serialize(raw_input)
def _parse_output(raw_output: dict) -> Any:
if not raw_output:
return None
if not isinstance(raw_output, dict):
return _serialize(raw_output)
text_value = raw_output.get("text")
output_value = raw_output.get("output")
output_text_value = raw_output.get("output_text")
answer_value = raw_output.get("answer")
result_value = raw_output.get("result")
if text_value:
return text_value
if answer_value:
return answer_value
if output_value:
return output_value
if output_text_value:
return output_text_value
if result_value:
return result_value
return _serialize(raw_output)
def _parse_lc_role(
role: str,
) -> str:
if role == "human":
return "user"
else:
return role
def _get_user_id(metadata: Any) -> Any:
if user_ctx.get() is not None:
return user_ctx.get()
metadata = metadata or {}
user_id = metadata.get("user_id")
if user_id is None:
user_id = metadata.get("userId") # legacy, to delete in the future
return user_id
def _get_user_props(metadata: Any) -> Any:
if user_props_ctx.get() is not None:
return user_props_ctx.get()
metadata = metadata or {}
return metadata.get("user_props", None)
def _parse_lc_message(message: BaseMessage) -> Dict[str, Any]:
keys = ["function_call", "tool_calls", "tool_call_id", "name"]
parsed = {"text": message.content, "role": _parse_lc_role(message.type)}
parsed.update(
{
key: cast(Any, message.additional_kwargs.get(key))
for key in keys
if message.additional_kwargs.get(key) is not None
}
)
return parsed
def _parse_lc_messages(messages: Union[List[BaseMessage], Any]) -> List[Dict[str, Any]]:
return [_parse_lc_message(message) for message in messages]
class LLMonitorCallbackHandler(BaseCallbackHandler):
"""Callback Handler for LLMonitor`.
#### Parameters:
- `app_id`: The app id of the app you want to report to. Defaults to
`None`, which means that `LLMONITOR_APP_ID` will be used.
- `api_url`: The url of the LLMonitor API. Defaults to `None`,
which means that either `LLMONITOR_API_URL` environment variable
or `https://app.llmonitor.com` will be used.
#### Raises:
- `ValueError`: if `app_id` is not provided either as an
argument or as an environment variable.
- `ConnectionError`: if the connection to the API fails.
#### Example:
```python
from langchain_community.llms import OpenAI
from langchain_community.callbacks import LLMonitorCallbackHandler
llmonitor_callback = LLMonitorCallbackHandler()
llm = OpenAI(callbacks=[llmonitor_callback],
metadata={"userId": "user-123"})
llm.predict("Hello, how are you?")
```
"""
__api_url: str
__app_id: str
__verbose: bool
__llmonitor_version: str
__has_valid_config: bool
def __init__(
self,
app_id: Union[str, None] = None,
api_url: Union[str, None] = None,
verbose: bool = False,
) -> None:
super().__init__()
self.__has_valid_config = True
try:
import llmonitor
self.__llmonitor_version = importlib.metadata.version("llmonitor")
self.__track_event = llmonitor.track_event
except ImportError:
logger.warning(
"""[LLMonitor] To use the LLMonitor callback handler you need to
have the `llmonitor` Python package installed. Please install it
with `pip install llmonitor`"""
)
self.__has_valid_config = False
return
if parse(self.__llmonitor_version) < parse("0.0.32"):
logger.warning(
f"""[LLMonitor] The installed `llmonitor` version is
{self.__llmonitor_version}
but `LLMonitorCallbackHandler` requires at least version 0.0.32
upgrade `llmonitor` with `pip install --upgrade llmonitor`"""
)
            self.__has_valid_config = False
            return
        self.__has_valid_config = True
self.__api_url = api_url or os.getenv("LLMONITOR_API_URL") or DEFAULT_API_URL
self.__verbose = verbose or bool(os.getenv("LLMONITOR_VERBOSE"))
_app_id = app_id or os.getenv("LLMONITOR_APP_ID")
if _app_id is None:
logger.warning(
"""[LLMonitor] app_id must be provided either as an argument or
as an environment variable"""
)
self.__has_valid_config = False
else:
self.__app_id = _app_id
if self.__has_valid_config is False:
return None
try:
res = requests.get(f"{self.__api_url}/api/app/{self.__app_id}")
if not res.ok:
raise ConnectionError()
except Exception:
logger.warning(
f"""[LLMonitor] Could not connect to the LLMonitor API at
{self.__api_url}"""
)
def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
tags: Union[List[str], None] = None,
metadata: Union[Dict[str, Any], None] = None,
**kwargs: Any,
) -> None:
if self.__has_valid_config is False:
return
try:
user_id = _get_user_id(metadata)
user_props = _get_user_props(metadata)
params = kwargs.get("invocation_params", {})
params.update(
serialized.get("kwargs", {})
) # Sometimes, for example with ChatAnthropic, `invocation_params` is empty
name = (
params.get("model")
or params.get("model_name")
or params.get("model_id")
)
if not name and "anthropic" in params.get("_type"):
name = "claude-2"
extra = {
param: params.get(param)
for param in PARAMS_TO_CAPTURE
if params.get(param) is not None
}
input = _parse_input(prompts)
self.__track_event(
"llm",
"start",
user_id=user_id,
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
name=name,
input=input,
tags=tags,
extra=extra,
metadata=metadata,
user_props=user_props,
app_id=self.__app_id,
)
except Exception as e:
warnings.warn(f"[LLMonitor] An error occurred in on_llm_start: {e}")
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
tags: Union[List[str], None] = None,
metadata: Union[Dict[str, Any], None] = None,
**kwargs: Any,
) -> Any:
if self.__has_valid_config is False:
return
try:
user_id = _get_user_id(metadata)
user_props = _get_user_props(metadata)
params = kwargs.get("invocation_params", {})
params.update(
serialized.get("kwargs", {})
) # Sometimes, for example with ChatAnthropic, `invocation_params` is empty
name = (
params.get("model")
or params.get("model_name")
or params.get("model_id")
)
if not name and "anthropic" in params.get("_type"):
name = "claude-2"
extra = {
param: params.get(param)
for param in PARAMS_TO_CAPTURE
if params.get(param) is not None
}
input = _parse_lc_messages(messages[0])
self.__track_event(
"llm",
"start",
user_id=user_id,
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
name=name,
input=input,
tags=tags,
extra=extra,
metadata=metadata,
user_props=user_props,
app_id=self.__app_id,
)
except Exception as e:
logger.error(f"[LLMonitor] An error occurred in on_chat_model_start: {e}")
def on_llm_end(
self,
response: LLMResult,
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
**kwargs: Any,
) -> None:
if self.__has_valid_config is False:
return
try:
token_usage = (response.llm_output or {}).get("token_usage", {})
parsed_output: Any = [
_parse_lc_message(generation.message)
if hasattr(generation, "message")
else generation.text
for generation in response.generations[0]
]
# if it's an array of 1, just parse the first element
if len(parsed_output) == 1:
parsed_output = parsed_output[0]
self.__track_event(
"llm",
"end",
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
output=parsed_output,
token_usage={
"prompt": token_usage.get("prompt_tokens"),
"completion": token_usage.get("completion_tokens"),
},
app_id=self.__app_id,
)
except Exception as e:
logger.error(f"[LLMonitor] An error occurred in on_llm_end: {e}")
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
tags: Union[List[str], None] = None,
metadata: Union[Dict[str, Any], None] = None,
**kwargs: Any,
) -> None:
if self.__has_valid_config is False:
return
try:
user_id = _get_user_id(metadata)
user_props = _get_user_props(metadata)
name = serialized.get("name")
self.__track_event(
"tool",
"start",
user_id=user_id,
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
name=name,
input=input_str,
tags=tags,
metadata=metadata,
user_props=user_props,
app_id=self.__app_id,
)
except Exception as e:
logger.error(f"[LLMonitor] An error occurred in on_tool_start: {e}")
def on_tool_end(
self,
output: str,
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
tags: Union[List[str], None] = None,
**kwargs: Any,
) -> None:
if self.__has_valid_config is False:
return
try:
self.__track_event(
"tool",
"end",
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
output=output,
app_id=self.__app_id,
)
except Exception as e:
logger.error(f"[LLMonitor] An error occurred in on_tool_end: {e}")
def on_chain_start(
self,
serialized: Dict[str, Any],
inputs: Dict[str, Any],
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
tags: Union[List[str], None] = None,
metadata: Union[Dict[str, Any], None] = None,
**kwargs: Any,
) -> Any:
if self.__has_valid_config is False:
return
try:
name = serialized.get("id", [None, None, None, None])[3]
type = "chain"
metadata = metadata or {}
agentName = metadata.get("agent_name")
if agentName is None:
agentName = metadata.get("agentName")
if name == "AgentExecutor" or name == "PlanAndExecute":
type = "agent"
if agentName is not None:
type = "agent"
name = agentName
if parent_run_id is not None:
type = "chain"
user_id = _get_user_id(metadata)
user_props = _get_user_props(metadata)
input = _parse_input(inputs)
self.__track_event(
type,
"start",
user_id=user_id,
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
name=name,
input=input,
tags=tags,
metadata=metadata,
user_props=user_props,
app_id=self.__app_id,
)
except Exception as e:
logger.error(f"[LLMonitor] An error occurred in on_chain_start: {e}")
def on_chain_end(
self,
outputs: Dict[str, Any],
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
**kwargs: Any,
) -> Any:
if self.__has_valid_config is False:
return
try:
output = _parse_output(outputs)
self.__track_event(
"chain",
"end",
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
output=output,
app_id=self.__app_id,
)
except Exception as e:
logger.error(f"[LLMonitor] An error occurred in on_chain_end: {e}")
def on_agent_action(
self,
action: AgentAction,
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
**kwargs: Any,
) -> Any:
if self.__has_valid_config is False:
return
try:
name = action.tool
input = _parse_input(action.tool_input)
self.__track_event(
"tool",
"start",
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
name=name,
input=input,
app_id=self.__app_id,
)
except Exception as e:
logger.error(f"[LLMonitor] An error occurred in on_agent_action: {e}")
def on_agent_finish(
self,
finish: AgentFinish,
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
**kwargs: Any,
) -> Any:
if self.__has_valid_config is False:
return
try:
output = _parse_output(finish.return_values)
self.__track_event(
"agent",
"end",
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
output=output,
app_id=self.__app_id,
)
except Exception as e:
logger.error(f"[LLMonitor] An error occurred in on_agent_finish: {e}")
def on_chain_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
**kwargs: Any,
) -> Any:
if self.__has_valid_config is False:
return
try:
self.__track_event(
"chain",
"error",
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
error={"message": str(error), "stack": traceback.format_exc()},
app_id=self.__app_id,
)
except Exception as e:
logger.error(f"[LLMonitor] An error occurred in on_chain_error: {e}")
def on_tool_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
**kwargs: Any,
) -> Any:
if self.__has_valid_config is False:
return
try:
self.__track_event(
"tool",
"error",
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
error={"message": str(error), "stack": traceback.format_exc()},
app_id=self.__app_id,
)
except Exception as e:
logger.error(f"[LLMonitor] An error occurred in on_tool_error: {e}")
def on_llm_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
**kwargs: Any,
) -> Any:
if self.__has_valid_config is False:
return
try:
self.__track_event(
"llm",
"error",
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
error={"message": str(error), "stack": traceback.format_exc()},
app_id=self.__app_id,
)
except Exception as e:
logger.error(f"[LLMonitor] An error occurred in on_llm_error: {e}")
__all__ = ["LLMonitorCallbackHandler", "identify"]
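# Usage sketch: wiring the handler into an LLM call and attributing the run to a
# user with `identify`. This assumes LLMONITOR_APP_ID (and the model provider's
# credentials) are set in the environment; the model choice and user id are
# illustrative only.
if __name__ == "__main__":
    from langchain_community.llms import OpenAI

    handler = LLMonitorCallbackHandler()
    llm = OpenAI(callbacks=[handler])
    with identify("user-123", user_props={"plan": "free"}):
        llm.invoke("Say hello in one short sentence.")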
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~chat_models~test_volcengine_maas.py | """Test volc engine maas chat model."""
from libs.core.langchain_core.callbacks import CallbackManager
from libs.core.langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from libs.core.langchain_core.outputs import ChatGeneration, LLMResult
from langchain_community.chat_models.volcengine_maas import VolcEngineMaasChat
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
def test_default_call() -> None:
"""Test valid chat call to volc engine."""
chat = VolcEngineMaasChat()
response = chat(messages=[HumanMessage(content="Hello")])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_multiple_history() -> None:
"""Tests multiple history works."""
chat = VolcEngineMaasChat()
response = chat(
messages=[
HumanMessage(content="Hello"),
AIMessage(content="Hello!"),
HumanMessage(content="How are you?"),
]
)
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_stream() -> None:
"""Test that stream works."""
chat = VolcEngineMaasChat(streaming=True)
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
response = chat(
messages=[
HumanMessage(content="Hello"),
AIMessage(content="Hello!"),
HumanMessage(content="How are you?"),
],
stream=True,
callbacks=callback_manager,
)
assert callback_handler.llm_streams > 0
assert isinstance(response.content, str)
def test_multiple_messages() -> None:
"""Tests multiple messages works."""
chat = VolcEngineMaasChat()
message = HumanMessage(content="Hi, how are you?")
response = chat.generate([[message], [message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 2
for generations in response.generations:
assert len(generations) == 1
for generation in generations:
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.text, str)
assert generation.text == generation.message.content
| [
"Hi, how are you?",
"Hello!",
"Hello",
"How are you?"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~gcs_directory.py | from typing import Callable, List, Optional
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.document_loaders.gcs_file import GCSFileLoader
from langchain_community.utilities.vertexai import get_client_info
class GCSDirectoryLoader(BaseLoader):
"""Load from GCS directory."""
def __init__(
self,
project_name: str,
bucket: str,
prefix: str = "",
loader_func: Optional[Callable[[str], BaseLoader]] = None,
):
"""Initialize with bucket and key name.
Args:
project_name: The ID of the project for the GCS bucket.
bucket: The name of the GCS bucket.
prefix: The prefix of the GCS bucket.
loader_func: A loader function that instantiates a loader based on a
file_path argument. If nothing is provided, the GCSFileLoader
would use its default loader.
"""
self.project_name = project_name
self.bucket = bucket
self.prefix = prefix
self._loader_func = loader_func
def load(self) -> List[Document]:
"""Load documents."""
try:
from google.cloud import storage
except ImportError:
raise ImportError(
"Could not import google-cloud-storage python package. "
"Please install it with `pip install google-cloud-storage`."
)
client = storage.Client(
project=self.project_name,
client_info=get_client_info(module="google-cloud-storage"),
)
docs = []
for blob in client.list_blobs(self.bucket, prefix=self.prefix):
# we shall just skip directories since GCSFileLoader creates
# intermediate directories on the fly
if blob.name.endswith("/"):
continue
loader = GCSFileLoader(
self.project_name, self.bucket, blob.name, loader_func=self._loader_func
)
docs.extend(loader.load())
return docs
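# Usage sketch: loading every object under a bucket prefix, optionally forcing a
# specific per-file loader instead of GCSFileLoader's default. Project, bucket
# and prefix names are placeholders.
if __name__ == "__main__":
    from langchain_community.document_loaders import UnstructuredFileLoader

    loader = GCSDirectoryLoader(
        project_name="my-gcp-project",
        bucket="my-bucket",
        prefix="reports/2023/",
        loader_func=lambda file_path: UnstructuredFileLoader(file_path, mode="single"),
    )
    docs = loader.load()
    print(f"loaded {len(docs)} documents")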
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~chat_message_histories~test_zep.py | from typing import TYPE_CHECKING
import pytest
from libs.core.langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from pytest_mock import MockerFixture
from langchain_community.chat_message_histories import ZepChatMessageHistory
if TYPE_CHECKING:
from zep_python import ZepClient
@pytest.fixture
@pytest.mark.requires("zep_python")
def zep_chat(mocker: MockerFixture) -> ZepChatMessageHistory:
mock_zep_client: ZepClient = mocker.patch("zep_python.ZepClient", autospec=True)
mock_zep_client.memory = mocker.patch(
"zep_python.memory.client.MemoryClient", autospec=True
)
zep_chat: ZepChatMessageHistory = ZepChatMessageHistory(
"test_session", "http://localhost:8000"
)
zep_chat.zep_client = mock_zep_client
return zep_chat
@pytest.mark.requires("zep_python")
def test_messages(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
from zep_python import Memory, Message, Summary
mock_memory: Memory = Memory(
summary=Summary(
content="summary",
),
messages=[
Message(content="message", role="ai", metadata={"key": "value"}),
Message(content="message2", role="human", metadata={"key2": "value2"}),
],
)
zep_chat.zep_client.memory.get_memory.return_value = mock_memory # type: ignore
result = zep_chat.messages
assert len(result) == 3
assert isinstance(result[0], SystemMessage) # summary
assert isinstance(result[1], AIMessage)
assert isinstance(result[2], HumanMessage)
@pytest.mark.requires("zep_python")
def test_add_user_message(
mocker: MockerFixture, zep_chat: ZepChatMessageHistory
) -> None:
zep_chat.add_user_message("test message")
zep_chat.zep_client.memory.add_memory.assert_called_once() # type: ignore
@pytest.mark.requires("zep_python")
def test_add_ai_message(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
zep_chat.add_ai_message("test message")
zep_chat.zep_client.memory.add_memory.assert_called_once() # type: ignore
@pytest.mark.requires("zep_python")
def test_append(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
zep_chat.add_message(AIMessage(content="test message"))
zep_chat.zep_client.memory.add_memory.assert_called_once() # type: ignore
@pytest.mark.requires("zep_python")
def test_search(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
zep_chat.search("test query")
zep_chat.zep_client.memory.search_memory.assert_called_once_with( # type: ignore
"test_session", mocker.ANY, limit=None
)
@pytest.mark.requires("zep_python")
def test_clear(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
zep_chat.clear()
zep_chat.zep_client.memory.delete_memory.assert_called_once_with( # type: ignore
"test_session"
)
| [
"message2",
"message",
"test message"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~vectorstores~rocksetdb.py | from __future__ import annotations
import logging
from enum import Enum
from typing import Any, Iterable, List, Optional, Tuple
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.vectorstores import VectorStore
logger = logging.getLogger(__name__)
class Rockset(VectorStore):
"""`Rockset` vector store.
To use, you should have the `rockset` python package installed. Note that to use
this, the collection being used must already exist in your Rockset instance.
You must also ensure you use a Rockset ingest transformation to apply
`VECTOR_ENFORCE` on the column being used to store `embedding_key` in the
collection.
See: https://rockset.com/blog/introducing-vector-search-on-rockset/ for more details
Everything below assumes `commons` Rockset workspace.
Example:
.. code-block:: python
from langchain_community.vectorstores import Rockset
from langchain_community.embeddings.openai import OpenAIEmbeddings
import rockset
# Make sure you use the right host (region) for your Rockset instance
# and APIKEY has both read-write access to your collection.
rs = rockset.RocksetClient(host=rockset.Regions.use1a1, api_key="***")
collection_name = "langchain_demo"
embeddings = OpenAIEmbeddings()
vectorstore = Rockset(rs, collection_name, embeddings,
"description", "description_embedding")
"""
def __init__(
self,
client: Any,
embeddings: Embeddings,
collection_name: str,
text_key: str,
embedding_key: str,
workspace: str = "commons",
):
"""Initialize with Rockset client.
Args:
client: Rockset client object
            collection_name: Rockset collection to insert docs / query
embeddings: Langchain Embeddings object to use to generate
embedding for given text.
text_key: column in Rockset collection to use to store the text
embedding_key: column in Rockset collection to use to store the embedding.
Note: We must apply `VECTOR_ENFORCE()` on this column via
Rockset ingest transformation.
"""
try:
from rockset import RocksetClient
except ImportError:
raise ImportError(
"Could not import rockset client python package. "
"Please install it with `pip install rockset`."
)
if not isinstance(client, RocksetClient):
raise ValueError(
f"client should be an instance of rockset.RocksetClient, "
f"got {type(client)}"
)
# TODO: check that `collection_name` exists in rockset. Create if not.
self._client = client
self._collection_name = collection_name
self._embeddings = embeddings
self._text_key = text_key
self._embedding_key = embedding_key
self._workspace = workspace
try:
self._client.set_application("langchain")
except AttributeError:
# ignore
pass
@property
def embeddings(self) -> Embeddings:
return self._embeddings
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
batch_size: int = 32,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids to associate with the texts.
batch_size: Send documents in batches to rockset.
Returns:
List of ids from adding the texts into the vectorstore.
"""
batch: list[dict] = []
stored_ids = []
for i, text in enumerate(texts):
if len(batch) == batch_size:
stored_ids += self._write_documents_to_rockset(batch)
batch = []
doc = {}
if metadatas and len(metadatas) > i:
doc = metadatas[i]
if ids and len(ids) > i:
doc["_id"] = ids[i]
doc[self._text_key] = text
doc[self._embedding_key] = self._embeddings.embed_query(text)
batch.append(doc)
if len(batch) > 0:
stored_ids += self._write_documents_to_rockset(batch)
batch = []
return stored_ids
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
client: Any = None,
collection_name: str = "",
text_key: str = "",
embedding_key: str = "",
ids: Optional[List[str]] = None,
batch_size: int = 32,
**kwargs: Any,
) -> Rockset:
"""Create Rockset wrapper with existing texts.
This is intended as a quicker way to get started.
"""
# Sanitize inputs
assert client is not None, "Rockset Client cannot be None"
assert collection_name, "Collection name cannot be empty"
assert text_key, "Text key name cannot be empty"
assert embedding_key, "Embedding key cannot be empty"
rockset = cls(client, embedding, collection_name, text_key, embedding_key)
rockset.add_texts(texts, metadatas, ids, batch_size)
return rockset
# Rockset supports these vector distance functions.
class DistanceFunction(Enum):
COSINE_SIM = "COSINE_SIM"
EUCLIDEAN_DIST = "EUCLIDEAN_DIST"
DOT_PRODUCT = "DOT_PRODUCT"
# how to sort results for "similarity"
def order_by(self) -> str:
if self.value == "EUCLIDEAN_DIST":
return "ASC"
return "DESC"
def similarity_search_with_relevance_scores(
self,
query: str,
k: int = 4,
distance_func: DistanceFunction = DistanceFunction.COSINE_SIM,
where_str: Optional[str] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Perform a similarity search with Rockset
Args:
query (str): Text to look up documents similar to.
distance_func (DistanceFunction): how to compute distance between two
vectors in Rockset.
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): Metadata filters supplied as a
SQL `where` condition string. Defaults to None.
eg. "price<=70.0 AND brand='Nintendo'"
                NOTE: Please do not let the end user fill this and always be aware
of SQL injection.
Returns:
List[Tuple[Document, float]]: List of documents with their relevance score
"""
return self.similarity_search_by_vector_with_relevance_scores(
self._embeddings.embed_query(query),
k,
distance_func,
where_str,
**kwargs,
)
def similarity_search(
self,
query: str,
k: int = 4,
distance_func: DistanceFunction = DistanceFunction.COSINE_SIM,
where_str: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
"""Same as `similarity_search_with_relevance_scores` but
doesn't return the scores.
"""
return self.similarity_search_by_vector(
self._embeddings.embed_query(query),
k,
distance_func,
where_str,
**kwargs,
)
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
distance_func: DistanceFunction = DistanceFunction.COSINE_SIM,
where_str: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
"""Accepts a query_embedding (vector), and returns documents with
similar embeddings."""
docs_and_scores = self.similarity_search_by_vector_with_relevance_scores(
embedding, k, distance_func, where_str, **kwargs
)
return [doc for doc, _ in docs_and_scores]
def similarity_search_by_vector_with_relevance_scores(
self,
embedding: List[float],
k: int = 4,
distance_func: DistanceFunction = DistanceFunction.COSINE_SIM,
where_str: Optional[str] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Accepts a query_embedding (vector), and returns documents with
similar embeddings along with their relevance scores."""
q_str = self._build_query_sql(embedding, distance_func, k, where_str)
try:
query_response = self._client.Queries.query(sql={"query": q_str})
except Exception as e:
logger.error("Exception when querying Rockset: %s\n", e)
return []
finalResult: list[Tuple[Document, float]] = []
for document in query_response.results:
metadata = {}
assert isinstance(
document, dict
), "document should be of type `dict[str,Any]`. But found: `{}`".format(
type(document)
)
for k, v in document.items():
if k == self._text_key:
assert isinstance(v, str), (
"page content stored in column `{}` must be of type `str`. "
"But found: `{}`"
).format(self._text_key, type(v))
page_content = v
elif k == "dist":
assert isinstance(v, float), (
"Computed distance between vectors must of type `float`. "
"But found {}"
).format(type(v))
score = v
elif k not in ["_id", "_event_time", "_meta"]:
# These columns are populated by Rockset when documents are
# inserted. No need to return them in metadata dict.
metadata[k] = v
finalResult.append(
(Document(page_content=page_content, metadata=metadata), score)
)
return finalResult
# Helper functions
def _build_query_sql(
self,
query_embedding: List[float],
distance_func: DistanceFunction,
k: int = 4,
where_str: Optional[str] = None,
) -> str:
"""Builds Rockset SQL query to query similar vectors to query_vector"""
q_embedding_str = ",".join(map(str, query_embedding))
distance_str = f"""{distance_func.value}({self._embedding_key}, \
[{q_embedding_str}]) as dist"""
where_str = f"WHERE {where_str}\n" if where_str else ""
return f"""\
SELECT * EXCEPT({self._embedding_key}), {distance_str}
FROM {self._workspace}.{self._collection_name}
{where_str}\
ORDER BY dist {distance_func.order_by()}
LIMIT {str(k)}
"""
def _write_documents_to_rockset(self, batch: List[dict]) -> List[str]:
add_doc_res = self._client.Documents.add_documents(
collection=self._collection_name, data=batch, workspace=self._workspace
)
return [doc_status._id for doc_status in add_doc_res.data]
def delete_texts(self, ids: List[str]) -> None:
"""Delete a list of docs from the Rockset collection"""
try:
from rockset.models import DeleteDocumentsRequestData
except ImportError:
raise ImportError(
"Could not import rockset client python package. "
"Please install it with `pip install rockset`."
)
self._client.Documents.delete_documents(
collection=self._collection_name,
data=[DeleteDocumentsRequestData(id=i) for i in ids],
workspace=self._workspace,
)
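# Usage sketch: querying an existing Rockset-backed store with a metadata
# filter. The host, API key, collection and column names are placeholders; the
# collection's embedding column must have VECTOR_ENFORCE applied, and
# OPENAI_API_KEY must be set for the embeddings below.
if __name__ == "__main__":
    import rockset

    from langchain_community.embeddings import OpenAIEmbeddings

    client = rockset.RocksetClient(host=rockset.Regions.use1a1, api_key="<API_KEY>")
    store = Rockset(
        client=client,
        embeddings=OpenAIEmbeddings(),
        collection_name="langchain_demo",
        text_key="description",
        embedding_key="description_embedding",
    )
    hits = store.similarity_search_with_relevance_scores(
        "comfortable office chair",
        k=3,
        distance_func=Rockset.DistanceFunction.COSINE_SIM,
        where_str="price <= 100.0",
    )
    for doc, score in hits:
        print(round(score, 3), doc.page_content[:60])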
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~max_compute.py | from __future__ import annotations
from typing import Any, Iterator, List, Optional, Sequence
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.utilities.max_compute import MaxComputeAPIWrapper
class MaxComputeLoader(BaseLoader):
"""Load from `Alibaba Cloud MaxCompute` table."""
def __init__(
self,
query: str,
api_wrapper: MaxComputeAPIWrapper,
*,
page_content_columns: Optional[Sequence[str]] = None,
metadata_columns: Optional[Sequence[str]] = None,
):
"""Initialize Alibaba Cloud MaxCompute document loader.
Args:
query: SQL query to execute.
api_wrapper: MaxCompute API wrapper.
page_content_columns: The columns to write into the `page_content` of the
Document. If unspecified, all columns will be written to `page_content`.
metadata_columns: The columns to write into the `metadata` of the Document.
If unspecified, all columns not added to `page_content` will be written.
"""
self.query = query
self.api_wrapper = api_wrapper
self.page_content_columns = page_content_columns
self.metadata_columns = metadata_columns
@classmethod
def from_params(
cls,
query: str,
endpoint: str,
project: str,
*,
access_id: Optional[str] = None,
secret_access_key: Optional[str] = None,
**kwargs: Any,
) -> MaxComputeLoader:
"""Convenience constructor that builds the MaxCompute API wrapper from
given parameters.
Args:
query: SQL query to execute.
endpoint: MaxCompute endpoint.
project: A project is a basic organizational unit of MaxCompute, which is
similar to a database.
access_id: MaxCompute access ID. Should be passed in directly or set as the
environment variable `MAX_COMPUTE_ACCESS_ID`.
secret_access_key: MaxCompute secret access key. Should be passed in
directly or set as the environment variable
`MAX_COMPUTE_SECRET_ACCESS_KEY`.
"""
api_wrapper = MaxComputeAPIWrapper.from_params(
endpoint, project, access_id=access_id, secret_access_key=secret_access_key
)
return cls(query, api_wrapper, **kwargs)
def lazy_load(self) -> Iterator[Document]:
for row in self.api_wrapper.query(self.query):
if self.page_content_columns:
page_content_data = {
k: v for k, v in row.items() if k in self.page_content_columns
}
else:
page_content_data = row
page_content = "\n".join(f"{k}: {v}" for k, v in page_content_data.items())
if self.metadata_columns:
metadata = {k: v for k, v in row.items() if k in self.metadata_columns}
else:
metadata = {k: v for k, v in row.items() if k not in page_content_data}
yield Document(page_content=page_content, metadata=metadata)
def load(self) -> List[Document]:
return list(self.lazy_load())
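# Usage sketch: the from_params convenience constructor with explicit
# content/metadata column splits. Endpoint, project, table and column names are
# hypothetical; credentials can also be supplied via the MAX_COMPUTE_ACCESS_ID
# and MAX_COMPUTE_SECRET_ACCESS_KEY environment variables.
if __name__ == "__main__":
    loader = MaxComputeLoader.from_params(
        query="SELECT id, title, body FROM docs_table LIMIT 100",
        endpoint="http://service.cn-hangzhou.maxcompute.aliyun.com/api",
        project="my_project",
        page_content_columns=["title", "body"],
        metadata_columns=["id"],
    )
    for doc in loader.lazy_load():
        print(doc.metadata, doc.page_content[:80])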
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~vectorstores~test_redis.py | """Test Redis functionality."""
import os
from typing import Any, Dict, List, Optional
import pytest
from libs.core.langchain_core.documents import Document
from langchain_community.vectorstores.redis import (
Redis,
RedisFilter,
RedisNum,
RedisText,
)
from langchain_community.vectorstores.redis.filters import RedisFilterExpression
from tests.integration_tests.vectorstores.fake_embeddings import (
ConsistentFakeEmbeddings,
FakeEmbeddings,
)
TEST_INDEX_NAME = "test"
TEST_REDIS_URL = "redis://localhost:6379"
TEST_SINGLE_RESULT = [Document(page_content="foo")]
TEST_SINGLE_WITH_METADATA = {"a": "b"}
TEST_RESULT = [Document(page_content="foo"), Document(page_content="foo")]
RANGE_SCORE = pytest.approx(0.0513, abs=0.002)
COSINE_SCORE = pytest.approx(0.05, abs=0.002)
IP_SCORE = -8.0
EUCLIDEAN_SCORE = 1.0
def drop(index_name: str) -> bool:
return Redis.drop_index(
index_name=index_name, delete_documents=True, redis_url=TEST_REDIS_URL
)
def convert_bytes(data: Any) -> Any:
if isinstance(data, bytes):
return data.decode("ascii")
if isinstance(data, dict):
return dict(map(convert_bytes, data.items()))
if isinstance(data, list):
return list(map(convert_bytes, data))
if isinstance(data, tuple):
return map(convert_bytes, data)
return data
def make_dict(values: List[Any]) -> dict:
i = 0
di = {}
while i < len(values) - 1:
di[values[i]] = values[i + 1]
i += 2
return di
@pytest.fixture
def texts() -> List[str]:
return ["foo", "bar", "baz"]
def test_redis(texts: List[str]) -> None:
"""Test end to end construction and search."""
docsearch = Redis.from_texts(texts, FakeEmbeddings(), redis_url=TEST_REDIS_URL)
output = docsearch.similarity_search("foo", k=1, return_metadata=False)
assert output == TEST_SINGLE_RESULT
assert drop(docsearch.index_name)
def test_redis_new_vector(texts: List[str]) -> None:
"""Test adding a new document"""
docsearch = Redis.from_texts(texts, FakeEmbeddings(), redis_url=TEST_REDIS_URL)
docsearch.add_texts(["foo"])
output = docsearch.similarity_search("foo", k=2, return_metadata=False)
assert output == TEST_RESULT
assert drop(docsearch.index_name)
def test_redis_from_existing(texts: List[str]) -> None:
"""Test adding a new document"""
docsearch = Redis.from_texts(
texts, FakeEmbeddings(), index_name=TEST_INDEX_NAME, redis_url=TEST_REDIS_URL
)
schema: Dict = docsearch.schema
# write schema for the next test
docsearch.write_schema("test_schema.yml")
# Test creating from an existing
docsearch2 = Redis.from_existing_index(
FakeEmbeddings(),
index_name=TEST_INDEX_NAME,
redis_url=TEST_REDIS_URL,
schema=schema,
)
output = docsearch2.similarity_search("foo", k=1, return_metadata=False)
assert output == TEST_SINGLE_RESULT
def test_redis_add_texts_to_existing() -> None:
"""Test adding a new document"""
# Test creating from an existing with yaml from file
docsearch = Redis.from_existing_index(
FakeEmbeddings(),
index_name=TEST_INDEX_NAME,
redis_url=TEST_REDIS_URL,
schema="test_schema.yml",
)
docsearch.add_texts(["foo"])
output = docsearch.similarity_search("foo", k=2, return_metadata=False)
assert output == TEST_RESULT
assert drop(TEST_INDEX_NAME)
# remove the test_schema.yml file
os.remove("test_schema.yml")
def test_redis_from_texts_return_keys(texts: List[str]) -> None:
"""Test from_texts_return_keys constructor."""
docsearch, keys = Redis.from_texts_return_keys(
texts, FakeEmbeddings(), redis_url=TEST_REDIS_URL
)
output = docsearch.similarity_search("foo", k=1, return_metadata=False)
assert output == TEST_SINGLE_RESULT
assert len(keys) == len(texts)
assert drop(docsearch.index_name)
def test_redis_from_documents(texts: List[str]) -> None:
"""Test from_documents constructor."""
docs = [Document(page_content=t, metadata={"a": "b"}) for t in texts]
docsearch = Redis.from_documents(docs, FakeEmbeddings(), redis_url=TEST_REDIS_URL)
output = docsearch.similarity_search("foo", k=1, return_metadata=True)
assert "a" in output[0].metadata.keys()
assert "b" in output[0].metadata.values()
assert drop(docsearch.index_name)
def test_custom_keys(texts: List[str]) -> None:
keys_in = ["test_key_1", "test_key_2", "test_key_3"]
docsearch, keys_out = Redis.from_texts_return_keys(
texts, FakeEmbeddings(), redis_url=TEST_REDIS_URL, keys=keys_in
)
# it will append the index key prefix to all keys
assert keys_out == [docsearch.key_prefix + ":" + key for key in keys_in]
assert drop(docsearch.index_name)
def test_custom_keys_from_docs(texts: List[str]) -> None:
keys_in = ["test_key_1", "test_key_2", "test_key_3"]
docs = [Document(page_content=t, metadata={"a": "b"}) for t in texts]
docsearch = Redis.from_documents(
docs, FakeEmbeddings(), redis_url=TEST_REDIS_URL, keys=keys_in
)
client = docsearch.client
# test keys are correct
assert client.hget(docsearch.key_prefix + ":" + "test_key_1", "content")
# test metadata is stored
assert client.hget(docsearch.key_prefix + ":" + "test_key_1", "a") == bytes(
"b", "utf-8"
)
# test all keys are stored
assert client.hget(docsearch.key_prefix + ":" + "test_key_2", "content")
assert drop(docsearch.index_name)
# -- test filters -- #
@pytest.mark.parametrize(
"filter_expr, expected_length, expected_nums",
[
(RedisText("text") == "foo", 1, None),
(RedisFilter.text("text") == "foo", 1, None),
(RedisText("text") % "ba*", 2, ["bar", "baz"]),
(RedisNum("num") > 2, 1, [3]),
(RedisNum("num") < 2, 1, [1]),
(RedisNum("num") >= 2, 2, [2, 3]),
(RedisNum("num") <= 2, 2, [1, 2]),
(RedisNum("num") != 2, 2, [1, 3]),
(RedisFilter.num("num") != 2, 2, [1, 3]),
(RedisFilter.tag("category") == "a", 3, None),
(RedisFilter.tag("category") == "b", 2, None),
(RedisFilter.tag("category") == "c", 2, None),
(RedisFilter.tag("category") == ["b", "c"], 3, None),
],
ids=[
"text-filter-equals-foo",
"alternative-text-equals-foo",
"text-filter-fuzzy-match-ba",
"number-filter-greater-than-2",
"number-filter-less-than-2",
"number-filter-greater-equals-2",
"number-filter-less-equals-2",
"number-filter-not-equals-2",
"alternative-number-not-equals-2",
"tag-filter-equals-a",
"tag-filter-equals-b",
"tag-filter-equals-c",
"tag-filter-equals-b-or-c",
],
)
def test_redis_filters_1(
filter_expr: RedisFilterExpression,
expected_length: int,
expected_nums: Optional[list],
) -> None:
metadata = [
{"name": "joe", "num": 1, "text": "foo", "category": ["a", "b"]},
{"name": "john", "num": 2, "text": "bar", "category": ["a", "c"]},
{"name": "jane", "num": 3, "text": "baz", "category": ["b", "c", "a"]},
]
documents = [Document(page_content="foo", metadata=m) for m in metadata]
docsearch = Redis.from_documents(
documents, FakeEmbeddings(), redis_url=TEST_REDIS_URL
)
sim_output = docsearch.similarity_search("foo", k=3, filter=filter_expr)
mmr_output = docsearch.max_marginal_relevance_search(
"foo", k=3, fetch_k=5, filter=filter_expr
)
assert len(sim_output) == expected_length
assert len(mmr_output) == expected_length
if expected_nums is not None:
for out in sim_output:
assert (
out.metadata["text"] in expected_nums
or int(out.metadata["num"]) in expected_nums
)
for out in mmr_output:
assert (
out.metadata["text"] in expected_nums
or int(out.metadata["num"]) in expected_nums
)
assert drop(docsearch.index_name)
# -- test index specification -- #
def test_index_specification_generation() -> None:
index_schema = {
"text": [{"name": "job"}, {"name": "title"}],
"numeric": [{"name": "salary"}],
}
text = ["foo"]
meta = {"job": "engineer", "title": "principal engineer", "salary": 100000}
docs = [Document(page_content=t, metadata=meta) for t in text]
r = Redis.from_documents(
docs, FakeEmbeddings(), redis_url=TEST_REDIS_URL, index_schema=index_schema
)
output = r.similarity_search("foo", k=1, return_metadata=True)
assert output[0].metadata["job"] == "engineer"
assert output[0].metadata["title"] == "principal engineer"
assert int(output[0].metadata["salary"]) == 100000
info = convert_bytes(r.client.ft(r.index_name).info())
attributes = info["attributes"]
assert len(attributes) == 5
for attr in attributes:
d = make_dict(attr)
if d["identifier"] == "job":
assert d["type"] == "TEXT"
elif d["identifier"] == "title":
assert d["type"] == "TEXT"
elif d["identifier"] == "salary":
assert d["type"] == "NUMERIC"
elif d["identifier"] == "content":
assert d["type"] == "TEXT"
elif d["identifier"] == "content_vector":
assert d["type"] == "VECTOR"
else:
raise ValueError("Unexpected attribute in index schema")
assert drop(r.index_name)
# -- test distance metrics -- #
cosine_schema: Dict = {"distance_metric": "cosine"}
ip_schema: Dict = {"distance_metric": "IP"}
l2_schema: Dict = {"distance_metric": "L2"}
def test_cosine(texts: List[str]) -> None:
"""Test cosine distance."""
docsearch = Redis.from_texts(
texts,
FakeEmbeddings(),
redis_url=TEST_REDIS_URL,
vector_schema=cosine_schema,
)
output = docsearch.similarity_search_with_score("far", k=2)
_, score = output[1]
assert score == COSINE_SCORE
assert drop(docsearch.index_name)
def test_l2(texts: List[str]) -> None:
"""Test Flat L2 distance."""
docsearch = Redis.from_texts(
texts, FakeEmbeddings(), redis_url=TEST_REDIS_URL, vector_schema=l2_schema
)
output = docsearch.similarity_search_with_score("far", k=2)
_, score = output[1]
assert score == EUCLIDEAN_SCORE
assert drop(docsearch.index_name)
def test_ip(texts: List[str]) -> None:
"""Test inner product distance."""
docsearch = Redis.from_texts(
texts, FakeEmbeddings(), redis_url=TEST_REDIS_URL, vector_schema=ip_schema
)
output = docsearch.similarity_search_with_score("far", k=2)
_, score = output[1]
assert score == IP_SCORE
assert drop(docsearch.index_name)
def test_similarity_search_limit_distance(texts: List[str]) -> None:
"""Test similarity search limit score."""
docsearch = Redis.from_texts(
texts,
FakeEmbeddings(),
redis_url=TEST_REDIS_URL,
)
output = docsearch.similarity_search(texts[0], k=3, distance_threshold=0.1)
# can't check score but length of output should be 2
assert len(output) == 2
assert drop(docsearch.index_name)
def test_similarity_search_with_score_with_limit_distance(texts: List[str]) -> None:
"""Test similarity search with score with limit score."""
docsearch = Redis.from_texts(
texts, ConsistentFakeEmbeddings(), redis_url=TEST_REDIS_URL
)
output = docsearch.similarity_search_with_score(
texts[0], k=3, distance_threshold=0.1, return_metadata=True
)
assert len(output) == 2
for out, score in output:
if out.page_content == texts[1]:
            assert score == COSINE_SCORE
assert drop(docsearch.index_name)
def test_max_marginal_relevance_search(texts: List[str]) -> None:
"""Test max marginal relevance search."""
docsearch = Redis.from_texts(texts, FakeEmbeddings(), redis_url=TEST_REDIS_URL)
mmr_output = docsearch.max_marginal_relevance_search(texts[0], k=3, fetch_k=3)
sim_output = docsearch.similarity_search(texts[0], k=3)
assert mmr_output == sim_output
mmr_output = docsearch.max_marginal_relevance_search(texts[0], k=2, fetch_k=3)
assert len(mmr_output) == 2
assert mmr_output[0].page_content == texts[0]
assert mmr_output[1].page_content == texts[1]
mmr_output = docsearch.max_marginal_relevance_search(
texts[0],
k=2,
fetch_k=3,
lambda_mult=0.1, # more diversity
)
assert len(mmr_output) == 2
assert mmr_output[0].page_content == texts[0]
assert mmr_output[1].page_content == texts[2]
# if fetch_k < k, then the output will be less than k
mmr_output = docsearch.max_marginal_relevance_search(texts[0], k=3, fetch_k=2)
assert len(mmr_output) == 2
assert drop(docsearch.index_name)
def test_delete(texts: List[str]) -> None:
"""Test deleting a new document"""
docsearch = Redis.from_texts(texts, FakeEmbeddings(), redis_url=TEST_REDIS_URL)
ids = docsearch.add_texts(["foo"])
got = docsearch.delete(ids=ids, redis_url=TEST_REDIS_URL)
assert got
assert drop(docsearch.index_name)
def test_redis_as_retriever() -> None:
texts = ["foo", "foo", "foo", "foo", "bar"]
docsearch = Redis.from_texts(
texts, ConsistentFakeEmbeddings(), redis_url=TEST_REDIS_URL
)
retriever = docsearch.as_retriever(search_type="similarity", search_kwargs={"k": 3})
results = retriever.get_relevant_documents("foo")
assert len(results) == 3
assert all([d.page_content == "foo" for d in results])
assert drop(docsearch.index_name)
def test_redis_retriever_distance_threshold() -> None:
texts = ["foo", "bar", "baz"]
docsearch = Redis.from_texts(texts, FakeEmbeddings(), redis_url=TEST_REDIS_URL)
retriever = docsearch.as_retriever(
search_type="similarity_distance_threshold",
search_kwargs={"k": 3, "distance_threshold": 0.1},
)
results = retriever.get_relevant_documents("foo")
assert len(results) == 2
assert drop(docsearch.index_name)
def test_redis_retriever_score_threshold() -> None:
texts = ["foo", "bar", "baz"]
docsearch = Redis.from_texts(texts, FakeEmbeddings(), redis_url=TEST_REDIS_URL)
retriever = docsearch.as_retriever(
search_type="similarity_score_threshold",
search_kwargs={"k": 3, "score_threshold": 0.91},
)
results = retriever.get_relevant_documents("foo")
assert len(results) == 2
assert drop(docsearch.index_name)
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~document_loaders~docusaurus.py | from langchain_community.document_loaders.docusaurus import DocusaurusLoader
__all__ = ["DocusaurusLoader"]
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~vectorstores~yellowbrick.py | from __future__ import annotations
import json
import logging
import uuid
import warnings
from itertools import repeat
from typing import (
Any,
Iterable,
List,
Optional,
Tuple,
Type,
)
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.vectorstores import VectorStore
from langchain_community.docstore.document import Document
logger = logging.getLogger(__name__)
class Yellowbrick(VectorStore):
"""Wrapper around Yellowbrick as a vector database.
Example:
.. code-block:: python
from langchain_community.vectorstores import Yellowbrick
from langchain_community.embeddings.openai import OpenAIEmbeddings
...
"""
def __init__(
self,
embedding: Embeddings,
connection_string: str,
table: str,
) -> None:
"""Initialize with yellowbrick client.
Args:
embedding: Embedding operator
connection_string: Format 'postgres://username:password@host:port/database'
table: Table used to store / retrieve embeddings from
"""
import psycopg2
if not isinstance(embedding, Embeddings):
warnings.warn("embeddings input must be Embeddings object.")
self.connection_string = connection_string
self._table = table
self._embedding = embedding
self._connection = psycopg2.connect(connection_string)
self.__post_init__()
def __post_init__(
self,
) -> None:
"""Initialize the store."""
self.check_database_utf8()
self.create_table_if_not_exists()
def __del__(self) -> None:
if self._connection:
self._connection.close()
def create_table_if_not_exists(self) -> None:
"""
Helper function: create table if not exists
"""
from psycopg2 import sql
cursor = self._connection.cursor()
cursor.execute(
sql.SQL(
"CREATE TABLE IF NOT EXISTS {} ( \
id UUID, \
embedding_id INTEGER, \
text VARCHAR(60000), \
metadata VARCHAR(1024), \
embedding FLOAT)"
).format(sql.Identifier(self._table))
)
self._connection.commit()
cursor.close()
def drop(self, table: str) -> None:
"""
Helper function: Drop data
"""
from psycopg2 import sql
cursor = self._connection.cursor()
cursor.execute(sql.SQL("DROP TABLE IF EXISTS {}").format(sql.Identifier(table)))
self._connection.commit()
cursor.close()
def check_database_utf8(self) -> bool:
"""
        Helper function: Check that the database is UTF-8 encoded
"""
cursor = self._connection.cursor()
query = "SELECT pg_encoding_to_char(encoding) \
FROM pg_database \
WHERE datname = current_database();"
cursor.execute(query)
encoding = cursor.fetchone()[0]
cursor.close()
if encoding.lower() == "utf8" or encoding.lower() == "utf-8":
return True
else:
raise Exception(
f"Database \
'{self.connection_string.split('/')[-1]}' encoding is not UTF-8"
)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Add more texts to the vectorstore index.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
"""
from psycopg2 import sql
texts = list(texts)
cursor = self._connection.cursor()
embeddings = self._embedding.embed_documents(list(texts))
results = []
if not metadatas:
metadatas = [{} for _ in texts]
for id in range(len(embeddings)):
doc_uuid = uuid.uuid4()
results.append(str(doc_uuid))
data_input = [
(str(id), embedding_id, text, json.dumps(metadata), embedding)
for id, embedding_id, text, metadata, embedding in zip(
repeat(doc_uuid),
range(len(embeddings[id])),
repeat(texts[id]),
repeat(metadatas[id]),
embeddings[id],
)
]
flattened_input = [val for sublist in data_input for val in sublist]
insert_query = sql.SQL(
"INSERT INTO {t} \
(id, embedding_id, text, metadata, embedding) VALUES {v}"
).format(
t=sql.Identifier(self._table),
v=(
sql.SQL(",").join(
[
sql.SQL("(%s,%s,%s,%s,%s)")
for _ in range(len(embeddings[id]))
]
)
),
)
cursor.execute(insert_query, flattened_input)
self._connection.commit()
return results
@classmethod
def from_texts(
cls: Type[Yellowbrick],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
connection_string: str = "",
table: str = "langchain",
**kwargs: Any,
) -> Yellowbrick:
"""Add texts to the vectorstore index.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
connection_string: URI to Yellowbrick instance
embedding: Embedding function
table: table to store embeddings
kwargs: vectorstore specific parameters
"""
if connection_string is None:
raise ValueError("connection_string must be provided")
vss = cls(
embedding=embedding,
connection_string=connection_string,
table=table,
)
vss.add_texts(texts=texts, metadatas=metadatas)
return vss
def similarity_search_with_score_by_vector(
self, embedding: List[float], k: int = 4, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""Perform a similarity search with Yellowbrick with vector
Args:
embedding (List[float]): query embedding
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
            NOTE: Please do not let the end user fill this and always be aware
of SQL injection.
Returns:
List[Document, float]: List of Documents and scores
"""
from psycopg2 import sql
cursor = self._connection.cursor()
tmp_table = "tmp_" + self._table
cursor.execute(
sql.SQL(
"CREATE TEMPORARY TABLE {} ( \
embedding_id INTEGER, embedding FLOAT)"
).format(sql.Identifier(tmp_table))
)
self._connection.commit()
data_input = [
(embedding_id, embedding)
for embedding_id, embedding in zip(range(len(embedding)), embedding)
]
flattened_input = [val for sublist in data_input for val in sublist]
insert_query = sql.SQL(
"INSERT INTO {t} \
(embedding_id, embedding) VALUES {v}"
).format(
t=sql.Identifier(tmp_table),
v=sql.SQL(",").join([sql.SQL("(%s,%s)") for _ in range(len(embedding))]),
)
cursor.execute(insert_query, flattened_input)
self._connection.commit()
sql_query = sql.SQL(
"SELECT text, \
metadata, \
sum(v1.embedding * v2.embedding) / \
( sqrt(sum(v1.embedding * v1.embedding)) * \
sqrt(sum(v2.embedding * v2.embedding))) AS score \
FROM {v1} v1 INNER JOIN {v2} v2 \
ON v1.embedding_id = v2.embedding_id \
GROUP BY v2.id, v2.text, v2.metadata \
ORDER BY score DESC \
LIMIT %s"
).format(v1=sql.Identifier(tmp_table), v2=sql.Identifier(self._table))
cursor.execute(sql_query, (k,))
results = cursor.fetchall()
self.drop(tmp_table)
documents: List[Tuple[Document, float]] = []
for result in results:
metadata = json.loads(result[1]) or {}
doc = Document(page_content=result[0], metadata=metadata)
documents.append((doc, result[2]))
cursor.close()
return documents
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Perform a similarity search with Yellowbrick
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
            NOTE: Please do not let the end user fill this and always be aware
of SQL injection.
Returns:
List[Document]: List of Documents
"""
embedding = self._embedding.embed_query(query)
documents = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k
)
return [doc for doc, _ in documents]
def similarity_search_with_score(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""Perform a similarity search with Yellowbrick
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
            NOTE: Please do not let the end user fill this and always be aware
of SQL injection.
Returns:
List[Document]: List of (Document, similarity)
"""
embedding = self._embedding.embed_query(query)
documents = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k
)
return documents
def similarity_search_by_vector(
self, embedding: List[float], k: int = 4, **kwargs: Any
) -> List[Document]:
"""Perform a similarity search with Yellowbrick by vectors
Args:
embedding (List[float]): query embedding
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
            NOTE: Please do not let the end user fill this and always be aware
of SQL injection.
Returns:
List[Document]: List of documents
"""
documents = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k
)
return [doc for doc, _ in documents]
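# Usage sketch: building a store from texts and querying it. The connection
# string and table name are placeholders; the target database must be UTF-8
# encoded (see check_database_utf8), and OPENAI_API_KEY must be set for the
# embeddings below.
if __name__ == "__main__":
    from langchain_community.embeddings import OpenAIEmbeddings

    store = Yellowbrick.from_texts(
        texts=[
            "Yellowbrick is an elastic SQL data warehouse.",
            "LangChain composes LLM applications.",
        ],
        embedding=OpenAIEmbeddings(),
        connection_string="postgres://user:password@host:5432/dbname",
        table="langchain_demo",
    )
    for doc, score in store.similarity_search_with_score("What is Yellowbrick?", k=1):
        print(round(score, 3), doc.page_content)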
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~concurrent.py | from __future__ import annotations
import concurrent.futures
from pathlib import Path
from typing import Iterator, Literal, Optional, Sequence, Union
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseBlobParser
from langchain_community.document_loaders.blob_loaders import (
BlobLoader,
FileSystemBlobLoader,
)
from langchain_community.document_loaders.generic import GenericLoader
from langchain_community.document_loaders.parsers.registry import get_parser
_PathLike = Union[str, Path]
DEFAULT = Literal["default"]
class ConcurrentLoader(GenericLoader):
"""Load and pars Documents concurrently."""
def __init__(
self, blob_loader: BlobLoader, blob_parser: BaseBlobParser, num_workers: int = 4
) -> None:
super().__init__(blob_loader, blob_parser)
self.num_workers = num_workers
def lazy_load(
self,
) -> Iterator[Document]:
"""Load documents lazily with concurrent parsing."""
with concurrent.futures.ThreadPoolExecutor(
max_workers=self.num_workers
) as executor:
futures = {
executor.submit(self.blob_parser.lazy_parse, blob)
for blob in self.blob_loader.yield_blobs()
}
for future in concurrent.futures.as_completed(futures):
yield from future.result()
@classmethod
def from_filesystem(
cls,
path: _PathLike,
*,
glob: str = "**/[!.]*",
exclude: Sequence[str] = (),
suffixes: Optional[Sequence[str]] = None,
show_progress: bool = False,
parser: Union[DEFAULT, BaseBlobParser] = "default",
num_workers: int = 4,
parser_kwargs: Optional[dict] = None,
) -> ConcurrentLoader:
"""Create a concurrent generic document loader using a filesystem blob loader.
Args:
path: The path to the directory to load documents from.
glob: The glob pattern to use to find documents.
suffixes: The suffixes to use to filter documents. If None, all files
matching the glob will be loaded.
exclude: A list of patterns to exclude from the loader.
show_progress: Whether to show a progress bar or not (requires tqdm).
Proxies to the file system loader.
parser: A blob parser which knows how to parse blobs into documents
num_workers: Max number of concurrent workers to use.
parser_kwargs: Keyword arguments to pass to the parser.
"""
blob_loader = FileSystemBlobLoader(
path,
glob=glob,
exclude=exclude,
suffixes=suffixes,
show_progress=show_progress,
)
if isinstance(parser, str):
if parser == "default" and cls.get_parser != GenericLoader.get_parser:
# There is an implementation of get_parser on the class, use it.
blob_parser = cls.get_parser(**(parser_kwargs or {}))
else:
blob_parser = get_parser(parser)
else:
blob_parser = parser
return cls(blob_loader, blob_parser, num_workers=num_workers)
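# Illustrative usage sketch; the directory, suffixes, and worker count below are
# placeholder values, not part of this module:
#
# loader = ConcurrentLoader.from_filesystem("docs/", suffixes=[".txt"], num_workers=8)
# for doc in loader.lazy_load():
#     print(doc.page_content[:80])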
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~agent_toolkits~powerbi~toolkit.py | """Toolkit for interacting with a Power BI dataset."""
from __future__ import annotations
from typing import TYPE_CHECKING, List, Optional, Union
from libs.core.langchain_core.callbacks import BaseCallbackManager
from libs.core.langchain_core.language_models import BaseLanguageModel
from libs.core.langchain_core.language_models.chat_models import BaseChatModel
from libs.core.langchain_core.prompts import PromptTemplate
from libs.core.langchain_core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from libs.core.langchain_core.pydantic_v1 import Field
from langchain_community.agent_toolkits.base import BaseToolkit
from langchain_community.tools import BaseTool
from langchain_community.tools.powerbi.prompt import (
QUESTION_TO_QUERY_BASE,
SINGLE_QUESTION_TO_QUERY,
USER_INPUT,
)
from langchain_community.tools.powerbi.tool import (
InfoPowerBITool,
ListPowerBITool,
QueryPowerBITool,
)
from langchain_community.utilities.powerbi import PowerBIDataset
if TYPE_CHECKING:
from langchain.chains.llm import LLMChain
class PowerBIToolkit(BaseToolkit):
"""Toolkit for interacting with Power BI dataset.
*Security Note*: This toolkit interacts with an external service.
Control access to who can use this toolkit.
Make sure that the capabilities given by this toolkit to the calling
code are appropriately scoped to the application.
See https://python.langchain.com/docs/security for more information.
"""
powerbi: PowerBIDataset = Field(exclude=True)
llm: Union[BaseLanguageModel, BaseChatModel] = Field(exclude=True)
examples: Optional[str] = None
max_iterations: int = 5
callback_manager: Optional[BaseCallbackManager] = None
output_token_limit: Optional[int] = None
tiktoken_model_name: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return [
QueryPowerBITool(
llm_chain=self._get_chain(),
powerbi=self.powerbi,
examples=self.examples,
max_iterations=self.max_iterations,
output_token_limit=self.output_token_limit,
tiktoken_model_name=self.tiktoken_model_name,
),
InfoPowerBITool(powerbi=self.powerbi),
ListPowerBITool(powerbi=self.powerbi),
]
def _get_chain(self) -> LLMChain:
"""Construct the chain based on the callback manager and model type."""
from langchain.chains.llm import LLMChain
if isinstance(self.llm, BaseLanguageModel):
return LLMChain(
llm=self.llm,
callback_manager=self.callback_manager
if self.callback_manager
else None,
prompt=PromptTemplate(
template=SINGLE_QUESTION_TO_QUERY,
input_variables=["tool_input", "tables", "schemas", "examples"],
),
)
system_prompt = SystemMessagePromptTemplate(
prompt=PromptTemplate(
template=QUESTION_TO_QUERY_BASE,
input_variables=["tables", "schemas", "examples"],
)
)
human_prompt = HumanMessagePromptTemplate(
prompt=PromptTemplate(
template=USER_INPUT,
input_variables=["tool_input"],
)
)
return LLMChain(
llm=self.llm,
callback_manager=self.callback_manager if self.callback_manager else None,
prompt=ChatPromptTemplate.from_messages([system_prompt, human_prompt]),
)
| [
"tool_input",
"[PLACEHOLDER, PLACEHOLDER]"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~aleph_alpha.py | from typing import Any, Dict, List, Optional, Sequence
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.pydantic_v1 import Extra, root_validator
from libs.core.langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from langchain_community.llms.utils import enforce_stop_tokens
class AlephAlpha(LLM):
"""Aleph Alpha large language models.
To use, you should have the ``aleph_alpha_client`` python package installed, and the
environment variable ``ALEPH_ALPHA_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Parameters are explained more in depth here:
https://github.com/Aleph-Alpha/aleph-alpha-client/blob/c14b7dd2b4325c7da0d6a119f6e76385800e097b/aleph_alpha_client/completion.py#L10
Example:
.. code-block:: python
from langchain_community.llms import AlephAlpha
aleph_alpha = AlephAlpha(aleph_alpha_api_key="my-api-key")
"""
client: Any #: :meta private:
model: Optional[str] = "luminous-base"
"""Model name to use."""
maximum_tokens: int = 64
"""The maximum number of tokens to be generated."""
temperature: float = 0.0
"""A non-negative float that tunes the degree of randomness in generation."""
top_k: int = 0
"""Number of most likely tokens to consider at each step."""
top_p: float = 0.0
"""Total probability mass of tokens to consider at each step."""
presence_penalty: float = 0.0
"""Penalizes repeated tokens."""
frequency_penalty: float = 0.0
"""Penalizes repeated tokens according to frequency."""
repetition_penalties_include_prompt: Optional[bool] = False
"""Flag deciding whether presence penalty or frequency penalty are
updated from the prompt."""
use_multiplicative_presence_penalty: Optional[bool] = False
"""Flag deciding whether presence penalty is applied
multiplicatively (True) or additively (False)."""
penalty_bias: Optional[str] = None
"""Penalty bias for the completion."""
penalty_exceptions: Optional[List[str]] = None
"""List of strings that may be generated without penalty,
regardless of other penalty settings"""
penalty_exceptions_include_stop_sequences: Optional[bool] = None
"""Should stop_sequences be included in penalty_exceptions."""
best_of: Optional[int] = None
"""returns the one with the "best of" results
(highest log probability per token)
"""
n: int = 1
"""How many completions to generate for each prompt."""
logit_bias: Optional[Dict[int, float]] = None
"""The logit bias allows to influence the likelihood of generating tokens."""
log_probs: Optional[int] = None
"""Number of top log probabilities to be returned for each generated token."""
tokens: Optional[bool] = False
"""return tokens of completion."""
disable_optimizations: Optional[bool] = False
minimum_tokens: Optional[int] = 0
"""Generate at least this number of tokens."""
echo: bool = False
"""Echo the prompt in the completion."""
use_multiplicative_frequency_penalty: bool = False
sequence_penalty: float = 0.0
sequence_penalty_min_length: int = 2
use_multiplicative_sequence_penalty: bool = False
completion_bias_inclusion: Optional[Sequence[str]] = None
completion_bias_inclusion_first_token_only: bool = False
completion_bias_exclusion: Optional[Sequence[str]] = None
completion_bias_exclusion_first_token_only: bool = False
"""Only consider the first token for the completion_bias_exclusion."""
contextual_control_threshold: Optional[float] = None
"""If set to None, attention control parameters only apply to those tokens that have
explicitly been set in the request.
If set to a non-None value, control parameters are also applied to similar tokens.
"""
control_log_additive: Optional[bool] = True
"""True: apply control by adding the log(control_factor) to attention scores.
False: (attention_scores - - attention_scores.min(-1)) * control_factor
"""
repetition_penalties_include_completion: bool = True
"""Flag deciding whether presence penalty or frequency penalty
are updated from the completion."""
raw_completion: bool = False
"""Force the raw completion of the model to be returned."""
stop_sequences: Optional[List[str]] = None
"""Stop sequences to use."""
# Client params
aleph_alpha_api_key: Optional[str] = None
"""API key for Aleph Alpha API."""
host: str = "https://api.aleph-alpha.com"
"""The hostname of the API host.
    The default one is "https://api.aleph-alpha.com"."""
hosting: Optional[str] = None
"""Determines in which datacenters the request may be processed.
You can either set the parameter to "aleph-alpha" or omit it (defaulting to None).
Not setting this value, or setting it to None, gives us maximal
flexibility in processing your request in our
own datacenters and on servers hosted with other providers.
Choose this option for maximal availability.
Setting it to "aleph-alpha" allows us to only process the
request in our own datacenters.
Choose this option for maximal data privacy."""
request_timeout_seconds: int = 305
"""Client timeout that will be set for HTTP requests in the
`requests` library's API calls.
Server will close all requests after 300 seconds with an internal server error."""
total_retries: int = 8
"""The number of retries made in case requests fail with certain retryable
status codes. If the last
retry fails a corresponding exception is raised. Note, that between retries
an exponential backoff
is applied, starting with 0.5 s after the first retry and doubling for
each retry made. So with the
default setting of 8 retries a total wait time of 63.5 s is added
between the retries."""
nice: bool = False
"""Setting this to True, will signal to the API that you intend to be
nice to other users
by de-prioritizing your request below concurrent ones."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["aleph_alpha_api_key"] = convert_to_secret_str(
get_from_dict_or_env(values, "aleph_alpha_api_key", "ALEPH_ALPHA_API_KEY")
)
try:
from aleph_alpha_client import Client
values["client"] = Client(
token=values["aleph_alpha_api_key"].get_secret_value(),
host=values["host"],
hosting=values["hosting"],
request_timeout_seconds=values["request_timeout_seconds"],
total_retries=values["total_retries"],
nice=values["nice"],
)
except ImportError:
raise ImportError(
"Could not import aleph_alpha_client python package. "
"Please install it with `pip install aleph_alpha_client`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling the Aleph Alpha API."""
return {
"maximum_tokens": self.maximum_tokens,
"temperature": self.temperature,
"top_k": self.top_k,
"top_p": self.top_p,
"presence_penalty": self.presence_penalty,
"frequency_penalty": self.frequency_penalty,
"n": self.n,
"repetition_penalties_include_prompt": self.repetition_penalties_include_prompt, # noqa: E501
"use_multiplicative_presence_penalty": self.use_multiplicative_presence_penalty, # noqa: E501
"penalty_bias": self.penalty_bias,
"penalty_exceptions": self.penalty_exceptions,
"penalty_exceptions_include_stop_sequences": self.penalty_exceptions_include_stop_sequences, # noqa: E501
"best_of": self.best_of,
"logit_bias": self.logit_bias,
"log_probs": self.log_probs,
"tokens": self.tokens,
"disable_optimizations": self.disable_optimizations,
"minimum_tokens": self.minimum_tokens,
"echo": self.echo,
"use_multiplicative_frequency_penalty": self.use_multiplicative_frequency_penalty, # noqa: E501
"sequence_penalty": self.sequence_penalty,
"sequence_penalty_min_length": self.sequence_penalty_min_length,
"use_multiplicative_sequence_penalty": self.use_multiplicative_sequence_penalty, # noqa: E501
"completion_bias_inclusion": self.completion_bias_inclusion,
"completion_bias_inclusion_first_token_only": self.completion_bias_inclusion_first_token_only, # noqa: E501
"completion_bias_exclusion": self.completion_bias_exclusion,
"completion_bias_exclusion_first_token_only": self.completion_bias_exclusion_first_token_only, # noqa: E501
"contextual_control_threshold": self.contextual_control_threshold,
"control_log_additive": self.control_log_additive,
"repetition_penalties_include_completion": self.repetition_penalties_include_completion, # noqa: E501
"raw_completion": self.raw_completion,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "aleph_alpha"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Aleph Alpha's completion endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = aleph_alpha("Tell me a joke.")
"""
from aleph_alpha_client import CompletionRequest, Prompt
params = self._default_params
if self.stop_sequences is not None and stop is not None:
raise ValueError(
"stop sequences found in both the input and default params."
)
elif self.stop_sequences is not None:
params["stop_sequences"] = self.stop_sequences
else:
params["stop_sequences"] = stop
params = {**params, **kwargs}
request = CompletionRequest(prompt=Prompt.from_text(prompt), **params)
response = self.client.complete(model=self.model, request=request)
text = response.completions[0].completion
# If stop tokens are provided, Aleph Alpha's endpoint returns them.
# In order to make this consistent with other endpoints, we strip them.
if stop is not None or self.stop_sequences is not None:
text = enforce_stop_tokens(text, params["stop_sequences"])
return text
if __name__ == "__main__":
aa = AlephAlpha()
print(aa("How are you?"))
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~document_loaders~test_pubmed.py | """Integration test for PubMed API Wrapper."""
from typing import List
import pytest
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders import PubMedLoader
xmltodict = pytest.importorskip("xmltodict")
def test_load_success() -> None:
"""Test that returns the correct answer"""
api_client = PubMedLoader(query="chatgpt")
docs = api_client.load()
print(docs)
assert len(docs) == api_client.load_max_docs == 3
assert_docs(docs)
def test_load_success_load_max_docs() -> None:
"""Test that returns the correct answer"""
api_client = PubMedLoader(query="chatgpt", load_max_docs=2)
docs = api_client.load()
print(docs)
assert len(docs) == api_client.load_max_docs == 2
assert_docs(docs)
def test_load_returns_no_result() -> None:
"""Test that gives no result."""
api_client = PubMedLoader(query="1605.08386WWW")
docs = api_client.load()
assert len(docs) == 0
def test_load_no_content() -> None:
"""Returns a Document without content."""
api_client = PubMedLoader(query="37548971")
docs = api_client.load()
print(docs)
assert len(docs) > 0
assert docs[0].page_content == ""
def assert_docs(docs: List[Document]) -> None:
for doc in docs:
assert doc.metadata
assert set(doc.metadata) == {
"Copyright Information",
"uid",
"Title",
"Published",
}
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~diffbot.py | import logging
from typing import Any, List
import requests
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class DiffbotLoader(BaseLoader):
"""Load `Diffbot` json file."""
def __init__(
self, api_token: str, urls: List[str], continue_on_failure: bool = True
):
"""Initialize with API token, ids, and key.
Args:
api_token: Diffbot API token.
urls: List of URLs to load.
continue_on_failure: Whether to continue loading other URLs if one fails.
Defaults to True.
"""
self.api_token = api_token
self.urls = urls
self.continue_on_failure = continue_on_failure
def _diffbot_api_url(self, diffbot_api: str) -> str:
return f"https://api.diffbot.com/v3/{diffbot_api}"
def _get_diffbot_data(self, url: str) -> Any:
"""Get Diffbot file from Diffbot REST API."""
# TODO: Add support for other Diffbot APIs
diffbot_url = self._diffbot_api_url("article")
params = {
"token": self.api_token,
"url": url,
}
response = requests.get(diffbot_url, params=params, timeout=10)
# TODO: handle non-ok errors
return response.json() if response.ok else {}
def load(self) -> List[Document]:
"""Extract text from Diffbot on all the URLs and return Documents"""
docs: List[Document] = list()
for url in self.urls:
try:
data = self._get_diffbot_data(url)
text = data["objects"][0]["text"] if "objects" in data else ""
metadata = {"source": url}
docs.append(Document(page_content=text, metadata=metadata))
except Exception as e:
if self.continue_on_failure:
logger.error(f"Error fetching or processing {url}, exception: {e}")
else:
raise e
return docs
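# Illustrative usage sketch; the token and URL are placeholders, not real values:
#
# loader = DiffbotLoader(api_token="DIFFBOT_API_TOKEN", urls=["https://example.com/article"])
# documents = loader.load()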
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~chains~openai_functions~tagging.py | from typing import Any, Optional
from libs.core.langchain_core.language_models import BaseLanguageModel
from libs.core.langchain_core.prompts import ChatPromptTemplate
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.openai_functions.utils import _convert_schema, get_llm_kwargs
from langchain.output_parsers.openai_functions import (
JsonOutputFunctionsParser,
PydanticOutputFunctionsParser,
)
def _get_tagging_function(schema: dict) -> dict:
return {
"name": "information_extraction",
"description": "Extracts the relevant information from the passage.",
"parameters": _convert_schema(schema),
}
_TAGGING_TEMPLATE = """Extract the desired information from the following passage.
Only extract the properties mentioned in the 'information_extraction' function.
Passage:
{input}
"""
def create_tagging_chain(
schema: dict,
llm: BaseLanguageModel,
prompt: Optional[ChatPromptTemplate] = None,
**kwargs: Any,
) -> Chain:
"""Creates a chain that extracts information from a passage
based on a schema.
Args:
schema: The schema of the entities to extract.
llm: The language model to use.
Returns:
Chain (LLMChain) that can be used to extract information from a passage.
"""
function = _get_tagging_function(schema)
prompt = prompt or ChatPromptTemplate.from_template(_TAGGING_TEMPLATE)
output_parser = JsonOutputFunctionsParser()
llm_kwargs = get_llm_kwargs(function)
chain = LLMChain(
llm=llm,
prompt=prompt,
llm_kwargs=llm_kwargs,
output_parser=output_parser,
**kwargs,
)
return chain
def create_tagging_chain_pydantic(
pydantic_schema: Any,
llm: BaseLanguageModel,
prompt: Optional[ChatPromptTemplate] = None,
**kwargs: Any,
) -> Chain:
"""Creates a chain that extracts information from a passage
based on a pydantic schema.
Args:
pydantic_schema: The pydantic schema of the entities to extract.
llm: The language model to use.
Returns:
Chain (LLMChain) that can be used to extract information from a passage.
"""
openai_schema = pydantic_schema.schema()
function = _get_tagging_function(openai_schema)
prompt = prompt or ChatPromptTemplate.from_template(_TAGGING_TEMPLATE)
output_parser = PydanticOutputFunctionsParser(pydantic_schema=pydantic_schema)
llm_kwargs = get_llm_kwargs(function)
chain = LLMChain(
llm=llm,
prompt=prompt,
llm_kwargs=llm_kwargs,
output_parser=output_parser,
**kwargs,
)
return chain
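# Illustrative usage sketch; the schema and model are assumptions, and any chat
# model that supports OpenAI function calling could be substituted:
#
# from langchain_community.chat_models import ChatOpenAI
# schema = {"properties": {"sentiment": {"type": "string"}, "language": {"type": "string"}}}
# chain = create_tagging_chain(schema, ChatOpenAI(temperature=0))
# chain.run("This is an amazing library!")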
| [
"information_extraction",
"Extract the desired information from the following passage.\n\nOnly extract the properties mentioned in the 'information_extraction' function.\n\nPassage:\n{input}\n"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~modal.py | import logging
from typing import Any, Dict, List, Mapping, Optional
import requests
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.pydantic_v1 import Extra, Field, root_validator
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
class Modal(LLM):
"""Modal large language models.
To use, you should have the ``modal-client`` python package installed.
Any parameters that are valid to be passed to the call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain_community.llms import Modal
modal = Modal(endpoint_url="")
"""
endpoint_url: str = ""
"""model endpoint to use"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not
explicitly specified."""
class Config:
"""Configuration for this pydantic config."""
extra = Extra.forbid
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"endpoint_url": self.endpoint_url},
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "modal"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to Modal endpoint."""
params = self.model_kwargs or {}
params = {**params, **kwargs}
response = requests.post(
url=self.endpoint_url,
headers={
"Content-Type": "application/json",
},
json={"prompt": prompt, **params},
)
try:
            # Parse the response once and verify the endpoint echoed the prompt back.
            response_json = response.json()
            if prompt not in response_json["prompt"]:
                raise ValueError(
                    "The Modal endpoint did not return the expected 'prompt' text."
                )
except KeyError:
raise KeyError("LangChain requires 'prompt' key in response.")
text = response_json["prompt"]
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~tomarkdown.py | from __future__ import annotations
from typing import Iterator, List
import requests
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class ToMarkdownLoader(BaseLoader):
"""Load `HTML` using `2markdown API`."""
def __init__(self, url: str, api_key: str):
"""Initialize with url and api key."""
self.url = url
self.api_key = api_key
def lazy_load(
self,
) -> Iterator[Document]:
"""Lazily load the file."""
response = requests.post(
"https://2markdown.com/api/2md",
headers={"X-Api-Key": self.api_key},
json={"url": self.url},
)
text = response.json()["article"]
metadata = {"source": self.url}
yield Document(page_content=text, metadata=metadata)
def load(self) -> List[Document]:
"""Load file."""
return list(self.lazy_load())
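# Illustrative usage sketch; the URL and API key are placeholders:
#
# loader = ToMarkdownLoader(url="https://example.com", api_key="2MARKDOWN_API_KEY")
# docs = loader.load()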
| [] |
2024-01-10 | mth93/langchain | libs~langchain~tests~unit_tests~agents~format_scratchpad~test_openai_tools.py | from libs.core.langchain_core.messages import AIMessage, ToolMessage
from langchain.agents.format_scratchpad.openai_tools import (
format_to_openai_tool_messages,
)
from langchain.agents.output_parsers.openai_tools import (
parse_ai_message_to_openai_tool_action,
)
def test_calls_convert_agent_action_to_messages() -> None:
additional_kwargs1 = {
"tool_calls": [
{
"id": "call_abcd12345",
"function": {"arguments": '{"a": 3, "b": 5}', "name": "add"},
"type": "function",
}
],
}
message1 = AIMessage(content="", additional_kwargs=additional_kwargs1)
actions1 = parse_ai_message_to_openai_tool_action(message1)
additional_kwargs2 = {
"tool_calls": [
{
"id": "call_abcd54321",
"function": {"arguments": '{"a": 3, "b": 5}', "name": "subtract"},
"type": "function",
}
],
}
message2 = AIMessage(content="", additional_kwargs=additional_kwargs2)
actions2 = parse_ai_message_to_openai_tool_action(message2)
additional_kwargs3 = {
"tool_calls": [
{
"id": "call_abcd67890",
"function": {"arguments": '{"a": 3, "b": 5}', "name": "multiply"},
"type": "function",
},
{
"id": "call_abcd09876",
"function": {"arguments": '{"a": 3, "b": 5}', "name": "divide"},
"type": "function",
},
],
}
message3 = AIMessage(content="", additional_kwargs=additional_kwargs3)
actions3 = parse_ai_message_to_openai_tool_action(message3)
# for mypy
assert isinstance(actions1, list)
assert isinstance(actions2, list)
assert isinstance(actions3, list)
intermediate_steps = [
(actions1[0], "observation1"),
(actions2[0], "observation2"),
(actions3[0], "observation3"),
(actions3[1], "observation4"),
]
expected_messages = [
message1,
ToolMessage(
tool_call_id="call_abcd12345",
content="observation1",
additional_kwargs={"name": "add"},
),
message2,
ToolMessage(
tool_call_id="call_abcd54321",
content="observation2",
additional_kwargs={"name": "subtract"},
),
message3,
ToolMessage(
tool_call_id="call_abcd67890",
content="observation3",
additional_kwargs={"name": "multiply"},
),
ToolMessage(
tool_call_id="call_abcd09876",
content="observation4",
additional_kwargs={"name": "divide"},
),
]
output = format_to_openai_tool_messages(intermediate_steps)
assert output == expected_messages
def test_handles_empty_input_list() -> None:
output = format_to_openai_tool_messages([])
assert output == []
| [
"observation2",
"observation3",
"observation1",
"observation4"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~vectorstores~annoy.py | from __future__ import annotations
import os
import pickle
import uuid
from configparser import ConfigParser
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
import numpy as np
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.vectorstores import VectorStore
from langchain_community.docstore.base import Docstore
from langchain_community.docstore.in_memory import InMemoryDocstore
from langchain_community.vectorstores.utils import maximal_marginal_relevance
INDEX_METRICS = frozenset(["angular", "euclidean", "manhattan", "hamming", "dot"])
DEFAULT_METRIC = "angular"
def dependable_annoy_import() -> Any:
"""Import annoy if available, otherwise raise error."""
try:
import annoy
except ImportError:
raise ImportError(
"Could not import annoy python package. "
"Please install it with `pip install --user annoy` "
)
return annoy
class Annoy(VectorStore):
"""`Annoy` vector store.
To use, you should have the ``annoy`` python package installed.
Example:
.. code-block:: python
from langchain_community.vectorstores import Annoy
db = Annoy(embedding_function, index, docstore, index_to_docstore_id)
"""
def __init__(
self,
embedding_function: Callable,
index: Any,
metric: str,
docstore: Docstore,
index_to_docstore_id: Dict[int, str],
):
"""Initialize with necessary components."""
self.embedding_function = embedding_function
self.index = index
self.metric = metric
self.docstore = docstore
self.index_to_docstore_id = index_to_docstore_id
@property
def embeddings(self) -> Optional[Embeddings]:
# TODO: Accept embedding object directly
return None
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
raise NotImplementedError(
"Annoy does not allow to add new data once the index is build."
)
def process_index_results(
self, idxs: List[int], dists: List[float]
) -> List[Tuple[Document, float]]:
"""Turns annoy results into a list of documents and scores.
Args:
idxs: List of indices of the documents in the index.
dists: List of distances of the documents in the index.
Returns:
List of Documents and scores.
"""
docs = []
for idx, dist in zip(idxs, dists):
_id = self.index_to_docstore_id[idx]
doc = self.docstore.search(_id)
if not isinstance(doc, Document):
raise ValueError(f"Could not find document for id {_id}, got {doc}")
docs.append((doc, dist))
return docs
def similarity_search_with_score_by_vector(
self, embedding: List[float], k: int = 4, search_k: int = -1
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
            embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_k: inspect up to search_k nodes which defaults
to n_trees * n if not provided
Returns:
List of Documents most similar to the query and score for each
"""
idxs, dists = self.index.get_nns_by_vector(
embedding, k, search_k=search_k, include_distances=True
)
return self.process_index_results(idxs, dists)
def similarity_search_with_score_by_index(
self, docstore_index: int, k: int = 4, search_k: int = -1
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
            docstore_index: Index of the document in the docstore to find similar documents for.
k: Number of Documents to return. Defaults to 4.
search_k: inspect up to search_k nodes which defaults
to n_trees * n if not provided
Returns:
List of Documents most similar to the query and score for each
"""
idxs, dists = self.index.get_nns_by_item(
docstore_index, k, search_k=search_k, include_distances=True
)
return self.process_index_results(idxs, dists)
def similarity_search_with_score(
self, query: str, k: int = 4, search_k: int = -1
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_k: inspect up to search_k nodes which defaults
to n_trees * n if not provided
Returns:
List of Documents most similar to the query and score for each
"""
embedding = self.embedding_function(query)
docs = self.similarity_search_with_score_by_vector(embedding, k, search_k)
return docs
def similarity_search_by_vector(
self, embedding: List[float], k: int = 4, search_k: int = -1, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_k: inspect up to search_k nodes which defaults
to n_trees * n if not provided
Returns:
List of Documents most similar to the embedding.
"""
docs_and_scores = self.similarity_search_with_score_by_vector(
embedding, k, search_k
)
return [doc for doc, _ in docs_and_scores]
def similarity_search_by_index(
self, docstore_index: int, k: int = 4, search_k: int = -1, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to docstore_index.
Args:
docstore_index: Index of document in docstore
k: Number of Documents to return. Defaults to 4.
search_k: inspect up to search_k nodes which defaults
to n_trees * n if not provided
Returns:
List of Documents most similar to the embedding.
"""
docs_and_scores = self.similarity_search_with_score_by_index(
docstore_index, k, search_k
)
return [doc for doc, _ in docs_and_scores]
def similarity_search(
self, query: str, k: int = 4, search_k: int = -1, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_k: inspect up to search_k nodes which defaults
to n_trees * n if not provided
Returns:
List of Documents most similar to the query.
"""
docs_and_scores = self.similarity_search_with_score(query, k, search_k)
return [doc for doc, _ in docs_and_scores]
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
k: Number of Documents to return. Defaults to 4.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
idxs = self.index.get_nns_by_vector(
embedding, fetch_k, search_k=-1, include_distances=False
)
embeddings = [self.index.get_item_vector(i) for i in idxs]
mmr_selected = maximal_marginal_relevance(
np.array([embedding], dtype=np.float32),
embeddings,
k=k,
lambda_mult=lambda_mult,
)
# ignore the -1's if not enough docs are returned/indexed
selected_indices = [idxs[i] for i in mmr_selected if i != -1]
docs = []
for i in selected_indices:
_id = self.index_to_docstore_id[i]
doc = self.docstore.search(_id)
if not isinstance(doc, Document):
raise ValueError(f"Could not find document for id {_id}, got {doc}")
docs.append(doc)
return docs
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
embedding = self.embedding_function(query)
docs = self.max_marginal_relevance_search_by_vector(
embedding, k, fetch_k, lambda_mult=lambda_mult
)
return docs
@classmethod
def __from(
cls,
texts: List[str],
embeddings: List[List[float]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
metric: str = DEFAULT_METRIC,
trees: int = 100,
n_jobs: int = -1,
**kwargs: Any,
) -> Annoy:
if metric not in INDEX_METRICS:
raise ValueError(
(
f"Unsupported distance metric: {metric}. "
f"Expected one of {list(INDEX_METRICS)}"
)
)
annoy = dependable_annoy_import()
if not embeddings:
raise ValueError("embeddings must be provided to build AnnoyIndex")
f = len(embeddings[0])
index = annoy.AnnoyIndex(f, metric=metric)
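        # Add one item per embedding, then build a forest of `trees` trees;
        # more trees gives higher accuracy at the cost of a larger index.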
for i, emb in enumerate(embeddings):
index.add_item(i, emb)
index.build(trees, n_jobs=n_jobs)
documents = []
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
documents.append(Document(page_content=text, metadata=metadata))
index_to_id = {i: str(uuid.uuid4()) for i in range(len(documents))}
docstore = InMemoryDocstore(
{index_to_id[i]: doc for i, doc in enumerate(documents)}
)
return cls(embedding.embed_query, index, metric, docstore, index_to_id)
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
metric: str = DEFAULT_METRIC,
trees: int = 100,
n_jobs: int = -1,
**kwargs: Any,
) -> Annoy:
"""Construct Annoy wrapper from raw documents.
Args:
texts: List of documents to index.
embedding: Embedding function to use.
metadatas: List of metadata dictionaries to associate with documents.
metric: Metric to use for indexing. Defaults to "angular".
trees: Number of trees to use for indexing. Defaults to 100.
n_jobs: Number of jobs to use for indexing. Defaults to -1.
        This is a user-friendly interface that:
1. Embeds documents.
2. Creates an in memory docstore
3. Initializes the Annoy database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain_community.vectorstores import Annoy
from langchain_community.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
index = Annoy.from_texts(texts, embeddings)
"""
embeddings = embedding.embed_documents(texts)
return cls.__from(
texts, embeddings, embedding, metadatas, metric, trees, n_jobs, **kwargs
)
@classmethod
def from_embeddings(
cls,
text_embeddings: List[Tuple[str, List[float]]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
metric: str = DEFAULT_METRIC,
trees: int = 100,
n_jobs: int = -1,
**kwargs: Any,
) -> Annoy:
"""Construct Annoy wrapper from embeddings.
Args:
text_embeddings: List of tuples of (text, embedding)
embedding: Embedding function to use.
metadatas: List of metadata dictionaries to associate with documents.
metric: Metric to use for indexing. Defaults to "angular".
trees: Number of trees to use for indexing. Defaults to 100.
n_jobs: Number of jobs to use for indexing. Defaults to -1
        This is a user-friendly interface that:
1. Creates an in memory docstore with provided embeddings
2. Initializes the Annoy database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain_community.vectorstores import Annoy
from langchain_community.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
text_embeddings = embeddings.embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
db = Annoy.from_embeddings(text_embedding_pairs, embeddings)
"""
texts = [t[0] for t in text_embeddings]
embeddings = [t[1] for t in text_embeddings]
return cls.__from(
texts, embeddings, embedding, metadatas, metric, trees, n_jobs, **kwargs
)
def save_local(self, folder_path: str, prefault: bool = False) -> None:
"""Save Annoy index, docstore, and index_to_docstore_id to disk.
Args:
folder_path: folder path to save index, docstore,
and index_to_docstore_id to.
prefault: Whether to pre-load the index into memory.
"""
path = Path(folder_path)
os.makedirs(path, exist_ok=True)
# save index, index config, docstore and index_to_docstore_id
config_object = ConfigParser()
config_object["ANNOY"] = {
"f": self.index.f,
"metric": self.metric,
}
self.index.save(str(path / "index.annoy"), prefault=prefault)
with open(path / "index.pkl", "wb") as file:
pickle.dump((self.docstore, self.index_to_docstore_id, config_object), file)
@classmethod
def load_local(
cls,
folder_path: str,
embeddings: Embeddings,
) -> Annoy:
"""Load Annoy index, docstore, and index_to_docstore_id to disk.
Args:
folder_path: folder path to load index, docstore,
and index_to_docstore_id from.
embeddings: Embeddings to use when generating queries.
"""
path = Path(folder_path)
# load index separately since it is not picklable
annoy = dependable_annoy_import()
# load docstore and index_to_docstore_id
with open(path / "index.pkl", "rb") as file:
docstore, index_to_docstore_id, config_object = pickle.load(file)
f = int(config_object["ANNOY"]["f"])
metric = config_object["ANNOY"]["metric"]
index = annoy.AnnoyIndex(f, metric=metric)
index.load(str(path / "index.annoy"))
return cls(
embeddings.embed_query, index, metric, docstore, index_to_docstore_id
)
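# Illustrative save/load round trip; the folder name and embedding model are
# placeholders (any Embeddings implementation, e.g. OpenAIEmbeddings, works):
#
# db = Annoy.from_texts(["hello", "world"], embeddings)
# db.save_local("annoy_index")
# restored = Annoy.load_local("annoy_index", embeddings)
# restored.similarity_search("hello", k=1)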
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~embeddings~self_hosted.py | from typing import Any, Callable, List
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.pydantic_v1 import Extra
from langchain_community.llms.self_hosted import SelfHostedPipeline
def _embed_documents(pipeline: Any, *args: Any, **kwargs: Any) -> List[List[float]]:
"""Inference function to send to the remote hardware.
Accepts a sentence_transformer model_id and
returns a list of embeddings for each document in the batch.
"""
return pipeline(*args, **kwargs)
class SelfHostedEmbeddings(SelfHostedPipeline, Embeddings):
"""Custom embedding models on self-hosted remote hardware.
Supported hardware includes auto-launched instances on AWS, GCP, Azure,
and Lambda, as well as servers specified
by IP address and SSH credentials (such as on-prem, or another
cloud like Paperspace, Coreweave, etc.).
To use, you should have the ``runhouse`` python package installed.
Example using a model load function:
.. code-block:: python
from langchain_community.embeddings import SelfHostedEmbeddings
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import runhouse as rh
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
def get_pipeline():
model_id = "facebook/bart-large"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
return pipeline("feature-extraction", model=model, tokenizer=tokenizer)
embeddings = SelfHostedEmbeddings(
model_load_fn=get_pipeline,
                hardware=gpu,
model_reqs=["./", "torch", "transformers"],
)
Example passing in a pipeline path:
.. code-block:: python
from langchain_community.embeddings import SelfHostedHFEmbeddings
import runhouse as rh
from transformers import pipeline
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
pipeline = pipeline(model="bert-base-uncased", task="feature-extraction")
rh.blob(pickle.dumps(pipeline),
path="models/pipeline.pkl").save().to(gpu, path="models")
embeddings = SelfHostedHFEmbeddings.from_pipeline(
pipeline="models/pipeline.pkl",
hardware=gpu,
model_reqs=["./", "torch", "transformers"],
)
"""
inference_fn: Callable = _embed_documents
"""Inference function to extract the embeddings on the remote hardware."""
inference_kwargs: Any = None
"""Any kwargs to pass to the model's inference function."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a HuggingFace transformer model.
Args:
            texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
texts = list(map(lambda x: x.replace("\n", " "), texts))
embeddings = self.client(self.pipeline_ref, texts)
if not isinstance(embeddings, list):
return embeddings.tolist()
return embeddings
def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a HuggingFace transformer model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
text = text.replace("\n", " ")
embeddings = self.client(self.pipeline_ref, text)
if not isinstance(embeddings, list):
return embeddings.tolist()
return embeddings
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~vectorstores~sklearn.py | """ Wrapper around scikit-learn NearestNeighbors implementation.
The vector store can be persisted in json, bson or parquet format.
"""
import json
import math
import os
from abc import ABC, abstractmethod
from typing import Any, Dict, Iterable, List, Literal, Optional, Tuple, Type
from uuid import uuid4
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.utils import guard_import
from libs.core.langchain_core.vectorstores import VectorStore
from langchain_community.vectorstores.utils import maximal_marginal_relevance
DEFAULT_K = 4 # Number of Documents to return.
DEFAULT_FETCH_K = 20 # Number of Documents to initially fetch during MMR search.
class BaseSerializer(ABC):
"""Base class for serializing data."""
def __init__(self, persist_path: str) -> None:
self.persist_path = persist_path
@classmethod
@abstractmethod
def extension(cls) -> str:
"""The file extension suggested by this serializer (without dot)."""
@abstractmethod
def save(self, data: Any) -> None:
"""Saves the data to the persist_path"""
@abstractmethod
def load(self) -> Any:
"""Loads the data from the persist_path"""
class JsonSerializer(BaseSerializer):
"""Serializes data in json using the json package from python standard library."""
@classmethod
def extension(cls) -> str:
return "json"
def save(self, data: Any) -> None:
with open(self.persist_path, "w") as fp:
json.dump(data, fp)
def load(self) -> Any:
with open(self.persist_path, "r") as fp:
return json.load(fp)
class BsonSerializer(BaseSerializer):
"""Serializes data in binary json using the `bson` python package."""
def __init__(self, persist_path: str) -> None:
super().__init__(persist_path)
self.bson = guard_import("bson")
@classmethod
def extension(cls) -> str:
return "bson"
def save(self, data: Any) -> None:
with open(self.persist_path, "wb") as fp:
fp.write(self.bson.dumps(data))
def load(self) -> Any:
with open(self.persist_path, "rb") as fp:
return self.bson.loads(fp.read())
class ParquetSerializer(BaseSerializer):
"""Serializes data in `Apache Parquet` format using the `pyarrow` package."""
def __init__(self, persist_path: str) -> None:
super().__init__(persist_path)
self.pd = guard_import("pandas")
self.pa = guard_import("pyarrow")
self.pq = guard_import("pyarrow.parquet")
@classmethod
def extension(cls) -> str:
return "parquet"
def save(self, data: Any) -> None:
df = self.pd.DataFrame(data)
table = self.pa.Table.from_pandas(df)
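        # Write through a temporary "-backup" rename so an existing file is not
        # lost if serialization fails part-way through.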
if os.path.exists(self.persist_path):
backup_path = str(self.persist_path) + "-backup"
os.rename(self.persist_path, backup_path)
try:
self.pq.write_table(table, self.persist_path)
except Exception as exc:
os.rename(backup_path, self.persist_path)
raise exc
else:
os.remove(backup_path)
else:
self.pq.write_table(table, self.persist_path)
def load(self) -> Any:
table = self.pq.read_table(self.persist_path)
df = table.to_pandas()
return {col: series.tolist() for col, series in df.items()}
SERIALIZER_MAP: Dict[str, Type[BaseSerializer]] = {
"json": JsonSerializer,
"bson": BsonSerializer,
"parquet": ParquetSerializer,
}
class SKLearnVectorStoreException(RuntimeError):
"""Exception raised by SKLearnVectorStore."""
pass
class SKLearnVectorStore(VectorStore):
"""Simple in-memory vector store based on the `scikit-learn` library
`NearestNeighbors`."""
def __init__(
self,
embedding: Embeddings,
*,
persist_path: Optional[str] = None,
serializer: Literal["json", "bson", "parquet"] = "json",
metric: str = "cosine",
**kwargs: Any,
) -> None:
np = guard_import("numpy")
sklearn_neighbors = guard_import("sklearn.neighbors", pip_name="scikit-learn")
# non-persistent properties
self._np = np
self._neighbors = sklearn_neighbors.NearestNeighbors(metric=metric, **kwargs)
self._neighbors_fitted = False
self._embedding_function = embedding
self._persist_path = persist_path
self._serializer: Optional[BaseSerializer] = None
if self._persist_path is not None:
serializer_cls = SERIALIZER_MAP[serializer]
self._serializer = serializer_cls(persist_path=self._persist_path)
# data properties
self._embeddings: List[List[float]] = []
self._texts: List[str] = []
self._metadatas: List[dict] = []
self._ids: List[str] = []
# cache properties
self._embeddings_np: Any = np.asarray([])
if self._persist_path is not None and os.path.isfile(self._persist_path):
self._load()
@property
def embeddings(self) -> Embeddings:
return self._embedding_function
def persist(self) -> None:
if self._serializer is None:
raise SKLearnVectorStoreException(
"You must specify a persist_path on creation to persist the "
"collection."
)
data = {
"ids": self._ids,
"texts": self._texts,
"metadatas": self._metadatas,
"embeddings": self._embeddings,
}
self._serializer.save(data)
def _load(self) -> None:
if self._serializer is None:
raise SKLearnVectorStoreException(
"You must specify a persist_path on creation to load the " "collection."
)
data = self._serializer.load()
self._embeddings = data["embeddings"]
self._texts = data["texts"]
self._metadatas = data["metadatas"]
self._ids = data["ids"]
self._update_neighbors()
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
_texts = list(texts)
_ids = ids or [str(uuid4()) for _ in _texts]
self._texts.extend(_texts)
self._embeddings.extend(self._embedding_function.embed_documents(_texts))
self._metadatas.extend(metadatas or ([{}] * len(_texts)))
self._ids.extend(_ids)
self._update_neighbors()
return _ids
def _update_neighbors(self) -> None:
if len(self._embeddings) == 0:
raise SKLearnVectorStoreException(
"No data was added to SKLearnVectorStore."
)
self._embeddings_np = self._np.asarray(self._embeddings)
self._neighbors.fit(self._embeddings_np)
self._neighbors_fitted = True
def _similarity_index_search_with_score(
self, query_embedding: List[float], *, k: int = DEFAULT_K, **kwargs: Any
) -> List[Tuple[int, float]]:
"""Search k embeddings similar to the query embedding. Returns a list of
(index, distance) tuples."""
if not self._neighbors_fitted:
raise SKLearnVectorStoreException(
"No data was added to SKLearnVectorStore."
)
neigh_dists, neigh_idxs = self._neighbors.kneighbors(
[query_embedding], n_neighbors=k
)
return list(zip(neigh_idxs[0], neigh_dists[0]))
def similarity_search_with_score(
self, query: str, *, k: int = DEFAULT_K, **kwargs: Any
) -> List[Tuple[Document, float]]:
query_embedding = self._embedding_function.embed_query(query)
indices_dists = self._similarity_index_search_with_score(
query_embedding, k=k, **kwargs
)
return [
(
Document(
page_content=self._texts[idx],
metadata={"id": self._ids[idx], **self._metadatas[idx]},
),
dist,
)
for idx, dist in indices_dists
]
def similarity_search(
self, query: str, k: int = DEFAULT_K, **kwargs: Any
) -> List[Document]:
docs_scores = self.similarity_search_with_score(query, k=k, **kwargs)
return [doc for doc, _ in docs_scores]
def _similarity_search_with_relevance_scores(
self, query: str, k: int = DEFAULT_K, **kwargs: Any
) -> List[Tuple[Document, float]]:
docs_dists = self.similarity_search_with_score(query, k=k, **kwargs)
docs, dists = zip(*docs_dists)
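        # Map distances to relevance scores in (0, 1] via exp(-distance), so
        # smaller distances yield scores closer to 1.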
scores = [1 / math.exp(dist) for dist in dists]
return list(zip(list(docs), scores))
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = DEFAULT_K,
fetch_k: int = DEFAULT_FETCH_K,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
indices_dists = self._similarity_index_search_with_score(
embedding, k=fetch_k, **kwargs
)
indices, _ = zip(*indices_dists)
result_embeddings = self._embeddings_np[indices,]
mmr_selected = maximal_marginal_relevance(
self._np.array(embedding, dtype=self._np.float32),
result_embeddings,
k=k,
lambda_mult=lambda_mult,
)
mmr_indices = [indices[i] for i in mmr_selected]
return [
Document(
page_content=self._texts[idx],
metadata={"id": self._ids[idx], **self._metadatas[idx]},
)
for idx in mmr_indices
]
def max_marginal_relevance_search(
self,
query: str,
k: int = DEFAULT_K,
fetch_k: int = DEFAULT_FETCH_K,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
if self._embedding_function is None:
raise ValueError(
"For MMR search, you must specify an embedding function on creation."
)
embedding = self._embedding_function.embed_query(query)
docs = self.max_marginal_relevance_search_by_vector(
            embedding, k, fetch_k, lambda_mult=lambda_mult
)
return docs
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
persist_path: Optional[str] = None,
**kwargs: Any,
) -> "SKLearnVectorStore":
vs = SKLearnVectorStore(embedding, persist_path=persist_path, **kwargs)
vs.add_texts(texts, metadatas=metadatas, ids=ids)
return vs
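# Illustrative usage sketch; the texts, embedding model, and persist path are
# placeholders (any Embeddings implementation can be substituted):
#
# store = SKLearnVectorStore.from_texts(
#     ["foo", "bar"], embedding=my_embeddings,
#     persist_path="./store.parquet", serializer="parquet",
# )
# store.persist()
# store.similarity_search("foo", k=1)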
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~vectorstores~test_usearch.py | """Test USearch functionality."""
import pytest
from libs.core.langchain_core.documents import Document
from langchain_community.vectorstores.usearch import USearch
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
def test_usearch_from_texts() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docsearch = USearch.from_texts(texts, FakeEmbeddings())
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_usearch_from_documents() -> None:
"""Test from_documents constructor."""
texts = ["foo", "bar", "baz"]
docs = [Document(page_content=t, metadata={"a": "b"}) for t in texts]
docsearch = USearch.from_documents(docs, FakeEmbeddings())
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"a": "b"})]
def test_usearch_add_texts() -> None:
"""Test adding a new document"""
texts = ["foo", "bar", "baz"]
docsearch = USearch.from_texts(texts, FakeEmbeddings())
docsearch.add_texts(["foo"])
output = docsearch.similarity_search("foo", k=2)
assert output == [Document(page_content="foo"), Document(page_content="foo")]
def test_ip() -> None:
"""Test inner product distance."""
texts = ["foo", "bar", "baz"]
docsearch = USearch.from_texts(texts, FakeEmbeddings(), metric="ip")
output = docsearch.similarity_search_with_score("far", k=2)
_, score = output[1]
assert score == -8.0
def test_l2() -> None:
"""Test Flat L2 distance."""
texts = ["foo", "bar", "baz"]
docsearch = USearch.from_texts(texts, FakeEmbeddings(), metric="l2_sq")
output = docsearch.similarity_search_with_score("far", k=2)
_, score = output[1]
assert score == 1.0
def test_cos() -> None:
"""Test cosine distance."""
texts = ["foo", "bar", "baz"]
docsearch = USearch.from_texts(texts, FakeEmbeddings(), metric="cos")
output = docsearch.similarity_search_with_score("far", k=2)
_, score = output[1]
assert score == pytest.approx(0.05, abs=0.002)
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~tools~office365~messages_search.py | """Util that Searches email messages in Office 365.
Free, but setup is required. See link below.
https://learn.microsoft.com/en-us/graph/auth/
"""
from typing import Any, Dict, List, Optional, Type
from libs.core.langchain_core.callbacks import CallbackManagerForToolRun
from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra, Field
from langchain_community.tools.office365.base import O365BaseTool
from langchain_community.tools.office365.utils import UTC_FORMAT, clean_body
class SearchEmailsInput(BaseModel):
"""Input for SearchEmails Tool."""
"""From https://learn.microsoft.com/en-us/graph/search-query-parameter"""
folder: str = Field(
default=None,
description=(
" If the user wants to search in only one folder, the name of the folder. "
'Default folders are "inbox", "drafts", "sent items", "deleted ttems", but '
"users can search custom folders as well."
),
)
query: str = Field(
description=(
"The Microsoift Graph v1.0 $search query. Example filters include "
"from:sender, from:sender, to:recipient, subject:subject, "
"recipients:list_of_recipients, body:excitement, importance:high, "
"received>2022-12-01, received<2021-12-01, sent>2022-12-01, "
"sent<2021-12-01, hasAttachments:true attachment:api-catalog.md, "
"cc:[email protected], bcc:[email protected], body:excitement date "
"range example: received:2023-06-08..2023-06-09 matching example: "
"from:amy OR from:david."
)
)
max_results: int = Field(
default=10,
description="The maximum number of results to return.",
)
truncate: bool = Field(
default=True,
description=(
"Whether the email body is truncated to meet token number limits. Set to "
"False for searches that will retrieve small messages, otherwise, set to "
"True"
),
)
class O365SearchEmails(O365BaseTool):
"""Class for searching email messages in Office 365
Free, but setup is required
"""
name: str = "messages_search"
args_schema: Type[BaseModel] = SearchEmailsInput
description: str = (
"Use this tool to search for email messages."
" The input must be a valid Microsoft Graph v1.0 $search query."
" The output is a JSON list of the requested resource."
)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _run(
self,
query: str,
folder: str = "",
max_results: int = 10,
truncate: bool = True,
run_manager: Optional[CallbackManagerForToolRun] = None,
truncate_limit: int = 150,
) -> List[Dict[str, Any]]:
# Get mailbox object
mailbox = self.account.mailbox()
# Pull the folder if the user wants to search in a folder
if folder != "":
mailbox = mailbox.get_folder(folder_name=folder)
# Retrieve messages based on query
query = mailbox.q().search(query)
messages = mailbox.get_messages(limit=max_results, query=query)
# Generate output dict
output_messages = []
for message in messages:
output_message = {}
output_message["from"] = message.sender
if truncate:
output_message["body"] = message.body_preview[:truncate_limit]
else:
output_message["body"] = clean_body(message.body)
output_message["subject"] = message.subject
output_message["date"] = message.modified.strftime(UTC_FORMAT)
output_message["to"] = []
for recipient in message.to._recipients:
output_message["to"].append(str(recipient))
output_message["cc"] = []
for recipient in message.cc._recipients:
output_message["cc"].append(str(recipient))
output_message["bcc"] = []
for recipient in message.bcc._recipients:
output_message["bcc"].append(str(recipient))
output_messages.append(output_message)
return output_messages
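# --- Hedged usage sketch (added for illustration; not part of the upstream module) ---
# The tool is a standard LangChain tool, so it can be invoked via `run` with the
# arguments declared in `SearchEmailsInput`. This assumes the Office 365 credentials
# expected by `O365BaseTool` are already configured; the query is only an example.
if __name__ == "__main__":
    search_tool = O365SearchEmails()
    found = search_tool.run(
        {"query": "from:alice subject:report", "max_results": 5, "truncate": True}
    )
    for message in found:
        print(message["subject"], "-", message["from"])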
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~bittensor.py | import http.client
import json
import ssl
from typing import Any, List, Mapping, Optional
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.llms import LLM
class NIBittensorLLM(LLM):
"""NIBittensor LLMs
NIBittensorLLM is created by Neural Internet (https://neuralinternet.ai/),
powered by Bittensor, a decentralized network full of different AI models.
To analyze API_KEYS and logs of your usage visit
https://api.neuralinternet.ai/api-keys
https://api.neuralinternet.ai/logs
Example:
.. code-block:: python
from langchain_community.llms import NIBittensorLLM
llm = NIBittensorLLM()
"""
system_prompt: Optional[str]
"""Provide system prompt that you want to supply it to model before every prompt"""
top_responses: Optional[int] = 0
"""Provide top_responses to get Top N miner responses on one request.May get delayed
Don't use in Production"""
@property
def _llm_type(self) -> str:
return "NIBittensorLLM"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""
        Wrapper around the bittensor top miner models. It's built by Neural Internet.
Call the Neural Internet's BTVEP Server and return the output.
Parameters (optional):
system_prompt(str): A system prompt defining how your model should respond.
top_responses(int): Total top miner responses to retrieve from Bittensor
protocol.
Return:
The generated response(s).
Example:
.. code-block:: python
from langchain_community.llms import NIBittensorLLM
llm = NIBittensorLLM(system_prompt="Act like you are programmer with \
5+ years of experience.")
"""
# Creating HTTPS connection with SSL
context = ssl.create_default_context()
context.check_hostname = True
conn = http.client.HTTPSConnection("test.neuralinternet.ai", context=context)
# Sanitizing User Input before passing to API.
if isinstance(self.top_responses, int):
top_n = min(100, self.top_responses)
else:
top_n = 0
default_prompt = "You are an assistant which is created by Neural Internet(NI) \
in decentralized network named as a Bittensor."
if self.system_prompt is None:
system_prompt = (
default_prompt
+ " Your task is to provide accurate response based on user prompt"
)
else:
system_prompt = default_prompt + str(self.system_prompt)
# Retrieving API KEY to pass into header of each request
conn.request("GET", "/admin/api-keys/")
api_key_response = conn.getresponse()
api_keys_data = (
api_key_response.read().decode("utf-8").replace("\n", "").replace("\t", "")
)
api_keys_json = json.loads(api_keys_data)
api_key = api_keys_json[0]["api_key"]
# Creating Header and getting top benchmark miner uids
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {api_key}",
"Endpoint-Version": "2023-05-19",
}
conn.request("GET", "/top_miner_uids", headers=headers)
miner_response = conn.getresponse()
miner_data = (
miner_response.read().decode("utf-8").replace("\n", "").replace("\t", "")
)
uids = json.loads(miner_data)
# Condition for benchmark miner response
if isinstance(uids, list) and uids and not top_n:
for uid in uids:
try:
payload = json.dumps(
{
"uids": [uid],
"messages": [
{"role": "system", "content": system_prompt},
{"role": "user", "content": prompt},
],
}
)
conn.request("POST", "/chat", payload, headers)
init_response = conn.getresponse()
init_data = (
init_response.read()
.decode("utf-8")
.replace("\n", "")
.replace("\t", "")
)
init_json = json.loads(init_data)
if "choices" not in init_json:
continue
reply = init_json["choices"][0]["message"]["content"]
conn.close()
return reply
except Exception:
continue
# For top miner based on bittensor response
try:
payload = json.dumps(
{
"top_n": top_n,
"messages": [
{"role": "system", "content": system_prompt},
{"role": "user", "content": prompt},
],
}
)
conn.request("POST", "/chat", payload, headers)
response = conn.getresponse()
utf_string = (
response.read().decode("utf-8").replace("\n", "").replace("\t", "")
)
if top_n:
conn.close()
return utf_string
json_resp = json.loads(utf_string)
reply = json_resp["choices"][0]["message"]["content"]
conn.close()
return reply
except Exception as e:
conn.request("GET", f"/error_msg?e={e}&p={prompt}", headers=headers)
return "Sorry I am unable to provide response now, Please try again later."
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"system_prompt": self.system_prompt,
"top_responses": self.top_responses,
}
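# --- Hedged usage sketch (added for illustration; not part of the upstream module) ---
# Shows the two modes described above: a plain single-response call, and a
# `top_responses` call that returns the raw JSON of the top miners. Both need
# network access to the Neural Internet endpoint, so treat this as a sketch only.
if __name__ == "__main__":
    llm = NIBittensorLLM(system_prompt="You are a concise assistant.")
    print(llm("What is the Bittensor network?"))
    multi_llm = NIBittensorLLM(top_responses=3)  # returns a JSON string of top miners
    print(multi_llm("What is the Bittensor network?"))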
| [
"You are an assistant which is created by Neural Internet(NI) in decentralized network named as a Bittensor.",
"default_prompt + str(self.system_prompt)",
"You are an assistant which is created by Neural Internet(NI) in decentralized network named as a Bittensor. Your task is to provide accurate response based on user prompt"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~notiondb.py | from typing import Any, Dict, List, Optional
import requests
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
NOTION_BASE_URL = "https://api.notion.com/v1"
DATABASE_URL = NOTION_BASE_URL + "/databases/{database_id}/query"
PAGE_URL = NOTION_BASE_URL + "/pages/{page_id}"
BLOCK_URL = NOTION_BASE_URL + "/blocks/{block_id}/children"
class NotionDBLoader(BaseLoader):
"""Load from `Notion DB`.
Reads content from pages within a Notion Database.
Args:
integration_token (str): Notion integration token.
database_id (str): Notion database id.
request_timeout_sec (int): Timeout for Notion requests in seconds.
Defaults to 10.
"""
def __init__(
self,
integration_token: str,
database_id: str,
request_timeout_sec: Optional[int] = 10,
) -> None:
"""Initialize with parameters."""
if not integration_token:
raise ValueError("integration_token must be provided")
if not database_id:
raise ValueError("database_id must be provided")
self.token = integration_token
self.database_id = database_id
self.headers = {
"Authorization": "Bearer " + self.token,
"Content-Type": "application/json",
"Notion-Version": "2022-06-28",
}
self.request_timeout_sec = request_timeout_sec
def load(self) -> List[Document]:
"""Load documents from the Notion database.
Returns:
List[Document]: List of documents.
"""
page_summaries = self._retrieve_page_summaries()
return list(self.load_page(page_summary) for page_summary in page_summaries)
def _retrieve_page_summaries(
self, query_dict: Dict[str, Any] = {"page_size": 100}
) -> List[Dict[str, Any]]:
"""Get all the pages from a Notion database."""
pages: List[Dict[str, Any]] = []
while True:
data = self._request(
DATABASE_URL.format(database_id=self.database_id),
method="POST",
query_dict=query_dict,
)
pages.extend(data.get("results"))
if not data.get("has_more"):
break
query_dict["start_cursor"] = data.get("next_cursor")
return pages
def load_page(self, page_summary: Dict[str, Any]) -> Document:
"""Read a page.
Args:
page_summary: Page summary from Notion API.
"""
page_id = page_summary["id"]
# load properties as metadata
metadata: Dict[str, Any] = {}
for prop_name, prop_data in page_summary["properties"].items():
prop_type = prop_data["type"]
if prop_type == "rich_text":
value = (
prop_data["rich_text"][0]["plain_text"]
if prop_data["rich_text"]
else None
)
elif prop_type == "title":
value = (
prop_data["title"][0]["plain_text"] if prop_data["title"] else None
)
elif prop_type == "multi_select":
value = (
[item["name"] for item in prop_data["multi_select"]]
if prop_data["multi_select"]
else []
)
elif prop_type == "url":
value = prop_data["url"]
elif prop_type == "unique_id":
value = (
f'{prop_data["unique_id"]["prefix"]}-{prop_data["unique_id"]["number"]}'
if prop_data["unique_id"]
else None
)
elif prop_type == "status":
value = prop_data["status"]["name"] if prop_data["status"] else None
elif prop_type == "people":
value = (
[item["name"] for item in prop_data["people"]]
if prop_data["people"]
else []
)
elif prop_type == "date":
value = prop_data["date"] if prop_data["date"] else None
elif prop_type == "last_edited_time":
value = (
prop_data["last_edited_time"]
if prop_data["last_edited_time"]
else None
)
elif prop_type == "created_time":
value = prop_data["created_time"] if prop_data["created_time"] else None
elif prop_type == "checkbox":
value = prop_data["checkbox"]
elif prop_type == "email":
value = prop_data["email"]
elif prop_type == "number":
value = prop_data["number"]
elif prop_type == "select":
value = prop_data["select"]["name"] if prop_data["select"] else None
else:
value = None
metadata[prop_name.lower()] = value
metadata["id"] = page_id
return Document(page_content=self._load_blocks(page_id), metadata=metadata)
def _load_blocks(self, block_id: str, num_tabs: int = 0) -> str:
"""Read a block and its children."""
result_lines_arr: List[str] = []
cur_block_id: str = block_id
while cur_block_id:
data = self._request(BLOCK_URL.format(block_id=cur_block_id))
for result in data["results"]:
result_obj = result[result["type"]]
if "rich_text" not in result_obj:
continue
cur_result_text_arr: List[str] = []
for rich_text in result_obj["rich_text"]:
if "text" in rich_text:
cur_result_text_arr.append(
"\t" * num_tabs + rich_text["text"]["content"]
)
if result["has_children"]:
children_text = self._load_blocks(
result["id"], num_tabs=num_tabs + 1
)
cur_result_text_arr.append(children_text)
result_lines_arr.append("\n".join(cur_result_text_arr))
cur_block_id = data.get("next_cursor")
return "\n".join(result_lines_arr)
def _request(
self, url: str, method: str = "GET", query_dict: Dict[str, Any] = {}
) -> Any:
res = requests.request(
method,
url,
headers=self.headers,
json=query_dict,
timeout=self.request_timeout_sec,
)
res.raise_for_status()
return res.json()
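# --- Hedged usage sketch (added for illustration; not part of the upstream module) ---
# Minimal end-to-end call. The environment variable names are placeholders for a
# Notion integration token and database id; nothing in this module defines them.
if __name__ == "__main__":
    import os
    loader = NotionDBLoader(
        integration_token=os.environ["NOTION_INTEGRATION_TOKEN"],
        database_id=os.environ["NOTION_DATABASE_ID"],
        request_timeout_sec=30,
    )
    docs = loader.load()
    print(f"Loaded {len(docs)} pages")
    if docs:
        print(docs[0].metadata)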
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~output_parsers~datetime.py | import random
from datetime import datetime, timedelta
from typing import List
from libs.core.langchain_core.exceptions import OutputParserException
from libs.core.langchain_core.output_parsers import BaseOutputParser
from langchain.utils import comma_list
def _generate_random_datetime_strings(
pattern: str,
n: int = 3,
start_date: datetime = datetime(1, 1, 1),
end_date: datetime = datetime.now() + timedelta(days=3650),
) -> List[str]:
"""Generates n random datetime strings conforming to the
given pattern within the specified date range.
Pattern should be a string containing the desired format codes.
start_date and end_date should be datetime objects representing
the start and end of the date range.
"""
examples = []
delta = end_date - start_date
for i in range(n):
random_delta = random.uniform(0, delta.total_seconds())
dt = start_date + timedelta(seconds=random_delta)
date_string = dt.strftime(pattern)
examples.append(date_string)
return examples
class DatetimeOutputParser(BaseOutputParser[datetime]):
"""Parse the output of an LLM call to a datetime."""
format: str = "%Y-%m-%dT%H:%M:%S.%fZ"
"""The string value that used as the datetime format."""
def get_format_instructions(self) -> str:
examples = comma_list(_generate_random_datetime_strings(self.format))
return (
f"Write a datetime string that matches the "
f"following pattern: '{self.format}'.\n\n"
f"Examples: {examples}\n\n"
f"Return ONLY this string, no other words!"
)
def parse(self, response: str) -> datetime:
try:
return datetime.strptime(response.strip(), self.format)
except ValueError as e:
raise OutputParserException(
f"Could not parse datetime string: {response}"
) from e
@property
def _type(self) -> str:
return "datetime"
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~unit_tests~document_loaders~test_generic_loader.py | """Test generic loader."""
import os
import tempfile
from pathlib import Path
from typing import Any, Generator, Iterator
import pytest
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseBlobParser
from langchain_community.document_loaders.blob_loaders import Blob, FileSystemBlobLoader
from langchain_community.document_loaders.generic import GenericLoader
from langchain_community.document_loaders.parsers.txt import TextParser
@pytest.fixture
def toy_dir() -> Generator[Path, None, None]:
"""Yield a pre-populated directory to test the blob loader."""
with tempfile.TemporaryDirectory() as temp_dir:
# Create test.txt
with open(os.path.join(temp_dir, "test.txt"), "w") as test_txt:
test_txt.write("This is a test.txt file.")
# Create test.html
with open(os.path.join(temp_dir, "test.html"), "w") as test_html:
test_html.write(
"<html><body><h1>This is a test.html file.</h1></body></html>"
)
# Create .hidden_file
with open(os.path.join(temp_dir, ".hidden_file"), "w") as hidden_file:
hidden_file.write("This is a hidden file.")
# Create some_dir/nested_file.txt
some_dir = os.path.join(temp_dir, "some_dir")
os.makedirs(some_dir)
with open(os.path.join(some_dir, "nested_file.txt"), "w") as nested_file:
nested_file.write("This is a nested_file.txt file.")
# Create some_dir/other_dir/more_nested.txt
other_dir = os.path.join(some_dir, "other_dir")
os.makedirs(other_dir)
with open(os.path.join(other_dir, "more_nested.txt"), "w") as nested_file:
nested_file.write("This is a more_nested.txt file.")
yield Path(temp_dir)
class AsIsParser(BaseBlobParser):
"""Parser created for testing purposes."""
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Extract the first character of a blob."""
yield Document(page_content=blob.as_string())
def test__init__(toy_dir: str) -> None:
"""Test initialization from init."""
loader = GenericLoader(
FileSystemBlobLoader(toy_dir, suffixes=[".txt"]),
AsIsParser(),
)
docs = loader.load()
assert len(docs) == 3
# Glob order seems to be deterministic with recursion. If this test becomes flaky,
# we can sort the docs by page content.
assert docs[0].page_content == "This is a test.txt file."
def test_from_filesystem_classmethod(toy_dir: str) -> None:
"""Test generic loader."""
loader = GenericLoader.from_filesystem(
toy_dir, suffixes=[".txt"], parser=AsIsParser()
)
docs = loader.load()
assert len(docs) == 3
# Glob order seems to be deterministic with recursion. If this test becomes flaky,
# we can sort the docs by page content.
assert docs[0].page_content == "This is a test.txt file."
def test_from_filesystem_classmethod_with_path(toy_dir: str) -> None:
loader = GenericLoader.from_filesystem(os.path.join(toy_dir, "test.txt"))
docs = loader.load()
assert len(docs) == 1
assert docs[0].page_content == "This is a test.txt file."
def test_from_filesystem_classmethod_with_glob(toy_dir: str) -> None:
"""Test that glob parameter is taken into account."""
loader = GenericLoader.from_filesystem(toy_dir, glob="*.txt", parser=AsIsParser())
docs = loader.load()
assert len(docs) == 1
# Glob order seems to be deterministic with recursion. If this test becomes flaky,
# we can sort the docs by page content.
assert docs[0].page_content == "This is a test.txt file."
@pytest.mark.requires("tqdm")
def test_from_filesystem_classmethod_show_progress(toy_dir: str) -> None:
"""Test that glob parameter is taken into account."""
loader = GenericLoader.from_filesystem(
toy_dir, glob="*.txt", parser=AsIsParser(), show_progress=True
)
docs = loader.load()
assert len(docs) == 1
# Glob order seems to be deterministic with recursion. If this test becomes flaky,
# we can sort the docs by page content.
assert docs[0].page_content == "This is a test.txt file."
def test_from_filesystem_using_default_parser(toy_dir: str) -> None:
"""Use the default generic parser."""
loader = GenericLoader.from_filesystem(
toy_dir,
suffixes=[".txt"],
)
docs = loader.load()
assert len(docs) == 3
# Glob order seems to be deterministic with recursion. If this test becomes flaky,
# we can sort the docs by page content.
assert docs[0].page_content == "This is a test.txt file."
def test_specifying_parser_via_class_attribute(toy_dir: str) -> None:
class TextLoader(GenericLoader):
"""Parser created for testing purposes."""
@staticmethod
def get_parser(**kwargs: Any) -> BaseBlobParser:
return TextParser()
loader = TextLoader.from_filesystem(toy_dir, suffixes=[".txt"])
docs = loader.load()
assert len(docs) == 3
# Glob order seems to be deterministic with recursion. If this test becomes flaky,
# we can sort the docs by page content.
assert docs[0].page_content == "This is a test.txt file."
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~vectorstores~docarray~in_memory.py | """Wrapper around in-memory storage."""
from __future__ import annotations
from typing import Any, Dict, List, Literal, Optional
from libs.core.langchain_core.embeddings import Embeddings
from langchain_community.vectorstores.docarray.base import (
DocArrayIndex,
_check_docarray_import,
)
class DocArrayInMemorySearch(DocArrayIndex):
"""In-memory `DocArray` storage for exact search.
To use it, you should have the ``docarray`` package with version >=0.32.0 installed.
You can install it with `pip install "langchain[docarray]"`.
"""
@classmethod
def from_params(
cls,
embedding: Embeddings,
metric: Literal[
"cosine_sim", "euclidian_dist", "sgeuclidean_dist"
] = "cosine_sim",
**kwargs: Any,
) -> DocArrayInMemorySearch:
"""Initialize DocArrayInMemorySearch store.
Args:
embedding (Embeddings): Embedding function.
metric (str): metric for exact nearest-neighbor search.
Can be one of: "cosine_sim", "euclidean_dist" and "sqeuclidean_dist".
Defaults to "cosine_sim".
**kwargs: Other keyword arguments to be passed to the get_doc_cls method.
"""
_check_docarray_import()
from docarray.index import InMemoryExactNNIndex
doc_cls = cls._get_doc_cls(space=metric, **kwargs)
doc_index = InMemoryExactNNIndex[doc_cls]() # type: ignore
return cls(doc_index, embedding)
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[Dict[Any, Any]]] = None,
**kwargs: Any,
) -> DocArrayInMemorySearch:
"""Create an DocArrayInMemorySearch store and insert data.
Args:
texts (List[str]): Text data.
embedding (Embeddings): Embedding function.
metadatas (Optional[List[Dict[Any, Any]]]): Metadata for each text
if it exists. Defaults to None.
metric (str): metric for exact nearest-neighbor search.
Can be one of: "cosine_sim", "euclidean_dist" and "sqeuclidean_dist".
Defaults to "cosine_sim".
Returns:
DocArrayInMemorySearch Vector Store
"""
store = cls.from_params(embedding, **kwargs)
store.add_texts(texts=texts, metadatas=metadatas)
return store
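# --- Hedged usage sketch (added for illustration; not part of the upstream module) ---
# Needs the `docarray` extra mentioned in the class docstring. The toy embeddings
# object is an assumption standing in for a real `Embeddings` implementation; the
# store only calls `embed_documents` / `embed_query` on it.
if __name__ == "__main__":
    class _ToyEmbeddings:
        def embed_documents(self, texts):
            return [[float(len(t)), 1.0] for t in texts]
        def embed_query(self, text):
            return [float(len(text)), 1.0]
    store = DocArrayInMemorySearch.from_texts(["foo", "bar"], _ToyEmbeddings())
    print(store.similarity_search("foo", k=1))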
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~openllm.py | from __future__ import annotations
import copy
import json
import logging
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Literal,
Optional,
TypedDict,
Union,
overload,
)
from libs.core.langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.pydantic_v1 import PrivateAttr
if TYPE_CHECKING:
import openllm
ServerType = Literal["http", "grpc"]
class IdentifyingParams(TypedDict):
"""Parameters for identifying a model as a typed dict."""
model_name: str
model_id: Optional[str]
server_url: Optional[str]
server_type: Optional[ServerType]
embedded: bool
llm_kwargs: Dict[str, Any]
logger = logging.getLogger(__name__)
class OpenLLM(LLM):
"""OpenLLM, supporting both in-process model
    instances and remote OpenLLM servers.
To use, you should have the openllm library installed:
.. code-block:: bash
pip install openllm
Learn more at: https://github.com/bentoml/openllm
Example running an LLM model locally managed by OpenLLM:
.. code-block:: python
from langchain_community.llms import OpenLLM
llm = OpenLLM(
model_name='flan-t5',
model_id='google/flan-t5-large',
)
llm("What is the difference between a duck and a goose?")
For all available supported models, you can run 'openllm models'.
    If you have an OpenLLM server running, you can also use it remotely:
.. code-block:: python
from langchain_community.llms import OpenLLM
llm = OpenLLM(server_url='http://localhost:3000')
llm("What is the difference between a duck and a goose?")
"""
model_name: Optional[str] = None
"""Model name to use. See 'openllm models' for all available models."""
model_id: Optional[str] = None
"""Model Id to use. If not provided, will use the default model for the model name.
See 'openllm models' for all available model variants."""
server_url: Optional[str] = None
"""Optional server URL that currently runs a LLMServer with 'openllm start'."""
server_type: ServerType = "http"
"""Optional server type. Either 'http' or 'grpc'."""
embedded: bool = True
"""Initialize this LLM instance in current process by default. Should
only set to False when using in conjunction with BentoML Service."""
llm_kwargs: Dict[str, Any]
"""Keyword arguments to be passed to openllm.LLM"""
_runner: Optional[openllm.LLMRunner] = PrivateAttr(default=None)
_client: Union[
openllm.client.HTTPClient, openllm.client.GrpcClient, None
] = PrivateAttr(default=None)
class Config:
extra = "forbid"
@overload
def __init__(
self,
model_name: Optional[str] = ...,
*,
model_id: Optional[str] = ...,
embedded: Literal[True, False] = ...,
**llm_kwargs: Any,
) -> None:
...
@overload
def __init__(
self,
*,
server_url: str = ...,
server_type: Literal["grpc", "http"] = ...,
**llm_kwargs: Any,
) -> None:
...
def __init__(
self,
model_name: Optional[str] = None,
*,
model_id: Optional[str] = None,
server_url: Optional[str] = None,
server_type: Literal["grpc", "http"] = "http",
embedded: bool = True,
**llm_kwargs: Any,
):
try:
import openllm
except ImportError as e:
raise ImportError(
"Could not import openllm. Make sure to install it with "
"'pip install openllm.'"
) from e
llm_kwargs = llm_kwargs or {}
if server_url is not None:
logger.debug("'server_url' is provided, returning a openllm.Client")
assert (
model_id is None and model_name is None
), "'server_url' and {'model_id', 'model_name'} are mutually exclusive"
client_cls = (
openllm.client.HTTPClient
if server_type == "http"
else openllm.client.GrpcClient
)
client = client_cls(server_url)
super().__init__(
**{
"server_url": server_url,
"server_type": server_type,
"llm_kwargs": llm_kwargs,
}
)
self._runner = None # type: ignore
self._client = client
else:
assert model_name is not None, "Must provide 'model_name' or 'server_url'"
# since the LLM are relatively huge, we don't actually want to convert the
# Runner with embedded when running the server. Instead, we will only set
# the init_local here so that LangChain users can still use the LLM
# in-process. Wrt to BentoML users, setting embedded=False is the expected
# behaviour to invoke the runners remotely.
# We need to also enable ensure_available to download and setup the model.
runner = openllm.Runner(
model_name=model_name,
model_id=model_id,
init_local=embedded,
ensure_available=True,
**llm_kwargs,
)
super().__init__(
**{
"model_name": model_name,
"model_id": model_id,
"embedded": embedded,
"llm_kwargs": llm_kwargs,
}
)
self._client = None # type: ignore
self._runner = runner
@property
def runner(self) -> openllm.LLMRunner:
"""
Get the underlying openllm.LLMRunner instance for integration with BentoML.
Example:
.. code-block:: python
llm = OpenLLM(
model_name='flan-t5',
model_id='google/flan-t5-large',
embedded=False,
)
tools = load_tools(["serpapi", "llm-math"], llm=llm)
agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION
)
svc = bentoml.Service("langchain-openllm", runners=[llm.runner])
@svc.api(input=Text(), output=Text())
def chat(input_text: str):
return agent.run(input_text)
"""
if self._runner is None:
raise ValueError("OpenLLM must be initialized locally with 'model_name'")
return self._runner
@property
def _identifying_params(self) -> IdentifyingParams:
"""Get the identifying parameters."""
if self._client is not None:
self.llm_kwargs.update(self._client._config())
model_name = self._client._metadata()["model_name"]
model_id = self._client._metadata()["model_id"]
else:
if self._runner is None:
raise ValueError("Runner must be initialized.")
model_name = self.model_name
model_id = self.model_id
try:
self.llm_kwargs.update(
json.loads(self._runner.identifying_params["configuration"])
)
except (TypeError, json.JSONDecodeError):
pass
return IdentifyingParams(
server_url=self.server_url,
server_type=self.server_type,
embedded=self.embedded,
llm_kwargs=self.llm_kwargs,
model_name=model_name,
model_id=model_id,
)
@property
def _llm_type(self) -> str:
return "openllm_client" if self._client else "openllm"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
try:
import openllm
except ImportError as e:
raise ImportError(
"Could not import openllm. Make sure to install it with "
"'pip install openllm'."
) from e
copied = copy.deepcopy(self.llm_kwargs)
copied.update(kwargs)
config = openllm.AutoConfig.for_model(
self._identifying_params["model_name"], **copied
)
if self._client:
res = self._client.generate(
prompt, **config.model_dump(flatten=True)
).responses[0]
else:
assert self._runner is not None
res = self._runner(prompt, **config.model_dump(flatten=True))
if isinstance(res, dict) and "text" in res:
return res["text"]
elif isinstance(res, str):
return res
else:
raise ValueError(
"Expected result to be a dict with key 'text' or a string. "
f"Received {res}"
)
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
try:
import openllm
except ImportError as e:
raise ImportError(
"Could not import openllm. Make sure to install it with "
"'pip install openllm'."
) from e
copied = copy.deepcopy(self.llm_kwargs)
copied.update(kwargs)
config = openllm.AutoConfig.for_model(
self._identifying_params["model_name"], **copied
)
if self._client:
async_client = openllm.client.AsyncHTTPClient(self.server_url)
res = (
await async_client.generate(prompt, **config.model_dump(flatten=True))
).responses[0]
else:
assert self._runner is not None
(
prompt,
generate_kwargs,
postprocess_kwargs,
) = self._runner.llm.sanitize_parameters(prompt, **kwargs)
generated_result = await self._runner.generate.async_run(
prompt, **generate_kwargs
)
res = self._runner.llm.postprocess_generate(
prompt, generated_result, **postprocess_kwargs
)
if isinstance(res, dict) and "text" in res:
return res["text"]
elif isinstance(res, str):
return res
else:
raise ValueError(
"Expected result to be a dict with key 'text' or a string. "
f"Received {res}"
)
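# --- Hedged usage sketch (added for illustration; not part of the upstream module) ---
# Client-mode sketch: it assumes an OpenLLM server was already started separately
# (e.g. with `openllm start <model>`) and is reachable at the placeholder URL below.
if __name__ == "__main__":
    llm = OpenLLM(server_url="http://localhost:3000", server_type="http")
    print(llm("What is the difference between a duck and a goose?"))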
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~retrievers~test_wikipedia.py | """Integration test for Wikipedia Retriever."""
from typing import List
import pytest
from libs.core.langchain_core.documents import Document
from langchain_community.retrievers import WikipediaRetriever
@pytest.fixture
def retriever() -> WikipediaRetriever:
return WikipediaRetriever()
def assert_docs(docs: List[Document], all_meta: bool = False) -> None:
for doc in docs:
assert doc.page_content
assert doc.metadata
main_meta = {"title", "summary", "source"}
assert set(doc.metadata).issuperset(main_meta)
if all_meta:
assert len(set(doc.metadata)) > len(main_meta)
else:
assert len(set(doc.metadata)) == len(main_meta)
def test_load_success(retriever: WikipediaRetriever) -> None:
docs = retriever.get_relevant_documents("HUNTER X HUNTER")
assert len(docs) > 1
assert len(docs) <= 3
assert_docs(docs, all_meta=False)
def test_load_success_all_meta(retriever: WikipediaRetriever) -> None:
retriever.load_all_available_meta = True
docs = retriever.get_relevant_documents("HUNTER X HUNTER")
assert len(docs) > 1
assert len(docs) <= 3
assert_docs(docs, all_meta=True)
def test_load_success_init_args() -> None:
retriever = WikipediaRetriever(
lang="en", top_k_results=1, load_all_available_meta=True
)
docs = retriever.get_relevant_documents("HUNTER X HUNTER")
assert len(docs) == 1
assert_docs(docs, all_meta=True)
def test_load_success_init_args_more() -> None:
retriever = WikipediaRetriever(
lang="en", top_k_results=20, load_all_available_meta=False
)
docs = retriever.get_relevant_documents("HUNTER X HUNTER")
assert len(docs) == 20
assert_docs(docs, all_meta=False)
def test_load_no_result(retriever: WikipediaRetriever) -> None:
docs = retriever.get_relevant_documents(
"NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL"
)
assert not docs
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~cache.py | """
.. warning::
Beta Feature!
**Cache** provides an optional caching layer for LLMs.
Cache is useful for two reasons:
- It can save you money by reducing the number of API calls you make to the LLM
provider if you're often requesting the same completion multiple times.
- It can speed up your application by reducing the number of API calls you make
to the LLM provider.
Cache directly competes with Memory. See documentation for Pros and Cons.
**Class hierarchy:**
.. code-block::
BaseCache --> <name>Cache # Examples: InMemoryCache, RedisCache, GPTCache
"""
from __future__ import annotations
import hashlib
import inspect
import json
import logging
import uuid
import warnings
from datetime import timedelta
from functools import lru_cache
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Optional,
Tuple,
Type,
Union,
cast,
)
from sqlalchemy import Column, Integer, String, create_engine, select
from sqlalchemy.engine import Row
from sqlalchemy.engine.base import Engine
from sqlalchemy.orm import Session
try:
from sqlalchemy.orm import declarative_base
except ImportError:
from sqlalchemy.ext.declarative import declarative_base
from libs.core.langchain_core.caches import RETURN_VAL_TYPE, BaseCache
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.language_models.llms import LLM, get_prompts
from libs.core.langchain_core.load.dump import dumps
from libs.core.langchain_core.load.load import loads
from libs.core.langchain_core.outputs import ChatGeneration, Generation
from libs.core.langchain_core.utils import get_from_env
from langchain_community.vectorstores.redis import Redis as RedisVectorstore
logger = logging.getLogger(__file__)
if TYPE_CHECKING:
import momento
from cassandra.cluster import Session as CassandraSession
def _hash(_input: str) -> str:
"""Use a deterministic hashing approach."""
return hashlib.md5(_input.encode()).hexdigest()
def _dump_generations_to_json(generations: RETURN_VAL_TYPE) -> str:
"""Dump generations to json.
Args:
generations (RETURN_VAL_TYPE): A list of language model generations.
Returns:
str: Json representing a list of generations.
Warning: would not work well with arbitrary subclasses of `Generation`
"""
return json.dumps([generation.dict() for generation in generations])
def _load_generations_from_json(generations_json: str) -> RETURN_VAL_TYPE:
"""Load generations from json.
Args:
generations_json (str): A string of json representing a list of generations.
Raises:
ValueError: Could not decode json string to list of generations.
Returns:
RETURN_VAL_TYPE: A list of generations.
Warning: would not work well with arbitrary subclasses of `Generation`
"""
try:
results = json.loads(generations_json)
return [Generation(**generation_dict) for generation_dict in results]
except json.JSONDecodeError:
raise ValueError(
f"Could not decode json to list of generations: {generations_json}"
)
def _dumps_generations(generations: RETURN_VAL_TYPE) -> str:
"""
Serialization for generic RETURN_VAL_TYPE, i.e. sequence of `Generation`
Args:
generations (RETURN_VAL_TYPE): A list of language model generations.
Returns:
str: a single string representing a list of generations.
    This function (+ its counterpart `_loads_generations`) relies on
    the dumps/loads pair with Reviver, so it is able to deal
    with all subclasses of Generation.
    Each item in the list can be `dumps`ed to a string,
    then the whole list of strings is itself json-dumped.
"""
return json.dumps([dumps(_item) for _item in generations])
def _loads_generations(generations_str: str) -> Union[RETURN_VAL_TYPE, None]:
"""
Deserialization of a string into a generic RETURN_VAL_TYPE
(i.e. a sequence of `Generation`).
See `_dumps_generations`, the inverse of this function.
Args:
generations_str (str): A string representing a list of generations.
Compatible with the legacy cache-blob format
Does not raise exceptions for malformed entries, just logs a warning
and returns none: the caller should be prepared for such a cache miss.
Returns:
RETURN_VAL_TYPE: A list of generations.
"""
try:
generations = [loads(_item_str) for _item_str in json.loads(generations_str)]
return generations
except (json.JSONDecodeError, TypeError):
# deferring the (soft) handling to after the legacy-format attempt
pass
try:
gen_dicts = json.loads(generations_str)
# not relying on `_load_generations_from_json` (which could disappear):
generations = [Generation(**generation_dict) for generation_dict in gen_dicts]
logger.warning(
f"Legacy 'Generation' cached blob encountered: '{generations_str}'"
)
return generations
except (json.JSONDecodeError, TypeError):
logger.warning(
f"Malformed/unparsable cached blob encountered: '{generations_str}'"
)
return None
class InMemoryCache(BaseCache):
"""Cache that stores things in memory."""
def __init__(self) -> None:
"""Initialize with empty cache."""
self._cache: Dict[Tuple[str, str], RETURN_VAL_TYPE] = {}
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
return self._cache.get((prompt, llm_string), None)
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
self._cache[(prompt, llm_string)] = return_val
def clear(self, **kwargs: Any) -> None:
"""Clear cache."""
self._cache = {}
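# --- Hedged usage sketch (added for illustration; not part of the upstream module) ---
# Exercises the `BaseCache` interface above directly, without wiring it into an LLM;
# `llm_string` would normally be the serialized model parameters, so a placeholder
# string is used here.
if __name__ == "__main__":
    _demo_cache = InMemoryCache()
    _demo_cache.update("my prompt", "fake-llm-params", [Generation(text="cached answer")])
    print(_demo_cache.lookup("my prompt", "fake-llm-params"))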
Base = declarative_base()
class FullLLMCache(Base): # type: ignore
"""SQLite table for full LLM Cache (all generations)."""
__tablename__ = "full_llm_cache"
prompt = Column(String, primary_key=True)
llm = Column(String, primary_key=True)
idx = Column(Integer, primary_key=True)
response = Column(String)
class SQLAlchemyCache(BaseCache):
"""Cache that uses SQAlchemy as a backend."""
def __init__(self, engine: Engine, cache_schema: Type[FullLLMCache] = FullLLMCache):
"""Initialize by creating all tables."""
self.engine = engine
self.cache_schema = cache_schema
self.cache_schema.metadata.create_all(self.engine)
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
stmt = (
select(self.cache_schema.response)
.where(self.cache_schema.prompt == prompt) # type: ignore
.where(self.cache_schema.llm == llm_string)
.order_by(self.cache_schema.idx)
)
with Session(self.engine) as session:
rows = session.execute(stmt).fetchall()
if rows:
try:
return [loads(row[0]) for row in rows]
except Exception:
logger.warning(
"Retrieving a cache value that could not be deserialized "
"properly. This is likely due to the cache being in an "
"older format. Please recreate your cache to avoid this "
"error."
)
# In a previous life we stored the raw text directly
# in the table, so assume it's in that format.
return [Generation(text=row[0]) for row in rows]
return None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update based on prompt and llm_string."""
items = [
self.cache_schema(prompt=prompt, llm=llm_string, response=dumps(gen), idx=i)
for i, gen in enumerate(return_val)
]
with Session(self.engine) as session, session.begin():
for item in items:
session.merge(item)
def clear(self, **kwargs: Any) -> None:
"""Clear cache."""
with Session(self.engine) as session:
session.query(self.cache_schema).delete()
session.commit()
class SQLiteCache(SQLAlchemyCache):
"""Cache that uses SQLite as a backend."""
def __init__(self, database_path: str = ".langchain.db"):
"""Initialize by creating the engine and all tables."""
engine = create_engine(f"sqlite:///{database_path}")
super().__init__(engine)
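# --- Hedged usage sketch (added for illustration; not part of the upstream module) ---
# Same round trip as the in-memory demo above, but persisted through SQLAlchemy;
# ":memory:" keeps the SQLite database ephemeral and is only a placeholder path.
if __name__ == "__main__":
    _sqlite_cache = SQLiteCache(database_path=":memory:")
    _sqlite_cache.update("my prompt", "fake-llm-params", [Generation(text="cached answer")])
    print(_sqlite_cache.lookup("my prompt", "fake-llm-params"))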
class UpstashRedisCache(BaseCache):
"""Cache that uses Upstash Redis as a backend."""
def __init__(self, redis_: Any, *, ttl: Optional[int] = None):
"""
Initialize an instance of UpstashRedisCache.
This method initializes an object with Upstash Redis caching capabilities.
It takes a `redis_` parameter, which should be an instance of an Upstash Redis
client class, allowing the object to interact with Upstash Redis
server for caching purposes.
Parameters:
redis_: An instance of Upstash Redis client class
(e.g., Redis) used for caching.
This allows the object to communicate with
                the Redis server for caching operations.
ttl (int, optional): Time-to-live (TTL) for cached items in seconds.
If provided, it sets the time duration for how long cached
items will remain valid. If not provided, cached items will not
have an automatic expiration.
"""
try:
from upstash_redis import Redis
except ImportError:
raise ValueError(
"Could not import upstash_redis python package. "
"Please install it with `pip install upstash_redis`."
)
if not isinstance(redis_, Redis):
raise ValueError("Please pass in Upstash Redis object.")
self.redis = redis_
self.ttl = ttl
def _key(self, prompt: str, llm_string: str) -> str:
"""Compute key from prompt and llm_string"""
return _hash(prompt + llm_string)
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
generations = []
# Read from a HASH
results = self.redis.hgetall(self._key(prompt, llm_string))
if results:
for _, text in results.items():
generations.append(Generation(text=text))
return generations if generations else None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
for gen in return_val:
if not isinstance(gen, Generation):
raise ValueError(
"UpstashRedisCache supports caching of normal LLM generations, "
f"got {type(gen)}"
)
if isinstance(gen, ChatGeneration):
warnings.warn(
"NOTE: Generation has not been cached. UpstashRedisCache does not"
" support caching ChatModel outputs."
)
return
# Write to a HASH
key = self._key(prompt, llm_string)
mapping = {
str(idx): generation.text for idx, generation in enumerate(return_val)
}
self.redis.hset(key=key, values=mapping)
if self.ttl is not None:
self.redis.expire(key, self.ttl)
def clear(self, **kwargs: Any) -> None:
"""
Clear cache. If `asynchronous` is True, flush asynchronously.
This flushes the *whole* db.
"""
asynchronous = kwargs.get("asynchronous", False)
if asynchronous:
asynchronous = "ASYNC"
else:
asynchronous = "SYNC"
self.redis.flushdb(flush_type=asynchronous)
class RedisCache(BaseCache):
"""Cache that uses Redis as a backend."""
def __init__(self, redis_: Any, *, ttl: Optional[int] = None):
"""
Initialize an instance of RedisCache.
This method initializes an object with Redis caching capabilities.
It takes a `redis_` parameter, which should be an instance of a Redis
client class, allowing the object to interact with a Redis
server for caching purposes.
Parameters:
redis_ (Any): An instance of a Redis client class
(e.g., redis.Redis) used for caching.
This allows the object to communicate with a
Redis server for caching operations.
ttl (int, optional): Time-to-live (TTL) for cached items in seconds.
If provided, it sets the time duration for how long cached
items will remain valid. If not provided, cached items will not
have an automatic expiration.
"""
try:
from redis import Redis
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
if not isinstance(redis_, Redis):
raise ValueError("Please pass in Redis object.")
self.redis = redis_
self.ttl = ttl
def _key(self, prompt: str, llm_string: str) -> str:
"""Compute key from prompt and llm_string"""
return _hash(prompt + llm_string)
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
generations = []
# Read from a Redis HASH
results = self.redis.hgetall(self._key(prompt, llm_string))
if results:
for _, text in results.items():
try:
generations.append(loads(text))
except Exception:
logger.warning(
"Retrieving a cache value that could not be deserialized "
"properly. This is likely due to the cache being in an "
"older format. Please recreate your cache to avoid this "
"error."
)
# In a previous life we stored the raw text directly
# in the table, so assume it's in that format.
generations.append(Generation(text=text))
return generations if generations else None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
for gen in return_val:
if not isinstance(gen, Generation):
raise ValueError(
"RedisCache only supports caching of normal LLM generations, "
f"got {type(gen)}"
)
# Write to a Redis HASH
key = self._key(prompt, llm_string)
with self.redis.pipeline() as pipe:
pipe.hset(
key,
mapping={
str(idx): dumps(generation)
for idx, generation in enumerate(return_val)
},
)
if self.ttl is not None:
pipe.expire(key, self.ttl)
pipe.execute()
def clear(self, **kwargs: Any) -> None:
"""Clear cache. If `asynchronous` is True, flush asynchronously."""
asynchronous = kwargs.get("asynchronous", False)
self.redis.flushdb(asynchronous=asynchronous, **kwargs)
class RedisSemanticCache(BaseCache):
"""Cache that uses Redis as a vector-store backend."""
# TODO - implement a TTL policy in Redis
DEFAULT_SCHEMA = {
"content_key": "prompt",
"text": [
{"name": "prompt"},
],
"extra": [{"name": "return_val"}, {"name": "llm_string"}],
}
def __init__(
self, redis_url: str, embedding: Embeddings, score_threshold: float = 0.2
):
"""Initialize by passing in the `init` GPTCache func
Args:
redis_url (str): URL to connect to Redis.
embedding (Embedding): Embedding provider for semantic encoding and search.
score_threshold (float, 0.2):
Example:
.. code-block:: python
from langchain_community.globals import set_llm_cache
from langchain_community.cache import RedisSemanticCache
from langchain_community.embeddings import OpenAIEmbeddings
set_llm_cache(RedisSemanticCache(
redis_url="redis://localhost:6379",
embedding=OpenAIEmbeddings()
))
"""
self._cache_dict: Dict[str, RedisVectorstore] = {}
self.redis_url = redis_url
self.embedding = embedding
self.score_threshold = score_threshold
def _index_name(self, llm_string: str) -> str:
hashed_index = _hash(llm_string)
return f"cache:{hashed_index}"
def _get_llm_cache(self, llm_string: str) -> RedisVectorstore:
index_name = self._index_name(llm_string)
# return vectorstore client for the specific llm string
if index_name in self._cache_dict:
return self._cache_dict[index_name]
# create new vectorstore client for the specific llm string
try:
self._cache_dict[index_name] = RedisVectorstore.from_existing_index(
embedding=self.embedding,
index_name=index_name,
redis_url=self.redis_url,
schema=cast(Dict, self.DEFAULT_SCHEMA),
)
except ValueError:
redis = RedisVectorstore(
embedding=self.embedding,
index_name=index_name,
redis_url=self.redis_url,
index_schema=cast(Dict, self.DEFAULT_SCHEMA),
)
_embedding = self.embedding.embed_query(text="test")
redis._create_index_if_not_exist(dim=len(_embedding))
self._cache_dict[index_name] = redis
return self._cache_dict[index_name]
def clear(self, **kwargs: Any) -> None:
"""Clear semantic cache for a given llm_string."""
index_name = self._index_name(kwargs["llm_string"])
if index_name in self._cache_dict:
self._cache_dict[index_name].drop_index(
index_name=index_name, delete_documents=True, redis_url=self.redis_url
)
del self._cache_dict[index_name]
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
llm_cache = self._get_llm_cache(llm_string)
generations: List = []
# Read from a Hash
results = llm_cache.similarity_search(
query=prompt,
k=1,
distance_threshold=self.score_threshold,
)
if results:
for document in results:
try:
generations.extend(loads(document.metadata["return_val"]))
except Exception:
logger.warning(
"Retrieving a cache value that could not be deserialized "
"properly. This is likely due to the cache being in an "
"older format. Please recreate your cache to avoid this "
"error."
)
# In a previous life we stored the raw text directly
# in the table, so assume it's in that format.
generations.extend(
_load_generations_from_json(document.metadata["return_val"])
)
return generations if generations else None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
for gen in return_val:
if not isinstance(gen, Generation):
raise ValueError(
"RedisSemanticCache only supports caching of "
f"normal LLM generations, got {type(gen)}"
)
llm_cache = self._get_llm_cache(llm_string)
metadata = {
"llm_string": llm_string,
"prompt": prompt,
"return_val": dumps([g for g in return_val]),
}
llm_cache.add_texts(texts=[prompt], metadatas=[metadata])
class GPTCache(BaseCache):
"""Cache that uses GPTCache as a backend."""
def __init__(
self,
init_func: Union[
Callable[[Any, str], None], Callable[[Any], None], None
] = None,
):
"""Initialize by passing in init function (default: `None`).
Args:
init_func (Optional[Callable[[Any], None]]): init `GPTCache` function
(default: `None`)
Example:
.. code-block:: python
# Initialize GPTCache with a custom init function
import gptcache
from gptcache.processor.pre import get_prompt
            from gptcache.manager.factory import manager_factory
from langchain_community.globals import set_llm_cache
# Avoid multiple caches using the same file,
            # causing different llm model caches to affect each other
            def init_gptcache(cache_obj: gptcache.Cache, llm: str):
cache_obj.init(
pre_embedding_func=get_prompt,
data_manager=manager_factory(
manager="map",
data_dir=f"map_cache_{llm}"
),
)
set_llm_cache(GPTCache(init_gptcache))
"""
try:
import gptcache # noqa: F401
except ImportError:
raise ImportError(
"Could not import gptcache python package. "
"Please install it with `pip install gptcache`."
)
self.init_gptcache_func: Union[
Callable[[Any, str], None], Callable[[Any], None], None
] = init_func
self.gptcache_dict: Dict[str, Any] = {}
def _new_gptcache(self, llm_string: str) -> Any:
"""New gptcache object"""
from gptcache import Cache
from gptcache.manager.factory import get_data_manager
from gptcache.processor.pre import get_prompt
_gptcache = Cache()
if self.init_gptcache_func is not None:
sig = inspect.signature(self.init_gptcache_func)
if len(sig.parameters) == 2:
self.init_gptcache_func(_gptcache, llm_string) # type: ignore[call-arg]
else:
self.init_gptcache_func(_gptcache) # type: ignore[call-arg]
else:
_gptcache.init(
pre_embedding_func=get_prompt,
data_manager=get_data_manager(data_path=llm_string),
)
self.gptcache_dict[llm_string] = _gptcache
return _gptcache
def _get_gptcache(self, llm_string: str) -> Any:
"""Get a cache object.
When the corresponding llm model cache does not exist, it will be created."""
_gptcache = self.gptcache_dict.get(llm_string, None)
if not _gptcache:
_gptcache = self._new_gptcache(llm_string)
return _gptcache
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up the cache data.
First, retrieve the corresponding cache object using the `llm_string` parameter,
and then retrieve the data from the cache based on the `prompt`.
"""
from gptcache.adapter.api import get
_gptcache = self._get_gptcache(llm_string)
res = get(prompt, cache_obj=_gptcache)
if res:
return [
Generation(**generation_dict) for generation_dict in json.loads(res)
]
return None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache.
First, retrieve the corresponding cache object using the `llm_string` parameter,
and then store the `prompt` and `return_val` in the cache object.
"""
for gen in return_val:
if not isinstance(gen, Generation):
raise ValueError(
"GPTCache only supports caching of normal LLM generations, "
f"got {type(gen)}"
)
from gptcache.adapter.api import put
_gptcache = self._get_gptcache(llm_string)
handled_data = json.dumps([generation.dict() for generation in return_val])
put(prompt, handled_data, cache_obj=_gptcache)
return None
def clear(self, **kwargs: Any) -> None:
"""Clear cache."""
from gptcache import Cache
for gptcache_instance in self.gptcache_dict.values():
gptcache_instance = cast(Cache, gptcache_instance)
gptcache_instance.flush()
self.gptcache_dict.clear()
def _ensure_cache_exists(cache_client: momento.CacheClient, cache_name: str) -> None:
"""Create cache if it doesn't exist.
Raises:
SdkException: Momento service or network error
Exception: Unexpected response
"""
from momento.responses import CreateCache
create_cache_response = cache_client.create_cache(cache_name)
if isinstance(create_cache_response, CreateCache.Success) or isinstance(
create_cache_response, CreateCache.CacheAlreadyExists
):
return None
elif isinstance(create_cache_response, CreateCache.Error):
raise create_cache_response.inner_exception
else:
raise Exception(f"Unexpected response cache creation: {create_cache_response}")
def _validate_ttl(ttl: Optional[timedelta]) -> None:
if ttl is not None and ttl <= timedelta(seconds=0):
raise ValueError(f"ttl must be positive but was {ttl}.")
class MomentoCache(BaseCache):
"""Cache that uses Momento as a backend. See https://gomomento.com/"""
def __init__(
self,
cache_client: momento.CacheClient,
cache_name: str,
*,
ttl: Optional[timedelta] = None,
ensure_cache_exists: bool = True,
):
"""Instantiate a prompt cache using Momento as a backend.
Note: to instantiate the cache client passed to MomentoCache,
you must have a Momento account. See https://gomomento.com/.
Args:
cache_client (CacheClient): The Momento cache client.
cache_name (str): The name of the cache to use to store the data.
ttl (Optional[timedelta], optional): The time to live for the cache items.
                Defaults to None, i.e. use the client default TTL.
ensure_cache_exists (bool, optional): Create the cache if it doesn't
exist. Defaults to True.
Raises:
ImportError: Momento python package is not installed.
TypeError: cache_client is not of type momento.CacheClientObject
            ValueError: ttl is non-null and non-positive
"""
try:
from momento import CacheClient
except ImportError:
raise ImportError(
"Could not import momento python package. "
"Please install it with `pip install momento`."
)
if not isinstance(cache_client, CacheClient):
raise TypeError("cache_client must be a momento.CacheClient object.")
_validate_ttl(ttl)
if ensure_cache_exists:
_ensure_cache_exists(cache_client, cache_name)
self.cache_client = cache_client
self.cache_name = cache_name
self.ttl = ttl
@classmethod
def from_client_params(
cls,
cache_name: str,
ttl: timedelta,
*,
configuration: Optional[momento.config.Configuration] = None,
api_key: Optional[str] = None,
auth_token: Optional[str] = None, # for backwards compatibility
**kwargs: Any,
) -> MomentoCache:
"""Construct cache from CacheClient parameters."""
try:
from momento import CacheClient, Configurations, CredentialProvider
except ImportError:
raise ImportError(
"Could not import momento python package. "
"Please install it with `pip install momento`."
)
if configuration is None:
configuration = Configurations.Laptop.v1()
# Try checking `MOMENTO_AUTH_TOKEN` first for backwards compatibility
try:
api_key = auth_token or get_from_env("auth_token", "MOMENTO_AUTH_TOKEN")
except ValueError:
api_key = api_key or get_from_env("api_key", "MOMENTO_API_KEY")
credentials = CredentialProvider.from_string(api_key)
cache_client = CacheClient(configuration, credentials, default_ttl=ttl)
return cls(cache_client, cache_name, ttl=ttl, **kwargs)
def __key(self, prompt: str, llm_string: str) -> str:
"""Compute cache key from prompt and associated model and settings.
Args:
prompt (str): The prompt run through the language model.
llm_string (str): The language model version and settings.
Returns:
str: The cache key.
"""
return _hash(prompt + llm_string)
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Lookup llm generations in cache by prompt and associated model and settings.
Args:
prompt (str): The prompt run through the language model.
llm_string (str): The language model version and settings.
Raises:
SdkException: Momento service or network error
Returns:
Optional[RETURN_VAL_TYPE]: A list of language model generations.
"""
from momento.responses import CacheGet
generations: RETURN_VAL_TYPE = []
get_response = self.cache_client.get(
self.cache_name, self.__key(prompt, llm_string)
)
if isinstance(get_response, CacheGet.Hit):
value = get_response.value_string
generations = _load_generations_from_json(value)
elif isinstance(get_response, CacheGet.Miss):
pass
elif isinstance(get_response, CacheGet.Error):
raise get_response.inner_exception
return generations if generations else None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Store llm generations in cache.
Args:
prompt (str): The prompt run through the language model.
llm_string (str): The language model string.
return_val (RETURN_VAL_TYPE): A list of language model generations.
Raises:
SdkException: Momento service or network error
Exception: Unexpected response
"""
for gen in return_val:
if not isinstance(gen, Generation):
raise ValueError(
"Momento only supports caching of normal LLM generations, "
f"got {type(gen)}"
)
key = self.__key(prompt, llm_string)
value = _dump_generations_to_json(return_val)
set_response = self.cache_client.set(self.cache_name, key, value, self.ttl)
from momento.responses import CacheSet
if isinstance(set_response, CacheSet.Success):
pass
elif isinstance(set_response, CacheSet.Error):
raise set_response.inner_exception
else:
raise Exception(f"Unexpected response: {set_response}")
def clear(self, **kwargs: Any) -> None:
"""Clear the cache.
Raises:
SdkException: Momento service or network error
"""
from momento.responses import CacheFlush
flush_response = self.cache_client.flush_cache(self.cache_name)
if isinstance(flush_response, CacheFlush.Success):
pass
elif isinstance(flush_response, CacheFlush.Error):
raise flush_response.inner_exception
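# --- Illustrative usage sketch (editorial addition, not part of the library source).
# Wires MomentoCache up as the global LLM cache. The cache name is a placeholder and
# either MOMENTO_API_KEY or the legacy MOMENTO_AUTH_TOKEN is assumed to be set.
from datetime import timedelta
from langchain.globals import set_llm_cache
momento_llm_cache = MomentoCache.from_client_params(
    cache_name="langchain-llm-cache",  # hypothetical cache name
    ttl=timedelta(days=1),
)
set_llm_cache(momento_llm_cache)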
CASSANDRA_CACHE_DEFAULT_TABLE_NAME = "langchain_llm_cache"
CASSANDRA_CACHE_DEFAULT_TTL_SECONDS = None
class CassandraCache(BaseCache):
"""
Cache that uses Cassandra / Astra DB as a backend.
It uses a single Cassandra table.
The lookup keys (which together form the primary key) are:
- prompt, a string
- llm_string, a deterministic str representation of the model parameters.
(needed to prevent same-prompt-different-model collisions)
"""
def __init__(
self,
session: Optional[CassandraSession] = None,
keyspace: Optional[str] = None,
table_name: str = CASSANDRA_CACHE_DEFAULT_TABLE_NAME,
ttl_seconds: Optional[int] = CASSANDRA_CACHE_DEFAULT_TTL_SECONDS,
skip_provisioning: bool = False,
):
"""
Initialize with a ready session and a keyspace name.
Args:
session (cassandra.cluster.Session): an open Cassandra session
keyspace (str): the keyspace to use for storing the cache
table_name (str): name of the Cassandra table to use as cache
ttl_seconds (optional int): time-to-live for cache entries
(default: None, i.e. forever)
"""
try:
from cassio.table import ElasticCassandraTable
except (ImportError, ModuleNotFoundError):
raise ImportError(
"Could not import cassio python package. "
"Please install it with `pip install cassio`."
)
self.session = session
self.keyspace = keyspace
self.table_name = table_name
self.ttl_seconds = ttl_seconds
self.kv_cache = ElasticCassandraTable(
session=self.session,
keyspace=self.keyspace,
table=self.table_name,
keys=["llm_string", "prompt"],
primary_key_type=["TEXT", "TEXT"],
ttl_seconds=self.ttl_seconds,
skip_provisioning=skip_provisioning,
)
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
item = self.kv_cache.get(
llm_string=_hash(llm_string),
prompt=_hash(prompt),
)
if item is not None:
generations = _loads_generations(item["body_blob"])
# this protects against malformed cached items:
if generations is not None:
return generations
else:
return None
else:
return None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
blob = _dumps_generations(return_val)
self.kv_cache.put(
llm_string=_hash(llm_string),
prompt=_hash(prompt),
body_blob=blob,
)
def delete_through_llm(
self, prompt: str, llm: LLM, stop: Optional[List[str]] = None
) -> None:
"""
A wrapper around `delete` with the LLM being passed.
In case the llm(prompt) calls have a `stop` param, you should pass it here
"""
llm_string = get_prompts(
{**llm.dict(), **{"stop": stop}},
[],
)[1]
return self.delete(prompt, llm_string=llm_string)
def delete(self, prompt: str, llm_string: str) -> None:
"""Evict from cache if there's an entry."""
return self.kv_cache.delete(
llm_string=_hash(llm_string),
prompt=_hash(prompt),
)
def clear(self, **kwargs: Any) -> None:
"""Clear cache. This is for all LLMs at once."""
self.kv_cache.clear()
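# --- Illustrative usage sketch (editorial addition, not part of the library source).
# Assumes `cassio` is installed; the Astra DB token and database id are placeholders
# for your own credentials (a plain Cassandra session opened via cassio also works).
import cassio
from langchain.globals import set_llm_cache
cassio.init(token="<ASTRA_DB_APPLICATION_TOKEN>", database_id="<ASTRA_DB_ID>")
set_llm_cache(CassandraCache(ttl_seconds=3600))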
CASSANDRA_SEMANTIC_CACHE_DEFAULT_DISTANCE_METRIC = "dot"
CASSANDRA_SEMANTIC_CACHE_DEFAULT_SCORE_THRESHOLD = 0.85
CASSANDRA_SEMANTIC_CACHE_DEFAULT_TABLE_NAME = "langchain_llm_semantic_cache"
CASSANDRA_SEMANTIC_CACHE_DEFAULT_TTL_SECONDS = None
CASSANDRA_SEMANTIC_CACHE_EMBEDDING_CACHE_SIZE = 16
class CassandraSemanticCache(BaseCache):
"""
Cache that uses Cassandra as a vector-store backend for semantic
(i.e. similarity-based) lookup.
It uses a single (vector) Cassandra table and stores, in principle,
cached values from several LLMs, so the LLM's llm_string is part
of the rows' primary keys.
The similarity is based on one of several distance metrics (default: "dot").
If choosing another metric, the default threshold is to be re-tuned accordingly.
"""
def __init__(
self,
session: Optional[CassandraSession],
keyspace: Optional[str],
embedding: Embeddings,
table_name: str = CASSANDRA_SEMANTIC_CACHE_DEFAULT_TABLE_NAME,
distance_metric: str = CASSANDRA_SEMANTIC_CACHE_DEFAULT_DISTANCE_METRIC,
score_threshold: float = CASSANDRA_SEMANTIC_CACHE_DEFAULT_SCORE_THRESHOLD,
ttl_seconds: Optional[int] = CASSANDRA_SEMANTIC_CACHE_DEFAULT_TTL_SECONDS,
skip_provisioning: bool = False,
):
"""
Initialize the cache with all relevant parameters.
Args:
session (cassandra.cluster.Session): an open Cassandra session
keyspace (str): the keyspace to use for storing the cache
embedding (Embedding): Embedding provider for semantic
encoding and search.
table_name (str): name of the Cassandra (vector) table
to use as cache
distance_metric (str, 'dot'): which measure to adopt for
similarity searches
score_threshold (optional float): numeric value to use as
cutoff for the similarity searches
ttl_seconds (optional int): time-to-live for cache entries
(default: None, i.e. forever)
The default score threshold is tuned to the default metric.
Tune it carefully yourself if switching to another distance metric.
"""
try:
from cassio.table import MetadataVectorCassandraTable
except (ImportError, ModuleNotFoundError):
raise ImportError(
"Could not import cassio python package. "
"Please install it with `pip install cassio`."
)
self.session = session
self.keyspace = keyspace
self.embedding = embedding
self.table_name = table_name
self.distance_metric = distance_metric
self.score_threshold = score_threshold
self.ttl_seconds = ttl_seconds
# The contract for this class has separate lookup and update:
# in order to spare some embedding calculations we cache them between
# the two calls.
# Note: each instance of this class has its own `_get_embedding` with
# its own lru.
@lru_cache(maxsize=CASSANDRA_SEMANTIC_CACHE_EMBEDDING_CACHE_SIZE)
def _cache_embedding(text: str) -> List[float]:
return self.embedding.embed_query(text=text)
self._get_embedding = _cache_embedding
self.embedding_dimension = self._get_embedding_dimension()
self.table = MetadataVectorCassandraTable(
session=self.session,
keyspace=self.keyspace,
table=self.table_name,
primary_key_type=["TEXT"],
vector_dimension=self.embedding_dimension,
ttl_seconds=self.ttl_seconds,
metadata_indexing=("allow", {"_llm_string_hash"}),
skip_provisioning=skip_provisioning,
)
def _get_embedding_dimension(self) -> int:
return len(self._get_embedding(text="This is a sample sentence."))
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
embedding_vector = self._get_embedding(text=prompt)
llm_string_hash = _hash(llm_string)
body = _dumps_generations(return_val)
metadata = {
"_prompt": prompt,
"_llm_string_hash": llm_string_hash,
}
row_id = f"{_hash(prompt)}-{llm_string_hash}"
#
self.table.put(
body_blob=body,
vector=embedding_vector,
row_id=row_id,
metadata=metadata,
)
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
hit_with_id = self.lookup_with_id(prompt, llm_string)
if hit_with_id is not None:
return hit_with_id[1]
else:
return None
def lookup_with_id(
self, prompt: str, llm_string: str
) -> Optional[Tuple[str, RETURN_VAL_TYPE]]:
"""
Look up based on prompt and llm_string.
If there are hits, return (document_id, cached_entry)
"""
prompt_embedding: List[float] = self._get_embedding(text=prompt)
hits = list(
self.table.metric_ann_search(
vector=prompt_embedding,
metadata={"_llm_string_hash": _hash(llm_string)},
n=1,
metric=self.distance_metric,
metric_threshold=self.score_threshold,
)
)
if hits:
hit = hits[0]
generations = _loads_generations(hit["body_blob"])
if generations is not None:
# this protects against malformed cached items:
return (
hit["row_id"],
generations,
)
else:
return None
else:
return None
def lookup_with_id_through_llm(
self, prompt: str, llm: LLM, stop: Optional[List[str]] = None
) -> Optional[Tuple[str, RETURN_VAL_TYPE]]:
llm_string = get_prompts(
{**llm.dict(), **{"stop": stop}},
[],
)[1]
return self.lookup_with_id(prompt, llm_string=llm_string)
def delete_by_document_id(self, document_id: str) -> None:
"""
Given this is a "similarity search" cache, an invalidation pattern
that makes sense is first a lookup to get an ID, and then deleting
with that ID. This is for the second step.
"""
self.table.delete(row_id=document_id)
def clear(self, **kwargs: Any) -> None:
"""Clear the *whole* semantic cache."""
self.table.clear()
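# --- Illustrative usage sketch (editorial addition, not part of the library source).
# Credentials are placeholders and OpenAIEmbeddings is an arbitrary choice; any
# Embeddings implementation can be plugged in.
import cassio
from langchain.globals import set_llm_cache
from langchain_community.embeddings import OpenAIEmbeddings
cassio.init(token="<ASTRA_DB_APPLICATION_TOKEN>", database_id="<ASTRA_DB_ID>")
set_llm_cache(
    CassandraSemanticCache(
        session=None,  # resolved from cassio.init()
        keyspace=None,  # resolved from cassio.init()
        embedding=OpenAIEmbeddings(),
    )
)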
class FullMd5LLMCache(Base): # type: ignore
"""SQLite table for full LLM Cache (all generations)."""
__tablename__ = "full_md5_llm_cache"
id = Column(String, primary_key=True)
prompt_md5 = Column(String, index=True)
llm = Column(String, index=True)
idx = Column(Integer, index=True)
prompt = Column(String)
response = Column(String)
class SQLAlchemyMd5Cache(BaseCache):
"""Cache that uses SQAlchemy as a backend."""
def __init__(
self, engine: Engine, cache_schema: Type[FullMd5LLMCache] = FullMd5LLMCache
):
"""Initialize by creating all tables."""
self.engine = engine
self.cache_schema = cache_schema
self.cache_schema.metadata.create_all(self.engine)
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
rows = self._search_rows(prompt, llm_string)
if rows:
return [loads(row[0]) for row in rows]
return None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update based on prompt and llm_string."""
self._delete_previous(prompt, llm_string)
prompt_md5 = self.get_md5(prompt)
items = [
self.cache_schema(
id=str(uuid.uuid1()),
prompt=prompt,
prompt_md5=prompt_md5,
llm=llm_string,
response=dumps(gen),
idx=i,
)
for i, gen in enumerate(return_val)
]
with Session(self.engine) as session, session.begin():
for item in items:
session.merge(item)
def _delete_previous(self, prompt: str, llm_string: str) -> None:
stmt = (
select(self.cache_schema.response)
.where(self.cache_schema.prompt_md5 == self.get_md5(prompt)) # type: ignore
.where(self.cache_schema.llm == llm_string)
.where(self.cache_schema.prompt == prompt)
.order_by(self.cache_schema.idx)
)
with Session(self.engine) as session, session.begin():
rows = session.execute(stmt).fetchall()
for item in rows:
session.delete(item)
def _search_rows(self, prompt: str, llm_string: str) -> List[Row]:
prompt_md5 = self.get_md5(prompt)
stmt = (
select(self.cache_schema.response)
.where(self.cache_schema.prompt_md5 == prompt_md5)  # type: ignore
.where(self.cache_schema.llm == llm_string)
.where(self.cache_schema.prompt == prompt)
.order_by(self.cache_schema.idx)
)
with Session(self.engine) as session:
return session.execute(stmt).fetchall()
def clear(self, **kwargs: Any) -> None:
"""Clear cache."""
with Session(self.engine) as session:
session.execute(self.cache_schema.delete())
@staticmethod
def get_md5(input_string: str) -> str:
return hashlib.md5(input_string.encode()).hexdigest()
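# --- Illustrative usage sketch (editorial addition, not part of the library source).
# An in-memory SQLite engine is used purely for demonstration; any SQLAlchemy
# engine works.
from sqlalchemy import create_engine
from langchain.globals import set_llm_cache
set_llm_cache(SQLAlchemyMd5Cache(engine=create_engine("sqlite://")))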
ASTRA_DB_CACHE_DEFAULT_COLLECTION_NAME = "langchain_astradb_cache"
class AstraDBCache(BaseCache):
"""
Cache that uses Astra DB as a backend.
It uses a single collection as a kv store.
The lookup keys, combined in the _id of the documents, are:
- prompt, a string
- llm_string, a deterministic str representation of the model parameters.
(needed to prevent same-prompt-different-model collisions)
"""
def __init__(
self,
*,
collection_name: str = ASTRA_DB_CACHE_DEFAULT_COLLECTION_NAME,
token: Optional[str] = None,
api_endpoint: Optional[str] = None,
astra_db_client: Optional[Any] = None, # 'astrapy.db.AstraDB' if passed
namespace: Optional[str] = None,
):
"""
Create an AstraDB cache using a collection for storage.
Args (only keyword-arguments accepted):
collection_name (str): name of the Astra DB collection to create/use.
token (Optional[str]): API token for Astra DB usage.
api_endpoint (Optional[str]): full URL to the API endpoint,
such as "https://<DB-ID>-us-east1.apps.astra.datastax.com".
astra_db_client (Optional[Any]): *alternative to token+api_endpoint*,
you can pass an already-created 'astrapy.db.AstraDB' instance.
namespace (Optional[str]): namespace (aka keyspace) where the
collection is created. Defaults to the database's "default namespace".
"""
try:
from astrapy.db import (
AstraDB as LibAstraDB,
)
except (ImportError, ModuleNotFoundError):
raise ImportError(
"Could not import a recent astrapy python package. "
"Please install it with `pip install --upgrade astrapy`."
)
# Conflicting-arg checks:
if astra_db_client is not None:
if token is not None or api_endpoint is not None:
raise ValueError(
"You cannot pass 'astra_db_client' to AstraDB if passing "
"'token' and 'api_endpoint'."
)
self.collection_name = collection_name
self.token = token
self.api_endpoint = api_endpoint
self.namespace = namespace
if astra_db_client is not None:
self.astra_db = astra_db_client
else:
self.astra_db = LibAstraDB(
token=self.token,
api_endpoint=self.api_endpoint,
namespace=self.namespace,
)
self.collection = self.astra_db.create_collection(
collection_name=self.collection_name,
)
@staticmethod
def _make_id(prompt: str, llm_string: str) -> str:
return f"{_hash(prompt)}#{_hash(llm_string)}"
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
doc_id = self._make_id(prompt, llm_string)
item = self.collection.find_one(
filter={
"_id": doc_id,
},
projection={
"body_blob": 1,
},
)["data"]["document"]
if item is not None:
generations = _loads_generations(item["body_blob"])
# this protects against malformed cached items:
if generations is not None:
return generations
else:
return None
else:
return None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
doc_id = self._make_id(prompt, llm_string)
blob = _dumps_generations(return_val)
self.collection.upsert(
{
"_id": doc_id,
"body_blob": blob,
},
)
def delete_through_llm(
self, prompt: str, llm: LLM, stop: Optional[List[str]] = None
) -> None:
"""
A wrapper around `delete` with the LLM being passed.
In case the llm(prompt) calls have a `stop` param, you should pass it here
"""
llm_string = get_prompts(
{**llm.dict(), **{"stop": stop}},
[],
)[1]
return self.delete(prompt, llm_string=llm_string)
def delete(self, prompt: str, llm_string: str) -> None:
"""Evict from cache if there's an entry."""
doc_id = self._make_id(prompt, llm_string)
return self.collection.delete_one(doc_id)
def clear(self, **kwargs: Any) -> None:
"""Clear cache. This is for all LLMs at once."""
self.astra_db.truncate_collection(self.collection_name)
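# --- Illustrative usage sketch (editorial addition, not part of the library source).
# The endpoint and token values are placeholders for your own Astra DB credentials.
from langchain.globals import set_llm_cache
set_llm_cache(
    AstraDBCache(
        api_endpoint="https://<DB-ID>-us-east1.apps.astra.datastax.com",
        token="<ASTRA_DB_APPLICATION_TOKEN>",
    )
)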
ASTRA_DB_SEMANTIC_CACHE_DEFAULT_THRESHOLD = 0.85
ASTRA_DB_SEMANTIC_CACHE_DEFAULT_COLLECTION_NAME = "langchain_astradb_semantic_cache"
ASTRA_DB_SEMANTIC_CACHE_EMBEDDING_CACHE_SIZE = 16
class AstraDBSemanticCache(BaseCache):
"""
Cache that uses Astra DB as a vector-store backend for semantic
(i.e. similarity-based) lookup.
It uses a single (vector) collection and can store
cached values from several LLMs, so the LLM's 'llm_string' is stored
in the document metadata.
You can choose the preferred similarity (or use the API default) --
remember the threshold might require metric-dependent tuning.
"""
def __init__(
self,
*,
collection_name: str = ASTRA_DB_SEMANTIC_CACHE_DEFAULT_COLLECTION_NAME,
token: Optional[str] = None,
api_endpoint: Optional[str] = None,
astra_db_client: Optional[Any] = None, # 'astrapy.db.AstraDB' if passed
namespace: Optional[str] = None,
embedding: Embeddings,
metric: Optional[str] = None,
similarity_threshold: float = ASTRA_DB_SEMANTIC_CACHE_DEFAULT_THRESHOLD,
):
"""
Initialize the cache with all relevant parameters.
Args:
collection_name (str): name of the Astra DB collection to create/use.
token (Optional[str]): API token for Astra DB usage.
api_endpoint (Optional[str]): full URL to the API endpoint,
such as "https://<DB-ID>-us-east1.apps.astra.datastax.com".
astra_db_client (Optional[Any]): *alternative to token+api_endpoint*,
you can pass an already-created 'astrapy.db.AstraDB' instance.
namespace (Optional[str]): namespace (aka keyspace) where the
collection is created. Defaults to the database's "default namespace".
embedding (Embedding): Embedding provider for semantic
encoding and search.
metric: the function to use for evaluating similarity of text embeddings.
Defaults to 'cosine' (alternatives: 'euclidean', 'dot_product')
similarity_threshold (float, optional): the minimum similarity
for accepting a (semantic-search) match.
The default score threshold is tuned to the default metric.
Tune it carefully yourself if switching to another distance metric.
"""
try:
from astrapy.db import (
AstraDB as LibAstraDB,
)
except (ImportError, ModuleNotFoundError):
raise ImportError(
"Could not import a recent astrapy python package. "
"Please install it with `pip install --upgrade astrapy`."
)
# Conflicting-arg checks:
if astra_db_client is not None:
if token is not None or api_endpoint is not None:
raise ValueError(
"You cannot pass 'astra_db_client' to AstraDB if passing "
"'token' and 'api_endpoint'."
)
self.embedding = embedding
self.metric = metric
self.similarity_threshold = similarity_threshold
# The contract for this class has separate lookup and update:
# in order to spare some embedding calculations we cache them between
# the two calls.
# Note: each instance of this class has its own `_get_embedding` with
# its own lru.
@lru_cache(maxsize=ASTRA_DB_SEMANTIC_CACHE_EMBEDDING_CACHE_SIZE)
def _cache_embedding(text: str) -> List[float]:
return self.embedding.embed_query(text=text)
self._get_embedding = _cache_embedding
self.embedding_dimension = self._get_embedding_dimension()
self.collection_name = collection_name
self.token = token
self.api_endpoint = api_endpoint
self.namespace = namespace
if astra_db_client is not None:
self.astra_db = astra_db_client
else:
self.astra_db = LibAstraDB(
token=self.token,
api_endpoint=self.api_endpoint,
namespace=self.namespace,
)
self.collection = self.astra_db.create_collection(
collection_name=self.collection_name,
dimension=self.embedding_dimension,
metric=self.metric,
)
def _get_embedding_dimension(self) -> int:
return len(self._get_embedding(text="This is a sample sentence."))
@staticmethod
def _make_id(prompt: str, llm_string: str) -> str:
return f"{_hash(prompt)}#{_hash(llm_string)}"
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
doc_id = self._make_id(prompt, llm_string)
llm_string_hash = _hash(llm_string)
embedding_vector = self._get_embedding(text=prompt)
body = _dumps_generations(return_val)
#
self.collection.upsert(
{
"_id": doc_id,
"body_blob": body,
"llm_string_hash": llm_string_hash,
"$vector": embedding_vector,
}
)
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
hit_with_id = self.lookup_with_id(prompt, llm_string)
if hit_with_id is not None:
return hit_with_id[1]
else:
return None
def lookup_with_id(
self, prompt: str, llm_string: str
) -> Optional[Tuple[str, RETURN_VAL_TYPE]]:
"""
Look up based on prompt and llm_string.
If there are hits, return (document_id, cached_entry) for the top hit
"""
prompt_embedding: List[float] = self._get_embedding(text=prompt)
llm_string_hash = _hash(llm_string)
hit = self.collection.vector_find_one(
vector=prompt_embedding,
filter={
"llm_string_hash": llm_string_hash,
},
fields=["body_blob", "_id"],
include_similarity=True,
)
if hit is None or hit["$similarity"] < self.similarity_threshold:
return None
else:
generations = _loads_generations(hit["body_blob"])
if generations is not None:
# this protects against malformed cached items:
return (hit["_id"], generations)
else:
return None
def lookup_with_id_through_llm(
self, prompt: str, llm: LLM, stop: Optional[List[str]] = None
) -> Optional[Tuple[str, RETURN_VAL_TYPE]]:
llm_string = get_prompts(
{**llm.dict(), **{"stop": stop}},
[],
)[1]
return self.lookup_with_id(prompt, llm_string=llm_string)
def delete_by_document_id(self, document_id: str) -> None:
"""
Given this is a "similarity search" cache, an invalidation pattern
that makes sense is first a lookup to get an ID, and then deleting
with that ID. This is for the second step.
"""
self.collection.delete_one(document_id)
def clear(self, **kwargs: Any) -> None:
"""Clear the *whole* semantic cache."""
self.astra_db.truncate_collection(self.collection_name)
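# --- Illustrative usage sketch (editorial addition, not part of the library source).
# Credentials are placeholders; OpenAIEmbeddings is an arbitrary Embeddings choice.
from langchain.globals import set_llm_cache
from langchain_community.embeddings import OpenAIEmbeddings
set_llm_cache(
    AstraDBSemanticCache(
        api_endpoint="https://<DB-ID>-us-east1.apps.astra.datastax.com",
        token="<ASTRA_DB_APPLICATION_TOKEN>",
        embedding=OpenAIEmbeddings(),
    )
)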
| [
"prompt"
] |
2024-01-10 | mth93/langchain | libs~langchain~tests~integration_tests~memory~test_momento.py | """Test Momento chat message history functionality.
To run tests, set the environment variable MOMENTO_API_KEY to a valid
Momento API key. This can be obtained by signing up for a free
Momento account at https://gomomento.com/.
"""
import json
import uuid
from datetime import timedelta
from typing import Iterator
import pytest
from libs.core.langchain_core.messages import message_to_dict
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import MomentoChatMessageHistory
def random_string() -> str:
return str(uuid.uuid4())
@pytest.fixture(scope="function")
def message_history() -> Iterator[MomentoChatMessageHistory]:
from momento import CacheClient, Configurations, CredentialProvider
cache_name = f"langchain-test-cache-{random_string()}"
client = CacheClient(
Configurations.Laptop.v1(),
CredentialProvider.from_environment_variable("MOMENTO_API_KEY"),
default_ttl=timedelta(seconds=30),
)
try:
chat_message_history = MomentoChatMessageHistory(
session_id="my-test-session",
cache_client=client,
cache_name=cache_name,
)
yield chat_message_history
finally:
client.delete_cache(cache_name)
def test_memory_empty_on_new_session(
message_history: MomentoChatMessageHistory,
) -> None:
memory = ConversationBufferMemory(
memory_key="foo", chat_memory=message_history, return_messages=True
)
assert memory.chat_memory.messages == []
def test_memory_with_message_store(message_history: MomentoChatMessageHistory) -> None:
memory = ConversationBufferMemory(
memory_key="baz", chat_memory=message_history, return_messages=True
)
# Add some messages to the memory store
memory.chat_memory.add_ai_message("This is me, the AI")
memory.chat_memory.add_user_message("This is me, the human")
# Verify that the messages are in the store
messages = memory.chat_memory.messages
messages_json = json.dumps([message_to_dict(msg) for msg in messages])
assert "This is me, the AI" in messages_json
assert "This is me, the human" in messages_json
# Verify clearing the store
memory.chat_memory.clear()
assert memory.chat_memory.messages == []
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~chains~mapreduce.py | """Map-reduce chain.
Splits up a document, sends the smaller parts to the LLM with one prompt,
then combines the results with another one.
"""
from __future__ import annotations
from typing import Any, Dict, List, Mapping, Optional
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.language_models import BaseLanguageModel
from libs.core.langchain_core.prompts import BasePromptTemplate
from libs.core.langchain_core.pydantic_v1 import Extra
from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks
from langchain.chains import ReduceDocumentsChain
from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.text_splitter import TextSplitter
class MapReduceChain(Chain):
"""Map-reduce chain."""
combine_documents_chain: BaseCombineDocumentsChain
"""Chain to use to combine documents."""
text_splitter: TextSplitter
"""Text splitter to use."""
input_key: str = "input_text" #: :meta private:
output_key: str = "output_text" #: :meta private:
@classmethod
def from_params(
cls,
llm: BaseLanguageModel,
prompt: BasePromptTemplate,
text_splitter: TextSplitter,
callbacks: Callbacks = None,
combine_chain_kwargs: Optional[Mapping[str, Any]] = None,
reduce_chain_kwargs: Optional[Mapping[str, Any]] = None,
**kwargs: Any,
) -> MapReduceChain:
"""Construct a map-reduce chain that uses the chain for map and reduce."""
llm_chain = LLMChain(llm=llm, prompt=prompt, callbacks=callbacks)
stuff_chain = StuffDocumentsChain(
llm_chain=llm_chain,
callbacks=callbacks,
**(reduce_chain_kwargs if reduce_chain_kwargs else {}),
)
reduce_documents_chain = ReduceDocumentsChain(
combine_documents_chain=stuff_chain
)
combine_documents_chain = MapReduceDocumentsChain(
llm_chain=llm_chain,
reduce_documents_chain=reduce_documents_chain,
callbacks=callbacks,
**(combine_chain_kwargs if combine_chain_kwargs else {}),
)
return cls(
combine_documents_chain=combine_documents_chain,
text_splitter=text_splitter,
callbacks=callbacks,
**kwargs,
)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
def _call(
self,
inputs: Dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
# Split the larger text into smaller chunks.
doc_text = inputs.pop(self.input_key)
texts = self.text_splitter.split_text(doc_text)
docs = [Document(page_content=text) for text in texts]
_inputs: Dict[str, Any] = {
**inputs,
self.combine_documents_chain.input_key: docs,
}
outputs = self.combine_documents_chain.run(
_inputs, callbacks=_run_manager.get_child()
)
return {self.output_key: outputs}
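# --- Illustrative usage sketch (editorial addition, not part of the library source).
# `llm` is assumed to be any pre-constructed BaseLanguageModel; the summary prompt
# wording is arbitrary.
from libs.core.langchain_core.prompts import PromptTemplate
from langchain.text_splitter import CharacterTextSplitter
map_reduce = MapReduceChain.from_params(
    llm=llm,  # assumed pre-constructed language model
    prompt=PromptTemplate.from_template("Write a concise summary of:\n\n{text}"),
    text_splitter=CharacterTextSplitter(),
)
# summary = map_reduce.run(long_document_text)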
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~tools~edenai~image_explicitcontent.py | from __future__ import annotations
import logging
from typing import Optional
from libs.core.langchain_core.callbacks import CallbackManagerForToolRun
from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool
logger = logging.getLogger(__name__)
class EdenAiExplicitImageTool(EdenaiTool):
"""Tool that queries the Eden AI Explicit image detection.
for api reference check edenai documentation:
https://docs.edenai.co/reference/image_explicit_content_create.
To use, you should have
the environment variable ``EDENAI_API_KEY`` set with your API token.
You can find your token here: https://app.edenai.run/admin/account/settings
"""
name = "edenai_image_explicit_content_detection"
description = (
"A wrapper around edenai Services Explicit image detection. "
"""Useful for when you have to extract Explicit Content from images.
it detects adult only content in images,
that is generally inappropriate for people under
the age of 18 and includes nudity, sexual activity,
pornography, violence, gore content, etc."""
"Input should be the string url of the image ."
)
combine_available = True
feature = "image"
subfeature = "explicit_content"
def _parse_json(self, json_data: dict) -> str:
result_str = f"nsfw_likelihood: {json_data['nsfw_likelihood']}\n"
for idx, found_obj in enumerate(json_data["items"]):
label = found_obj["label"].lower()
likelihood = found_obj["likelihood"]
result_str += f"{idx}: {label} likelihood {likelihood},\n"
return result_str[:-2]
def _parse_response(self, json_data: list) -> str:
if len(json_data) == 1:
result = self._parse_json(json_data[0])
else:
for entry in json_data:
if entry.get("provider") == "eden-ai":
result = self._parse_json(entry)
return result
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
query_params = {"file_url": query, "attributes_as_list": False}
return self._call_eden_ai(query_params)
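# --- Illustrative usage sketch (editorial addition, not part of the library source).
# The provider list and image URL are placeholders; EDENAI_API_KEY is assumed to be
# set in the environment.
explicit_content_tool = EdenAiExplicitImageTool(providers=["google"])
# report = explicit_content_tool.run("https://example.com/some-image.jpg")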
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~retrievers~multi_vector.py | from enum import Enum
from typing import Dict, List, Optional
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.pydantic_v1 import Field, root_validator
from libs.core.langchain_core.retrievers import BaseRetriever
from libs.core.langchain_core.stores import BaseStore, ByteStore
from libs.core.langchain_core.vectorstores import VectorStore
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.storage._lc_store import create_kv_docstore
class SearchType(str, Enum):
"""Enumerator of the types of search to perform."""
similarity = "similarity"
"""Similarity search."""
mmr = "mmr"
"""Maximal Marginal Relevance reranking of similarity search."""
class MultiVectorRetriever(BaseRetriever):
"""Retrieve from a set of multiple embeddings for the same document."""
vectorstore: VectorStore
"""The underlying vectorstore to use to store small chunks
and their embedding vectors"""
byte_store: Optional[ByteStore] = None
"""The lower-level backing storage layer for the parent documents"""
docstore: BaseStore[str, Document]
"""The storage interface for the parent documents"""
id_key: str = "doc_id"
search_kwargs: dict = Field(default_factory=dict)
"""Keyword arguments to pass to the search function."""
search_type: SearchType = SearchType.similarity
"""Type of search to perform (similarity / mmr)"""
@root_validator(pre=True)
def shim_docstore(cls, values: Dict) -> Dict:
byte_store = values.get("byte_store")
docstore = values.get("docstore")
if byte_store is not None:
docstore = create_kv_docstore(byte_store)
elif docstore is None:
raise Exception("You must pass a `byte_store` parameter.")
values["docstore"] = docstore
return values
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
"""Get documents relevant to a query.
Args:
query: String to find relevant documents for
run_manager: The callbacks handler to use
Returns:
List of relevant documents
"""
if self.search_type == SearchType.mmr:
sub_docs = self.vectorstore.max_marginal_relevance_search(
query, **self.search_kwargs
)
else:
sub_docs = self.vectorstore.similarity_search(query, **self.search_kwargs)
# We do this to maintain the order of the ids that are returned
ids = []
for d in sub_docs:
if d.metadata[self.id_key] not in ids:
ids.append(d.metadata[self.id_key])
docs = self.docstore.mget(ids)
return [d for d in docs if d is not None]
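# --- Illustrative usage sketch (editorial addition, not part of the library source).
# `vectorstore` is assumed to be a pre-built VectorStore whose child chunks carry a
# "doc_id" metadata key; the parent documents are kept in an in-memory docstore here.
from langchain.storage import InMemoryStore
retriever = MultiVectorRetriever(
    vectorstore=vectorstore,  # assumed pre-built vector store of child chunks
    docstore=InMemoryStore(),
    id_key="doc_id",
)
# parent_docs = retriever.get_relevant_documents("some query")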
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~agent_toolkits~azure_cognitive_services.py | from __future__ import annotations
import sys
from typing import List
from libs.core.langchain_core.tools import BaseTool
from langchain_community.agent_toolkits.base import BaseToolkit
from langchain_community.tools.azure_cognitive_services import (
AzureCogsFormRecognizerTool,
AzureCogsImageAnalysisTool,
AzureCogsSpeech2TextTool,
AzureCogsText2SpeechTool,
AzureCogsTextAnalyticsHealthTool,
)
class AzureCognitiveServicesToolkit(BaseToolkit):
"""Toolkit for Azure Cognitive Services."""
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
tools: List[BaseTool] = [
AzureCogsFormRecognizerTool(),
AzureCogsSpeech2TextTool(),
AzureCogsText2SpeechTool(),
AzureCogsTextAnalyticsHealthTool(),
]
# TODO: Remove check once azure-ai-vision supports MacOS.
if sys.platform.startswith("linux") or sys.platform.startswith("win"):
tools.append(AzureCogsImageAnalysisTool())
return tools
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~vllm.py | from typing import Any, Dict, List, Optional
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.llms import BaseLLM
from libs.core.langchain_core.outputs import Generation, LLMResult
from libs.core.langchain_core.pydantic_v1 import Field, root_validator
from langchain_community.llms.openai import BaseOpenAI
from langchain_community.utils.openai import is_openai_v1
class VLLM(BaseLLM):
"""VLLM language model."""
model: str = ""
"""The name or path of a HuggingFace Transformers model."""
tensor_parallel_size: Optional[int] = 1
"""The number of GPUs to use for distributed execution with tensor parallelism."""
trust_remote_code: Optional[bool] = False
"""Trust remote code (e.g., from HuggingFace) when downloading the model
and tokenizer."""
n: int = 1
"""Number of output sequences to return for the given prompt."""
best_of: Optional[int] = None
"""Number of output sequences that are generated from the prompt."""
presence_penalty: float = 0.0
"""Float that penalizes new tokens based on whether they appear in the
generated text so far"""
frequency_penalty: float = 0.0
"""Float that penalizes new tokens based on their frequency in the
generated text so far"""
temperature: float = 1.0
"""Float that controls the randomness of the sampling."""
top_p: float = 1.0
"""Float that controls the cumulative probability of the top tokens to consider."""
top_k: int = -1
"""Integer that controls the number of top tokens to consider."""
use_beam_search: bool = False
"""Whether to use beam search instead of sampling."""
stop: Optional[List[str]] = None
"""List of strings that stop the generation when they are generated."""
ignore_eos: bool = False
"""Whether to ignore the EOS token and continue generating tokens after
the EOS token is generated."""
max_new_tokens: int = 512
"""Maximum number of tokens to generate per output sequence."""
logprobs: Optional[int] = None
"""Number of log probabilities to return per output token."""
dtype: str = "auto"
"""The data type for the model weights and activations."""
download_dir: Optional[str] = None
"""Directory to download and load the weights. (Default to the default
cache dir of huggingface)"""
vllm_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `vllm.LLM` call not explicitly specified."""
client: Any #: :meta private:
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that python package exists in environment."""
try:
from vllm import LLM as VLLModel
except ImportError:
raise ImportError(
"Could not import vllm python package. "
"Please install it with `pip install vllm`."
)
values["client"] = VLLModel(
model=values["model"],
tensor_parallel_size=values["tensor_parallel_size"],
trust_remote_code=values["trust_remote_code"],
dtype=values["dtype"],
download_dir=values["download_dir"],
**values["vllm_kwargs"],
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling vllm."""
return {
"n": self.n,
"best_of": self.best_of,
"max_tokens": self.max_new_tokens,
"top_k": self.top_k,
"top_p": self.top_p,
"temperature": self.temperature,
"presence_penalty": self.presence_penalty,
"frequency_penalty": self.frequency_penalty,
"stop": self.stop,
"ignore_eos": self.ignore_eos,
"use_beam_search": self.use_beam_search,
"logprobs": self.logprobs,
}
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
from vllm import SamplingParams
# build sampling parameters
params = {**self._default_params, **kwargs, "stop": stop}
sampling_params = SamplingParams(**params)
# call the model
outputs = self.client.generate(prompts, sampling_params)
generations = []
for output in outputs:
text = output.outputs[0].text
generations.append([Generation(text=text)])
return LLMResult(generations=generations)
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "vllm"
class VLLMOpenAI(BaseOpenAI):
"""vLLM OpenAI-compatible API client"""
@classmethod
def is_lc_serializable(cls) -> bool:
return False
@property
def _invocation_params(self) -> Dict[str, Any]:
"""Get the parameters used to invoke the model."""
params: Dict[str, Any] = {
"model": self.model_name,
**self._default_params,
"logit_bias": None,
}
if not is_openai_v1():
params.update(
{
"api_key": self.openai_api_key,
"api_base": self.openai_api_base,
}
)
return params
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "vllm-openai"
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~utilities~google_jobs.py | """Util that calls Google Jobs Search."""
from typing import Any, Dict, Optional, cast
from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra, SecretStr, root_validator
from libs.core.langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
class GoogleJobsAPIWrapper(BaseModel):
"""Wrapper for SerpApi's Google Scholar API
You can create SerpApi.com key by signing up at: https://serpapi.com/users/sign_up.
The wrapper uses the SerpApi.com python package:
https://serpapi.com/integrations/python
To use, you should have the environment variable ``SERPAPI_API_KEY``
set with your API key, or pass `serp_api_key` as a named parameter
to the constructor.
Example:
.. code-block:: python
from langchain_community.utilities import GoogleJobsAPIWrapper
google_Jobs = GoogleJobsAPIWrapper()
google_Jobs.run('langchain')
"""
serp_search_engine: Any
serp_api_key: Optional[SecretStr] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["serp_api_key"] = convert_to_secret_str(
get_from_dict_or_env(values, "serp_api_key", "SERPAPI_API_KEY")
)
try:
from serpapi import SerpApiClient
except ImportError:
raise ImportError(
"google-search-results is not installed. "
"Please install it with `pip install google-search-results"
">=2.4.2`"
)
serp_search_engine = SerpApiClient
values["serp_search_engine"] = serp_search_engine
return values
def run(self, query: str) -> str:
"""Run query through Google Trends with Serpapi"""
# set up query
serpapi_api_key = cast(SecretStr, self.serp_api_key)
params = {
"engine": "google_jobs",
"api_key": serpapi_api_key.get_secret_value(),
"q": query,
}
total_results = []
client = self.serp_search_engine(params)
total_results = client.get_dict()["jobs_results"]
# extract 1 job info:
res_str = ""
for i in range(1):
job = total_results[i]
res_str += (
"\n_______________________________________________"
+ f"\nJob Title: {job['title']}\n"
+ f"Company Name: {job['company_name']}\n"
+ f"Location: {job['location']}\n"
+ f"Description: {job['description']}"
+ "\n_______________________________________________\n"
)
return res_str + "\n"
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~forefrontai.py | from typing import Any, Dict, List, Mapping, Optional
import requests
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.pydantic_v1 import Extra, SecretStr, root_validator
from libs.core.langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from langchain_community.llms.utils import enforce_stop_tokens
class ForefrontAI(LLM):
"""ForefrontAI large language models.
To use, you should have the environment variable ``FOREFRONTAI_API_KEY``
set with your API key.
Example:
.. code-block:: python
from langchain_community.llms import ForefrontAI
forefrontai = ForefrontAI(endpoint_url="")
"""
endpoint_url: str = ""
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
length: int = 256
"""The maximum number of tokens to generate in the completion."""
top_p: float = 1.0
"""Total probability mass of tokens to consider at each step."""
top_k: int = 40
"""The number of highest probability vocabulary tokens to
keep for top-k-filtering."""
repetition_penalty: int = 1
"""Penalizes repeated tokens according to frequency."""
forefrontai_api_key: SecretStr
base_url: Optional[str] = None
"""Base url to use, if None decides based on model name."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
values["forefrontai_api_key"] = convert_to_secret_str(
get_from_dict_or_env(values, "forefrontai_api_key", "FOREFRONTAI_API_KEY")
)
return values
@property
def _default_params(self) -> Mapping[str, Any]:
"""Get the default parameters for calling ForefrontAI API."""
return {
"temperature": self.temperature,
"length": self.length,
"top_p": self.top_p,
"top_k": self.top_k,
"repetition_penalty": self.repetition_penalty,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"endpoint_url": self.endpoint_url}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "forefrontai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to ForefrontAI's complete endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
forefrontai = ForefrontAI(endpoint_url="YOUR_ENDPOINT_URL")
response = forefrontai("Tell me a joke.")
"""
auth_value = f"Bearer {self.forefrontai_api_key.get_secret_value()}"
response = requests.post(
url=self.endpoint_url,
headers={
"Authorization": auth_value,
"Content-Type": "application/json",
},
json={"text": prompt, **self._default_params, **kwargs},
)
response_json = response.json()
text = response_json["result"][0]["completion"]
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~replicate.py | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.outputs import GenerationChunk
from libs.core.langchain_core.pydantic_v1 import Extra, Field, root_validator
from libs.core.langchain_core.utils import get_from_dict_or_env
if TYPE_CHECKING:
from replicate.prediction import Prediction
logger = logging.getLogger(__name__)
class Replicate(LLM):
"""Replicate models.
To use, you should have the ``replicate`` python package installed,
and the environment variable ``REPLICATE_API_TOKEN`` set with your API token.
You can find your token here: https://replicate.com/account
The model param is required, but any other model parameters can also
be passed in with the format model_kwargs={model_param: value, ...}
Example:
.. code-block:: python
from langchain_community.llms import Replicate
replicate = Replicate(
model=(
"stability-ai/stable-diffusion: "
"27b93a2413e7f36cd83da926f3656280b2931564ff050bf9575f1fdf9bcd7478",
),
model_kwargs={"image_dimensions": "512x512"}
)
"""
model: str
model_kwargs: Dict[str, Any] = Field(default_factory=dict, alias="input")
replicate_api_token: Optional[str] = None
prompt_key: Optional[str] = None
version_obj: Any = Field(default=None, exclude=True)
"""Optionally pass in the model version object during initialization to avoid
having to make an extra API call to retrieve it during streaming. NOTE: not
serializable, is excluded from serialization.
"""
streaming: bool = False
"""Whether to stream the results."""
stop: List[str] = Field(default_factory=list)
"""Stop sequences to early-terminate generation."""
class Config:
"""Configuration for this pydantic config."""
allow_population_by_field_name = True
extra = Extra.forbid
@property
def lc_secrets(self) -> Dict[str, str]:
return {"replicate_api_token": "REPLICATE_API_TOKEN"}
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "llms", "replicate"]
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
input = values.pop("input", {})
if input:
logger.warning(
"Init param `input` is deprecated, please use `model_kwargs` instead."
)
extra = {**values.pop("model_kwargs", {}), **input}
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
replicate_api_token = get_from_dict_or_env(
values, "replicate_api_token", "REPLICATE_API_TOKEN"
)
values["replicate_api_token"] = replicate_api_token
return values
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model,
"model_kwargs": self.model_kwargs,
}
@property
def _llm_type(self) -> str:
"""Return type of model."""
return "replicate"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to replicate endpoint."""
if self.streaming:
completion: Optional[str] = None
for chunk in self._stream(
prompt, stop=stop, run_manager=run_manager, **kwargs
):
if completion is None:
completion = chunk.text
else:
completion += chunk.text
else:
prediction = self._create_prediction(prompt, **kwargs)
prediction.wait()
if prediction.status == "failed":
raise RuntimeError(prediction.error)
if isinstance(prediction.output, str):
completion = prediction.output
else:
completion = "".join(prediction.output)
assert completion is not None
stop_conditions = stop or self.stop
for s in stop_conditions:
if s in completion:
completion = completion[: completion.find(s)]
return completion
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
prediction = self._create_prediction(prompt, **kwargs)
stop_conditions = stop or self.stop
stop_condition_reached = False
current_completion: str = ""
for output in prediction.output_iterator():
current_completion += output
# test for stop conditions, if specified
for s in stop_conditions:
if s in current_completion:
prediction.cancel()
stop_condition_reached = True
# Potentially some tokens that should still be yielded before ending
# stream.
stop_index = max(output.find(s), 0)
output = output[:stop_index]
if not output:
break
if output:
yield GenerationChunk(text=output)
if run_manager:
run_manager.on_llm_new_token(
output,
verbose=self.verbose,
)
if stop_condition_reached:
break
def _create_prediction(self, prompt: str, **kwargs: Any) -> Prediction:
try:
import replicate as replicate_python
except ImportError:
raise ImportError(
"Could not import replicate python package. "
"Please install it with `pip install replicate`."
)
# get the model and version
if self.version_obj is None:
model_str, version_str = self.model.split(":")
model = replicate_python.models.get(model_str)
self.version_obj = model.versions.get(version_str)
if self.prompt_key is None:
# sort through the openapi schema to get the name of the first input
input_properties = sorted(
self.version_obj.openapi_schema["components"]["schemas"]["Input"][
"properties"
].items(),
key=lambda item: item[1].get("x-order", 0),
)
self.prompt_key = input_properties[0][0]
input_: Dict = {
self.prompt_key: prompt,
**self.model_kwargs,
**kwargs,
}
return replicate_python.predictions.create(
version=self.version_obj, input=input_
)
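# --- Illustrative streaming sketch (editorial addition, not part of the library source).
# The model identifier is a placeholder of the form "owner/name:version-hash" and
# REPLICATE_API_TOKEN is assumed to be set in the environment.
llm = Replicate(
    model="<owner>/<model-name>:<version-hash>",  # hypothetical identifier
    streaming=True,
    stop=["User:"],
)
# for chunk in llm.stream("Tell me a short story about a robot."):
#     print(chunk, end="", flush=True)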
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~tools~gmail~get_thread.py | from typing import Dict, Optional, Type
from libs.core.langchain_core.callbacks import CallbackManagerForToolRun
from libs.core.langchain_core.pydantic_v1 import BaseModel, Field
from langchain_community.tools.gmail.base import GmailBaseTool
class GetThreadSchema(BaseModel):
"""Input for GetMessageTool."""
# From https://support.google.com/mail/answer/7190?hl=en
thread_id: str = Field(
...,
description="The thread ID.",
)
class GmailGetThread(GmailBaseTool):
"""Tool that gets a thread by ID from Gmail."""
name: str = "get_gmail_thread"
description: str = (
"Use this tool to search for email messages."
" The input must be a valid Gmail query."
" The output is a JSON list of messages."
)
args_schema: Type[GetThreadSchema] = GetThreadSchema
def _run(
self,
thread_id: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> Dict:
"""Run the tool."""
query = self.api_resource.users().threads().get(userId="me", id=thread_id)
thread_data = query.execute()
if not isinstance(thread_data, dict):
raise ValueError("The output of the query must be a list.")
messages = thread_data["messages"]
thread_data["messages"] = []
keys_to_keep = ["id", "snippet"]
# TODO: Parse body.
for message in messages:
thread_data["messages"].append(
{k: message[k] for k in keys_to_keep if k in message}
)
return thread_data
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~embeddings~nlpcloud.py | from typing import Any, Dict, List
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.pydantic_v1 import BaseModel, root_validator
from libs.core.langchain_core.utils import get_from_dict_or_env
class NLPCloudEmbeddings(BaseModel, Embeddings):
"""NLP Cloud embedding models.
To use, you should have the nlpcloud python package installed
Example:
.. code-block:: python
from langchain_community.embeddings import NLPCloudEmbeddings
embeddings = NLPCloudEmbeddings()
"""
model_name: str # Define model_name as a class attribute
gpu: bool # Define gpu as a class attribute
client: Any #: :meta private:
def __init__(
self,
model_name: str = "paraphrase-multilingual-mpnet-base-v2",
gpu: bool = False,
**kwargs: Any,
) -> None:
super().__init__(model_name=model_name, gpu=gpu, **kwargs)
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
nlpcloud_api_key = get_from_dict_or_env(
values, "nlpcloud_api_key", "NLPCLOUD_API_KEY"
)
try:
import nlpcloud
values["client"] = nlpcloud.Client(
values["model_name"], nlpcloud_api_key, gpu=values["gpu"], lang="en"
)
except ImportError:
raise ImportError(
"Could not import nlpcloud python package. "
"Please install it with `pip install nlpcloud`."
)
return values
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed a list of documents using NLP Cloud.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
return self.client.embeddings(texts)["embeddings"]
def embed_query(self, text: str) -> List[float]:
"""Embed a query using NLP Cloud.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self.client.embeddings([text])["embeddings"][0]
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~tools~convert_to_openai.py | from libs.core.langchain_core.tools import BaseTool
from langchain_community.utils.openai_functions import (
FunctionDescription,
ToolDescription,
convert_pydantic_to_openai_function,
)
def format_tool_to_openai_function(tool: BaseTool) -> FunctionDescription:
"""Format tool into the OpenAI function API."""
if tool.args_schema:
return convert_pydantic_to_openai_function(
tool.args_schema, name=tool.name, description=tool.description
)
else:
return {
"name": tool.name,
"description": tool.description,
"parameters": {
# This is a hack to get around the fact that some tools
# do not expose an args_schema, and expect an argument
# which is a string.
# And Open AI does not support an array type for the
# parameters.
"properties": {
"__arg1": {"title": "__arg1", "type": "string"},
},
"required": ["__arg1"],
"type": "object",
},
}
def format_tool_to_openai_tool(tool: BaseTool) -> ToolDescription:
"""Format tool into the OpenAI function API."""
function = format_tool_to_openai_function(tool)
return {"type": "function", "function": function}
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~chains~graph_qa~sparql.py | """
Question answering over an RDF or OWL graph using SPARQL.
"""
from __future__ import annotations
from typing import Any, Dict, List, Optional
from libs.core.langchain_core.language_models import BaseLanguageModel
from libs.core.langchain_core.prompts.base import BasePromptTemplate
from libs.core.langchain_core.pydantic_v1 import Field
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.graph_qa.prompts import (
SPARQL_GENERATION_SELECT_PROMPT,
SPARQL_GENERATION_UPDATE_PROMPT,
SPARQL_INTENT_PROMPT,
SPARQL_QA_PROMPT,
)
from langchain.chains.llm import LLMChain
from langchain.graphs.rdf_graph import RdfGraph
class GraphSparqlQAChain(Chain):
"""Question-answering against an RDF or OWL graph by generating SPARQL statements.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that delete or mutate data, or that read
sensitive data, if it is prompted to do so and such data is present
in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
graph: RdfGraph = Field(exclude=True)
sparql_generation_select_chain: LLMChain
sparql_generation_update_chain: LLMChain
sparql_intent_chain: LLMChain
qa_chain: LLMChain
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
@property
def input_keys(self) -> List[str]:
return [self.input_key]
@property
def output_keys(self) -> List[str]:
_output_keys = [self.output_key]
return _output_keys
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
*,
qa_prompt: BasePromptTemplate = SPARQL_QA_PROMPT,
sparql_select_prompt: BasePromptTemplate = SPARQL_GENERATION_SELECT_PROMPT,
sparql_update_prompt: BasePromptTemplate = SPARQL_GENERATION_UPDATE_PROMPT,
sparql_intent_prompt: BasePromptTemplate = SPARQL_INTENT_PROMPT,
**kwargs: Any,
) -> GraphSparqlQAChain:
"""Initialize from LLM."""
qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
sparql_generation_select_chain = LLMChain(llm=llm, prompt=sparql_select_prompt)
sparql_generation_update_chain = LLMChain(llm=llm, prompt=sparql_update_prompt)
sparql_intent_chain = LLMChain(llm=llm, prompt=sparql_intent_prompt)
return cls(
qa_chain=qa_chain,
sparql_generation_select_chain=sparql_generation_select_chain,
sparql_generation_update_chain=sparql_generation_update_chain,
sparql_intent_chain=sparql_intent_chain,
**kwargs,
)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
"""
Generate a SPARQL query, use it to retrieve a response from the graph
database, and answer the question.
"""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
prompt = inputs[self.input_key]
_intent = self.sparql_intent_chain.run({"prompt": prompt}, callbacks=callbacks)
intent = _intent.strip()
if "SELECT" in intent and "UPDATE" not in intent:
sparql_generation_chain = self.sparql_generation_select_chain
intent = "SELECT"
elif "UPDATE" in intent and "SELECT" not in intent:
sparql_generation_chain = self.sparql_generation_update_chain
intent = "UPDATE"
else:
raise ValueError(
"I am sorry, but this prompt seems to fit none of the currently "
"supported SPARQL query types, i.e., SELECT and UPDATE."
)
_run_manager.on_text("Identified intent:", end="\n", verbose=self.verbose)
_run_manager.on_text(intent, color="green", end="\n", verbose=self.verbose)
generated_sparql = sparql_generation_chain.run(
{"prompt": prompt, "schema": self.graph.get_schema}, callbacks=callbacks
)
_run_manager.on_text("Generated SPARQL:", end="\n", verbose=self.verbose)
_run_manager.on_text(
generated_sparql, color="green", end="\n", verbose=self.verbose
)
if intent == "SELECT":
context = self.graph.query(generated_sparql)
_run_manager.on_text("Full Context:", end="\n", verbose=self.verbose)
_run_manager.on_text(
str(context), color="green", end="\n", verbose=self.verbose
)
result = self.qa_chain(
{"prompt": prompt, "context": context},
callbacks=callbacks,
)
res = result[self.qa_chain.output_key]
elif intent == "UPDATE":
self.graph.update(generated_sparql)
res = "Successfully inserted triples into the graph."
else:
raise ValueError("Unsupported SPARQL query type.")
return {self.output_key: res}
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~obsidian.py | import functools
import logging
import re
from pathlib import Path
from typing import Any, Dict, List
import yaml
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class ObsidianLoader(BaseLoader):
"""Load `Obsidian` files from directory."""
FRONT_MATTER_REGEX = re.compile(r"^---\n(.*?)\n---\n", re.DOTALL)
TEMPLATE_VARIABLE_REGEX = re.compile(r"{{(.*?)}}", re.DOTALL)
TAG_REGEX = re.compile(r"[^\S\/]#([a-zA-Z_]+[-_/\w]*)")
DATAVIEW_LINE_REGEX = re.compile(r"^\s*(\w+)::\s*(.*)$", re.MULTILINE)
DATAVIEW_INLINE_BRACKET_REGEX = re.compile(r"\[(\w+)::\s*(.*)\]", re.MULTILINE)
DATAVIEW_INLINE_PAREN_REGEX = re.compile(r"\((\w+)::\s*(.*)\)", re.MULTILINE)
def __init__(
self, path: str, encoding: str = "UTF-8", collect_metadata: bool = True
):
"""Initialize with a path.
Args:
path: Path to the directory containing the Obsidian files.
encoding: Charset encoding, defaults to "UTF-8"
collect_metadata: Whether to collect metadata from the front matter.
Defaults to True.
"""
self.file_path = path
self.encoding = encoding
self.collect_metadata = collect_metadata
def _replace_template_var(
self, placeholders: Dict[str, str], match: re.Match
) -> str:
"""Replace a template variable with a placeholder."""
placeholder = f"__TEMPLATE_VAR_{len(placeholders)}__"
placeholders[placeholder] = match.group(1)
return placeholder
def _restore_template_vars(self, obj: Any, placeholders: Dict[str, str]) -> Any:
"""Restore template variables replaced with placeholders to original values."""
if isinstance(obj, str):
for placeholder, value in placeholders.items():
obj = obj.replace(placeholder, f"{{{{{value}}}}}")
elif isinstance(obj, dict):
for key, value in obj.items():
obj[key] = self._restore_template_vars(value, placeholders)
elif isinstance(obj, list):
for i, item in enumerate(obj):
obj[i] = self._restore_template_vars(item, placeholders)
return obj
def _parse_front_matter(self, content: str) -> dict:
"""Parse front matter metadata from the content and return it as a dict."""
if not self.collect_metadata:
return {}
match = self.FRONT_MATTER_REGEX.search(content)
if not match:
return {}
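# Swap {{template}} variables for opaque placeholders so YAML parsing does not fail on them; they are restored afterwards.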
placeholders: Dict[str, str] = {}
replace_template_var = functools.partial(
self._replace_template_var, placeholders
)
front_matter_text = self.TEMPLATE_VARIABLE_REGEX.sub(
replace_template_var, match.group(1)
)
try:
front_matter = yaml.safe_load(front_matter_text)
front_matter = self._restore_template_vars(front_matter, placeholders)
# If tags are a string, split them into a list
if "tags" in front_matter and isinstance(front_matter["tags"], str):
front_matter["tags"] = front_matter["tags"].split(", ")
return front_matter
except yaml.parser.ParserError:
logger.warning("Encountered non-yaml frontmatter")
return {}
def _to_langchain_compatible_metadata(self, metadata: dict) -> dict:
"""Convert a dictionary to a compatible with langchain."""
result = {}
for key, value in metadata.items():
if type(value) in {str, int, float}:
result[key] = value
else:
result[key] = str(value)
return result
def _parse_document_tags(self, content: str) -> set:
"""Return a set of all tags in within the document."""
if not self.collect_metadata:
return set()
match = self.TAG_REGEX.findall(content)
if not match:
return set()
return {tag for tag in match}
def _parse_dataview_fields(self, content: str) -> dict:
"""Parse obsidian dataview plugin fields from the content and return it
as a dict."""
if not self.collect_metadata:
return {}
return {
**{
match[0]: match[1]
for match in self.DATAVIEW_LINE_REGEX.findall(content)
},
**{
match[0]: match[1]
for match in self.DATAVIEW_INLINE_PAREN_REGEX.findall(content)
},
**{
match[0]: match[1]
for match in self.DATAVIEW_INLINE_BRACKET_REGEX.findall(content)
},
}
def _remove_front_matter(self, content: str) -> str:
"""Remove front matter metadata from the given content."""
if not self.collect_metadata:
return content
return self.FRONT_MATTER_REGEX.sub("", content)
def load(self) -> List[Document]:
"""Load documents."""
paths = list(Path(self.file_path).glob("**/*.md"))
docs = []
for path in paths:
with open(path, encoding=self.encoding) as f:
text = f.read()
front_matter = self._parse_front_matter(text)
tags = self._parse_document_tags(text)
dataview_fields = self._parse_dataview_fields(text)
text = self._remove_front_matter(text)
metadata = {
"source": str(path.name),
"path": str(path),
"created": path.stat().st_ctime,
"last_modified": path.stat().st_mtime,
"last_accessed": path.stat().st_atime,
**self._to_langchain_compatible_metadata(front_matter),
**dataview_fields,
}
if tags or front_matter.get("tags"):
metadata["tags"] = ",".join(
tags | set(front_matter.get("tags", []) or [])
)
docs.append(Document(page_content=text, metadata=metadata))
return docs
| [
"{{(.*?)}}"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~iugu.py | import json
import urllib.request
from typing import List, Optional
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.utils import get_from_env, stringify_dict
from langchain_community.document_loaders.base import BaseLoader
IUGU_ENDPOINTS = {
"invoices": "https://api.iugu.com/v1/invoices",
"customers": "https://api.iugu.com/v1/customers",
"charges": "https://api.iugu.com/v1/charges",
"subscriptions": "https://api.iugu.com/v1/subscriptions",
"plans": "https://api.iugu.com/v1/plans",
}
class IuguLoader(BaseLoader):
"""Load from `IUGU`."""
def __init__(self, resource: str, api_token: Optional[str] = None) -> None:
"""Initialize the IUGU resource.
Args:
resource: The name of the resource to fetch.
api_token: The IUGU API token to use.
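Example (illustrative only; the token below is a placeholder):
.. code-block:: python
loader = IuguLoader(resource="invoices", api_token="YOUR_IUGU_API_TOKEN")
docs = loader.load()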
"""
self.resource = resource
api_token = api_token or get_from_env("api_token", "IUGU_API_TOKEN")
self.headers = {"Authorization": f"Bearer {api_token}"}
def _make_request(self, url: str) -> List[Document]:
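# Call the IUGU endpoint with the bearer token and wrap the JSON payload in a single Document.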
request = urllib.request.Request(url, headers=self.headers)
with urllib.request.urlopen(request) as response:
json_data = json.loads(response.read().decode())
text = stringify_dict(json_data)
metadata = {"source": url}
return [Document(page_content=text, metadata=metadata)]
def _get_resource(self) -> List[Document]:
endpoint = IUGU_ENDPOINTS.get(self.resource)
if endpoint is None:
return []
return self._make_request(endpoint)
def load(self) -> List[Document]:
return self._get_resource()
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~vectorstores~zilliz.py | from __future__ import annotations
import logging
from typing import Any, Dict, List, Optional
from libs.core.langchain_core.embeddings import Embeddings
from langchain_community.vectorstores.milvus import Milvus
logger = logging.getLogger(__name__)
class Zilliz(Milvus):
"""`Zilliz` vector store.
You need to have `pymilvus` installed and a
running Zilliz database.
See the following documentation for how to run a Zilliz instance:
https://docs.zilliz.com/docs/create-cluster
IF USING L2/IP metric IT IS HIGHLY SUGGESTED TO NORMALIZE YOUR DATA.
Args:
embedding_function (Embeddings): Function used to embed the text.
collection_name (str): Which Zilliz collection to use. Defaults to
"LangChainCollection".
connection_args (Optional[dict[str, any]]): The connection args used for
this class come in the form of a dict.
consistency_level (str): The consistency level to use for a collection.
Defaults to "Session".
index_params (Optional[dict]): Which index params to use. Defaults to
HNSW/AUTOINDEX depending on service.
search_params (Optional[dict]): Which search params to use. Defaults to
default of index.
drop_old (Optional[bool]): Whether to drop the current collection. Defaults
to False.
The connection args used for this class come in the form of a dict;
here are a few of the options:
address (str): The actual address of Zilliz
instance. Example address: "localhost:19530"
uri (str): The uri of Zilliz instance. Example uri:
"https://in03-ba4234asae.api.gcp-us-west1.zillizcloud.com",
host (str): The host of Zilliz instance. Default at "localhost",
PyMilvus will fill in the default host if only port is provided.
port (str/int): The port of Zilliz instance. Default at 19530, PyMilvus
will fill in the default port if only host is provided.
user (str): Use which user to connect to Zilliz instance. If user and
password are provided, we will add related header in every RPC call.
password (str): Required when user is provided. The password
corresponding to the user.
token (str): API key, for serverless clusters which can be used as
replacements for user and password.
secure (bool): Default is false. If set to true, tls will be enabled.
client_key_path (str): If use tls two-way authentication, need to
write the client.key path.
client_pem_path (str): If use tls two-way authentication, need to
write the client.pem path.
ca_pem_path (str): If use tls two-way authentication, need to write
the ca.pem path.
server_pem_path (str): If use tls one-way authentication, need to
write the server.pem path.
server_name (str): If use tls, need to write the common name.
Example:
.. code-block:: python
from langchain_community.vectorstores import Zilliz
from langchain_community.embeddings import OpenAIEmbeddings
embedding = OpenAIEmbeddings()
# Connect to a Zilliz instance
zilliz_store = Zilliz(
embedding_function = embedding,
collection_name = "LangChainCollection",
connection_args = {
"uri": "https://in03-ba4234asae.api.gcp-us-west1.zillizcloud.com",
"user": "temp",
"password": "temp",
"token": "temp", # API key as replacements for user and password
"secure": True
},
drop_old=True,
)
Raises:
ValueError: If the pymilvus python package is not installed.
"""
def _create_index(self) -> None:
"""Create a index on the collection"""
from pymilvus import Collection, MilvusException
if isinstance(self.col, Collection) and self._get_index() is None:
try:
# If no index params, use a default AutoIndex based one
if self.index_params is None:
self.index_params = {
"metric_type": "L2",
"index_type": "AUTOINDEX",
"params": {},
}
try:
self.col.create_index(
self._vector_field,
index_params=self.index_params,
using=self.alias,
)
# If default did not work, most likely Milvus self-hosted
except MilvusException:
# Use HNSW based index
self.index_params = {
"metric_type": "L2",
"index_type": "HNSW",
"params": {"M": 8, "efConstruction": 64},
}
self.col.create_index(
self._vector_field,
index_params=self.index_params,
using=self.alias,
)
logger.debug(
"Successfully created an index on collection: %s",
self.collection_name,
)
except MilvusException as e:
logger.error(
"Failed to create an index on collection: %s", self.collection_name
)
raise e
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
collection_name: str = "LangChainCollection",
connection_args: Optional[Dict[str, Any]] = None,
consistency_level: str = "Session",
index_params: Optional[dict] = None,
search_params: Optional[dict] = None,
drop_old: bool = False,
**kwargs: Any,
) -> Zilliz:
"""Create a Zilliz collection, indexes it with HNSW, and insert data.
Args:
texts (List[str]): Text data.
embedding (Embeddings): Embedding function.
metadatas (Optional[List[dict]]): Metadata for each text if it exists.
Defaults to None.
collection_name (str, optional): Collection name to use. Defaults to
"LangChainCollection".
connection_args (dict[str, Any], optional): Connection args to use. Defaults
to DEFAULT_MILVUS_CONNECTION.
consistency_level (str, optional): Which consistency level to use. Defaults
to "Session".
index_params (Optional[dict], optional): Which index_params to use.
Defaults to None.
search_params (Optional[dict], optional): Which search params to use.
Defaults to None.
drop_old (Optional[bool], optional): Whether to drop the collection with
that name if it exists. Defaults to False.
Returns:
Zilliz: Zilliz Vector Store
"""
vector_db = cls(
embedding_function=embedding,
collection_name=collection_name,
connection_args=connection_args or {},
consistency_level=consistency_level,
index_params=index_params,
search_params=search_params,
drop_old=drop_old,
**kwargs,
)
vector_db.add_texts(texts=texts, metadatas=metadatas)
return vector_db
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~rspace.py | import os
from typing import Any, Dict, Iterator, List, Optional, Union
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.utils import get_from_dict_or_env
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.document_loaders.base import BaseLoader
class RSpaceLoader(BaseLoader):
"""Load content from RSpace notebooks, folders, documents or PDF Gallery files.
RSpace documents map one-to-one onto Langchain Documents. PDFs are imported using PyPDF.
Requirements are rspace_client (`pip install rspace_client`) and PyPDF if importing
PDF docs (`pip install pypdf`).
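Example (a minimal sketch; the global ID, API key and URL are placeholder values):
.. code-block:: python
from langchain_community.document_loaders.rspace import RSpaceLoader
loader = RSpaceLoader(
global_id="SD12344",
api_key="YOUR_RSPACE_API_KEY",
url="https://your-rspace-instance.example.com",
)
docs = loader.load()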
"""
def __init__(
self, global_id: str, api_key: Optional[str] = None, url: Optional[str] = None
):
"""api_key: RSpace API key - can also be supplied as environment variable
'RSPACE_API_KEY'
url: str
The URL of your RSpace instance - can also be supplied as environment
variable 'RSPACE_URL'
global_id: str
The global ID of the resource to load,
e.g. 'SD12344' (a single document); 'GL12345'(A PDF file in the gallery);
'NB4567' (a notebook); 'FL12244' (a folder)
"""
args: Dict[str, Optional[str]] = {
"api_key": api_key,
"url": url,
"global_id": global_id,
}
verified_args: Dict[str, str] = RSpaceLoader.validate_environment(args)
self.api_key = verified_args["api_key"]
self.url = verified_args["url"]
self.global_id: str = verified_args["global_id"]
@classmethod
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that API key and URL exist in environment."""
values["api_key"] = get_from_dict_or_env(values, "api_key", "RSPACE_API_KEY")
values["url"] = get_from_dict_or_env(values, "url", "RSPACE_URL")
if "global_id" not in values or values["global_id"] is None:
raise ValueError(
"No value supplied for global_id. Please supply an RSpace global ID"
)
return values
def _create_rspace_client(self) -> Any:
"""Create a RSpace client."""
try:
from rspace_client.eln import eln, field_content
except ImportError:
raise ImportError("You must run " "`pip install rspace_client`")
try:
eln = eln.ELNClient(self.url, self.api_key)
eln.get_status()
except Exception:
raise Exception(
f"Unable to initialize client - is url {self.url} or "
f"api key correct?"
)
return eln, field_content.FieldContent
def _get_doc(self, cli: Any, field_content: Any, d_id: Union[str, int]) -> Document:
content = ""
doc = cli.get_document(d_id)
content += f"<h2>{doc['name']}<h2/>"
for f in doc["fields"]:
content += f"{f['name']}\n"
fc = field_content(f["content"])
content += fc.get_text()
content += "\n"
return Document(
metadata={"source": f"rspace: {doc['name']}-{doc['globalId']}"},
page_content=content,
)
def _load_structured_doc(self) -> Iterator[Document]:
cli, field_content = self._create_rspace_client()
yield self._get_doc(cli, field_content, self.global_id)
def _load_folder_tree(self) -> Iterator[Document]:
cli, field_content = self._create_rspace_client()
if self.global_id:
docs_in_folder = cli.list_folder_tree(
folder_id=self.global_id[2:], typesToInclude=["document"]
)
doc_ids: List[int] = [d["id"] for d in docs_in_folder["records"]]
for doc_id in doc_ids:
yield self._get_doc(cli, field_content, doc_id)
def _load_pdf(self) -> Iterator[Document]:
cli, field_content = self._create_rspace_client()
file_info = cli.get_file_info(self.global_id)
_, ext = os.path.splitext(file_info["name"])
if ext.lower() == ".pdf":
outfile = f"{self.global_id}.pdf"
cli.download_file(self.global_id, outfile)
pdf_loader = PyPDFLoader(outfile)
for pdf in pdf_loader.lazy_load():
pdf.metadata["rspace_src"] = self.global_id
yield pdf
def lazy_load(self) -> Iterator[Document]:
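# Dispatch on the global ID prefix: GL is a gallery PDF, SD a structured document, FL/NB a folder or notebook tree.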
if self.global_id and "GL" in self.global_id:
for d in self._load_pdf():
yield d
elif self.global_id and "SD" in self.global_id:
for d in self._load_structured_doc():
yield d
elif self.global_id and self.global_id[0:2] in ["FL", "NB"]:
for d in self._load_folder_tree():
yield d
else:
raise ValueError("Unknown global ID type")
def load(self) -> List[Document]:
return list(self.lazy_load())
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~vectorstores~docarray~test_hnsw.py | from pathlib import Path
from typing import List
import numpy as np
import pytest
from libs.core.langchain_core.documents import Document
from langchain_community.vectorstores.docarray import DocArrayHnswSearch
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
@pytest.fixture
def texts() -> List[str]:
return ["foo", "bar", "baz"]
def test_from_texts(texts: List[str], tmp_path: Path) -> None:
"""Test end to end construction and simple similarity search."""
docsearch = DocArrayHnswSearch.from_texts(
texts,
FakeEmbeddings(),
work_dir=str(tmp_path),
n_dim=10,
)
assert docsearch.doc_index.num_docs() == 3
def test_add_texts(texts: List[str], tmp_path: Path) -> None:
"""Test end to end construction and simple similarity search."""
docsearch = DocArrayHnswSearch.from_params(
work_dir=str(tmp_path),
n_dim=10,
embedding=FakeEmbeddings(),
)
docsearch.add_texts(texts=texts)
assert docsearch.doc_index.num_docs() == 3
@pytest.mark.parametrize("metric", ["cosine", "l2"])
def test_sim_search(metric: str, texts: List[str], tmp_path: Path) -> None:
"""Test end to end construction and simple similarity search."""
hnsw_vec_store = DocArrayHnswSearch.from_texts(
texts,
FakeEmbeddings(),
work_dir=str(tmp_path),
n_dim=10,
dist_metric=metric,
index=True,
)
output = hnsw_vec_store.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
@pytest.mark.parametrize("metric", ["cosine", "l2"])
def test_sim_search_all_configurations(
metric: str, texts: List[str], tmp_path: Path
) -> None:
"""Test end to end construction and simple similarity search."""
hnsw_vec_store = DocArrayHnswSearch.from_texts(
texts,
FakeEmbeddings(),
work_dir=str(tmp_path),
dist_metric=metric,
n_dim=10,
max_elements=8,
ef_construction=300,
ef=20,
M=8,
allow_replace_deleted=False,
num_threads=2,
)
output = hnsw_vec_store.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
@pytest.mark.parametrize("metric", ["cosine", "l2"])
def test_sim_search_by_vector(metric: str, texts: List[str], tmp_path: Path) -> None:
"""Test end to end construction and similarity search by vector."""
hnsw_vec_store = DocArrayHnswSearch.from_texts(
texts,
FakeEmbeddings(),
work_dir=str(tmp_path),
n_dim=10,
dist_metric=metric,
)
embedding = [1.0] * 10
output = hnsw_vec_store.similarity_search_by_vector(embedding, k=1)
assert output == [Document(page_content="bar")]
@pytest.mark.parametrize("metric", ["cosine", "l2"])
def test_sim_search_with_score(metric: str, tmp_path: Path) -> None:
"""Test end to end construction and similarity search with score."""
texts = ["foo", "bar", "baz"]
hnsw_vec_store = DocArrayHnswSearch.from_texts(
texts,
FakeEmbeddings(),
work_dir=str(tmp_path),
n_dim=10,
dist_metric=metric,
)
output = hnsw_vec_store.similarity_search_with_score("foo", k=1)
assert len(output) == 1
out_doc, out_score = output[0]
assert out_doc == Document(page_content="foo")
assert np.isclose(out_score, 0.0, atol=1.0e-6)
def test_sim_search_with_score_for_ip_metric(texts: List[str], tmp_path: Path) -> None:
"""
Test end to end construction and similarity search with score for ip
(inner-product) metric.
"""
hnsw_vec_store = DocArrayHnswSearch.from_texts(
texts,
FakeEmbeddings(),
work_dir=str(tmp_path),
n_dim=10,
dist_metric="ip",
)
output = hnsw_vec_store.similarity_search_with_score("foo", k=3)
assert len(output) == 3
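# Every hit shares the same score: FakeEmbeddings gives each document an identical inner product with the query, and hnswlib reports the ip distance as 1 minus that dot product.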
for result in output:
assert result[1] == -8.0
@pytest.mark.parametrize("metric", ["cosine", "l2"])
def test_max_marginal_relevance_search(
metric: str, texts: List[str], tmp_path: Path
) -> None:
"""Test MRR search."""
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = DocArrayHnswSearch.from_texts(
texts,
FakeEmbeddings(),
metadatas=metadatas,
dist_metric=metric,
work_dir=str(tmp_path),
n_dim=10,
)
output = docsearch.max_marginal_relevance_search("foo", k=2, fetch_k=3)
assert output == [
Document(page_content="foo", metadata={"page": 0}),
Document(page_content="bar", metadata={"page": 1}),
]
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~callbacks~comet_ml_callback.py | import tempfile
from copy import deepcopy
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Sequence
from libs.core.langchain_core.agents import AgentAction, AgentFinish
from libs.core.langchain_core.callbacks import BaseCallbackHandler
from libs.core.langchain_core.outputs import Generation, LLMResult
import langchain_community
from langchain_community.callbacks.utils import (
BaseMetadataCallbackHandler,
flatten_dict,
import_pandas,
import_spacy,
import_textstat,
)
LANGCHAIN_MODEL_NAME = "langchain-model"
def import_comet_ml() -> Any:
"""Import comet_ml and raise an error if it is not installed."""
try:
import comet_ml # noqa: F401
except ImportError:
raise ImportError(
"To use the comet_ml callback manager you need to have the "
"`comet_ml` python package installed. Please install it with"
" `pip install comet_ml`"
)
return comet_ml
def _get_experiment(
workspace: Optional[str] = None, project_name: Optional[str] = None
) -> Any:
comet_ml = import_comet_ml()
experiment = comet_ml.Experiment( # type: ignore
workspace=workspace,
project_name=project_name,
)
return experiment
def _fetch_text_complexity_metrics(text: str) -> dict:
textstat = import_textstat()
text_complexity_metrics = {
"flesch_reading_ease": textstat.flesch_reading_ease(text),
"flesch_kincaid_grade": textstat.flesch_kincaid_grade(text),
"smog_index": textstat.smog_index(text),
"coleman_liau_index": textstat.coleman_liau_index(text),
"automated_readability_index": textstat.automated_readability_index(text),
"dale_chall_readability_score": textstat.dale_chall_readability_score(text),
"difficult_words": textstat.difficult_words(text),
"linsear_write_formula": textstat.linsear_write_formula(text),
"gunning_fog": textstat.gunning_fog(text),
"text_standard": textstat.text_standard(text),
"fernandez_huerta": textstat.fernandez_huerta(text),
"szigriszt_pazos": textstat.szigriszt_pazos(text),
"gutierrez_polini": textstat.gutierrez_polini(text),
"crawford": textstat.crawford(text),
"gulpease_index": textstat.gulpease_index(text),
"osman": textstat.osman(text),
}
return text_complexity_metrics
def _summarize_metrics_for_generated_outputs(metrics: Sequence) -> dict:
pd = import_pandas()
metrics_df = pd.DataFrame(metrics)
metrics_summary = metrics_df.describe()
return metrics_summary.to_dict()
class CometCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
"""Callback Handler that logs to Comet.
Parameters:
task_type (str): The type of comet_ml task such as "inference",
"testing" or "qc"
workspace (str): The comet_ml workspace
project_name (str): The comet_ml project name
tags (list): Tags to add to the task
name (str): Name of the comet_ml task
visualizations (list): The visualizations to log, if any
complexity_metrics (bool): Whether to log complexity metrics
custom_metrics (Callable): Function to compute custom metrics on generations
stream_logs (bool): Whether to stream callback actions to Comet
This handler utilizes the associated callback method, formats
the input of each callback function with metadata regarding the state of the LLM run,
and adds the response to the list of records for both the {method}_records and
action. It then logs the response to Comet.
"""
def __init__(
self,
task_type: Optional[str] = "inference",
workspace: Optional[str] = None,
project_name: Optional[str] = None,
tags: Optional[Sequence] = None,
name: Optional[str] = None,
visualizations: Optional[List[str]] = None,
complexity_metrics: bool = False,
custom_metrics: Optional[Callable] = None,
stream_logs: bool = True,
) -> None:
"""Initialize callback handler."""
self.comet_ml = import_comet_ml()
super().__init__()
self.task_type = task_type
self.workspace = workspace
self.project_name = project_name
self.tags = tags
self.visualizations = visualizations
self.complexity_metrics = complexity_metrics
self.custom_metrics = custom_metrics
self.stream_logs = stream_logs
self.temp_dir = tempfile.TemporaryDirectory()
self.experiment = _get_experiment(workspace, project_name)
self.experiment.log_other("Created from", "langchain")
if tags:
self.experiment.add_tags(tags)
self.name = name
if self.name:
self.experiment.set_name(self.name)
warning = (
"The comet_ml callback is currently in beta and is subject to change "
"based on updates to `langchain`. Please report any issues to "
"https://github.com/comet-ml/issue-tracking/issues with the tag "
"`langchain`."
)
self.comet_ml.LOGGER.warning(warning)
self.callback_columns: list = []
self.action_records: list = []
self.complexity_metrics = complexity_metrics
if self.visualizations:
spacy = import_spacy()
self.nlp = spacy.load("en_core_web_sm")
else:
self.nlp = None
def _init_resp(self) -> Dict:
return {k: None for k in self.callback_columns}
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Run when LLM starts."""
self.step += 1
self.llm_starts += 1
self.starts += 1
metadata = self._init_resp()
metadata.update({"action": "on_llm_start"})
metadata.update(flatten_dict(serialized))
metadata.update(self.get_custom_callback_meta())
for prompt in prompts:
prompt_resp = deepcopy(metadata)
prompt_resp["prompts"] = prompt
self.on_llm_start_records.append(prompt_resp)
self.action_records.append(prompt_resp)
if self.stream_logs:
self._log_stream(prompt, metadata, self.step)
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run when LLM generates a new token."""
self.step += 1
self.llm_streams += 1
resp = self._init_resp()
resp.update({"action": "on_llm_new_token", "token": token})
resp.update(self.get_custom_callback_meta())
self.action_records.append(resp)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
self.step += 1
self.llm_ends += 1
self.ends += 1
metadata = self._init_resp()
metadata.update({"action": "on_llm_end"})
metadata.update(flatten_dict(response.llm_output or {}))
metadata.update(self.get_custom_callback_meta())
output_complexity_metrics = []
output_custom_metrics = []
for prompt_idx, generations in enumerate(response.generations):
for gen_idx, generation in enumerate(generations):
text = generation.text
generation_resp = deepcopy(metadata)
generation_resp.update(flatten_dict(generation.dict()))
complexity_metrics = self._get_complexity_metrics(text)
if complexity_metrics:
output_complexity_metrics.append(complexity_metrics)
generation_resp.update(complexity_metrics)
custom_metrics = self._get_custom_metrics(
generation, prompt_idx, gen_idx
)
if custom_metrics:
output_custom_metrics.append(custom_metrics)
generation_resp.update(custom_metrics)
if self.stream_logs:
self._log_stream(text, metadata, self.step)
self.action_records.append(generation_resp)
self.on_llm_end_records.append(generation_resp)
self._log_text_metrics(output_complexity_metrics, step=self.step)
self._log_text_metrics(output_custom_metrics, step=self.step)
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when LLM errors."""
self.step += 1
self.errors += 1
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Run when chain starts running."""
self.step += 1
self.chain_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update({"action": "on_chain_start"})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
for chain_input_key, chain_input_val in inputs.items():
if isinstance(chain_input_val, str):
input_resp = deepcopy(resp)
if self.stream_logs:
self._log_stream(chain_input_val, resp, self.step)
input_resp.update({chain_input_key: chain_input_val})
self.action_records.append(input_resp)
else:
self.comet_ml.LOGGER.warning(
f"Unexpected data format provided! "
f"Input Value for {chain_input_key} will not be logged"
)
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
self.step += 1
self.chain_ends += 1
self.ends += 1
resp = self._init_resp()
resp.update({"action": "on_chain_end"})
resp.update(self.get_custom_callback_meta())
for chain_output_key, chain_output_val in outputs.items():
if isinstance(chain_output_val, str):
output_resp = deepcopy(resp)
if self.stream_logs:
self._log_stream(chain_output_val, resp, self.step)
output_resp.update({chain_output_key: chain_output_val})
self.action_records.append(output_resp)
else:
self.comet_ml.LOGGER.warning(
f"Unexpected data format provided! "
f"Output Value for {chain_output_key} will not be logged"
)
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when chain errors."""
self.step += 1
self.errors += 1
def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> None:
"""Run when tool starts running."""
self.step += 1
self.tool_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update({"action": "on_tool_start"})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
if self.stream_logs:
self._log_stream(input_str, resp, self.step)
resp.update({"input_str": input_str})
self.action_records.append(resp)
def on_tool_end(self, output: str, **kwargs: Any) -> None:
"""Run when tool ends running."""
self.step += 1
self.tool_ends += 1
self.ends += 1
resp = self._init_resp()
resp.update({"action": "on_tool_end"})
resp.update(self.get_custom_callback_meta())
if self.stream_logs:
self._log_stream(output, resp, self.step)
resp.update({"output": output})
self.action_records.append(resp)
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when tool errors."""
self.step += 1
self.errors += 1
def on_text(self, text: str, **kwargs: Any) -> None:
"""
Run on arbitrary text.
"""
self.step += 1
self.text_ctr += 1
resp = self._init_resp()
resp.update({"action": "on_text"})
resp.update(self.get_custom_callback_meta())
if self.stream_logs:
self._log_stream(text, resp, self.step)
resp.update({"text": text})
self.action_records.append(resp)
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run when agent ends running."""
self.step += 1
self.agent_ends += 1
self.ends += 1
resp = self._init_resp()
output = finish.return_values["output"]
log = finish.log
resp.update({"action": "on_agent_finish", "log": log})
resp.update(self.get_custom_callback_meta())
if self.stream_logs:
self._log_stream(output, resp, self.step)
resp.update({"output": output})
self.action_records.append(resp)
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action."""
self.step += 1
self.tool_starts += 1
self.starts += 1
tool = action.tool
tool_input = str(action.tool_input)
log = action.log
resp = self._init_resp()
resp.update({"action": "on_agent_action", "log": log, "tool": tool})
resp.update(self.get_custom_callback_meta())
if self.stream_logs:
self._log_stream(tool_input, resp, self.step)
resp.update({"tool_input": tool_input})
self.action_records.append(resp)
def _get_complexity_metrics(self, text: str) -> dict:
"""Compute text complexity metrics using textstat.
Parameters:
text (str): The text to analyze.
Returns:
(dict): A dictionary containing the complexity metrics.
"""
resp = {}
if self.complexity_metrics:
text_complexity_metrics = _fetch_text_complexity_metrics(text)
resp.update(text_complexity_metrics)
return resp
def _get_custom_metrics(
self, generation: Generation, prompt_idx: int, gen_idx: int
) -> dict:
"""Compute Custom Metrics for an LLM Generated Output
Args:
generation (LLMResult): Output generation from an LLM
prompt_idx (int): List index of the input prompt
gen_idx (int): List index of the generated output
Returns:
dict: A dictionary containing the custom metrics.
"""
resp = {}
if self.custom_metrics:
custom_metrics = self.custom_metrics(generation, prompt_idx, gen_idx)
resp.update(custom_metrics)
return resp
def flush_tracker(
self,
langchain_asset: Any = None,
task_type: Optional[str] = "inference",
workspace: Optional[str] = None,
project_name: Optional[str] = "comet-langchain-demo",
tags: Optional[Sequence] = None,
name: Optional[str] = None,
visualizations: Optional[List[str]] = None,
complexity_metrics: bool = False,
custom_metrics: Optional[Callable] = None,
finish: bool = False,
reset: bool = False,
) -> None:
"""Flush the tracker and setup the session.
Everything after this will be a new table.
Args:
name: Name of the performed session so far so it is identifiable
langchain_asset: The langchain asset to save.
finish: Whether to finish the run.
Returns:
None
"""
self._log_session(langchain_asset)
if langchain_asset:
try:
self._log_model(langchain_asset)
except Exception:
self.comet_ml.LOGGER.error(
"Failed to export agent or LLM to Comet",
exc_info=True,
extra={"show_traceback": True},
)
if finish:
self.experiment.end()
if reset:
self._reset(
task_type,
workspace,
project_name,
tags,
name,
visualizations,
complexity_metrics,
custom_metrics,
)
def _log_stream(self, prompt: str, metadata: dict, step: int) -> None:
self.experiment.log_text(prompt, metadata=metadata, step=step)
def _log_model(self, langchain_asset: Any) -> None:
model_parameters = self._get_llm_parameters(langchain_asset)
self.experiment.log_parameters(model_parameters, prefix="model")
langchain_asset_path = Path(self.temp_dir.name, "model.json")
model_name = self.name if self.name else LANGCHAIN_MODEL_NAME
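# Chains expose save(); agents only expose save_agent(), so fall back to it when save() is unavailable or fails.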
try:
if hasattr(langchain_asset, "save"):
langchain_asset.save(langchain_asset_path)
self.experiment.log_model(model_name, str(langchain_asset_path))
except (ValueError, AttributeError, NotImplementedError) as e:
if hasattr(langchain_asset, "save_agent"):
langchain_asset.save_agent(langchain_asset_path)
self.experiment.log_model(model_name, str(langchain_asset_path))
else:
self.comet_ml.LOGGER.error(
f"{e}"
" Could not save Langchain Asset "
f"for {langchain_asset.__class__.__name__}"
)
def _log_session(self, langchain_asset: Optional[Any] = None) -> None:
try:
llm_session_df = self._create_session_analysis_dataframe(langchain_asset)
# Log the cleaned dataframe as a table
self.experiment.log_table("langchain-llm-session.csv", llm_session_df)
except Exception:
self.comet_ml.LOGGER.warning(
"Failed to log session data to Comet",
exc_info=True,
extra={"show_traceback": True},
)
try:
metadata = {"langchain_version": str(langchain_community.__version__)}
# Log the langchain low-level records as a JSON file directly
self.experiment.log_asset_data(
self.action_records, "langchain-action_records.json", metadata=metadata
)
except Exception:
self.comet_ml.LOGGER.warning(
"Failed to log session data to Comet",
exc_info=True,
extra={"show_traceback": True},
)
try:
self._log_visualizations(llm_session_df)
except Exception:
self.comet_ml.LOGGER.warning(
"Failed to log visualizations to Comet",
exc_info=True,
extra={"show_traceback": True},
)
def _log_text_metrics(self, metrics: Sequence[dict], step: int) -> None:
if not metrics:
return
metrics_summary = _summarize_metrics_for_generated_outputs(metrics)
for key, value in metrics_summary.items():
self.experiment.log_metrics(value, prefix=key, step=step)
def _log_visualizations(self, session_df: Any) -> None:
if not (self.visualizations and self.nlp):
return
spacy = import_spacy()
prompts = session_df["prompts"].tolist()
outputs = session_df["text"].tolist()
for idx, (prompt, output) in enumerate(zip(prompts, outputs)):
doc = self.nlp(output)
sentence_spans = list(doc.sents)
for visualization in self.visualizations:
try:
html = spacy.displacy.render(
sentence_spans,
style=visualization,
options={"compact": True},
jupyter=False,
page=True,
)
self.experiment.log_asset_data(
html,
name=f"langchain-viz-{visualization}-{idx}.html",
metadata={"prompt": prompt},
step=idx,
)
except Exception as e:
self.comet_ml.LOGGER.warning(
e, exc_info=True, extra={"show_traceback": True}
)
return
def _reset(
self,
task_type: Optional[str] = None,
workspace: Optional[str] = None,
project_name: Optional[str] = None,
tags: Optional[Sequence] = None,
name: Optional[str] = None,
visualizations: Optional[List[str]] = None,
complexity_metrics: bool = False,
custom_metrics: Optional[Callable] = None,
) -> None:
_task_type = task_type if task_type else self.task_type
_workspace = workspace if workspace else self.workspace
_project_name = project_name if project_name else self.project_name
_tags = tags if tags else self.tags
_name = name if name else self.name
_visualizations = visualizations if visualizations else self.visualizations
_complexity_metrics = (
complexity_metrics if complexity_metrics else self.complexity_metrics
)
_custom_metrics = custom_metrics if custom_metrics else self.custom_metrics
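# Re-run __init__ so a fresh Comet experiment is created while this handler instance is reused.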
self.__init__( # type: ignore
task_type=_task_type,
workspace=_workspace,
project_name=_project_name,
tags=_tags,
name=_name,
visualizations=_visualizations,
complexity_metrics=_complexity_metrics,
custom_metrics=_custom_metrics,
)
self.reset_callback_meta()
self.temp_dir = tempfile.TemporaryDirectory()
def _create_session_analysis_dataframe(self, langchain_asset: Any = None) -> dict:
pd = import_pandas()
llm_parameters = self._get_llm_parameters(langchain_asset)
num_generations_per_prompt = llm_parameters.get("n", 1)
llm_start_records_df = pd.DataFrame(self.on_llm_start_records)
# Repeat each input row based on the number of outputs generated per prompt
llm_start_records_df = llm_start_records_df.loc[
llm_start_records_df.index.repeat(num_generations_per_prompt)
].reset_index(drop=True)
llm_end_records_df = pd.DataFrame(self.on_llm_end_records)
llm_session_df = pd.merge(
llm_start_records_df,
llm_end_records_df,
left_index=True,
right_index=True,
suffixes=["_llm_start", "_llm_end"],
)
return llm_session_df
def _get_llm_parameters(self, langchain_asset: Any = None) -> dict:
if not langchain_asset:
return {}
try:
if hasattr(langchain_asset, "agent"):
llm_parameters = langchain_asset.agent.llm_chain.llm.dict()
elif hasattr(langchain_asset, "llm_chain"):
llm_parameters = langchain_asset.llm_chain.llm.dict()
elif hasattr(langchain_asset, "llm"):
llm_parameters = langchain_asset.llm.dict()
else:
llm_parameters = langchain_asset.dict()
except Exception:
return {}
return llm_parameters
| [
"n"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~tools~ainetwork~value.py | import builtins
import json
from typing import Optional, Type, Union
from libs.core.langchain_core.callbacks import AsyncCallbackManagerForToolRun
from libs.core.langchain_core.pydantic_v1 import BaseModel, Field
from langchain_community.tools.ainetwork.base import AINBaseTool, OperationType
class ValueSchema(BaseModel):
"""Schema for value operations."""
type: OperationType = Field(...)
path: str = Field(..., description="Blockchain reference path")
value: Optional[Union[int, str, float, dict]] = Field(
None, description="Value to be set at the path"
)
class AINValueOps(AINBaseTool):
"""Tool for value operations."""
name: str = "AINvalueOps"
description: str = """
Covers the read and write value for the AINetwork Blockchain database.
## SET
- Set a value at a given path
### Example
- type: SET
- path: /apps/langchain_test_1/object
- value: {1: 2, "34": 56}
## GET
- Retrieve a value at a given path
### Example
- type: GET
- path: /apps/langchain_test_1/DB
## Special paths
- `/accounts/<address>/balance`: Account balance
- `/accounts/<address>/nonce`: Account nonce
- `/apps`: Applications
- `/consensus`: Consensus
- `/checkin`: Check-in
- `/deposit/<service id>/<address>/<deposit id>`: Deposit
- `/deposit_accounts/<service id>/<address>/<account id>`: Deposit accounts
- `/escrow`: Escrow
- `/payments`: Payment
- `/sharding`: Sharding
- `/token/name`: Token name
- `/token/symbol`: Token symbol
- `/token/total_supply`: Token total supply
- `/transfer/<address from>/<address to>/<key>/value`: Transfer
- `/withdraw/<service id>/<address>/<withdraw id>`: Withdraw
"""
args_schema: Type[BaseModel] = ValueSchema
async def _arun(
self,
type: OperationType,
path: str,
value: Optional[Union[int, str, float, dict]] = None,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
from ain.types import ValueOnlyTransactionInput
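# SET writes the supplied value at the given path; GET reads the value currently stored there.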
try:
if type is OperationType.SET:
if value is None:
raise ValueError("'value' is required for SET operation.")
res = await self.interface.db.ref(path).setValue(
transactionInput=ValueOnlyTransactionInput(value=value)
)
elif type is OperationType.GET:
res = await self.interface.db.ref(path).getValue()
else:
raise ValueError(f"Unsupported 'type': {type}.")
return json.dumps(res, ensure_ascii=False)
except Exception as e:
return f"{builtins.type(e).__name__}: {str(e)}"
| [
"\nCovers the read and write value for the AINetwork Blockchain database.\n\n## SET\n- Set a value at a given path\n\n### Example\n- type: SET\n- path: /apps/langchain_test_1/object\n- value: {1: 2, \"34\": 56}\n\n## GET\n- Retrieve a value at a given path\n\n### Example\n- type: GET\n- path: /apps/langchain_test_1/DB\n\n## Special paths\n- `/accounts/<address>/balance`: Account balance\n- `/accounts/<address>/nonce`: Account nonce\n- `/apps`: Applications\n- `/consensus`: Consensus\n- `/checkin`: Check-in\n- `/deposit/<service id>/<address>/<deposit id>`: Deposit\n- `/deposit_accounts/<service id>/<address>/<account id>`: Deposit accounts\n- `/escrow`: Escrow\n- `/payments`: Payment\n- `/sharding`: Sharding\n- `/token/name`: Token name\n- `/token/symbol`: Token symbol\n- `/token/total_supply`: Token total supply\n- `/transfer/<address from>/<address to>/<key>/value`: Transfer\n- `/withdraw/<service id>/<address>/<withdraw id>`: Withdraw\n"
] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~retrievers~test_weaviate_hybrid_search.py | """Test Weaviate functionality."""
import logging
import os
import uuid
from typing import Generator, Union
from uuid import uuid4
import pytest
from libs.core.langchain_core.documents import Document
from langchain_community.retrievers.weaviate_hybrid_search import (
WeaviateHybridSearchRetriever,
)
logging.basicConfig(level=logging.DEBUG)
"""
cd tests/integration_tests/vectorstores/docker-compose
docker compose -f weaviate.yml up
"""
class TestWeaviateHybridSearchRetriever:
@classmethod
def setup_class(cls) -> None:
if not os.getenv("OPENAI_API_KEY"):
raise ValueError("OPENAI_API_KEY environment variable is not set")
@pytest.fixture(scope="class", autouse=True)
def weaviate_url(self) -> Union[str, Generator[str, None, None]]:
"""Return the weaviate url."""
from weaviate import Client
url = "http://localhost:8080"
yield url
# Clear the test index
client = Client(url)
client.schema.delete_all()
@pytest.mark.vcr(ignore_localhost=True)
def test_get_relevant_documents(self, weaviate_url: str) -> None:
"""Test end to end construction and MRR search."""
from weaviate import Client
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
client = Client(weaviate_url)
retriever = WeaviateHybridSearchRetriever(
client=client,
index_name=f"LangChain_{uuid4().hex}",
text_key="text",
attributes=["page"],
)
for i, text in enumerate(texts):
retriever.add_documents(
[Document(page_content=text, metadata=metadatas[i])]
)
output = retriever.get_relevant_documents("foo")
assert output == [
Document(page_content="foo", metadata={"page": 0}),
Document(page_content="baz", metadata={"page": 2}),
Document(page_content="bar", metadata={"page": 1}),
]
@pytest.mark.vcr(ignore_localhost=True)
def test_get_relevant_documents_with_score(self, weaviate_url: str) -> None:
"""Test end to end construction and MRR search."""
from weaviate import Client
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
client = Client(weaviate_url)
retriever = WeaviateHybridSearchRetriever(
client=client,
index_name=f"LangChain_{uuid4().hex}",
text_key="text",
attributes=["page"],
)
for i, text in enumerate(texts):
retriever.add_documents(
[Document(page_content=text, metadata=metadatas[i])]
)
output = retriever.get_relevant_documents("foo", score=True)
for doc in output:
assert "_additional" in doc.metadata
@pytest.mark.vcr(ignore_localhost=True)
def test_get_relevant_documents_with_filter(self, weaviate_url: str) -> None:
"""Test end to end construction and MRR search."""
from weaviate import Client
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
client = Client(weaviate_url)
retriever = WeaviateHybridSearchRetriever(
client=client,
index_name=f"LangChain_{uuid4().hex}",
text_key="text",
attributes=["page"],
)
for i, text in enumerate(texts):
retriever.add_documents(
[Document(page_content=text, metadata=metadatas[i])]
)
where_filter = {"path": ["page"], "operator": "Equal", "valueNumber": 0}
output = retriever.get_relevant_documents("foo", where_filter=where_filter)
assert output == [
Document(page_content="foo", metadata={"page": 0}),
]
@pytest.mark.vcr(ignore_localhost=True)
def test_get_relevant_documents_with_uuids(self, weaviate_url: str) -> None:
"""Test end to end construction and MRR search."""
from weaviate import Client
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
# Weaviate replaces the object if the UUID already exists
uuids = [uuid.uuid5(uuid.NAMESPACE_DNS, "same-name") for text in texts]
client = Client(weaviate_url)
retriever = WeaviateHybridSearchRetriever(
client=client,
index_name=f"LangChain_{uuid4().hex}",
text_key="text",
attributes=["page"],
)
for i, text in enumerate(texts):
retriever.add_documents(
[Document(page_content=text, metadata=metadatas[i])], uuids=[uuids[i]]
)
output = retriever.get_relevant_documents("foo")
assert len(output) == 1
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~chains~graph_qa~arangodb.py | """Question answering over a graph."""
from __future__ import annotations
import re
from typing import Any, Dict, List, Optional
from libs.core.langchain_core.prompts import BasePromptTemplate
from libs.core.langchain_core.pydantic_v1 import Field
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.graph_qa.prompts import (
AQL_FIX_PROMPT,
AQL_GENERATION_PROMPT,
AQL_QA_PROMPT,
)
from langchain.chains.llm import LLMChain
from langchain.graphs.arangodb_graph import ArangoGraph
class ArangoGraphQAChain(Chain):
"""Chain for question-answering against a graph by generating AQL statements.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
graph: ArangoGraph = Field(exclude=True)
aql_generation_chain: LLMChain
aql_fix_chain: LLMChain
qa_chain: LLMChain
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
# Specifies the maximum number of AQL Query Results to return
top_k: int = 10
# Specifies the set of AQL Query Examples that promote few-shot-learning
aql_examples: str = ""
# Specify whether to return the AQL Query in the output dictionary
return_aql_query: bool = False
# Specify whether to return the AQL JSON Result in the output dictionary
return_aql_result: bool = False
# Specify the maximum amount of AQL Generation attempts that should be made
max_aql_generation_attempts: int = 3
@property
def input_keys(self) -> List[str]:
return [self.input_key]
@property
def output_keys(self) -> List[str]:
return [self.output_key]
@property
def _chain_type(self) -> str:
return "graph_aql_chain"
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
*,
qa_prompt: BasePromptTemplate = AQL_QA_PROMPT,
aql_generation_prompt: BasePromptTemplate = AQL_GENERATION_PROMPT,
aql_fix_prompt: BasePromptTemplate = AQL_FIX_PROMPT,
**kwargs: Any,
) -> ArangoGraphQAChain:
"""Initialize from LLM."""
qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
aql_generation_chain = LLMChain(llm=llm, prompt=aql_generation_prompt)
aql_fix_chain = LLMChain(llm=llm, prompt=aql_fix_prompt)
return cls(
qa_chain=qa_chain,
aql_generation_chain=aql_generation_chain,
aql_fix_chain=aql_fix_chain,
**kwargs,
)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""
Generate an AQL statement from user input, use it to retrieve a response
from an ArangoDB Database instance, and respond to the user input
in natural language.
Users can modify the following ArangoGraphQAChain Class Variables:
:var top_k: The maximum number of AQL Query Results to return
:type top_k: int
:var aql_examples: A set of AQL Query Examples that are passed to
the AQL Generation Prompt Template to promote few-shot-learning.
Defaults to an empty string.
:type aql_examples: str
:var return_aql_query: Whether to return the AQL Query in the
output dictionary. Defaults to False.
:type return_aql_query: bool
:var return_aql_result: Whether to return the AQL JSON Result in the
output dictionary. Defaults to False.
:type return_aql_result: bool
:var max_aql_generation_attempts: The maximum amount of AQL
Generation attempts to be made prior to raising the last
AQL Query Execution Error. Defaults to 3.
:type max_aql_generation_attempts: int
"""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
user_input = inputs[self.input_key]
#########################
# Generate AQL Query #
aql_generation_output = self.aql_generation_chain.run(
{
"adb_schema": self.graph.schema,
"aql_examples": self.aql_examples,
"user_input": user_input,
},
callbacks=callbacks,
)
#########################
aql_query = ""
aql_error = ""
aql_result = None
aql_generation_attempt = 1
while (
aql_result is None
and aql_generation_attempt < self.max_aql_generation_attempts + 1
):
#####################
# Extract AQL Query #
pattern = r"```(?i:aql)?(.*?)```"
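# Extract the query from a fenced code block in the LLM output, optionally tagged "aql" (case-insensitive).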
matches = re.findall(pattern, aql_generation_output, re.DOTALL)
if not matches:
_run_manager.on_text(
"Invalid Response: ", end="\n", verbose=self.verbose
)
_run_manager.on_text(
aql_generation_output, color="red", end="\n", verbose=self.verbose
)
raise ValueError(f"Response is Invalid: {aql_generation_output}")
aql_query = matches[0]
#####################
_run_manager.on_text(
f"AQL Query ({aql_generation_attempt}):", verbose=self.verbose
)
_run_manager.on_text(
aql_query, color="green", end="\n", verbose=self.verbose
)
#####################
# Execute AQL Query #
from arango import AQLQueryExecuteError
try:
aql_result = self.graph.query(aql_query, self.top_k)
except AQLQueryExecuteError as e:
aql_error = e.error_message
_run_manager.on_text(
"AQL Query Execution Error: ", end="\n", verbose=self.verbose
)
_run_manager.on_text(
aql_error, color="yellow", end="\n\n", verbose=self.verbose
)
########################
# Retry AQL Generation #
aql_generation_output = self.aql_fix_chain.run(
{
"adb_schema": self.graph.schema,
"aql_query": aql_query,
"aql_error": aql_error,
},
callbacks=callbacks,
)
########################
#####################
aql_generation_attempt += 1
if aql_result is None:
m = f"""
Maximum amount of AQL Query Generation attempts reached.
Unable to execute the AQL Query due to the following error:
{aql_error}
"""
raise ValueError(m)
_run_manager.on_text("AQL Result:", end="\n", verbose=self.verbose)
_run_manager.on_text(
str(aql_result), color="green", end="\n", verbose=self.verbose
)
########################
# Interpret AQL Result #
result = self.qa_chain(
{
"adb_schema": self.graph.schema,
"user_input": user_input,
"aql_query": aql_query,
"aql_result": aql_result,
},
callbacks=callbacks,
)
########################
# Return results #
result = {self.output_key: result[self.qa_chain.output_key]}
if self.return_aql_query:
result["aql_query"] = aql_query
if self.return_aql_result:
result["aql_result"] = aql_result
return result
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~parsers~html~bs4.py | """Loader that uses bs4 to load HTML files, enriching metadata with page title."""
import logging
from typing import Any, Dict, Iterator, Union
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseBlobParser
from langchain_community.document_loaders.blob_loaders import Blob
logger = logging.getLogger(__name__)
class BS4HTMLParser(BaseBlobParser):
"""Pparse HTML files using `Beautiful Soup`."""
def __init__(
self,
*,
features: str = "lxml",
get_text_separator: str = "",
**kwargs: Any,
) -> None:
"""Initialize a bs4 based HTML parser."""
try:
import bs4 # noqa:F401
except ImportError:
raise ImportError(
"beautifulsoup4 package not found, please install it with "
"`pip install beautifulsoup4`"
)
self.bs_kwargs = {"features": features, **kwargs}
self.get_text_separator = get_text_separator
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Load HTML document into document objects."""
from bs4 import BeautifulSoup
with blob.as_bytes_io() as f:
soup = BeautifulSoup(f, **self.bs_kwargs)
text = soup.get_text(self.get_text_separator)
if soup.title:
title = str(soup.title.string)
else:
title = ""
metadata: Dict[str, Union[str, None]] = {
"source": blob.source,
"title": title,
}
yield Document(page_content=text, metadata=metadata)
| [] |