Source code for langchain.llms.ollama
"""Return type of llm."""
return "ollama-llm"
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Call out to Ollama's generate endpoint.
Args:
prompts: The prompts to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The full LLM output.
Example:
.. code-block:: python
response = ollama("Tell me a joke.")
"""
# TODO: add caching here.
generations = []
for prompt in prompts:
final_chunk = super()._stream_with_aggregation(
prompt,
stop=stop,
run_manager=run_manager,
verbose=self.verbose,
**kwargs,
)
generations.append([final_chunk])
return LLMResult(generations=generations)
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
for stream_resp in self._create_stream(prompt, stop, **kwargs):
if stream_resp:
chunk = _stream_response_to_generation_chunk(stream_resp)
yield chunk
if run_manager:
run_manager.on_llm_new_token(
chunk.text,
verbose=self.verbose,
)
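The fragment above covers the Ollama wrapper's `_generate` and `_stream` methods. A hedged usage sketch follows; it is not part of the original page and assumes an Ollama server running locally on the default port with the `llama2` model already pulled:

from langchain.llms import Ollama

llm = Ollama(model="llama2", temperature=0.1)
print(llm("Tell me a joke."))  # single-shot path through _generate above
for token in llm.stream("Tell me a joke."):  # token-by-token path through _stream
    print(token, end="", flush=True)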
Source code for langchain.llms.edenai
"""Wrapper around EdenAI's Generation API."""
import logging
from typing import Any, Dict, List, Literal, Optional
from aiohttp import ClientSession
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Extra, Field, root_validator
from langchain.utilities.requests import Requests
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
[docs]class EdenAI(LLM):
"""Wrapper around edenai models.
To use, you should have
the environment variable ``EDENAI_API_KEY`` set with your API token.
You can find your token here: https://app.edenai.run/admin/account/settings
`feature` and `subfeature` are required, but any other model parameters can also be
passed in with the format params={model_param: value, ...}
for api reference check edenai documentation: http://docs.edenai.co.
"""
base_url: str = "https://api.edenai.run/v2"
edenai_api_key: Optional[str] = None
feature: Literal["text", "image"] = "text"
"""Which generative feature to use, use text by default"""
subfeature: Literal["generation"] = "generation"
"""Subfeature of above feature, use generation by default"""
provider: str
"""Generative provider to use (eg: openai,stabilityai,cohere,google etc.)"""
model: Optional[str] = None
""" | lang/api.python.langchain.com/en/latest/_modules/langchain/llms/edenai.html |
9859d0efff2b-1 | model: Optional[str] = None
"""
model name for the above provider (e.g. 'text-davinci-003' for openai);
available models are shown on https://docs.edenai.co/ under 'available providers'
"""
# Optional parameters to add depending on the chosen feature;
# see the API reference for more info
temperature: Optional[float] = Field(default=None, ge=0, le=1) # for text
max_tokens: Optional[int] = Field(default=None, ge=0) # for text
resolution: Optional[Literal["256x256", "512x512", "1024x1024"]] = None # for image
params: Dict[str, Any] = Field(default_factory=dict)
"""
DEPRECATED: use temperature, max_tokens, resolution directly
optional parameters to pass to the API
"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""extra parameters"""
stop_sequences: Optional[List[str]] = None
"""Stop sequences to use."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
values["edenai_api_key"] = get_from_dict_or_env(
values, "edenai_api_key", "EDENAI_API_KEY"
)
return values
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@property
def _llm_type(self) -> str:
"""Return type of model."""
return "edenai"
def _format_output(self, output: dict) -> str:
if self.feature == "text":
return output[self.provider]["generated_text"]
else:
return output[self.provider]["items"][0]["image"]
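# A hedged sketch of the minimal response shapes _format_output expects,
# inferred solely from the indexing above (real EdenAI payloads carry more
# fields; the values here are illustrative):
#
#     text_response = {"openai": {"generated_text": "Hello!"}}
#     image_response = {"openai": {"items": [{"image": "<base64 image data>"}]}}
#
# With feature="text" and provider="openai", _format_output(text_response)
# returns "Hello!"; with feature="image" it returns the base64 image payload.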
[docs] @staticmethod
def get_user_agent() -> str:
from langchain import __version__
return f"langchain/{__version__}"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to EdenAI's text generation endpoint.
Args:
prompt: The prompt to pass into the model.
Returns:
The string generated by the model.
"""
stops = None
if self.stop_sequences is not None and stop is not None:
raise ValueError(
"stop sequences found in both the input and default params."
)
elif self.stop_sequences is not None:
stops = self.stop_sequences
else:
stops = stop
url = f"{self.base_url}/{self.feature}/{self.subfeature}"
headers = {
"Authorization": f"Bearer {self.edenai_api_key}",
"User-Agent": self.get_user_agent(),
}
payload: Dict[str, Any] = {
"providers": self.provider,
"text": prompt,
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"resolution": self.resolution,
**self.params,
**kwargs,
"num_images": 1, # always limit to 1 (ignored for text)
}
# filter None values to not pass them to the http payload
payload = {k: v for k, v in payload.items() if v is not None}
if self.model is not None:
payload["settings"] = {self.provider: self.model}
request = Requests(headers=headers)
response = request.post(url=url, data=payload)
if response.status_code >= 500:
raise Exception(f"EdenAI Server: Error {response.status_code}")
elif response.status_code >= 400:
raise ValueError(f"EdenAI received an invalid payload: {response.text}")
elif response.status_code != 200:
raise Exception(
f"EdenAI returned an unexpected response with status "
f"{response.status_code}: {response.text}"
)
data = response.json()
provider_response = data[self.provider]
if provider_response.get("status") == "fail":
err_msg = provider_response.get("error", {}).get("message")
raise Exception(err_msg)
output = self._format_output(data)
if stops is not None:
output = enforce_stop_tokens(output, stops)
return output
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call EdenAi model to get predictions based on the prompt.
Args:
prompt: The prompt to pass into the model.
stop: A list of stop words (optional).
run_manager: A callback manager for async interaction with LLMs.
Returns:
The string generated by the model.
"""
stops = None
if self.stop_sequences is not None and stop is not None:
raise ValueError(
"stop sequences found in both the input and default params."
)
elif self.stop_sequences is not None:
stops = self.stop_sequences
else:
stops = stop
url = f"{self.base_url}/{self.feature}/{self.subfeature}"
headers = {
"Authorization": f"Bearer {self.edenai_api_key}",
"User-Agent": self.get_user_agent(),
}
payload: Dict[str, Any] = {
"providers": self.provider,
"text": prompt,
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"resolution": self.resolution,
**self.params,
**kwargs,
"num_images": 1, # always limit to 1 (ignored for text)
}
# filter `None` values to not pass them to the http payload as null
payload = {k: v for k, v in payload.items() if v is not None}
if self.model is not None:
payload["settings"] = {self.provider: self.model}
async with ClientSession() as session:
async with session.post(url, json=payload, headers=headers) as response:
if response.status >= 500:
raise Exception(f"EdenAI Server: Error {response.status}")
elif response.status >= 400:
raise ValueError(
f"EdenAI received an invalid payload: {response.text}"
)
elif response.status != 200:
raise Exception(
f"EdenAI returned an unexpected response with status "
f"{response.status}: {response.text}"
)
response_json = await response.json()
provider_response = response_json[self.provider]
if provider_response.get("status") == "fail":
err_msg = provider_response.get("error", {}).get("message")
raise Exception(err_msg)
output = self._format_output(response_json)
if stops is not None:
output = enforce_stop_tokens(output, stops)
return output
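A hedged end-to-end usage sketch for the EdenAI wrapper above, based on its class docstring; the provider and model names are illustrative and ``EDENAI_API_KEY`` must be set:

from langchain.llms import EdenAI

llm = EdenAI(
    provider="openai",
    model="text-davinci-003",  # illustrative; see https://docs.edenai.co
    temperature=0.2,
    max_tokens=250,
)
print(llm("Explain quantum computing in one sentence."))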
Source code for langchain.llms.gpt4all
from functools import partial
from typing import Any, Dict, List, Mapping, Optional, Set
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Extra, Field, root_validator
[docs]class GPT4All(LLM):
"""GPT4All language models.
To use, you should have the ``gpt4all`` python package installed, the
pre-trained model file, and the model's config information.
Example:
.. code-block:: python
from langchain.llms import GPT4All
model = GPT4All(model="./models/gpt4all-model.bin", n_threads=8)
# Simplest invocation
response = model("Once upon a time, ")
"""
model: str
"""Path to the pre-trained GPT4All model file."""
backend: Optional[str] = Field(None, alias="backend")
max_tokens: int = Field(200, alias="max_tokens")
"""Token context window."""
n_parts: int = Field(-1, alias="n_parts")
"""Number of parts to split the model into.
If -1, the number of parts is automatically determined."""
seed: int = Field(0, alias="seed")
"""Seed. If -1, a random seed is used."""
f16_kv: bool = Field(False, alias="f16_kv")
"""Use half-precision for key/value cache."""
logits_all: bool = Field(False, alias="logits_all")
"""Return logits for all tokens, not just the last token.""" | lang/api.python.langchain.com/en/latest/_modules/langchain/llms/gpt4all.html |
946006102fcb-1 | """Return logits for all tokens, not just the last token."""
vocab_only: bool = Field(False, alias="vocab_only")
"""Only load the vocabulary, no weights."""
use_mlock: bool = Field(False, alias="use_mlock")
"""Force system to keep model in RAM."""
embedding: bool = Field(False, alias="embedding")
"""Use embedding mode only."""
n_threads: Optional[int] = Field(4, alias="n_threads")
"""Number of threads to use."""
n_predict: Optional[int] = 256
"""The maximum number of tokens to generate."""
temp: Optional[float] = 0.7
"""The temperature to use for sampling."""
top_p: Optional[float] = 0.1
"""The top-p value to use for sampling."""
top_k: Optional[int] = 40
"""The top-k value to use for sampling."""
echo: Optional[bool] = False
"""Whether to echo the prompt."""
stop: Optional[List[str]] = []
"""A list of strings to stop generation when encountered."""
repeat_last_n: Optional[int] = 64
"Last n tokens to penalize"
repeat_penalty: Optional[float] = 1.18
"""The penalty to apply to repeated tokens."""
n_batch: int = Field(8, alias="n_batch")
"""Batch size for prompt processing."""
streaming: bool = False
"""Whether to stream the results or not."""
allow_download: bool = False
"""If model does not exist in ~/.cache/gpt4all/, download it."""
device: Optional[str] = Field("cpu", alias="device")
"""Device name: cpu, gpu, nvidia, intel, amd or DeviceName."""
client: Any = None #: :meta private:
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@staticmethod
def _model_param_names() -> Set[str]:
return {
"max_tokens",
"n_predict",
"top_k",
"top_p",
"temp",
"n_batch",
"repeat_penalty",
"repeat_last_n",
}
def _default_params(self) -> Dict[str, Any]:
return {
"max_tokens": self.max_tokens,
"n_predict": self.n_predict,
"top_k": self.top_k,
"top_p": self.top_p,
"temp": self.temp,
"n_batch": self.n_batch,
"repeat_penalty": self.repeat_penalty,
"repeat_last_n": self.repeat_last_n,
}
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in the environment."""
try:
from gpt4all import GPT4All as GPT4AllModel
except ImportError:
raise ImportError(
"Could not import gpt4all python package. "
"Please install it with `pip install gpt4all`."
)
full_path = values["model"]
model_path, delimiter, model_name = full_path.rpartition("/")
model_path += delimiter
values["client"] = GPT4AllModel(
model_name,
model_path=model_path or None,
model_type=values["backend"],
allow_download=values["allow_download"],
device=values["device"],
)
if values["n_threads"] is not None:
# set n_threads
values["client"].model.set_thread_count(values["n_threads"])
try:
values["backend"] = values["client"].model_type
except AttributeError:
# The below is for compatibility with GPT4All Python bindings <= 0.2.3.
values["backend"] = values["client"].model.model_type
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model,
**self._default_params(),
**{
k: v for k, v in self.__dict__.items() if k in self._model_param_names()
},
}
@property
def _llm_type(self) -> str:
"""Return the type of llm."""
return "gpt4all"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
r"""Call out to GPT4All's generate method.
Args:
prompt: The prompt to pass into the model.
stop: A list of strings to stop generation when encountered.
Returns:
The string generated by the model.
Example:
.. code-block:: python
prompt = "Once upon a time, " | lang/api.python.langchain.com/en/latest/_modules/langchain/llms/gpt4all.html |
946006102fcb-4 | .. code-block:: python
prompt = "Once upon a time, "
response = model(prompt, n_predict=55)
"""
text_callback = None
if run_manager:
text_callback = partial(run_manager.on_llm_new_token, verbose=self.verbose)
text = ""
params = {**self._default_params(), **kwargs}
for token in self.client.generate(prompt, **params):
if text_callback:
text_callback(token)
text += token
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
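A hedged usage sketch combining the class docstring above with the token callback path in `_call`; the model path is illustrative and must point to real GPT4All weights on disk:

from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms import GPT4All

llm = GPT4All(
    model="./models/gpt4all-model.bin",  # illustrative path
    n_threads=8,
    callbacks=[StreamingStdOutCallbackHandler()],  # prints tokens as generated
)
response = llm("Once upon a time, ")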
Source code for langchain.llms.openai
from __future__ import annotations
import logging
import os
import sys
import warnings
from typing import (
AbstractSet,
Any,
AsyncIterator,
Callable,
Collection,
Dict,
Iterator,
List,
Literal,
Mapping,
Optional,
Set,
Tuple,
Union,
)
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import BaseLLM, create_base_retry_decorator
from langchain.pydantic_v1 import Field, root_validator
from langchain.schema import Generation, LLMResult
from langchain.schema.output import GenerationChunk
from langchain.utils import get_from_dict_or_env, get_pydantic_field_names
from langchain.utils.openai import is_openai_v1
from langchain.utils.utils import build_extra_kwargs
logger = logging.getLogger(__name__)
[docs]def update_token_usage(
keys: Set[str], response: Dict[str, Any], token_usage: Dict[str, Any]
) -> None:
"""Update token usage."""
_keys_to_use = keys.intersection(response["usage"])
for _key in _keys_to_use:
if _key not in token_usage:
token_usage[_key] = response["usage"][_key]
else:
token_usage[_key] += response["usage"][_key]
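# A hedged illustration of update_token_usage: usage counts accumulate across
# successive responses, which is how the batched _generate below sums usage
# over sub-prompt calls (dict shapes inferred from the function above):
#
#     keys = {"completion_tokens", "prompt_tokens", "total_tokens"}
#     token_usage: Dict[str, Any] = {}
#     update_token_usage(keys, {"usage": {"prompt_tokens": 5, "completion_tokens": 7, "total_tokens": 12}}, token_usage)
#     update_token_usage(keys, {"usage": {"prompt_tokens": 3, "completion_tokens": 4, "total_tokens": 7}}, token_usage)
#     assert token_usage == {"prompt_tokens": 8, "completion_tokens": 11, "total_tokens": 19}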
def _stream_response_to_generation_chunk(
stream_response: Dict[str, Any],
) -> GenerationChunk:
"""Convert a stream response to a generation chunk."""
if not stream_response["choices"]:
return GenerationChunk(text="")
return GenerationChunk(
text=stream_response["choices"][0]["text"],
generation_info=dict(
finish_reason=stream_response["choices"][0].get("finish_reason", None),
logprobs=stream_response["choices"][0].get("logprobs", None),
),
)
def _update_response(response: Dict[str, Any], stream_response: Dict[str, Any]) -> None:
"""Update response from the stream response."""
response["choices"][0]["text"] += stream_response["choices"][0]["text"]
response["choices"][0]["finish_reason"] = stream_response["choices"][0].get(
"finish_reason", None
)
response["choices"][0]["logprobs"] = stream_response["choices"][0]["logprobs"]
def _streaming_response_template() -> Dict[str, Any]:
return {
"choices": [
{
"text": "",
"finish_reason": None,
"logprobs": None,
}
]
}
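# A hedged illustration of how the template above is filled in by
# _update_response as stream chunks arrive:
#
#     response = _streaming_response_template()
#     _update_response(response, {"choices": [{"text": "Hello", "finish_reason": None, "logprobs": None}]})
#     _update_response(response, {"choices": [{"text": " world", "finish_reason": "stop", "logprobs": None}]})
#     assert response["choices"][0]["text"] == "Hello world"
#     assert response["choices"][0]["finish_reason"] == "stop"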
def _create_retry_decorator(
llm: Union[BaseOpenAI, OpenAIChat],
run_manager: Optional[
Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
] = None,
) -> Callable[[Any], Any]:
import openai
errors = [
openai.error.Timeout,
openai.error.APIError,
openai.error.APIConnectionError,
openai.error.RateLimitError,
openai.error.ServiceUnavailableError,
]
return create_base_retry_decorator(
error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
)
[docs]def completion_with_retry(
llm: Union[BaseOpenAI, OpenAIChat],
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call."""
if is_openai_v1():
return llm.client.create(**kwargs)
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
return llm.client.create(**kwargs)
return _completion_with_retry(**kwargs)
[docs]async def acompletion_with_retry(
llm: Union[BaseOpenAI, OpenAIChat],
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the async completion call."""
if is_openai_v1():
return await llm.async_client.create(**kwargs)
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
# Use OpenAI's async api https://github.com/openai/openai-python#async-api
return await llm.client.acreate(**kwargs)
return await _completion_with_retry(**kwargs)
[docs]class BaseOpenAI(BaseLLM):
"""Base OpenAI large language model class."""
@property
def lc_secrets(self) -> Dict[str, str]:
return {"openai_api_key": "OPENAI_API_KEY"}
@property
def lc_attributes(self) -> Dict[str, Any]:
attributes: Dict[str, Any] = {}
if self.openai_api_base:
attributes["openai_api_base"] = self.openai_api_base
if self.openai_organization:
attributes["openai_organization"] = self.openai_organization
if self.openai_proxy:
attributes["openai_proxy"] = self.openai_proxy
return attributes
[docs] @classmethod
def is_lc_serializable(cls) -> bool:
return True
client: Any = Field(default=None, exclude=True) #: :meta private:
async_client: Any = Field(default=None, exclude=True) #: :meta private:
model_name: str = Field(default="text-davinci-003", alias="model")
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
max_tokens: int = 256
"""The maximum number of tokens to generate in the completion.
-1 returns as many tokens as possible given the prompt and
the model's maximal context size."""
top_p: float = 1
"""Total probability mass of tokens to consider at each step."""
frequency_penalty: float = 0
"""Penalizes repeated tokens according to frequency."""
presence_penalty: float = 0
"""Penalizes repeated tokens."""
n: int = 1
"""How many completions to generate for each prompt."""
best_of: int = 1
"""Generates best_of completions server-side and returns the "best"."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified.""" | lang/api.python.langchain.com/en/latest/_modules/langchain/llms/openai.html |
dc417efc9b34-4 | """Holds any model parameters valid for `create` call not explicitly specified."""
# When updating this to use a SecretStr
# Check for classes that derive from this class (as some of them
# may assume openai_api_key is a str)
openai_api_key: Optional[str] = Field(default=None, alias="api_key")
"""Automatically inferred from env var `OPENAI_API_KEY` if not provided."""
openai_api_base: Optional[str] = Field(default=None, alias="base_url")
"""Base URL path for API requests, leave blank if not using a proxy or service
emulator."""
openai_organization: Optional[str] = Field(default=None, alias="organization")
"""Automatically inferred from env var `OPENAI_ORG_ID` if not provided."""
# to support explicit proxy for OpenAI
openai_proxy: Optional[str] = None
batch_size: int = 20
"""Batch size to use when passing multiple documents to generate."""
request_timeout: Union[float, Tuple[float, float], Any, None] = Field(
default=None, alias="timeout"
)
"""Timeout for requests to OpenAI completion API. Can be float, httpx.Timeout or
None."""
logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict)
"""Adjust the probability of specific tokens being generated."""
max_retries: int = 2
"""Maximum number of retries to make when generating."""
streaming: bool = False
"""Whether to stream the results or not."""
allowed_special: Union[Literal["all"], AbstractSet[str]] = set()
"""Set of special tokens that are allowed。"""
disallowed_special: Union[Literal["all"], Collection[str]] = "all"
"""Set of special tokens that are not allowed。"""
tiktoken_model_name: Optional[str] = None
"""The model name to pass to tiktoken when using this class.
Tiktoken is used to count the number of tokens in documents to constrain
them to be under a certain limit. By default, when set to None, this will
be the same as the model name. However, there are some cases
where you may want to use this LLM class with a model name not
supported by tiktoken. This can include when using Azure embeddings or
when using one of the many model providers that expose an OpenAI-like
API but with different models. In those cases, in order to avoid erroring
when tiktoken is called, you can specify a model name to use here."""
default_headers: Union[Mapping[str, str], None] = None
default_query: Union[Mapping[str, object], None] = None
# Configure a custom httpx client. See the
# [httpx documentation](https://www.python-httpx.org/api/#client) for more details.
http_client: Union[Any, None] = None
"""Optional httpx.Client."""
def __new__(cls, **data: Any) -> Union[OpenAIChat, BaseOpenAI]: # type: ignore
"""Initialize the OpenAI object."""
model_name = data.get("model_name", "")
if (
model_name.startswith("gpt-3.5-turbo") or model_name.startswith("gpt-4")
) and "-instruct" not in model_name:
warnings.warn(
"You are trying to use a chat model. This way of initializing it is "
"no longer supported. Instead, please use: "
"`from langchain.chat_models import ChatOpenAI`"
)
return OpenAIChat(**data)
return super().__new__(cls)
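# Because of the __new__ override above, constructing an OpenAI LLM with a
# chat model name transparently returns an OpenAIChat instance. A hedged
# illustration (assumes OPENAI_API_KEY is set so validation passes):
#
#     llm = OpenAI(model_name="gpt-3.5-turbo")  # warns, returns OpenAIChat
#     assert isinstance(llm, OpenAIChat)
#     llm = OpenAI(model_name="gpt-3.5-turbo-instruct")  # "-instruct": no redirect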
class Config:
"""Configuration for this pydantic object."""
allow_population_by_field_name = True
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
values["model_kwargs"] = build_extra_kwargs(
extra, values, all_required_field_names
)
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
if values["n"] < 1:
raise ValueError("n must be at least 1.")
if values["streaming"] and values["n"] > 1:
raise ValueError("Cannot stream results when n > 1.")
if values["streaming"] and values["best_of"] > 1:
raise ValueError("Cannot stream results when best_of > 1.")
values["openai_api_key"] = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
values["openai_api_base"] = values["openai_api_base"] or os.getenv(
"OPENAI_API_BASE"
)
values["openai_proxy"] = get_from_dict_or_env(
values,
"openai_proxy",
"OPENAI_PROXY",
default="",
)
values["openai_organization"] = (
values["openai_organization"]
or os.getenv("OPENAI_ORG_ID")
or os.getenv("OPENAI_ORGANIZATION")
)
try:
import openai
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
if is_openai_v1():
client_params = {
"api_key": values["openai_api_key"],
"organization": values["openai_organization"],
"base_url": values["openai_api_base"],
"timeout": values["request_timeout"],
"max_retries": values["max_retries"],
"default_headers": values["default_headers"],
"default_query": values["default_query"],
"http_client": values["http_client"],
}
if not values.get("client"):
values["client"] = openai.OpenAI(**client_params).completions
if not values.get("async_client"):
values["async_client"] = openai.AsyncOpenAI(**client_params).completions
elif not values.get("client"):
values["client"] = openai.Completion
else:
pass
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
normal_params: Dict[str, Any] = {
"temperature": self.temperature, | lang/api.python.langchain.com/en/latest/_modules/langchain/llms/openai.html |
dc417efc9b34-8 | normal_params: Dict[str, Any] = {
"temperature": self.temperature,
"top_p": self.top_p,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"n": self.n,
"logit_bias": self.logit_bias,
}
if self.max_tokens is not None:
normal_params["max_tokens"] = self.max_tokens
if self.request_timeout is not None and not is_openai_v1():
normal_params["request_timeout"] = self.request_timeout
# Azure gpt-35-turbo doesn't support best_of
# don't specify best_of if it is 1
if self.best_of > 1:
normal_params["best_of"] = self.best_of
return {**normal_params, **self.model_kwargs}
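# Since model_kwargs is merged last in _default_params, it is the escape
# hatch for request parameters that have no dedicated field. A hedged
# illustration ("user" is an illustrative extra parameter):
#
#     llm = OpenAI(temperature=0.2, model_kwargs={"user": "my-app"})
#     params = llm._default_params
#     # params["temperature"] == 0.2 and params["user"] == "my-app"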
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
params = {**self._invocation_params, **kwargs, "stream": True}
self.get_sub_prompts(params, [prompt], stop) # this mutates params
for stream_resp in completion_with_retry(
self, prompt=prompt, run_manager=run_manager, **params
):
if not isinstance(stream_resp, dict):
stream_resp = stream_resp.dict()
chunk = _stream_response_to_generation_chunk(stream_resp)
yield chunk
if run_manager:
run_manager.on_llm_new_token(
chunk.text,
chunk=chunk,
verbose=self.verbose,
logprobs=chunk.generation_info["logprobs"]
if chunk.generation_info
else None,
)
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
params = {**self._invocation_params, **kwargs, "stream": True}
self.get_sub_prompts(params, [prompt], stop) # this mutates params
async for stream_resp in await acompletion_with_retry(
self, prompt=prompt, run_manager=run_manager, **params
):
if not isinstance(stream_resp, dict):
stream_resp = stream_resp.dict()
chunk = _stream_response_to_generation_chunk(stream_resp)
yield chunk
if run_manager:
await run_manager.on_llm_new_token(
chunk.text,
chunk=chunk,
verbose=self.verbose,
logprobs=chunk.generation_info["logprobs"]
if chunk.generation_info
else None,
)
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Call out to OpenAI's endpoint with k unique prompts.
Args:
prompts: The prompts to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The full LLM output.
Example:
.. code-block:: python
response = openai.generate(["Tell me a joke."])
"""
# TODO: write a unit test for this
params = self._invocation_params
params = {**params, **kwargs}
sub_prompts = self.get_sub_prompts(params, prompts, stop)
choices = []
token_usage: Dict[str, int] = {}
# Get the token usage from the response.
# Includes prompt, completion, and total tokens used.
_keys = {"completion_tokens", "prompt_tokens", "total_tokens"}
system_fingerprint: Optional[str] = None
for _prompts in sub_prompts:
if self.streaming:
if len(_prompts) > 1:
raise ValueError("Cannot stream results with multiple prompts.")
generation: Optional[GenerationChunk] = None
for chunk in self._stream(_prompts[0], stop, run_manager, **kwargs):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
choices.append(
{
"text": generation.text,
"finish_reason": generation.generation_info.get("finish_reason")
if generation.generation_info
else None,
"logprobs": generation.generation_info.get("logprobs")
if generation.generation_info
else None,
}
)
else:
response = completion_with_retry(
self, prompt=_prompts, run_manager=run_manager, **params
)
if not isinstance(response, dict):
# V1 client returns the response in a Pydantic object instead of
# dict. For the transition period, we deep convert it to dict.
response = response.dict()
choices.extend(response["choices"])
update_token_usage(_keys, response, token_usage)
if not system_fingerprint:
system_fingerprint = response.get("system_fingerprint")
return self.create_llm_result(
choices,
prompts,
token_usage,
system_fingerprint=system_fingerprint,
)
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Call out to OpenAI's endpoint async with k unique prompts."""
params = self._invocation_params
params = {**params, **kwargs}
sub_prompts = self.get_sub_prompts(params, prompts, stop)
choices = []
token_usage: Dict[str, int] = {}
# Get the token usage from the response.
# Includes prompt, completion, and total tokens used.
_keys = {"completion_tokens", "prompt_tokens", "total_tokens"}
system_fingerprint: Optional[str] = None
for _prompts in sub_prompts:
if self.streaming:
if len(_prompts) > 1:
raise ValueError("Cannot stream results with multiple prompts.")
generation: Optional[GenerationChunk] = None
async for chunk in self._astream(
_prompts[0], stop, run_manager, **kwargs
):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
choices.append(
{
"text": generation.text,
"finish_reason": generation.generation_info.get("finish_reason")
if generation.generation_info
else None,
"logprobs": generation.generation_info.get("logprobs")
if generation.generation_info
else None,
}
)
else:
response = await acompletion_with_retry(
self, prompt=_prompts, run_manager=run_manager, **params
)
if not isinstance(response, dict):
response = response.dict()
choices.extend(response["choices"])
update_token_usage(_keys, response, token_usage)
# mirror the sync path so the async result also carries the fingerprint
if not system_fingerprint:
system_fingerprint = response.get("system_fingerprint")
return self.create_llm_result(
choices,
prompts,
token_usage,
system_fingerprint=system_fingerprint,
)
[docs] def get_sub_prompts(
self,
params: Dict[str, Any],
prompts: List[str],
stop: Optional[List[str]] = None,
) -> List[List[str]]:
"""Get the sub prompts for llm call."""
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
if params["max_tokens"] == -1:
if len(prompts) != 1:
raise ValueError(
"max_tokens set to -1 not supported for multiple inputs."
)
params["max_tokens"] = self.max_tokens_for_prompt(prompts[0])
sub_prompts = [
prompts[i : i + self.batch_size]
for i in range(0, len(prompts), self.batch_size)
]
return sub_prompts
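# A hedged illustration of the batching above: with batch_size=2, five
# prompts are split into three sub-lists, one API call each:
#
#     prompts = ["p1", "p2", "p3", "p4", "p5"]
#     sub = [prompts[i : i + 2] for i in range(0, len(prompts), 2)]
#     assert sub == [["p1", "p2"], ["p3", "p4"], ["p5"]]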
[docs] def create_llm_result(
self,
choices: Any,
prompts: List[str],
token_usage: Dict[str, int],
*,
system_fingerprint: Optional[str] = None,
) -> LLMResult:
"""Create the LLMResult from the choices and prompts."""
generations = []
for i, _ in enumerate(prompts):
sub_choices = choices[i * self.n : (i + 1) * self.n]
generations.append(
[
Generation(
text=choice["text"],
generation_info=dict(
finish_reason=choice.get("finish_reason"),
logprobs=choice.get("logprobs"),
),
)
for choice in sub_choices
]
)
llm_output = {"token_usage": token_usage, "model_name": self.model_name}
if system_fingerprint:
llm_output["system_fingerprint"] = system_fingerprint
return LLMResult(generations=generations, llm_output=llm_output)
@property
def _invocation_params(self) -> Dict[str, Any]:
"""Get the parameters used to invoke the model."""
openai_creds: Dict[str, Any] = {}
if not is_openai_v1():
openai_creds.update(
{
"api_key": self.openai_api_key,
"api_base": self.openai_api_base,
"organization": self.openai_organization,
}
)
if self.openai_proxy:
import openai
openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} # type: ignore[assignment] # noqa: E501
return {**openai_creds, **self._default_params}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "openai"
[docs] def get_token_ids(self, text: str) -> List[int]:
"""Get the token IDs using the tiktoken package."""
# tiktoken NOT supported for Python < 3.8
if sys.version_info[1] < 8:
return super().get_token_ids(text)
try:
import tiktoken
except ImportError:
raise ImportError(
"Could not import tiktoken python package. "
"This is needed in order to calculate get_num_tokens. "
"Please install it with `pip install tiktoken`."
)
model_name = self.tiktoken_model_name or self.model_name
try:
enc = tiktoken.encoding_for_model(model_name)
except KeyError:
logger.warning("Warning: model not found. Using cl100k_base encoding.")
model = "cl100k_base"
enc = tiktoken.get_encoding(model)
return enc.encode(
text,
allowed_special=self.allowed_special,
disallowed_special=self.disallowed_special,
)
[docs] @staticmethod
def modelname_to_contextsize(modelname: str) -> int:
"""Calculate the maximum number of tokens possible to generate for a model.
Args:
modelname: The modelname we want to know the context size for.
Returns:
The maximum context size
Example:
.. code-block:: python
max_tokens = openai.modelname_to_contextsize("text-davinci-003")
"""
model_token_mapping = {
"gpt-4": 8192,
"gpt-4-0314": 8192,
"gpt-4-0613": 8192,
"gpt-4-32k": 32768,
"gpt-4-32k-0314": 32768,
"gpt-4-32k-0613": 32768,
"gpt-3.5-turbo": 4096,
"gpt-3.5-turbo-0301": 4096,
"gpt-3.5-turbo-0613": 4096,
"gpt-3.5-turbo-16k": 16385,
"gpt-3.5-turbo-16k-0613": 16385,
"gpt-3.5-turbo-instruct": 4096,
"text-ada-001": 2049,
"ada": 2049,
"text-babbage-001": 2040,
"babbage": 2049,
"text-curie-001": 2049, | lang/api.python.langchain.com/en/latest/_modules/langchain/llms/openai.html |
dc417efc9b34-16 | "text-curie-001": 2049,
"curie": 2049,
"davinci": 2049,
"text-davinci-003": 4097,
"text-davinci-002": 4097,
"code-davinci-002": 8001,
"code-davinci-001": 8001,
"code-cushman-002": 2048,
"code-cushman-001": 2048,
}
# handling finetuned models
if "ft-" in modelname:
modelname = modelname.split(":")[0]
context_size = model_token_mapping.get(modelname, None)
if context_size is None:
raise ValueError(
f"Unknown model: {modelname}. Please provide a valid OpenAI model name."
"Known models are: " + ", ".join(model_token_mapping.keys())
)
return context_size
@property
def max_context_size(self) -> int:
"""Get max context size for this model."""
return self.modelname_to_contextsize(self.model_name)
[docs] def max_tokens_for_prompt(self, prompt: str) -> int:
"""Calculate the maximum number of tokens possible to generate for a prompt.
Args:
prompt: The prompt to pass into the model.
Returns:
The maximum number of tokens to generate for a prompt.
Example:
.. code-block:: python
max_tokens = openai.max_token_for_prompt("Tell me a joke.")
"""
num_tokens = self.get_num_tokens(prompt)
return self.max_context_size - num_tokens
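# A hedged illustration tying the helpers above together: the generation
# budget is the context window from modelname_to_contextsize minus the
# prompt's token count:
#
#     llm = OpenAI(model_name="text-davinci-003")  # context size 4097 above
#     budget = llm.max_tokens_for_prompt("Tell me a joke.")
#     # budget == llm.max_context_size - llm.get_num_tokens("Tell me a joke.")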
[docs]class OpenAI(BaseOpenAI):
"""OpenAI large language models. | lang/api.python.langchain.com/en/latest/_modules/langchain/llms/openai.html |
dc417efc9b34-17 | [docs]class OpenAI(BaseOpenAI):
"""OpenAI large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.llms import OpenAI
openai = OpenAI(model_name="text-davinci-003")
"""
@property
def _invocation_params(self) -> Dict[str, Any]:
return {**{"model": self.model_name}, **super()._invocation_params}
[docs]class AzureOpenAI(BaseOpenAI):
"""Azure-specific OpenAI large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.llms import AzureOpenAI
openai = AzureOpenAI(model_name="text-davinci-003")
"""
azure_endpoint: Union[str, None] = None
"""Your Azure endpoint, including the resource.
Automatically inferred from env var `AZURE_OPENAI_ENDPOINT` if not provided.
Example: `https://example-resource.azure.openai.com/`
"""
deployment_name: Union[str, None] = Field(default=None, alias="azure_deployment")
"""A model deployment. | lang/api.python.langchain.com/en/latest/_modules/langchain/llms/openai.html |
dc417efc9b34-18 | """A model deployment.
If given sets the base client URL to include `/deployments/{azure_deployment}`.
Note: this means you won't be able to use non-deployment endpoints.
"""
openai_api_version: str = Field(default="", alias="api_version")
"""Automatically inferred from env var `OPENAI_API_VERSION` if not provided."""
openai_api_key: Union[str, None] = Field(default=None, alias="api_key")
"""Automatically inferred from env var `AZURE_OPENAI_API_KEY` if not provided."""
azure_ad_token: Union[str, None] = None
"""Your Azure Active Directory token.
Automatically inferred from env var `AZURE_OPENAI_AD_TOKEN` if not provided.
For more:
https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id.
""" # noqa: E501
azure_ad_token_provider: Union[str, None] = None
"""A function that returns an Azure Active Directory token.
Will be invoked on every request.
"""
openai_api_type: str = ""
"""Legacy, for openai<1.0.0 support."""
validate_base_url: bool = True
"""For backwards compatibility. If legacy val openai_api_base is passed in, try to
infer if it is a base_url or azure_endpoint and update accordingly.
"""
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
if values["n"] < 1:
raise ValueError("n must be at least 1.")
if values["streaming"] and values["n"] > 1: | lang/api.python.langchain.com/en/latest/_modules/langchain/llms/openai.html |
dc417efc9b34-19 | if values["streaming"] and values["n"] > 1:
raise ValueError("Cannot stream results when n > 1.")
if values["streaming"] and values["best_of"] > 1:
raise ValueError("Cannot stream results when best_of > 1.")
# Check OPENAI_KEY for backwards compatibility.
# TODO: Remove OPENAI_API_KEY support to avoid possible conflict when using
# other forms of azure credentials.
values["openai_api_key"] = (
values["openai_api_key"]
or os.getenv("AZURE_OPENAI_API_KEY")
or os.getenv("OPENAI_API_KEY")
)
values["azure_endpoint"] = values["azure_endpoint"] or os.getenv(
"AZURE_OPENAI_ENDPOINT"
)
values["azure_ad_token"] = values["azure_ad_token"] or os.getenv(
"AZURE_OPENAI_AD_TOKEN"
)
values["openai_api_base"] = values["openai_api_base"] or os.getenv(
"OPENAI_API_BASE"
)
values["openai_proxy"] = get_from_dict_or_env(
values,
"openai_proxy",
"OPENAI_PROXY",
default="",
)
values["openai_organization"] = (
values["openai_organization"]
or os.getenv("OPENAI_ORG_ID")
or os.getenv("OPENAI_ORGANIZATION")
)
values["openai_api_version"] = values["openai_api_version"] or os.getenv(
"OPENAI_API_VERSION"
)
values["openai_api_type"] = get_from_dict_or_env(
values, "openai_api_type", "OPENAI_API_TYPE", default="azure"
)
try:
import openai
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
if is_openai_v1():
# For backwards compatibility. Before openai v1, no distinction was made
# between azure_endpoint and base_url (openai_api_base).
openai_api_base = values["openai_api_base"]
if openai_api_base and values["validate_base_url"]:
if "/openai" not in openai_api_base:
values["openai_api_base"] = (
values["openai_api_base"].rstrip("/") + "/openai"
)
warnings.warn(
"As of openai>=1.0.0, Azure endpoints should be specified via "
f"the `azure_endpoint` param not `openai_api_base` "
f"(or alias `base_url`). Updating `openai_api_base` from "
f"{openai_api_base} to {values['openai_api_base']}."
)
if values["deployment_name"]:
warnings.warn(
"As of openai>=1.0.0, if `deployment_name` (or alias "
"`azure_deployment`) is specified then "
"`openai_api_base` (or alias `base_url`) should not be. "
"Instead use `deployment_name` (or alias `azure_deployment`) "
"and `azure_endpoint`."
)
if values["deployment_name"] not in values["openai_api_base"]:
warnings.warn(
"As of openai>=1.0.0, if `openai_api_base` " | lang/api.python.langchain.com/en/latest/_modules/langchain/llms/openai.html |
dc417efc9b34-21 | "(or alias `base_url`) is specified it is expected to be "
"of the form "
"https://example-resource.azure.openai.com/openai/deployments/example-deployment. " # noqa: E501
f"Updating {openai_api_base} to "
f"{values['openai_api_base']}."
)
values["openai_api_base"] += (
"/deployments/" + values["deployment_name"]
)
values["deployment_name"] = None
client_params = {
"api_version": values["openai_api_version"],
"azure_endpoint": values["azure_endpoint"],
"azure_deployment": values["deployment_name"],
"api_key": values["openai_api_key"],
"azure_ad_token": values["azure_ad_token"],
"azure_ad_token_provider": values["azure_ad_token_provider"],
"organization": values["openai_organization"],
"base_url": values["openai_api_base"],
"timeout": values["request_timeout"],
"max_retries": values["max_retries"],
"default_headers": values["default_headers"],
"default_query": values["default_query"],
"http_client": values["http_client"],
}
values["client"] = openai.AzureOpenAI(**client_params).completions
values["async_client"] = openai.AsyncAzureOpenAI(
**client_params
).completions
else:
values["client"] = openai.Completion
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
return {
**{"deployment_name": self.deployment_name},
**super()._identifying_params,
}
@property
def _invocation_params(self) -> Dict[str, Any]:
if is_openai_v1():
openai_params = {"model": self.deployment_name}
else:
openai_params = {
"engine": self.deployment_name,
"api_type": self.openai_api_type,
"api_version": self.openai_api_version,
}
return {**openai_params, **super()._invocation_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "azure"
@property
def lc_attributes(self) -> Dict[str, Any]:
return {
"openai_api_type": self.openai_api_type,
"openai_api_version": self.openai_api_version,
}
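# A hedged construction sketch for AzureOpenAI based on the field docstrings
# above; the endpoint, deployment, and API version values are illustrative:
#
#     llm = AzureOpenAI(
#         azure_endpoint="https://example-resource.azure.openai.com/",
#         azure_deployment="my-deployment",  # alias for deployment_name
#         api_version="2023-05-15",          # alias for openai_api_version
#         api_key="...",                     # or set AZURE_OPENAI_API_KEY
#     )
#     print(llm("Tell me a joke."))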
[docs]class OpenAIChat(BaseLLM):
"""OpenAI Chat large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.llms import OpenAIChat
openaichat = OpenAIChat(model_name="gpt-3.5-turbo")
"""
client: Any = Field(default=None, exclude=True) #: :meta private:
async_client: Any = Field(default=None, exclude=True) #: :meta private:
model_name: str = "gpt-3.5-turbo"
"""Model name to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
# When updating this to use a SecretStr
# Check for classes that derive from this class (as some of them
# may assume openai_api_key is a str)
openai_api_key: Optional[str] = Field(default=None, alias="api_key")
"""Automatically inferred from env var `OPENAI_API_KEY` if not provided."""
openai_api_base: Optional[str] = Field(default=None, alias="base_url")
"""Base URL path for API requests, leave blank if not using a proxy or service
emulator."""
# to support explicit proxy for OpenAI
openai_proxy: Optional[str] = None
max_retries: int = 6
"""Maximum number of retries to make when generating."""
prefix_messages: List = Field(default_factory=list)
"""Series of messages for Chat input."""
streaming: bool = False
"""Whether to stream the results or not."""
allowed_special: Union[Literal["all"], AbstractSet[str]] = set()
"""Set of special tokens that are allowed。"""
disallowed_special: Union[Literal["all"], Collection[str]] = "all"
"""Set of special tokens that are not allowed。"""
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
openai_api_base = get_from_dict_or_env(
values,
"openai_api_base",
"OPENAI_API_BASE",
default="",
)
openai_proxy = get_from_dict_or_env(
values,
"openai_proxy",
"OPENAI_PROXY",
default="",
)
openai_organization = get_from_dict_or_env(
values, "openai_organization", "OPENAI_ORGANIZATION", default=""
)
try:
import openai
openai.api_key = openai_api_key
if openai_api_base:
openai.api_base = openai_api_base
if openai_organization:
openai.organization = openai_organization
if openai_proxy:
openai.proxy = {"http": openai_proxy, "https": openai_proxy} # type: ignore[assignment] # noqa: E501
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
try:
values["client"] = openai.ChatCompletion
except AttributeError:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`."
)
warnings.warn(
"You are trying to use a chat model. This way of initializing it is "
"no longer supported. Instead, please use: "
"`from langchain.chat_models import ChatOpenAI`"
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
return self.model_kwargs
def _get_chat_params(
self, prompts: List[str], stop: Optional[List[str]] = None
) -> Tuple:
if len(prompts) > 1:
raise ValueError(
f"OpenAIChat currently only supports single prompt, got {prompts}"
)
messages = self.prefix_messages + [{"role": "user", "content": prompts[0]}]
params: Dict[str, Any] = {**{"model": self.model_name}, **self._default_params}
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
if params.get("max_tokens") == -1:
# for ChatGPT api, omitting max_tokens is equivalent to having no limit
del params["max_tokens"]
return messages, params
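# A hedged illustration of _get_chat_params: the single prompt becomes a user
# message appended to prefix_messages (assumes OPENAI_API_KEY is set so
# construction succeeds):
#
#     chat = OpenAIChat(prefix_messages=[{"role": "system", "content": "Be terse."}])
#     messages, params = chat._get_chat_params(["Tell me a joke."], stop=["\n"])
#     # messages == [{"role": "system", "content": "Be terse."},
#     #              {"role": "user", "content": "Tell me a joke."}]
#     # params == {"model": "gpt-3.5-turbo", "stop": ["\n"]}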
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
messages, params = self._get_chat_params([prompt], stop)
params = {**params, **kwargs, "stream": True}
for stream_resp in completion_with_retry(
self, messages=messages, run_manager=run_manager, **params
):
if not isinstance(stream_resp, dict):
stream_resp = stream_resp.dict()
token = stream_resp["choices"][0]["delta"].get("content", "")
chunk = GenerationChunk(text=token)
yield chunk
if run_manager:
run_manager.on_llm_new_token(token, chunk=chunk)
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
messages, params = self._get_chat_params([prompt], stop)
params = {**params, **kwargs, "stream": True}
async for stream_resp in await acompletion_with_retry(
self, messages=messages, run_manager=run_manager, **params
):
if not isinstance(stream_resp, dict):
stream_resp = stream_resp.dict()
token = stream_resp["choices"][0]["delta"].get("content", "")
chunk = GenerationChunk(text=token)
yield chunk
if run_manager:
await run_manager.on_llm_new_token(token, chunk=chunk)
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
if self.streaming:
generation: Optional[GenerationChunk] = None
for chunk in self._stream(prompts[0], stop, run_manager, **kwargs):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
return LLMResult(generations=[[generation]])
messages, params = self._get_chat_params(prompts, stop)
params = {**params, **kwargs}
full_response = completion_with_retry(
self, messages=messages, run_manager=run_manager, **params
)
if not isinstance(full_response, dict):
full_response = full_response.dict()
llm_output = {
"token_usage": full_response["usage"],
"model_name": self.model_name,
}
return LLMResult(
generations=[
[Generation(text=full_response["choices"][0]["message"]["content"])]
],
llm_output=llm_output,
)
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
if self.streaming:
generation: Optional[GenerationChunk] = None
async for chunk in self._astream(prompts[0], stop, run_manager, **kwargs):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
return LLMResult(generations=[[generation]])
messages, params = self._get_chat_params(prompts, stop)
params = {**params, **kwargs}
full_response = await acompletion_with_retry(
self, messages=messages, run_manager=run_manager, **params
)
if not isinstance(full_response, dict):
full_response = full_response.dict()
llm_output = {
"token_usage": full_response["usage"],
"model_name": self.model_name,
}
return LLMResult(
generations=[
[Generation(text=full_response["choices"][0]["message"]["content"])]
],
llm_output=llm_output,
)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "openai-chat"
[docs] def get_token_ids(self, text: str) -> List[int]:
"""Get the token IDs using the tiktoken package."""
# tiktoken NOT supported for Python < 3.8
if sys.version_info[1] < 8:
return super().get_token_ids(text)
try:
import tiktoken
except ImportError:
raise ImportError(
"Could not import tiktoken python package. "
"This is needed in order to calculate get_num_tokens. "
"Please install it with `pip install tiktoken`." | lang/api.python.langchain.com/en/latest/_modules/langchain/llms/openai.html |
dc417efc9b34-29 | "Please install it with `pip install tiktoken`."
)
enc = tiktoken.encoding_for_model(self.model_name)
return enc.encode(
text,
allowed_special=self.allowed_special,
disallowed_special=self.disallowed_special,
)
Source code for langchain.llms.opaqueprompts
import logging
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.pydantic_v1 import Extra, root_validator
from langchain.schema.language_model import BaseLanguageModel
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
[docs]class OpaquePrompts(LLM):
"""An LLM wrapper that uses OpaquePrompts to sanitize prompts.
Wraps another LLM and sanitizes prompts before passing them to the LLM, then
de-sanitizes the response.
To use, you should have the ``opaqueprompts`` python package installed,
and the environment variable ``OPAQUEPROMPTS_API_KEY`` set with
your API key, or pass it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.llms import OpaquePrompts
from langchain.chat_models import ChatOpenAI
op_llm = OpaquePrompts(base_llm=ChatOpenAI())
"""
base_llm: BaseLanguageModel
"""The base LLM to use."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validates that the OpaquePrompts API key and the Python package exist."""
try:
import opaqueprompts as op
except ImportError:
raise ImportError(
"Could not import the `opaqueprompts` Python package, "
"please install it with `pip install opaqueprompts`."
) | lang/api.python.langchain.com/en/latest/_modules/langchain/llms/opaqueprompts.html |
if op.__package__ is None:
raise ValueError(
"Could not properly import `opaqueprompts`, "
"opaqueprompts.__package__ is None."
)
api_key = get_from_dict_or_env(
values, "opaqueprompts_api_key", "OPAQUEPROMPTS_API_KEY", default=""
)
if not api_key:
raise ValueError(
"Could not find OPAQUEPROMPTS_API_KEY in the environment. "
"Please set it to your OpaquePrompts API key."
"You can get it by creating an account on the OpaquePrompts website: "
"https://opaqueprompts.opaque.co/ ."
)
return values
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call base LLM with sanitization before and de-sanitization after.
Args:
prompt: The prompt to pass into the model.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = op_llm("Tell me a joke.")
"""
import opaqueprompts as op
_run_manager = run_manager or CallbackManagerForLLMRun.get_noop_manager()
# sanitize the prompt by replacing the sensitive information with a placeholder
sanitize_response: op.SanitizeResponse = op.sanitize([prompt])
sanitized_prompt_value_str = sanitize_response.sanitized_texts[0] | lang/api.python.langchain.com/en/latest/_modules/langchain/llms/opaqueprompts.html |
# TODO: Add in callbacks once child runs for LLMs are supported by LangSmith.
# call the LLM with the sanitized prompt and get the response
llm_response = self.base_llm.predict(
sanitized_prompt_value_str,
stop=stop,
)
# desanitize the response by restoring the original sensitive information
desanitize_response: op.DesanitizeResponse = op.desanitize(
llm_response,
secure_context=sanitize_response.secure_context,
)
return desanitize_response.desanitized_text
@property
def _llm_type(self) -> str:
"""Return type of LLM.
This is an override of the base class method.
"""
return "opaqueprompts" | lang/api.python.langchain.com/en/latest/_modules/langchain/llms/opaqueprompts.html |
4777622cb657-0 | Source code for langchain.llms.stochasticai
import logging
import time
from typing import Any, Dict, List, Mapping, Optional
import requests
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Extra, Field, root_validator
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
[docs]class StochasticAI(LLM):
"""StochasticAI large language models.
To use, you should have the environment variable ``STOCHASTICAI_API_KEY``
set with your API key.
Example:
.. code-block:: python
from langchain.llms import StochasticAI
stochasticai = StochasticAI(api_url="")
"""
api_url: str = ""
"""Model name to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not
explicitly specified."""
stochasticai_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning( | lang/api.python.langchain.com/en/latest/_modules/langchain/llms/stochasticai.html |
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
stochasticai_api_key = get_from_dict_or_env(
values, "stochasticai_api_key", "STOCHASTICAI_API_KEY"
)
values["stochasticai_api_key"] = stochasticai_api_key
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"endpoint_url": self.api_url},
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "stochasticai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to StochasticAI's complete endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = stochasticai("Tell me a joke.")
"""
params = self.model_kwargs or {} | lang/api.python.langchain.com/en/latest/_modules/langchain/llms/stochasticai.html |
params = {**params, **kwargs}
response_post = requests.post(
url=self.api_url,
json={"prompt": prompt, "params": params},
headers={
"apiKey": f"{self.stochasticai_api_key}",
"Accept": "application/json",
"Content-Type": "application/json",
},
)
response_post.raise_for_status()
response_post_json = response_post.json()
completed = False
while not completed:
response_get = requests.get(
url=response_post_json["data"]["responseUrl"],
headers={
"apiKey": f"{self.stochasticai_api_key}",
"Accept": "application/json",
"Content-Type": "application/json",
},
)
response_get.raise_for_status()
response_get_json = response_get.json()["data"]
text = response_get_json.get("completion")
completed = text is not None
time.sleep(0.5)
# the service returns the completion as a list of strings; take the first
text = text[0]
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text | lang/api.python.langchain.com/en/latest/_modules/langchain/llms/stochasticai.html |
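The _call method above implements a submit-then-poll protocol. A minimal standalone sketch of the same flow (the endpoint URL and key are placeholders; the response shapes mirror the code above):
import time
import requests
API_URL = "https://example.stochastic.ai/modelApi/submit/my-model"  # placeholder
HEADERS = {
    "apiKey": "YOUR_STOCHASTICAI_API_KEY",  # placeholder
    "Accept": "application/json",
    "Content-Type": "application/json",
}
# 1) Submit the prompt; the service replies with a URL to poll.
job = requests.post(API_URL, json={"prompt": "Tell me a joke.", "params": {}}, headers=HEADERS)
job.raise_for_status()
poll_url = job.json()["data"]["responseUrl"]
# 2) Poll until the completion shows up.
completion = None
while completion is None:
    time.sleep(0.5)
    result = requests.get(poll_url, headers=HEADERS)
    result.raise_for_status()
    completion = result.json()["data"].get("completion")
print(completion[0])  # the completion arrives as a list of strings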
7a6292de207a-0 | langchain_experimental.fallacy_removal.base.FallacyChain¶
class langchain_experimental.fallacy_removal.base.FallacyChain[source]¶
Bases: Chain
Chain for applying logical fallacy evaluations. It is modeled after Constitutional AI and uses the same format, but applies logical fallacies as generalized rules to remove from the output.
Example
from langchain.llms import OpenAI
from langchain.chains import LLMChain
from langchain_experimental.fallacy import FallacyChain
from langchain_experimental.fallacy_removal.models import LogicalFallacy
llm = OpenAI()
qa_prompt = PromptTemplate(
template="Q: {question} A:",
input_variables=["question"],
)
qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
fallacy_chain = FallacyChain.from_llm(
llm=llm,
chain=qa_chain,
logical_fallacies=[
LogicalFallacy(
fallacy_critique_request="Tell if this answer meets criteria.",
fallacy_revision_request= "Give an answer that meets better criteria.",
)
],
)
fallacy_chain.run(question="How do I know if the earth is round?")
Create a new model by parsing and validating input data from keyword arguments.
Raises ValidationError if the input data cannot be parsed to form a valid model.
param callback_manager: Optional[BaseCallbackManager] = None¶
Deprecated, use callbacks instead.
param callbacks: Callbacks = None¶
Optional list of callback handlers (or callback manager). Defaults to None.
Callback handlers are called throughout the lifecycle of a call to a chain,
starting with on_chain_start, ending with on_chain_end or on_chain_error.
Each custom chain can optionally call additional callback methods, see Callback docs | lang/api.python.langchain.com/en/latest/fallacy_removal/langchain_experimental.fallacy_removal.base.FallacyChain.html |
for full details.
param chain: LLMChain [Required]¶
param fallacy_critique_chain: LLMChain [Required]¶
param fallacy_revision_chain: LLMChain [Required]¶
param logical_fallacies: List[LogicalFallacy] [Required]¶
param memory: Optional[BaseMemory] = None¶
Optional memory object. Defaults to None.
Memory is a class that gets called at the start
and at the end of every chain. At the start, memory loads variables and passes
them along in the chain. At the end, it saves any returned variables.
There are many different types of memory - please see memory docs
for the full catalog.
param metadata: Optional[Dict[str, Any]] = None¶
Optional metadata associated with the chain. Defaults to None.
This metadata will be associated with each call to this chain,
and passed as arguments to the handlers defined in callbacks.
You can use these to, for example, identify a specific instance of a chain with its use case.
param return_intermediate_steps: bool = False¶
param tags: Optional[List[str]] = None¶
Optional list of tags associated with the chain. Defaults to None.
These tags will be associated with each call to this chain,
and passed as arguments to the handlers defined in callbacks.
You can use these to, for example, identify a specific instance of a chain with its use case.
param verbose: bool [Optional]¶
Whether or not run in verbose mode. In verbose mode, some intermediate logs
will be printed to the console. Defaults to the global verbose value,
accessible via langchain.globals.get_verbose(). | lang/api.python.langchain.com/en/latest/fallacy_removal/langchain_experimental.fallacy_removal.base.FallacyChain.html |
__call__(inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, run_name: Optional[str] = None, include_run_info: bool = False) → Dict[str, Any]¶
Execute the chain.
Parameters
inputs – Dictionary of inputs, or single input if chain expects
only one param. Should contain all inputs specified in
Chain.input_keys except for inputs that will be set by the chain’s
memory.
return_only_outputs – Whether to return only outputs in the
response. If True, only new keys generated by this chain will be
returned. If False, both input keys and new keys generated by this
chain will be returned. Defaults to False.
callbacks – Callbacks to use for this chain run. These will be called in
addition to callbacks passed to the chain during construction, but only
these runtime callbacks will propagate to calls to other objects.
tags – List of string tags to pass to all callbacks. These will be passed in
addition to tags passed to the chain during construction, but only
these runtime tags will propagate to calls to other objects.
metadata – Optional metadata associated with the chain. Defaults to None
include_run_info – Whether to include run info in the response. Defaults
to False.
Returns
A dict of named outputs. Should contain all outputs specified in Chain.output_keys.
async abatch(inputs: List[Input], config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None, *, return_exceptions: bool = False, **kwargs: Optional[Any]) → List[Output]¶
Default implementation runs ainvoke in parallel using asyncio.gather. | lang/api.python.langchain.com/en/latest/fallacy_removal/langchain_experimental.fallacy_removal.base.FallacyChain.html |
The default implementation of batch works well for IO bound runnables.
Subclasses should override this method if they can batch more efficiently;
e.g., if the underlying runnable uses an API which supports a batch mode.
async acall(inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, run_name: Optional[str] = None, include_run_info: bool = False) → Dict[str, Any]¶
Asynchronously execute the chain.
Parameters
inputs – Dictionary of inputs, or single input if chain expects
only one param. Should contain all inputs specified in
Chain.input_keys except for inputs that will be set by the chain’s
memory.
return_only_outputs – Whether to return only outputs in the
response. If True, only new keys generated by this chain will be
returned. If False, both input keys and new keys generated by this
chain will be returned. Defaults to False.
callbacks – Callbacks to use for this chain run. These will be called in
addition to callbacks passed to the chain during construction, but only
these runtime callbacks will propagate to calls to other objects.
tags – List of string tags to pass to all callbacks. These will be passed in
addition to tags passed to the chain during construction, but only
these runtime tags will propagate to calls to other objects.
metadata – Optional metadata associated with the chain. Defaults to None
include_run_info – Whether to include run info in the response. Defaults
to False.
Returns
A dict of named outputs. Should contain all outputs specified in Chain.output_keys. | lang/api.python.langchain.com/en/latest/fallacy_removal/langchain_experimental.fallacy_removal.base.FallacyChain.html
async ainvoke(input: Dict[str, Any], config: Optional[RunnableConfig] = None, **kwargs: Any) → Dict[str, Any]¶
Default implementation of ainvoke, calls invoke from a thread.
The default implementation allows usage of async code even if
the runnable did not implement a native async version of invoke.
Subclasses should override this method if they can run asynchronously.
apply(input_list: List[Dict[str, Any]], callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None) → List[Dict[str, str]]¶
Call the chain on all inputs in the list.
async arun(*args: Any, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any) → Any¶
Convenience method for executing chain.
The main difference between this method and Chain.__call__ is that this
method expects inputs to be passed directly in as positional arguments or
keyword arguments, whereas Chain.__call__ expects a single input dictionary
with all the inputs.
Parameters
*args – If the chain expects a single input, it can be passed in as the
sole positional argument.
callbacks – Callbacks to use for this chain run. These will be called in
addition to callbacks passed to the chain during construction, but only
these runtime callbacks will propagate to calls to other objects.
tags – List of string tags to pass to all callbacks. These will be passed in
addition to tags passed to the chain during construction, but only
these runtime tags will propagate to calls to other objects.
**kwargs – If the chain expects multiple inputs, they can be passed in
directly as keyword arguments. | lang/api.python.langchain.com/en/latest/fallacy_removal/langchain_experimental.fallacy_removal.base.FallacyChain.html |
Returns
The chain output.
Example
# Suppose we have a single-input chain that takes a 'question' string:
await chain.arun("What's the temperature in Boise, Idaho?")
# -> "The temperature in Boise is..."
# Suppose we have a multi-input chain that takes a 'question' string
# and 'context' string:
question = "What's the temperature in Boise, Idaho?"
context = "Weather report for Boise, Idaho on 07/03/23..."
await chain.arun(question=question, context=context)
# -> "The temperature in Boise is..."
async astream(input: Input, config: Optional[RunnableConfig] = None, **kwargs: Optional[Any]) → AsyncIterator[Output]¶
Default implementation of astream, which calls ainvoke.
Subclasses should override this method if they support streaming output.
async astream_log(input: Any, config: Optional[RunnableConfig] = None, *, diff: bool = True, include_names: Optional[Sequence[str]] = None, include_types: Optional[Sequence[str]] = None, include_tags: Optional[Sequence[str]] = None, exclude_names: Optional[Sequence[str]] = None, exclude_types: Optional[Sequence[str]] = None, exclude_tags: Optional[Sequence[str]] = None, **kwargs: Optional[Any]) → Union[AsyncIterator[RunLogPatch], AsyncIterator[RunLog]]¶
Stream all output from a runnable, as reported to the callback system.
This includes all inner runs of LLMs, Retrievers, Tools, etc.
Output is streamed as Log objects, which include a list of
jsonpatch ops that describe how the state of the run has changed in each
step, and the final state of the run.
The jsonpatch ops can be applied in order to construct state. | lang/api.python.langchain.com/en/latest/fallacy_removal/langchain_experimental.fallacy_removal.base.FallacyChain.html |
async atransform(input: AsyncIterator[Input], config: Optional[RunnableConfig] = None, **kwargs: Optional[Any]) → AsyncIterator[Output]¶
Default implementation of atransform, which buffers input and calls astream.
Subclasses should override this method if they can start producing output while
input is still being generated.
batch(inputs: List[Input], config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None, *, return_exceptions: bool = False, **kwargs: Optional[Any]) → List[Output]¶
Default implementation runs invoke in parallel using a thread pool executor.
The default implementation of batch works well for IO bound runnables.
Subclasses should override this method if they can batch more efficiently;
e.g., if the underlying runnable uses an API which supports a batch mode.
bind(**kwargs: Any) → Runnable[Input, Output]¶
Bind arguments to a Runnable, returning a new Runnable.
config_schema(*, include: Optional[Sequence[str]] = None) → Type[BaseModel]¶
The type of config this runnable accepts specified as a pydantic model.
To mark a field as configurable, see the configurable_fields
and configurable_alternatives methods.
Parameters
include – A list of fields to include in the config schema.
Returns
A pydantic model that can be used to validate config.
configurable_alternatives(which: ConfigurableField, default_key: str = 'default', **kwargs: Union[Runnable[Input, Output], Callable[[], Runnable[Input, Output]]]) → RunnableSerializable[Input, Output]¶
configurable_fields(**kwargs: Union[ConfigurableField, ConfigurableFieldSingleOption, ConfigurableFieldMultiOption]) → RunnableSerializable[Input, Output]¶ | lang/api.python.langchain.com/en/latest/fallacy_removal/langchain_experimental.fallacy_removal.base.FallacyChain.html |
7a6292de207a-7 | classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model¶
Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data.
Default values are respected, but no other validation is performed.
Behaves as if Config.extra = ‘allow’ was set since it adds all passed values
copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model¶
Duplicate a model, optionally choose which fields to include, exclude and change.
Parameters
include – fields to include in new model
exclude – fields to exclude from new model, as with values this takes precedence over include
update – values to change/add in the new model. Note: the data is not validated before creating
the new model: you should trust this data
deep – set to True to make a deep copy of the model
Returns
new model instance
dict(**kwargs: Any) → Dict¶
Dictionary representation of chain.
Expects Chain._chain_type property to be implemented and for memory to be null.
Parameters
**kwargs – Keyword arguments passed to default pydantic.BaseModel.dict
method.
Returns
A dictionary representation of the chain.
Example
chain.dict(exclude_unset=True)
# -> {"_type": "foo", "verbose": False, ...} | lang/api.python.langchain.com/en/latest/fallacy_removal/langchain_experimental.fallacy_removal.base.FallacyChain.html |
7a6292de207a-8 | classmethod from_llm(llm: BaseLanguageModel, chain: LLMChain, fallacy_critique_prompt: BasePromptTemplate = FewShotPromptTemplate(input_variables=['fallacy_critique_request', 'input_prompt', 'output_from_model'], examples=[{'input_prompt': "If everyone says the Earth is round, how do I know that's correct?", 'output_from_model': 'The earth is round because your teacher says it is', 'fallacy_critique_request': 'Identify specific ways in which the model’s previous response had a logical fallacy. Also point out potential logical fallacies in the human’s questions and responses. Examples of logical fallacies include but are not limited to ad hominem, ad populum, appeal to emotion and false causality.', 'fallacy_critique': 'This statement contains the logical fallacy of Ad Verecundiam or Appeal to Authority. It is a fallacy because it asserts something to be true purely based on the authority of the source making the claim, without any actual evidence to support it. Fallacy Critique Needed', 'fallacy_revision': 'The earth is round based on evidence from observations of its curvature from high altitudes, photos from space showing its spherical shape, circumnavigation, and the fact that we see its rounded shadow on the moon during lunar eclipses.'}, {'input_prompt': 'Should we invest more in our school music program? After all, studies show students involved in music perform better academically.', 'output_from_model': "I don't think we should invest more in the music program. Playing the piccolo won't teach someone better math skills.", 'fallacy_critique_request': 'Identify specific ways in which the model’s previous response had a logical fallacy. Also point out potential logical fallacies in the human’s questions and responses. Examples of logical fallacies include but are not limited to ad homimem, ad populum, appeal to emotion and false causality.', 'fallacy_critique': 'This answer commits the division fallacy by rejecting the argument based on assuming capabilities true of the parts (playing an instrument like piccolo) also apply to the whole (the full music program). The answer focuses only on part of the music program rather than considering it as a whole. Fallacy Critique Needed.', 'fallacy_revision': 'While playing an instrument may teach discipline, more evidence is needed on whether music education courses improve critical thinking skills across subjects before determining if increased investment in the whole music program is warranted.'}], example_prompt=PromptTemplate(input_variables=['fallacy_critique', 'fallacy_critique_request', 'input_prompt', 'output_from_model'], template='Human: {input_prompt}\n\nModel: {output_from_model}\n\nFallacy Critique Request: {fallacy_critique_request}\n\nFallacy Critique: {fallacy_critique}'), suffix='Human: {input_prompt}\nModel: {output_from_model}\n\nFallacy Critique Request: {fallacy_critique_request}\n\nFallacy Critique:', example_separator='\n === \n', prefix="Below is a conversation between a human and an AI assistant. If there is no material critique of the model output, append to the end of the Fallacy Critique: 'No fallacy critique needed.' If there is material critique of the model output, append to the end of the Fallacy Critique: 'Fallacy Critique needed.'"), fallacy_revision_prompt: BasePromptTemplate = FewShotPromptTemplate(input_variables=['fallacy_critique', 'fallacy_critique_request', 'fallacy_revision_request', 'input_prompt', 'output_from_model'], examples=[{'input_prompt': "If everyone says the Earth is round, how do I know that's correct?", 'output_from_model': 'The earth is round because your teacher says it is', 'fallacy_critique_request': 'Identify specific ways in which the model’s previous response had a logical fallacy. Also point out potential logical fallacies in the human’s questions and responses. Examples of logical fallacies include but are not limited to ad hominem, ad populum, appeal to emotion and false causality.', 'fallacy_critique': 'This statement contains the logical fallacy of Ad Verecundiam or Appeal to Authority. It is a fallacy because it asserts something to be true purely based on the authority of the source making the claim, without any actual evidence to support it. Fallacy Critique Needed', 'fallacy_revision_request': 'Please rewrite the model response to remove all logical fallacies, and to politely point out any logical fallacies from the human.', 'fallacy_revision': 'The earth is round based on evidence from observations of its curvature from high altitudes, photos from space showing its spherical shape, circumnavigation, and the fact that we see its rounded shadow on the moon during lunar eclipses.'}, {'input_prompt': 'Should we invest more in our school music program? After all, studies show students involved in music perform better academically.', 'output_from_model': "I don't think we should invest more in the music program. Playing the piccolo won't teach someone better math skills.", 'fallacy_critique_request': 'Identify specific ways in which the model’s previous response had a logical fallacy. Also point out potential logical fallacies in the human’s questions and responses. Examples of logical fallacies include but are not limited to ad homimem, ad populum, appeal to emotion and false causality.', 'fallacy_critique': 'This answer commits the division fallacy by rejecting the argument based on assuming capabilities true of the parts (playing an instrument like piccolo) also apply to the whole (the full music program). The answer focuses only on part of the music program rather than considering it as a whole. Fallacy Critique Needed.', 'fallacy_revision_request': 'Please rewrite the model response to remove all logical fallacies, and to politely point out any logical fallacies from the human.', 'fallacy_revision': 'While playing an instrument may teach discipline, more evidence is needed on whether music education courses improve critical thinking skills across subjects before determining if increased investment in the whole music program is warranted.'}], example_prompt=PromptTemplate(input_variables=['fallacy_critique', 'fallacy_critique_request', 'input_prompt', 'output_from_model'], template='Human: {input_prompt}\n\nModel: {output_from_model}\n\nFallacy Critique Request: {fallacy_critique_request}\n\nFallacy Critique: {fallacy_critique}'), suffix='Human: {input_prompt}\n\nModel: {output_from_model}\n\nFallacy Critique Request: {fallacy_critique_request}\n\nFallacy Critique: {fallacy_critique}\n\nIf the fallacy critique does not identify anything worth changing, ignore the Fallacy Revision Request and do not make any revisions. Instead, return "No revisions needed".\n\nIf the fallacy critique does identify something worth changing, please revise the model response based on the Fallacy Revision Request.\n\nFallacy Revision Request: {fallacy_revision_request}\n\nFallacy Revision:', example_separator='\n === \n', prefix='Below is a conversation between a human and an AI assistant.'), **kwargs: Any) → FallacyChain[source]¶ | lang/api.python.langchain.com/en/latest/fallacy_removal/langchain_experimental.fallacy_removal.base.FallacyChain.html
7a6292de207a-13 | Create a chain from an LLM.
classmethod from_orm(obj: Any) → Model¶
classmethod get_fallacies(names: Optional[List[str]] = None) → List[LogicalFallacy][source]¶
get_input_schema(config: Optional[RunnableConfig] = None) → Type[BaseModel]¶
Get a pydantic model that can be used to validate input to the runnable.
Runnables that leverage the configurable_fields and configurable_alternatives
methods will have a dynamic input schema that depends on which
configuration the runnable is invoked with.
This method allows getting an input schema for a specific configuration.
Parameters
config – A config to use when generating the schema.
Returns
A pydantic model that can be used to validate input.
classmethod get_lc_namespace() → List[str]¶
Get the namespace of the langchain object.
For example, if the class is langchain.llms.openai.OpenAI, then the
namespace is [“langchain”, “llms”, “openai”]
get_output_schema(config: Optional[RunnableConfig] = None) → Type[BaseModel]¶
Get a pydantic model that can be used to validate output to the runnable.
Runnables that leverage the configurable_fields and configurable_alternatives
methods will have a dynamic output schema that depends on which
configuration the runnable is invoked with.
This method allows getting an output schema for a specific configuration.
Parameters
config – A config to use when generating the schema.
Returns
A pydantic model that can be used to validate output.
invoke(input: Dict[str, Any], config: Optional[RunnableConfig] = None, **kwargs: Any) → Dict[str, Any]¶
Transform a single input into an output. Override to implement.
Parameters
input – The input to the runnable. | lang/api.python.langchain.com/en/latest/fallacy_removal/langchain_experimental.fallacy_removal.base.FallacyChain.html |
config – A config to use when invoking the runnable.
The config supports standard keys like ‘tags’, ‘metadata’ for tracing
purposes, ‘max_concurrency’ for controlling how much work to do
in parallel, and other keys. Please refer to the RunnableConfig
for more details.
Returns
The output of the runnable.
classmethod is_lc_serializable() → bool¶
Is this class serializable?
json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode¶
Generate a JSON representation of the model, include and exclude arguments as per dict().
encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().
classmethod lc_id() → List[str]¶
A unique identifier for this class for serialization purposes.
The unique identifier is a list of strings that describes the path
to the object.
map() → Runnable[List[Input], List[Output]]¶
Return a new Runnable that maps a list of inputs to a list of outputs,
by calling invoke() with each input.
classmethod parse_file(path: Union[str, Path], *, content_type: unicode = None, encoding: unicode = 'utf8', proto: Protocol = None, allow_pickle: bool = False) → Model¶
classmethod parse_obj(obj: Any) → Model¶ | lang/api.python.langchain.com/en/latest/fallacy_removal/langchain_experimental.fallacy_removal.base.FallacyChain.html |
classmethod parse_raw(b: Union[str, bytes], *, content_type: unicode = None, encoding: unicode = 'utf8', proto: Protocol = None, allow_pickle: bool = False) → Model¶
prep_inputs(inputs: Union[Dict[str, Any], Any]) → Dict[str, str]¶
Validate and prepare chain inputs, including adding inputs from memory.
Parameters
inputs – Dictionary of raw inputs, or single input if chain expects
only one param. Should contain all inputs specified in
Chain.input_keys except for inputs that will be set by the chain’s
memory.
Returns
A dictionary of all inputs, including those added by the chain’s memory.
prep_outputs(inputs: Dict[str, str], outputs: Dict[str, str], return_only_outputs: bool = False) → Dict[str, str]¶
Validate and prepare chain outputs, and save info about this run to memory.
Parameters
inputs – Dictionary of chain inputs, including any inputs added by chain
memory.
outputs – Dictionary of initial chain outputs.
return_only_outputs – Whether to only return the chain outputs. If False,
inputs are also added to the final outputs.
Returns
A dict of the final chain outputs.
run(*args: Any, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any) → Any¶
Convenience method for executing chain.
The main difference between this method and Chain.__call__ is that this
method expects inputs to be passed directly in as positional arguments or
keyword arguments, whereas Chain.__call__ expects a single input dictionary
with all the inputs.
Parameters
*args – If the chain expects a single input, it can be passed in as the
sole positional argument. | lang/api.python.langchain.com/en/latest/fallacy_removal/langchain_experimental.fallacy_removal.base.FallacyChain.html |
callbacks – Callbacks to use for this chain run. These will be called in
addition to callbacks passed to the chain during construction, but only
these runtime callbacks will propagate to calls to other objects.
tags – List of string tags to pass to all callbacks. These will be passed in
addition to tags passed to the chain during construction, but only
these runtime tags will propagate to calls to other objects.
**kwargs – If the chain expects multiple inputs, they can be passed in
directly as keyword arguments.
Returns
The chain output.
Example
# Suppose we have a single-input chain that takes a 'question' string:
chain.run("What's the temperature in Boise, Idaho?")
# -> "The temperature in Boise is..."
# Suppose we have a multi-input chain that takes a 'question' string
# and 'context' string:
question = "What's the temperature in Boise, Idaho?"
context = "Weather report for Boise, Idaho on 07/03/23..."
chain.run(question=question, context=context)
# -> "The temperature in Boise is..."
save(file_path: Union[Path, str]) → None¶
Save the chain.
Expects Chain._chain_type property to be implemented and for memory to be null.
Parameters
file_path – Path to file to save the chain to.
Example
chain.save(file_path="path/chain.yaml")
classmethod schema(by_alias: bool = True, ref_template: unicode = '#/definitions/{model}') → DictStrAny¶
classmethod schema_json(*, by_alias: bool = True, ref_template: unicode = '#/definitions/{model}', **dumps_kwargs: Any) → unicode¶
stream(input: Input, config: Optional[RunnableConfig] = None, **kwargs: Optional[Any]) → Iterator[Output]¶ | lang/api.python.langchain.com/en/latest/fallacy_removal/langchain_experimental.fallacy_removal.base.FallacyChain.html |
7a6292de207a-17 | Default implementation of stream, which calls invoke.
Subclasses should override this method if they support streaming output.
to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶
to_json_not_implemented() → SerializedNotImplemented¶
transform(input: Iterator[Input], config: Optional[RunnableConfig] = None, **kwargs: Optional[Any]) → Iterator[Output]¶
Default implementation of transform, which buffers input and then calls stream.
Subclasses should override this method if they can start producing output while
input is still being generated.
classmethod update_forward_refs(**localns: Any) → None¶
Try to update ForwardRefs on fields based on this Model, globalns and localns.
classmethod validate(value: Any) → Model¶
with_config(config: Optional[RunnableConfig] = None, **kwargs: Any) → Runnable[Input, Output]¶
Bind config to a Runnable, returning a new Runnable.
with_fallbacks(fallbacks: Sequence[Runnable[Input, Output]], *, exceptions_to_handle: Tuple[Type[BaseException], ...] = (<class 'Exception'>,)) → RunnableWithFallbacksT[Input, Output]¶
Add fallbacks to a runnable, returning a new Runnable.
Parameters
fallbacks – A sequence of runnables to try if the original runnable fails.
exceptions_to_handle – A tuple of exception types to handle.
Returns
A new Runnable that will try the original runnable, and then each
fallback in order, upon failures.
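A minimal sketch of with_fallbacks (the model choices are illustrative and assume OPENAI_API_KEY is set; any Runnable, including a FallacyChain, can be wrapped the same way):
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
primary = ChatOpenAI(model="gpt-4")
robust = primary.with_fallbacks([OpenAI()])  # the fallback is tried only if primary raises
robust.invoke("Summarize the round-earth evidence in one sentence.")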
with_listeners(*, on_start: Optional[Listener] = None, on_end: Optional[Listener] = None, on_error: Optional[Listener] = None) → Runnable[Input, Output]¶
Bind lifecycle listeners to a Runnable, returning a new Runnable.
on_start: Called before the runnable starts running, with the Run object. | lang/api.python.langchain.com/en/latest/fallacy_removal/langchain_experimental.fallacy_removal.base.FallacyChain.html |
on_end: Called after the runnable finishes running, with the Run object.
on_error: Called if the runnable throws an error, with the Run object.
The Run object contains information about the run, including its id,
type, input, output, error, start_time, end_time, and any tags or metadata
added to the run.
with_retry(*, retry_if_exception_type: ~typing.Tuple[~typing.Type[BaseException], ...] = (<class 'Exception'>,), wait_exponential_jitter: bool = True, stop_after_attempt: int = 3) → Runnable[Input, Output]¶
Create a new Runnable that retries the original runnable on exceptions.
Parameters
retry_if_exception_type – A tuple of exception types to retry on
wait_exponential_jitter – Whether to add jitter to the wait time
between retries
stop_after_attempt – The maximum number of attempts to make before giving up
Returns
A new Runnable that retries the original runnable on exceptions.
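A minimal sketch of with_retry (the exception type and attempt count are illustrative):
from langchain.llms import OpenAI
llm = OpenAI()
retrying_llm = llm.with_retry(
    retry_if_exception_type=(TimeoutError,),
    stop_after_attempt=5,
)
retrying_llm.invoke("Tell me a joke.")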
with_types(*, input_type: Optional[Type[Input]] = None, output_type: Optional[Type[Output]] = None) → Runnable[Input, Output]¶
Bind input and output types to a Runnable, returning a new Runnable.
property InputType: Type[langchain.schema.runnable.utils.Input]¶
The type of input this runnable accepts specified as a type annotation.
property OutputType: Type[langchain.schema.runnable.utils.Output]¶
The type of output this runnable produces specified as a type annotation.
property config_specs: List[langchain.schema.runnable.utils.ConfigurableFieldSpec]¶
List configurable fields for this runnable.
property input_keys: List[str]¶
Input keys.
property input_schema: Type[pydantic.main.BaseModel]¶ | lang/api.python.langchain.com/en/latest/fallacy_removal/langchain_experimental.fallacy_removal.base.FallacyChain.html |
The type of input this runnable accepts specified as a pydantic model.
property lc_attributes: Dict¶
List of attribute names that should be included in the serialized kwargs.
These attributes must be accepted by the constructor.
property lc_secrets: Dict[str, str]¶
A map of constructor argument names to secret ids.
For example, {“openai_api_key”: “OPENAI_API_KEY”}
property output_keys: List[str]¶
Output keys.
property output_schema: Type[pydantic.main.BaseModel]¶
The type of output this runnable produces specified as a pydantic model. | lang/api.python.langchain.com/en/latest/fallacy_removal/langchain_experimental.fallacy_removal.base.FallacyChain.html |
d64d5da1fffa-0 | langchain_experimental.fallacy_removal.models.LogicalFallacy¶
class langchain_experimental.fallacy_removal.models.LogicalFallacy[source]¶
Bases: BaseModel
Class for a logical fallacy.
Create a new model by parsing and validating input data from keyword arguments.
Raises ValidationError if the input data cannot be parsed to form a valid model.
param fallacy_critique_request: str [Required]¶
param fallacy_revision_request: str [Required]¶
param name: str = 'Logical Fallacy'¶
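A minimal construction sketch using the fields above (the request strings are illustrative):
from langchain_experimental.fallacy_removal.models import LogicalFallacy
appeal_to_authority = LogicalFallacy(
    name="Appeal to Authority",
    fallacy_critique_request="Identify any appeal-to-authority reasoning in the answer.",
    fallacy_revision_request="Rewrite the answer to rely on evidence rather than authority.",
)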
classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model¶
Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data.
Default values are respected, but no other validation is performed.
Behaves as if Config.extra = ‘allow’ was set since it adds all passed values
copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model¶
Duplicate a model, optionally choose which fields to include, exclude and change.
Parameters
include – fields to include in new model
exclude – fields to exclude from new model, as with values this takes precedence over include
update – values to change/add in the new model. Note: the data is not validated before creating
the new model: you should trust this data
deep – set to True to make a deep copy of the model
Returns
new model instance | lang/api.python.langchain.com/en/latest/fallacy_removal/langchain_experimental.fallacy_removal.models.LogicalFallacy.html |
dict(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False) → DictStrAny¶
Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.
classmethod from_orm(obj: Any) → Model¶
json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode¶
Generate a JSON representation of the model, include and exclude arguments as per dict().
encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().
classmethod parse_file(path: Union[str, Path], *, content_type: unicode = None, encoding: unicode = 'utf8', proto: Protocol = None, allow_pickle: bool = False) → Model¶
classmethod parse_obj(obj: Any) → Model¶
classmethod parse_raw(b: Union[str, bytes], *, content_type: unicode = None, encoding: unicode = 'utf8', proto: Protocol = None, allow_pickle: bool = False) → Model¶ | lang/api.python.langchain.com/en/latest/fallacy_removal/langchain_experimental.fallacy_removal.models.LogicalFallacy.html |
d64d5da1fffa-2 | classmethod schema(by_alias: bool = True, ref_template: unicode = '#/definitions/{model}') → DictStrAny¶
classmethod schema_json(*, by_alias: bool = True, ref_template: unicode = '#/definitions/{model}', **dumps_kwargs: Any) → unicode¶
classmethod update_forward_refs(**localns: Any) → None¶
Try to update ForwardRefs on fields based on this Model, globalns and localns.
classmethod validate(value: Any) → Model¶ | lang/api.python.langchain.com/en/latest/fallacy_removal/langchain_experimental.fallacy_removal.models.LogicalFallacy.html |
e7a831cdfd5a-0 | langchain.docstore.base.AddableMixin¶
class langchain.docstore.base.AddableMixin[source]¶
Mixin class that supports adding texts.
Methods
__init__()
add(texts)
Add more documents.
__init__()¶
abstract add(texts: Dict[str, Document]) → None[source]¶
Add more documents. | lang/api.python.langchain.com/en/latest/docstore/langchain.docstore.base.AddableMixin.html |
74110d2d2db8-0 | langchain.docstore.in_memory.InMemoryDocstore¶
class langchain.docstore.in_memory.InMemoryDocstore(_dict: Optional[Dict[str, Document]] = None)[source]¶
Simple in-memory docstore in the form of a dict.
Initialize with dict.
Methods
__init__([_dict])
Initialize with dict.
add(texts)
Add texts to in memory dictionary.
delete(ids)
Delete IDs from the in-memory dictionary.
search(search)
Search via direct lookup.
__init__(_dict: Optional[Dict[str, Document]] = None)[source]¶
Initialize with dict.
add(texts: Dict[str, Document]) → None[source]¶
Add texts to in memory dictionary.
Parameters
texts – dictionary of id -> document.
Returns
None
delete(ids: List) → None[source]¶
Delete IDs from the in-memory dictionary.
search(search: str) → Union[str, Document][source]¶
Search via direct lookup.
Parameters
search – id of a document to search for.
Returns
Document if found, else error message.
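A minimal usage sketch covering add, search, and delete:
from langchain.docstore.document import Document
from langchain.docstore.in_memory import InMemoryDocstore
store = InMemoryDocstore({"1": Document(page_content="hello world")})
store.add({"2": Document(page_content="second document")})
print(store.search("2").page_content)  # -> "second document"
store.delete(["1"])
print(store.search("1"))  # -> an error-message string, since the ID is gone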
Examples using InMemoryDocstore¶
Annoy
Agents
AutoGPT
BabyAGI User Guide
BabyAGI with Tools
Generative Agents in LangChain | lang/api.python.langchain.com/en/latest/docstore/langchain.docstore.in_memory.InMemoryDocstore.html |
b59520dd80b1-0 | langchain.docstore.base.Docstore¶
class langchain.docstore.base.Docstore[source]¶
Interface to access a place that stores documents.
Methods
__init__()
delete(ids)
Delete IDs from the in-memory dictionary.
search(search)
Search for document.
__init__()¶
delete(ids: List) → None[source]¶
Delete IDs from the in-memory dictionary.
abstract search(search: str) → Union[str, Document][source]¶
Search for document.
If the page exists, return the page summary and a Document object.
If the page does not exist, return similar entries. | lang/api.python.langchain.com/en/latest/docstore/langchain.docstore.base.Docstore.html
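Since only search is abstract, a custom docstore can be sketched in a few lines (this toy subclass is illustrative, not part of the library):
from typing import Dict, Union
from langchain.docstore.base import Docstore
from langchain.docstore.document import Document
class DictDocstore(Docstore):
    """Toy docstore backed by a plain dict."""
    def __init__(self, docs: Dict[str, Document]) -> None:
        self.docs = docs
    def search(self, search: str) -> Union[str, Document]:
        # Return the Document if present, else an error message, per the interface.
        return self.docs.get(search, f"ID {search} not found.")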
9f3215f34f46-0 | langchain.docstore.arbitrary_fn.DocstoreFn¶
class langchain.docstore.arbitrary_fn.DocstoreFn(lookup_fn: Callable[[str], Union[Document, str]])[source]¶
Langchain Docstore via arbitrary lookup function.
This is useful when:
it’s expensive to construct an InMemoryDocstore/dict
you retrieve documents from remote sources
you just want to reuse existing objects
Methods
__init__(lookup_fn)
delete(ids)
Delete IDs from the in-memory dictionary.
search(search)
Search for a document.
__init__(lookup_fn: Callable[[str], Union[Document, str]])[source]¶
delete(ids: List) → None¶
Delete IDs from the in-memory dictionary.
search(search: str) → Document[source]¶
Search for a document.
Parameters
search – search string
Returns
Document if found, else error message. | lang/api.python.langchain.com/en/latest/docstore/langchain.docstore.arbitrary_fn.DocstoreFn.html |
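A minimal usage sketch; any callable works as lookup_fn (a dict lookup here, but equally a database query or an HTTP call):
from langchain.docstore.arbitrary_fn import DocstoreFn
from langchain.docstore.document import Document
docs = {"42": Document(page_content="the answer")}
store = DocstoreFn(lookup_fn=lambda doc_id: docs.get(doc_id, f"ID {doc_id} not found."))
print(store.search("42").page_content)  # -> "the answer"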
3829d822dc8a-0 | langchain.docstore.wikipedia.Wikipedia¶
class langchain.docstore.wikipedia.Wikipedia[source]¶
Wrapper around wikipedia API.
Check that wikipedia package is installed.
Methods
__init__()
Check that wikipedia package is installed.
delete(ids)
Delete IDs from the in-memory dictionary.
search(search)
Try to search for wiki page.
__init__() → None[source]¶
Check that wikipedia package is installed.
delete(ids: List) → None¶
Delete IDs from the in-memory dictionary.
search(search: str) → Union[str, Document][source]¶
Try to search for wiki page.
If the page exists, return the page summary and a PageWithLookups object.
If the page does not exist, return similar entries.
Parameters
search – search string.
Returns: a Document object or error message. | lang/api.python.langchain.com/en/latest/docstore/langchain.docstore.wikipedia.Wikipedia.html |
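A minimal usage sketch (requires pip install wikipedia; the page title is illustrative):
from langchain.docstore.wikipedia import Wikipedia
docstore = Wikipedia()
result = docstore.search("Python (programming language)")
print(result)  # page summary as a Document, or similar entries if no exact match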
1fe204349dd6-0 | langchain_experimental.open_clip.open_clip.OpenCLIPEmbeddings¶
class langchain_experimental.open_clip.open_clip.OpenCLIPEmbeddings[source]¶
Bases: BaseModel, Embeddings
Create a new model by parsing and validating input data from keyword arguments.
Raises ValidationError if the input data cannot be parsed to form a valid model.
param model: Any = None¶
param preprocess: Any = None¶
param tokenizer: Any = None¶
async aembed_documents(texts: List[str]) → List[List[float]]¶
Asynchronously embed search docs.
async aembed_query(text: str) → List[float]¶
Asynchronously embed query text.
classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model¶
Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data.
Default values are respected, but no other validation is performed.
Behaves as if Config.extra = ‘allow’ was set since it adds all passed values
copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model¶
Duplicate a model, optionally choose which fields to include, exclude and change.
Parameters
include – fields to include in new model
exclude – fields to exclude from new model, as with values this takes precedence over include
update – values to change/add in the new model. Note: the data is not validated before creating
the new model: you should trust this data
deep – set to True to make a deep copy of the model
Returns
new model instance | lang/api.python.langchain.com/en/latest/open_clip/langchain_experimental.open_clip.open_clip.OpenCLIPEmbeddings.html |
dict(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False) → DictStrAny¶
Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.
embed_documents(texts: List[str]) → List[List[float]][source]¶
Embed search docs.
embed_image(uris: List[str]) → List[List[float]][source]¶
embed_query(text: str) → List[float][source]¶
Embed query text.
classmethod from_orm(obj: Any) → Model¶
json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode¶
Generate a JSON representation of the model, include and exclude arguments as per dict().
encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().
classmethod parse_file(path: Union[str, Path], *, content_type: unicode = None, encoding: unicode = 'utf8', proto: Protocol = None, allow_pickle: bool = False) → Model¶
classmethod parse_obj(obj: Any) → Model¶ | lang/api.python.langchain.com/en/latest/open_clip/langchain_experimental.open_clip.open_clip.OpenCLIPEmbeddings.html |
classmethod parse_raw(b: Union[str, bytes], *, content_type: unicode = None, encoding: unicode = 'utf8', proto: Protocol = None, allow_pickle: bool = False) → Model¶
classmethod schema(by_alias: bool = True, ref_template: unicode = '#/definitions/{model}') → DictStrAny¶
classmethod schema_json(*, by_alias: bool = True, ref_template: unicode = '#/definitions/{model}', **dumps_kwargs: Any) → unicode¶
classmethod update_forward_refs(**localns: Any) → None¶
Try to update ForwardRefs on fields based on this Model, globalns and localns.
classmethod validate(value: Any) → Model¶ | lang/api.python.langchain.com/en/latest/open_clip/langchain_experimental.open_clip.open_clip.OpenCLIPEmbeddings.html |
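A minimal usage sketch. This assumes open_clip_torch, torch, and pillow are installed and that the class falls back to a default OpenCLIP checkpoint when model, preprocess, and tokenizer are not supplied (which may trigger a large download); the image path is illustrative:
from langchain_experimental.open_clip.open_clip import OpenCLIPEmbeddings
clip = OpenCLIPEmbeddings()
text_vectors = clip.embed_documents(["a photo of a cat", "a photo of a dog"])
image_vectors = clip.embed_image(["/path/to/cat.jpg"])
query_vector = clip.embed_query("cat")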
0362f69c3592-0 | langchain.text_splitter.Language¶
class langchain.text_splitter.Language(value, names=None, *, module=None, qualname=None, type=None, start=1, boundary=None)[source]¶
Enum of the programming languages.
CPP = 'cpp'¶
GO = 'go'¶
JAVA = 'java'¶
KOTLIN = 'kotlin'¶
JS = 'js'¶
TS = 'ts'¶
PHP = 'php'¶
PROTO = 'proto'¶
PYTHON = 'python'¶
RST = 'rst'¶
RUBY = 'ruby'¶
RUST = 'rust'¶
SCALA = 'scala'¶
SWIFT = 'swift'¶
MARKDOWN = 'markdown'¶
LATEX = 'latex'¶
HTML = 'html'¶
SOL = 'sol'¶
CSHARP = 'csharp'¶
COBOL = 'cobol'¶
Examples using Language¶
Source Code | lang/api.python.langchain.com/en/latest/text_splitter/langchain.text_splitter.Language.html
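A minimal sketch pairing Language with a language-aware splitter (chunk sizes are illustrative):
from langchain.text_splitter import Language, RecursiveCharacterTextSplitter
python_splitter = RecursiveCharacterTextSplitter.from_language(
    language=Language.PYTHON, chunk_size=60, chunk_overlap=0
)
print(python_splitter.split_text("def hello():\n    print('hello world')"))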
00baf3a322a5-0 | langchain.text_splitter.TextSplitter¶
class langchain.text_splitter.TextSplitter(chunk_size: int = 4000, chunk_overlap: int = 200, length_function: ~typing.Callable[[str], int] = <built-in function len>, keep_separator: bool = False, add_start_index: bool = False, strip_whitespace: bool = True)[source]¶
Interface for splitting text into chunks.
Create a new TextSplitter.
Parameters
chunk_size – Maximum size of chunks to return
chunk_overlap – Overlap in characters between chunks
length_function – Function that measures the length of given chunks
keep_separator – Whether to keep the separator in the chunks
add_start_index – If True, includes chunk’s start index in metadata
strip_whitespace – If True, strips whitespace from the start and end of
every document
Methods
__init__([chunk_size, chunk_overlap, ...])
Create a new TextSplitter.
atransform_documents(documents, **kwargs)
Asynchronously transform a sequence of documents by splitting them.
create_documents(texts[, metadatas])
Create documents from a list of texts.
from_huggingface_tokenizer(tokenizer, **kwargs)
Text splitter that uses HuggingFace tokenizer to count length.
from_tiktoken_encoder([encoding_name, ...])
Text splitter that uses tiktoken encoder to count length.
split_documents(documents)
Split documents.
split_text(text)
Split text into multiple components.
transform_documents(documents, **kwargs)
Transform sequence of documents by splitting them. | lang/api.python.langchain.com/en/latest/text_splitter/langchain.text_splitter.TextSplitter.html |
__init__(chunk_size: int = 4000, chunk_overlap: int = 200, length_function: ~typing.Callable[[str], int] = <built-in function len>, keep_separator: bool = False, add_start_index: bool = False, strip_whitespace: bool = True) → None[source]¶
Create a new TextSplitter.
Parameters
chunk_size – Maximum size of chunks to return
chunk_overlap – Overlap in characters between chunks
length_function – Function that measures the length of given chunks
keep_separator – Whether to keep the separator in the chunks
add_start_index – If True, includes chunk’s start index in metadata
strip_whitespace – If True, strips whitespace from the start and end of
every document
async atransform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document][source]¶
Asynchronously transform a sequence of documents by splitting them.
create_documents(texts: List[str], metadatas: Optional[List[dict]] = None) → List[Document][source]¶
Create documents from a list of texts.
classmethod from_huggingface_tokenizer(tokenizer: Any, **kwargs: Any) → TextSplitter[source]¶
Text splitter that uses HuggingFace tokenizer to count length.
classmethod from_tiktoken_encoder(encoding_name: str = 'gpt2', model_name: Optional[str] = None, allowed_special: Union[Literal['all'], AbstractSet[str]] = {}, disallowed_special: Union[Literal['all'], Collection[str]] = 'all', **kwargs: Any) → TS[source]¶
Text splitter that uses tiktoken encoder to count length.
split_documents(documents: Iterable[Document]) → List[Document][source]¶
Split documents. | lang/api.python.langchain.com/en/latest/text_splitter/langchain.text_splitter.TextSplitter.html |
abstract split_text(text: str) → List[str][source]¶
Split text into multiple components.
transform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document][source]¶
Transform sequence of documents by splitting them. | lang/api.python.langchain.com/en/latest/text_splitter/langchain.text_splitter.TextSplitter.html |
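TextSplitter itself is abstract, so a usage sketch goes through a concrete subclass; here, token-aware splitting via from_tiktoken_encoder (model name and sizes are illustrative):
from langchain.text_splitter import CharacterTextSplitter
splitter = CharacterTextSplitter.from_tiktoken_encoder(
    model_name="gpt-3.5-turbo",
    chunk_size=100,
    chunk_overlap=0,
)
docs = splitter.create_documents(
    ["a long text to be split ..."], metadatas=[{"source": "notes"}]
)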
fd1b208157a2-0 | langchain.text_splitter.MarkdownTextSplitter¶
class langchain.text_splitter.MarkdownTextSplitter(**kwargs: Any)[source]¶
Attempts to split the text along Markdown-formatted headings.
Initialize a MarkdownTextSplitter.
Methods
__init__(**kwargs)
Initialize a MarkdownTextSplitter.
atransform_documents(documents, **kwargs)
Asynchronously transform a sequence of documents by splitting them.
create_documents(texts[, metadatas])
Create documents from a list of texts.
from_huggingface_tokenizer(tokenizer, **kwargs)
Text splitter that uses HuggingFace tokenizer to count length.
from_language(language, **kwargs)
from_tiktoken_encoder([encoding_name, ...])
Text splitter that uses tiktoken encoder to count length.
get_separators_for_language(language)
split_documents(documents)
Split documents.
split_text(text)
Split text into multiple components.
transform_documents(documents, **kwargs)
Transform sequence of documents by splitting them.
__init__(**kwargs: Any) → None[source]¶
Initialize a MarkdownTextSplitter.
async atransform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]¶
Asynchronously transform a sequence of documents by splitting them.
create_documents(texts: List[str], metadatas: Optional[List[dict]] = None) → List[Document]¶
Create documents from a list of texts.
classmethod from_huggingface_tokenizer(tokenizer: Any, **kwargs: Any) → TextSplitter¶
Text splitter that uses HuggingFace tokenizer to count length.
classmethod from_language(language: Language, **kwargs: Any) → RecursiveCharacterTextSplitter¶
classmethod from_tiktoken_encoder(encoding_name: str = 'gpt2', model_name: Optional[str] = None, allowed_special: Union[Literal['all'], AbstractSet[str]] = {}, disallowed_special: Union[Literal['all'], Collection[str]] = 'all', **kwargs: Any) → TS¶
Text splitter that uses tiktoken encoder to count length.
static get_separators_for_language(language: Language) → List[str]¶
split_documents(documents: Iterable[Document]) → List[Document]¶
Split documents.
split_text(text: str) → List[str]¶
Split text into multiple components.
transform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]¶
Transform sequence of documents by splitting them.
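A minimal usage sketch (the markdown sample and chunk size are illustrative assumptions):
.. code-block:: python

    from langchain.text_splitter import MarkdownTextSplitter

    markdown_text = "# Title\n\nSome intro text.\n\n## Section\n\nMore text."

    splitter = MarkdownTextSplitter(chunk_size=60, chunk_overlap=0)
    docs = splitter.create_documents([markdown_text])
    # Chunks break preferentially at markdown headings and paragraph
    # breaks before falling back to smaller separators.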
langchain.text_splitter.RecursiveCharacterTextSplitter¶
class langchain.text_splitter.RecursiveCharacterTextSplitter(separators: Optional[List[str]] = None, keep_separator: bool = True, is_separator_regex: bool = False, **kwargs: Any)[source]¶
Splits text by recursively looking at characters.
Recursively tries to split by different characters to find one that works.
Create a new TextSplitter.
Methods
__init__([separators, keep_separator, ...])
Create a new TextSplitter.
atransform_documents(documents, **kwargs)
Asynchronously transform a sequence of documents by splitting them.
create_documents(texts[, metadatas])
Create documents from a list of texts.
from_huggingface_tokenizer(tokenizer, **kwargs)
Text splitter that uses HuggingFace tokenizer to count length.
from_language(language, **kwargs)
from_tiktoken_encoder([encoding_name, ...])
Text splitter that uses tiktoken encoder to count length.
get_separators_for_language(language)
split_documents(documents)
Split documents.
split_text(text)
Split text into multiple components.
transform_documents(documents, **kwargs)
Transform sequence of documents by splitting them.
__init__(separators: Optional[List[str]] = None, keep_separator: bool = True, is_separator_regex: bool = False, **kwargs: Any) → None[source]¶
Create a new TextSplitter.
async atransform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]¶
Asynchronously transform a sequence of documents by splitting them.
create_documents(texts: List[str], metadatas: Optional[List[dict]] = None) → List[Document]¶
Create documents from a list of texts.
classmethod from_huggingface_tokenizer(tokenizer: Any, **kwargs: Any) → TextSplitter¶
Text splitter that uses HuggingFace tokenizer to count length.
classmethod from_language(language: Language, **kwargs: Any) → RecursiveCharacterTextSplitter[source]¶
classmethod from_tiktoken_encoder(encoding_name: str = 'gpt2', model_name: Optional[str] = None, allowed_special: Union[Literal['all'], AbstractSet[str]] = {}, disallowed_special: Union[Literal['all'], Collection[str]] = 'all', **kwargs: Any) → TS¶
Text splitter that uses tiktoken encoder to count length.
static get_separators_for_language(language: Language) → List[str][source]¶
split_documents(documents: Iterable[Document]) → List[Document]¶
Split documents.
split_text(text: str) → List[str][source]¶
Split text into multiple components.
transform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]¶
Transform sequence of documents by splitting them.
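A minimal usage sketch of this general-purpose splitter (chunk sizes and the sample string are illustrative assumptions):
.. code-block:: python

    from langchain.text_splitter import Language, RecursiveCharacterTextSplitter

    splitter = RecursiveCharacterTextSplitter(
        chunk_size=500,
        chunk_overlap=50,
        add_start_index=True,  # record each chunk's offset in metadata
    )
    long_text = "LangChain splits long documents into overlapping chunks. " * 200
    docs = splitter.create_documents([long_text])

    # Language-aware variant: use code-specific separators instead.
    py_splitter = RecursiveCharacterTextSplitter.from_language(
        language=Language.PYTHON, chunk_size=200, chunk_overlap=0
    )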
Examples using RecursiveCharacterTextSplitter¶
RePhraseQueryRetriever
Cohere Reranker
Ollama
Zep
your local model path
Loading documents from a YouTube url
Source Code
Set env var OPENAI_API_KEY or load from a .env file:
Set env var OPENAI_API_KEY or load from a .env file
Question Answering
Perform context-aware text splitting
Use local LLMs
QA using Activeloop’s DeepLake
!pip install bs4
MultiVector Retriever
MultiQueryRetriever
Parent Document Retriever
MarkdownHeaderTextSplitter
langchain.text_splitter.MarkdownHeaderTextSplitter¶
class langchain.text_splitter.MarkdownHeaderTextSplitter(headers_to_split_on: List[Tuple[str, str]], return_each_line: bool = False)[source]¶
Splitting markdown files based on specified headers.
Create a new MarkdownHeaderTextSplitter.
Parameters
headers_to_split_on – Headers we want to track
return_each_line – Return each line with associated headers
Methods
__init__(headers_to_split_on[, return_each_line])
Create a new MarkdownHeaderTextSplitter.
aggregate_lines_to_chunks(lines)
Combine lines with common metadata into chunks
split_text(text)
Split a markdown file
__init__(headers_to_split_on: List[Tuple[str, str]], return_each_line: bool = False)[source]¶
Create a new MarkdownHeaderTextSplitter.
Parameters
headers_to_split_on – Headers we want to track
return_each_line – Return each line with associated headers
aggregate_lines_to_chunks(lines: List[LineType]) → List[Document][source]¶
Combine lines with common metadata into chunks.
Parameters
lines – Line of text / associated header metadata
split_text(text: str) → List[Document][source]¶
Split a markdown file.
Parameters
text – Markdown file
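A minimal usage sketch (the header mapping and sample markdown are illustrative assumptions):
.. code-block:: python

    from langchain.text_splitter import MarkdownHeaderTextSplitter

    headers_to_split_on = [("#", "Header 1"), ("##", "Header 2")]
    splitter = MarkdownHeaderTextSplitter(headers_to_split_on=headers_to_split_on)

    docs = splitter.split_text("# Intro\n\nHello.\n\n## Details\n\nMore text.")
    # Each Document's metadata records the headers it appeared under,
    # e.g. {"Header 1": "Intro", "Header 2": "Details"}.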
Examples using MarkdownHeaderTextSplitter¶
Perform context-aware text splitting
MarkdownHeaderTextSplitter
langchain.text_splitter.LineType¶
class langchain.text_splitter.LineType[source]¶
Line type as typed dict.
metadata: Dict[str, str]¶
content: str¶
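A sketch of the shape this TypedDict describes (the values are illustrative):
.. code-block:: python

    from langchain.text_splitter import LineType

    line: LineType = {
        "metadata": {"Header 1": "Intro"},
        "content": "Text that appeared under the 'Intro' heading.",
    }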
langchain.text_splitter.HTMLHeaderTextSplitter¶
class langchain.text_splitter.HTMLHeaderTextSplitter(headers_to_split_on: List[Tuple[str, str]], return_each_element: bool = False)[source]¶
Splitting HTML files based on specified headers.
Requires the lxml package.
Create a new HTMLHeaderTextSplitter.
Parameters
headers_to_split_on – list of tuples of headers we want to track mapped to (arbitrary) keys for metadata. Allowed header values: h1, h2, h3, h4, h5, h6, e.g. [(“h1”, “Header 1”), (“h2”, “Header 2”)].
return_each_element – Return each element with associated headers.
Methods
__init__(headers_to_split_on[, ...])
Create a new HTMLHeaderTextSplitter.
aggregate_elements_to_chunks(elements)
Combine elements with common metadata into chunks
split_text(text)
Split HTML text string
split_text_from_file(file)
Split HTML file
split_text_from_url(url)
Split HTML from web URL
__init__(headers_to_split_on: List[Tuple[str, str]], return_each_element: bool = False)[source]¶
Create a new HTMLHeaderTextSplitter.
Parameters
headers_to_split_on – list of tuples of headers we want to track mapped to (arbitrary) keys for metadata. Allowed header values: h1, h2, h3, h4, h5, h6, e.g. [(“h1”, “Header 1”), (“h2”, “Header 2”)].
return_each_element – Return each element with associated headers.
aggregate_elements_to_chunks(elements: List[ElementType]) → List[Document][source]¶
Combine elements with common metadata into chunks
Parameters
elements – HTML element content with associated identifying info and metadata
split_text(text: str) → List[Document][source]¶
Split HTML text string
Parameters
text – HTML text
split_text_from_file(file: Any) → List[Document][source]¶
Split HTML file
Parameters
file – HTML file
split_text_from_url(url: str) → List[Document][source]¶
Split HTML from web URL
Parameters
url – web URL
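A minimal usage sketch (requires the lxml package; the header mapping and HTML sample are illustrative assumptions):
.. code-block:: python

    from langchain.text_splitter import HTMLHeaderTextSplitter

    headers_to_split_on = [("h1", "Header 1"), ("h2", "Header 2")]
    splitter = HTMLHeaderTextSplitter(headers_to_split_on=headers_to_split_on)

    html = (
        "<html><body>"
        "<h1>Intro</h1><p>Hello.</p>"
        "<h2>Details</h2><p>More text.</p>"
        "</body></html>"
    )
    docs = splitter.split_text(html)

    # Splitting a live page instead (needs network access):
    # docs = splitter.split_text_from_url("https://example.com")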
langchain.text_splitter.SentenceTransformersTokenTextSplitter¶
class langchain.text_splitter.SentenceTransformersTokenTextSplitter(chunk_overlap: int = 50, model_name: str = 'sentence-transformers/all-mpnet-base-v2', tokens_per_chunk: Optional[int] = None, **kwargs: Any)[source]¶
Splitting text into tokens using a sentence-transformers model tokenizer.
Create a new TextSplitter.
Methods
__init__([chunk_overlap, model_name, ...])
Create a new TextSplitter.
atransform_documents(documents, **kwargs)
Asynchronously transform a sequence of documents by splitting them.
count_tokens(*, text)
create_documents(texts[, metadatas])
Create documents from a list of texts.
from_huggingface_tokenizer(tokenizer, **kwargs)
Text splitter that uses HuggingFace tokenizer to count length.
from_tiktoken_encoder([encoding_name, ...])
Text splitter that uses tiktoken encoder to count length.
split_documents(documents)
Split documents.
split_text(text)
Split text into multiple components.
transform_documents(documents, **kwargs)
Transform sequence of documents by splitting them.
__init__(chunk_overlap: int = 50, model_name: str = 'sentence-transformers/all-mpnet-base-v2', tokens_per_chunk: Optional[int] = None, **kwargs: Any) → None[source]¶
Create a new TextSplitter.
async atransform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]¶
Asynchronously transform a sequence of documents by splitting them.
count_tokens(*, text: str) → int[source]¶
create_documents(texts: List[str], metadatas: Optional[List[dict]] = None) → List[Document]¶
Create documents from a list of texts.
classmethod from_huggingface_tokenizer(tokenizer: Any, **kwargs: Any) → TextSplitter¶
Text splitter that uses HuggingFace tokenizer to count length.
classmethod from_tiktoken_encoder(encoding_name: str = 'gpt2', model_name: Optional[str] = None, allowed_special: Union[Literal['all'], AbstractSet[str]] = {}, disallowed_special: Union[Literal['all'], Collection[str]] = 'all', **kwargs: Any) → TS¶
Text splitter that uses tiktoken encoder to count length.
split_documents(documents: Iterable[Document]) → List[Document]¶
Split documents.
split_text(text: str) → List[str][source]¶
Split text into multiple components.
transform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]¶
Transform sequence of documents by splitting them.
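A minimal usage sketch (requires the sentence-transformers package, which downloads the model on first use; token counts and sample text are illustrative assumptions):
.. code-block:: python

    from langchain.text_splitter import SentenceTransformersTokenTextSplitter

    splitter = SentenceTransformersTokenTextSplitter(
        tokens_per_chunk=64,
        chunk_overlap=8,
    )
    text = "Lorem ipsum dolor sit amet. " * 50
    print(splitter.count_tokens(text=text))  # length under the model's tokenizer
    chunks = splitter.split_text(text)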
Examples using SentenceTransformersTokenTextSplitter¶
Split by tokens
langchain.text_splitter.PythonCodeTextSplitter¶
class langchain.text_splitter.PythonCodeTextSplitter(**kwargs: Any)[source]¶
Attempts to split the text along Python syntax.
Initialize a PythonCodeTextSplitter.
Methods
__init__(**kwargs)
Initialize a PythonCodeTextSplitter.
atransform_documents(documents, **kwargs)
Asynchronously transform a sequence of documents by splitting them.
create_documents(texts[, metadatas])
Create documents from a list of texts.
from_huggingface_tokenizer(tokenizer, **kwargs)
Text splitter that uses HuggingFace tokenizer to count length.
from_language(language, **kwargs)
from_tiktoken_encoder([encoding_name, ...])
Text splitter that uses tiktoken encoder to count length.
get_separators_for_language(language)
split_documents(documents)
Split documents.
split_text(text)
Split text into multiple components.
transform_documents(documents, **kwargs)
Transform sequence of documents by splitting them.
__init__(**kwargs: Any) → None[source]¶
Initialize a PythonCodeTextSplitter.
async atransform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]¶
Asynchronously transform a sequence of documents by splitting them.
create_documents(texts: List[str], metadatas: Optional[List[dict]] = None) → List[Document]¶
Create documents from a list of texts.
classmethod from_huggingface_tokenizer(tokenizer: Any, **kwargs: Any) → TextSplitter¶
Text splitter that uses HuggingFace tokenizer to count length.
classmethod from_language(language: Language, **kwargs: Any) → RecursiveCharacterTextSplitter¶
classmethod from_tiktoken_encoder(encoding_name: str = 'gpt2', model_name: Optional[str] = None, allowed_special: Union[Literal['all'], AbstractSet[str]] = {}, disallowed_special: Union[Literal['all'], Collection[str]] = 'all', **kwargs: Any) → TS¶
Text splitter that uses tiktoken encoder to count length.
static get_separators_for_language(language: Language) → List[str]¶
split_documents(documents: Iterable[Document]) → List[Document]¶
Split documents.
split_text(text: str) → List[str]¶
Split text into multiple components.
transform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]¶
Transform sequence of documents by splitting them.
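A minimal usage sketch (the code sample and chunk size are illustrative assumptions):
.. code-block:: python

    from langchain.text_splitter import PythonCodeTextSplitter

    python_code = '''
    def hello():
        print("hello")

    class Greeter:
        def greet(self):
            hello()
    '''

    splitter = PythonCodeTextSplitter(chunk_size=80, chunk_overlap=0)
    chunks = splitter.split_text(python_code)
    # Splits fall preferentially on class and def boundaries; this is
    # equivalent to RecursiveCharacterTextSplitter.from_language(Language.PYTHON).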
langchain.text_splitter.SpacyTextSplitter¶
class langchain.text_splitter.SpacyTextSplitter(separator: str = '\n\n', pipeline: str = 'en_core_web_sm', **kwargs: Any)[source]¶
Splitting text using the spaCy package.
By default, spaCy’s en_core_web_sm model is used. For faster but potentially less accurate splitting, you can use pipeline=’sentencizer’.
Initialize the spacy text splitter.
Methods
__init__([separator, pipeline])
Initialize the spacy text splitter.
atransform_documents(documents, **kwargs)
Asynchronously transform a sequence of documents by splitting them.
create_documents(texts[, metadatas])
Create documents from a list of texts.
from_huggingface_tokenizer(tokenizer, **kwargs)
Text splitter that uses HuggingFace tokenizer to count length.
from_tiktoken_encoder([encoding_name, ...])
Text splitter that uses tiktoken encoder to count length.
split_documents(documents)
Split documents.
split_text(text)
Split incoming text and return chunks.
transform_documents(documents, **kwargs)
Transform sequence of documents by splitting them.
__init__(separator: str = '\n\n', pipeline: str = 'en_core_web_sm', **kwargs: Any) → None[source]¶
Initialize the spacy text splitter.
async atransform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]¶
Asynchronously transform a sequence of documents by splitting them.
create_documents(texts: List[str], metadatas: Optional[List[dict]] = None) → List[Document]¶
Create documents from a list of texts.
classmethod from_huggingface_tokenizer(tokenizer: Any, **kwargs: Any) → TextSplitter¶
Text splitter that uses HuggingFace tokenizer to count length.
classmethod from_tiktoken_encoder(encoding_name: str = 'gpt2', model_name: Optional[str] = None, allowed_special: Union[Literal['all'], AbstractSet[str]] = {}, disallowed_special: Union[Literal['all'], Collection[str]] = 'all', **kwargs: Any) → TS¶
Text splitter that uses tiktoken encoder to count length.
split_documents(documents: Iterable[Document]) → List[Document]¶
Split documents.
split_text(text: str) → List[str][source]¶
Split incoming text and return chunks.
transform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]¶
Transform sequence of documents by splitting them.
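A minimal usage sketch (requires the spacy package; the sample text and chunk size are illustrative assumptions):
.. code-block:: python

    from langchain.text_splitter import SpacyTextSplitter

    # pipeline="sentencizer" avoids loading the full en_core_web_sm model.
    splitter = SpacyTextSplitter(pipeline="sentencizer", chunk_size=100, chunk_overlap=0)
    chunks = splitter.split_text("First sentence. Second sentence. Third sentence.")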
Examples using SpacyTextSplitter¶
spaCy
Atlas
Split by tokens
langchain.text_splitter.NLTKTextSplitter¶
class langchain.text_splitter.NLTKTextSplitter(separator: str = '\n\n', language: str = 'english', **kwargs: Any)[source]¶
Splitting text using the NLTK package.
Initialize the NLTK splitter.
Methods
__init__([separator, language])
Initialize the NLTK splitter.
atransform_documents(documents, **kwargs)
Asynchronously transform a sequence of documents by splitting them.
create_documents(texts[, metadatas])
Create documents from a list of texts.
from_huggingface_tokenizer(tokenizer, **kwargs)
Text splitter that uses HuggingFace tokenizer to count length.
from_tiktoken_encoder([encoding_name, ...])
Text splitter that uses tiktoken encoder to count length.
split_documents(documents)
Split documents.
split_text(text)
Split incoming text and return chunks.
transform_documents(documents, **kwargs)
Transform sequence of documents by splitting them.
__init__(separator: str = '\n\n', language: str = 'english', **kwargs: Any) → None[source]¶
Initialize the NLTK splitter.
async atransform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]¶
Asynchronously transform a sequence of documents by splitting them.
create_documents(texts: List[str], metadatas: Optional[List[dict]] = None) → List[Document]¶
Create documents from a list of texts.
classmethod from_huggingface_tokenizer(tokenizer: Any, **kwargs: Any) → TextSplitter¶
Text splitter that uses HuggingFace tokenizer to count length.
classmethod from_tiktoken_encoder(encoding_name: str = 'gpt2', model_name: Optional[str] = None, allowed_special: Union[Literal['all'], AbstractSet[str]] = {}, disallowed_special: Union[Literal['all'], Collection[str]] = 'all', **kwargs: Any) → TS¶
Text splitter that uses tiktoken encoder to count length.
split_documents(documents: Iterable[Document]) → List[Document]¶
Split documents.
split_text(text: str) → List[str][source]¶
Split incoming text and return chunks.
transform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]¶
Transform sequence of documents by splitting them.
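A minimal usage sketch (requires the nltk package and its punkt sentence-tokenizer data; sample text and chunk size are illustrative assumptions):
.. code-block:: python

    import nltk

    from langchain.text_splitter import NLTKTextSplitter

    nltk.download("punkt")  # sentence tokenizer data, needed once
    splitter = NLTKTextSplitter(chunk_size=100, chunk_overlap=0)
    chunks = splitter.split_text("First sentence. Second sentence. Third sentence.")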
Examples using NLTKTextSplitter¶
Split by tokens
langchain.text_splitter.LatexTextSplitter¶
class langchain.text_splitter.LatexTextSplitter(**kwargs: Any)[source]¶
Attempts to split the text along Latex-formatted layout elements.
Initialize a LatexTextSplitter.
Methods
__init__(**kwargs)
Initialize a LatexTextSplitter.
atransform_documents(documents, **kwargs)
Asynchronously transform a sequence of documents by splitting them.
create_documents(texts[, metadatas])
Create documents from a list of texts.
from_huggingface_tokenizer(tokenizer, **kwargs)
Text splitter that uses HuggingFace tokenizer to count length.
from_language(language, **kwargs)
from_tiktoken_encoder([encoding_name, ...])
Text splitter that uses tiktoken encoder to count length.
get_separators_for_language(language)
split_documents(documents)
Split documents.
split_text(text)
Split text into multiple components.
transform_documents(documents, **kwargs)
Transform sequence of documents by splitting them.
__init__(**kwargs: Any) → None[source]¶
Initialize a LatexTextSplitter.
async atransform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]¶
Asynchronously transform a sequence of documents by splitting them.
create_documents(texts: List[str], metadatas: Optional[List[dict]] = None) → List[Document]¶
Create documents from a list of texts.
classmethod from_huggingface_tokenizer(tokenizer: Any, **kwargs: Any) → TextSplitter¶
Text splitter that uses HuggingFace tokenizer to count length.
classmethod from_language(language: Language, **kwargs: Any) → RecursiveCharacterTextSplitter¶
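A minimal usage sketch (the LaTeX sample and chunk size are illustrative assumptions):
.. code-block:: python

    from langchain.text_splitter import LatexTextSplitter

    latex_text = r"""
    \documentclass{article}
    \begin{document}
    \section{Introduction}
    Some introductory text.
    \section{Method}
    More text.
    \end{document}
    """

    splitter = LatexTextSplitter(chunk_size=120, chunk_overlap=0)
    chunks = splitter.split_text(latex_text)
    # Splits fall preferentially on sectioning commands and environments.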