field verbose: bool [Optional]# Whether to print out response text. __call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str# Check Cache and run the LLM on the given prompt and input. async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Run the LLM on the given prompt and input. async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. async apredict(text: str, *, stop: Optional[Sequence[str]] = None) → str# Predict text from text. async apredict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage# Predict message from messages. classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model# Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data. Default values are respected, but no other validation is performed. Behaves as if Config.extra = ‘allow’ was set since it adds all passed values
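The call surface above (__call__ for a single cached completion, generate for a batch of prompts returning an LLMResult) is shared by the wrappers in this module. A minimal sketch, assuming an OpenAI-compatible wrapper with OPENAI_API_KEY set; the model name and prompts are purely illustrative.

.. code-block:: python

    from langchain.llms import OpenAI

    llm = OpenAI(model_name="text-davinci-003", temperature=0.7)

    # __call__ checks the cache and returns a single completion string.
    joke = llm("Tell me a joke.")

    # generate takes a batch of prompts and returns an LLMResult with
    # per-prompt generations plus provider metadata such as token usage.
    result = llm.generate(["Tell me a joke.", "Tell me a limerick."])
    first_text = result.generations[0][0].text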
Behaves as if Config.extra = ‘allow’ was set since it adds all passed values copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model# Duplicate a model, optionally choose which fields to include, exclude and change. Parameters include – fields to include in new model exclude – fields to exclude from new model, as with values this takes precedence over include update – values to change/add in the new model. Note: the data is not validated before creating the new model: you should trust this data deep – set to True to make a deep copy of the model Returns new model instance dict(**kwargs: Any) → Dict# Return a dictionary of the LLM. generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Run the LLM on the given prompt and input. generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. get_num_tokens(text: str) → int# Get the number of tokens present in the text. get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int# Get the number of tokens in the message. get_token_ids(text: str) → List[int][source]#
Get the token IDs using the tiktoken package. json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode# Generate a JSON representation of the model, include and exclude arguments as per dict(). encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps(). predict(text: str, *, stop: Optional[Sequence[str]] = None) → str# Predict text from text. predict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage# Predict message from messages. save(file_path: Union[pathlib.Path, str]) → None# Save the LLM. Parameters file_path – Path to file to save the LLM to. Example: .. code-block:: python llm.save(file_path="path/llm.yaml") classmethod update_forward_refs(**localns: Any) → None# Try to update ForwardRefs on fields based on this Model, globalns and localns. pydantic model langchain.llms.OpenLM[source]# Validators build_extra » all fields raise_deprecation » all fields set_verbose » verbose validate_environment » all fields field allowed_special: Union[Literal['all'], AbstractSet[str]] = {}# Set of special tokens that are allowed.
field batch_size: int = 20# Batch size to use when passing multiple documents to generate. field best_of: int = 1# Generates best_of completions server-side and returns the “best”. field disallowed_special: Union[Literal['all'], Collection[str]] = 'all'# Set of special tokens that are not allowed. field frequency_penalty: float = 0# Penalizes repeated tokens according to frequency. field logit_bias: Optional[Dict[str, float]] [Optional]# Adjust the probability of specific tokens being generated. field max_retries: int = 6# Maximum number of retries to make when generating. field max_tokens: int = 256# The maximum number of tokens to generate in the completion. -1 returns as many tokens as possible given the prompt and the model's maximal context size. field model_kwargs: Dict[str, Any] [Optional]# Holds any model parameters valid for create call not explicitly specified. field model_name: str = 'text-davinci-003' (alias 'model')# Model name to use. field n: int = 1# How many completions to generate for each prompt. field presence_penalty: float = 0# Penalizes repeated tokens. field request_timeout: Optional[Union[float, Tuple[float, float]]] = None# Timeout for requests to OpenAI completion API. Default is 600 seconds. field streaming: bool = False# Whether to stream the results or not. field temperature: float = 0.7# What sampling temperature to use. field top_p: float = 1# Total probability mass of tokens to consider at each step. field verbose: bool [Optional]# Whether to print out response text.
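A minimal sketch of constructing the OpenLM wrapper with the fields documented above; it assumes the openlm package is installed and that credentials for the underlying provider are configured in the environment. The prompt is illustrative.

.. code-block:: python

    from langchain.llms import OpenLM

    # Values mirror the documented defaults.
    llm = OpenLM(
        model_name="text-davinci-003",  # this field also has the alias 'model'
        temperature=0.7,
        max_tokens=256,
        n=1,
    )
    print(llm("What is the capital of France?"))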
field verbose: bool [Optional]# Whether to print out response text. __call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str# Check Cache and run the LLM on the given prompt and input. async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Run the LLM on the given prompt and input. async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. async apredict(text: str, *, stop: Optional[Sequence[str]] = None) → str# Predict text from text. async apredict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage# Predict message from messages. classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model# Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data. Default values are respected, but no other validation is performed. Behaves as if Config.extra = ‘allow’ was set since it adds all passed values
Behaves as if Config.extra = ‘allow’ was set since it adds all passed values copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model# Duplicate a model, optionally choose which fields to include, exclude and change. Parameters include – fields to include in new model exclude – fields to exclude from new model, as with values this takes precedence over include update – values to change/add in the new model. Note: the data is not validated before creating the new model: you should trust this data deep – set to True to make a deep copy of the model Returns new model instance create_llm_result(choices: Any, prompts: List[str], token_usage: Dict[str, int]) → langchain.schema.LLMResult# Create the LLMResult from the choices and prompts. dict(**kwargs: Any) → Dict# Return a dictionary of the LLM. generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Run the LLM on the given prompt and input. generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. get_num_tokens(text: str) → int# Get the number of tokens present in the text.
Get the number of tokens present in the text. get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int# Get the number of tokens in the message. get_sub_prompts(params: Dict[str, Any], prompts: List[str], stop: Optional[List[str]] = None) → List[List[str]]# Get the sub prompts for llm call. get_token_ids(text: str) → List[int]# Get the token IDs using the tiktoken package. json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode# Generate a JSON representation of the model, include and exclude arguments as per dict(). encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps(). max_tokens_for_prompt(prompt: str) → int# Calculate the maximum number of tokens possible to generate for a prompt. Parameters prompt – The prompt to pass into the model. Returns The maximum number of tokens to generate for a prompt. Example max_tokens = openai.max_token_for_prompt("Tell me a joke.") modelname_to_contextsize(modelname: str) → int# Calculate the maximum number of tokens possible to generate for a model. Parameters modelname – The modelname we want to know the context size for. Returns The maximum context size Example max_tokens = openai.modelname_to_contextsize("text-davinci-003")
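The token-accounting helpers above compose. A rough sketch, assuming the completion budget returned by max_tokens_for_prompt is approximately the model's maximum context size minus the tokens already consumed by the prompt; the model name is the documented default.

.. code-block:: python

    from langchain.llms import OpenLM

    llm = OpenLM(model_name="text-davinci-003")
    prompt = "Tell me a joke."

    context_size = llm.modelname_to_contextsize("text-davinci-003")
    prompt_tokens = llm.get_num_tokens(prompt)

    # max_tokens_for_prompt should roughly equal the context size minus
    # the tokens already consumed by the prompt.
    remaining = llm.max_tokens_for_prompt(prompt)
    print(remaining, context_size - prompt_tokens)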
Example max_tokens = openai.modelname_to_contextsize("text-davinci-003") predict(text: str, *, stop: Optional[Sequence[str]] = None) → str# Predict text from text. predict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage# Predict message from messages. prep_streaming_params(stop: Optional[List[str]] = None) → Dict[str, Any]# Prepare the params for streaming. save(file_path: Union[pathlib.Path, str]) → None# Save the LLM. Parameters file_path – Path to file to save the LLM to. Example: .. code-block:: python llm.save(file_path=”path/llm.yaml”) stream(prompt: str, stop: Optional[List[str]] = None) → Generator# Call OpenAI with streaming flag and return the resulting generator. BETA: this is a beta feature while we figure out the right abstraction. Once that happens, this interface could change. Parameters prompt – The prompts to pass into the model. stop – Optional list of stop words to use when generating. Returns A generator representing the stream of tokens from OpenAI. Example generator = openai.stream("Tell me a joke.") for token in generator: yield token classmethod update_forward_refs(**localns: Any) → None# Try to update ForwardRefs on fields based on this Model, globalns and localns. pydantic model langchain.llms.Petals[source]# Wrapper around Petals Bloom models. To use, you should have the petals python package installed, and the environment variable HUGGINGFACE_API_KEY set with your API key. Any parameters that are valid to be passed to the call can be passed
Any parameters that are valid to be passed to the call can be passed in, even if not explicitly saved on this class. Example Validators build_extra » all fields raise_deprecation » all fields set_verbose » verbose validate_environment » all fields field client: Any = None# The client to use for the API calls. field do_sample: bool = True# Whether or not to use sampling; use greedy decoding otherwise. field max_length: Optional[int] = None# The maximum length of the sequence to be generated. field max_new_tokens: int = 256# The maximum number of new tokens to generate in the completion. field model_kwargs: Dict[str, Any] [Optional]# Holds any model parameters valid for create call not explicitly specified. field model_name: str = 'bigscience/bloom-petals'# The model to use. field temperature: float = 0.7# What sampling temperature to use field tokenizer: Any = None# The tokenizer to use for the API calls. field top_k: Optional[int] = None# The number of highest probability vocabulary tokens to keep for top-k-filtering. field top_p: float = 0.9# The cumulative probability for top-p sampling. field verbose: bool [Optional]# Whether to print out response text. __call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str# Check Cache and run the LLM on the given prompt and input.
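The Example block for Petals above lost its code during extraction. A minimal sketch consistent with the fields documented for the class, assuming the petals package is installed and HUGGINGFACE_API_KEY is set; the prompt is illustrative.

.. code-block:: python

    from langchain.llms import Petals

    # Assumes the petals package is installed and HUGGINGFACE_API_KEY is set.
    llm = Petals(
        model_name="bigscience/bloom-petals",  # documented default
        max_new_tokens=256,
        temperature=0.7,
    )
    print(llm("The capital of France is"))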
Check Cache and run the LLM on the given prompt and input. async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Run the LLM on the given prompt and input. async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. async apredict(text: str, *, stop: Optional[Sequence[str]] = None) → str# Predict text from text. async apredict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage# Predict message from messages. classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model# Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data. Default values are respected, but no other validation is performed. Behaves as if Config.extra = ‘allow’ was set since it adds all passed values copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model# Duplicate a model, optionally choose which fields to include, exclude and change. Parameters include – fields to include in new model
Parameters include – fields to include in new model exclude – fields to exclude from new model, as with values this takes precedence over include update – values to change/add in the new model. Note: the data is not validated before creating the new model: you should trust this data deep – set to True to make a deep copy of the model Returns new model instance dict(**kwargs: Any) → Dict# Return a dictionary of the LLM. generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Run the LLM on the given prompt and input. generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. get_num_tokens(text: str) → int# Get the number of tokens present in the text. get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int# Get the number of tokens in the message. get_token_ids(text: str) → List[int]# Get the token present in the text.
Get the token present in the text. json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode# Generate a JSON representation of the model, include and exclude arguments as per dict(). encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps(). predict(text: str, *, stop: Optional[Sequence[str]] = None) → str# Predict text from text. predict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage# Predict message from messages. save(file_path: Union[pathlib.Path, str]) → None# Save the LLM. Parameters file_path – Path to file to save the LLM to. Example: .. code-block:: python llm.save(file_path=”path/llm.yaml”) classmethod update_forward_refs(**localns: Any) → None# Try to update ForwardRefs on fields based on this Model, globalns and localns. pydantic model langchain.llms.PipelineAI[source]# Wrapper around PipelineAI large language models. To use, you should have the pipeline-ai python package installed, and the environment variable PIPELINE_API_KEY set with your API key. Any parameters that are valid to be passed to the call can be passed in, even if not explicitly saved on this class. Example Validators
in, even if not explicitly saved on this class. Example Validators build_extra » all fields raise_deprecation » all fields set_verbose » verbose validate_environment » all fields field pipeline_key: str = ''# The id or tag of the target pipeline field pipeline_kwargs: Dict[str, Any] [Optional]# Holds any pipeline parameters valid for create call not explicitly specified. field verbose: bool [Optional]# Whether to print out response text. __call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str# Check Cache and run the LLM on the given prompt and input. async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Run the LLM on the given prompt and input. async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. async apredict(text: str, *, stop: Optional[Sequence[str]] = None) → str# Predict text from text. async apredict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage# Predict message from messages. classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model#
Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data. Default values are respected, but no other validation is performed. Behaves as if Config.extra = ‘allow’ was set since it adds all passed values copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model# Duplicate a model, optionally choose which fields to include, exclude and change. Parameters include – fields to include in new model exclude – fields to exclude from new model, as with values this takes precedence over include update – values to change/add in the new model. Note: the data is not validated before creating the new model: you should trust this data deep – set to True to make a deep copy of the model Returns new model instance dict(**kwargs: Any) → Dict# Return a dictionary of the LLM. generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Run the LLM on the given prompt and input. generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. get_num_tokens(text: str) → int# Get the number of tokens present in the text.
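The Example block for PipelineAI above is likewise empty in this extract. A minimal sketch, assuming the pipeline-ai package is installed and PIPELINE_API_KEY is set; the pipeline key and kwargs below are placeholders, not real pipeline identifiers.

.. code-block:: python

    from langchain.llms import PipelineAI

    # Assumes the pipeline-ai package is installed and PIPELINE_API_KEY is set.
    # The pipeline key is a placeholder for a real pipeline id or tag, and the
    # kwargs are illustrative pipeline parameters.
    llm = PipelineAI(
        pipeline_key="<pipeline-id-or-tag>",
        pipeline_kwargs={"max_length": 100},
    )
    print(llm("Once upon a time"))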
Get the number of tokens present in the text. get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int# Get the number of tokens in the message. get_token_ids(text: str) → List[int]# Get the token present in the text. json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode# Generate a JSON representation of the model, include and exclude arguments as per dict(). encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps(). predict(text: str, *, stop: Optional[Sequence[str]] = None) → str# Predict text from text. predict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage# Predict message from messages. save(file_path: Union[pathlib.Path, str]) → None# Save the LLM. Parameters file_path – Path to file to save the LLM to. Example: .. code-block:: python llm.save(file_path=”path/llm.yaml”) classmethod update_forward_refs(**localns: Any) → None# Try to update ForwardRefs on fields based on this Model, globalns and localns. pydantic model langchain.llms.PredictionGuard[source]# Wrapper around Prediction Guard large language models.
Wrapper around Prediction Guard large language models. To use, you should have the predictionguard python package installed, and the environment variable PREDICTIONGUARD_TOKEN set with your access token, or pass it as a named parameter to the constructor. .. rubric:: Example Validators raise_deprecation » all fields set_verbose » verbose validate_environment » all fields field max_tokens: int = 256# Denotes the number of tokens to predict per generation. field name: Optional[str] = 'default-text-gen'# Proxy name to use. field temperature: float = 0.75# A non-negative float that tunes the degree of randomness in generation. field verbose: bool [Optional]# Whether to print out response text. __call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str# Check Cache and run the LLM on the given prompt and input. async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Run the LLM on the given prompt and input. async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. async apredict(text: str, *, stop: Optional[Sequence[str]] = None) → str# Predict text from text.
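The Example block for PredictionGuard is also missing from this extract. A minimal sketch using the documented defaults, assuming the predictionguard package is installed and the PREDICTIONGUARD_TOKEN environment variable holds an access token.

.. code-block:: python

    from langchain.llms import PredictionGuard

    # Assumes the predictionguard package is installed and the
    # PREDICTIONGUARD_TOKEN environment variable holds an access token.
    llm = PredictionGuard(
        name="default-text-gen",  # documented default proxy name
        max_tokens=256,
        temperature=0.75,
    )
    print(llm("Tell me a joke."))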
Predict text from text. async apredict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage# Predict message from messages. classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model# Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data. Default values are respected, but no other validation is performed. Behaves as if Config.extra = ‘allow’ was set since it adds all passed values copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model# Duplicate a model, optionally choose which fields to include, exclude and change. Parameters include – fields to include in new model exclude – fields to exclude from new model, as with values this takes precedence over include update – values to change/add in the new model. Note: the data is not validated before creating the new model: you should trust this data deep – set to True to make a deep copy of the model Returns new model instance dict(**kwargs: Any) → Dict# Return a dictionary of the LLM. generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Run the LLM on the given prompt and input.
Run the LLM on the given prompt and input. generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. get_num_tokens(text: str) → int# Get the number of tokens present in the text. get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int# Get the number of tokens in the message. get_token_ids(text: str) → List[int]# Get the token present in the text. json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode# Generate a JSON representation of the model, include and exclude arguments as per dict(). encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps(). predict(text: str, *, stop: Optional[Sequence[str]] = None) → str# Predict text from text. predict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage# Predict message from messages. save(file_path: Union[pathlib.Path, str]) → None# Save the LLM. Parameters
Save the LLM. Parameters file_path – Path to file to save the LLM to. Example: .. code-block:: python llm.save(file_path=”path/llm.yaml”) classmethod update_forward_refs(**localns: Any) → None# Try to update ForwardRefs on fields based on this Model, globalns and localns. pydantic model langchain.llms.PromptLayerOpenAI[source]# Wrapper around OpenAI large language models. To use, you should have the openai and promptlayer python package installed, and the environment variable OPENAI_API_KEY and PROMPTLAYER_API_KEY set with your openAI API key and promptlayer key respectively. All parameters that can be passed to the OpenAI LLM can also be passed here. The PromptLayerOpenAI LLM adds two optional :param pl_tags: List of strings to tag the request with. :param return_pl_id: If True, the PromptLayer request ID will be returned in the generation_info field of the Generation object. Example from langchain.llms import PromptLayerOpenAI openai = PromptLayerOpenAI(model_name="text-davinci-003") Validators build_extra » all fields raise_deprecation » all fields set_verbose » verbose validate_environment » all fields __call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str# Check Cache and run the LLM on the given prompt and input. async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input. async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. async apredict(text: str, *, stop: Optional[Sequence[str]] = None) → str# Predict text from text. async apredict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage# Predict message from messages. classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model# Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data. Default values are respected, but no other validation is performed. Behaves as if Config.extra = ‘allow’ was set since it adds all passed values copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model# Duplicate a model, optionally choose which fields to include, exclude and change. Parameters include – fields to include in new model exclude – fields to exclude from new model, as with values this takes precedence over include update – values to change/add in the new model. Note: the data is not validated before creating the new model: you should trust this data deep – set to True to make a deep copy of the model Returns new model instance
deep – set to True to make a deep copy of the model Returns new model instance create_llm_result(choices: Any, prompts: List[str], token_usage: Dict[str, int]) → langchain.schema.LLMResult# Create the LLMResult from the choices and prompts. dict(**kwargs: Any) → Dict# Return a dictionary of the LLM. generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Run the LLM on the given prompt and input. generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. get_num_tokens(text: str) → int# Get the number of tokens present in the text. get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int# Get the number of tokens in the message. get_sub_prompts(params: Dict[str, Any], prompts: List[str], stop: Optional[List[str]] = None) → List[List[str]]# Get the sub prompts for llm call. get_token_ids(text: str) → List[int]# Get the token IDs using the tiktoken package.
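A short sketch of the two PromptLayer-specific parameters described for PromptLayerOpenAI above; it assumes OPENAI_API_KEY and PROMPTLAYER_API_KEY are set, and the generation_info key name used at the end is an assumption rather than documented here.

.. code-block:: python

    from langchain.llms import PromptLayerOpenAI

    # Assumes OPENAI_API_KEY and PROMPTLAYER_API_KEY are set.
    llm = PromptLayerOpenAI(
        model_name="text-davinci-003",
        pl_tags=["langchain", "docs-example"],
        return_pl_id=True,
    )

    result = llm.generate(["Tell me a joke."])
    generation = result.generations[0][0]
    # With return_pl_id=True the PromptLayer request id is attached to
    # generation_info; the exact key name below is an assumption.
    pl_request_id = (generation.generation_info or {}).get("pl_request_id")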
Get the token IDs using the tiktoken package. json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode# Generate a JSON representation of the model, include and exclude arguments as per dict(). encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps(). max_tokens_for_prompt(prompt: str) → int# Calculate the maximum number of tokens possible to generate for a prompt. Parameters prompt – The prompt to pass into the model. Returns The maximum number of tokens to generate for a prompt. Example max_tokens = openai.max_token_for_prompt("Tell me a joke.") modelname_to_contextsize(modelname: str) → int# Calculate the maximum number of tokens possible to generate for a model. Parameters modelname – The modelname we want to know the context size for. Returns The maximum context size Example max_tokens = openai.modelname_to_contextsize("text-davinci-003") predict(text: str, *, stop: Optional[Sequence[str]] = None) → str# Predict text from text. predict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage# Predict message from messages. prep_streaming_params(stop: Optional[List[str]] = None) → Dict[str, Any]# Prepare the params for streaming.
Prepare the params for streaming. save(file_path: Union[pathlib.Path, str]) → None# Save the LLM. Parameters file_path – Path to file to save the LLM to. Example: .. code-block:: python llm.save(file_path=”path/llm.yaml”) stream(prompt: str, stop: Optional[List[str]] = None) → Generator# Call OpenAI with streaming flag and return the resulting generator. BETA: this is a beta feature while we figure out the right abstraction. Once that happens, this interface could change. Parameters prompt – The prompts to pass into the model. stop – Optional list of stop words to use when generating. Returns A generator representing the stream of tokens from OpenAI. Example generator = openai.stream("Tell me a joke.") for token in generator: yield token classmethod update_forward_refs(**localns: Any) → None# Try to update ForwardRefs on fields based on this Model, globalns and localns. pydantic model langchain.llms.PromptLayerOpenAIChat[source]# Wrapper around OpenAI large language models. To use, you should have the openai and promptlayer python package installed, and the environment variable OPENAI_API_KEY and PROMPTLAYER_API_KEY set with your openAI API key and promptlayer key respectively. All parameters that can be passed to the OpenAIChat LLM can also be passed here. The PromptLayerOpenAIChat adds two optional :param pl_tags: List of strings to tag the request with. :param return_pl_id: If True, the PromptLayer request ID will be returned in the generation_info field of the Generation object. Example from langchain.llms import PromptLayerOpenAIChat
openaichat = PromptLayerOpenAIChat(model_name="gpt-3.5-turbo") Validators build_extra » all fields raise_deprecation » all fields set_verbose » verbose validate_environment » all fields field allowed_special: Union[Literal['all'], AbstractSet[str]] = {}# Set of special tokens that are allowed. field disallowed_special: Union[Literal['all'], Collection[str]] = 'all'# Set of special tokens that are not allowed. field max_retries: int = 6# Maximum number of retries to make when generating. field model_kwargs: Dict[str, Any] [Optional]# Holds any model parameters valid for create call not explicitly specified. field model_name: str = 'gpt-3.5-turbo'# Model name to use. field prefix_messages: List [Optional]# Series of messages for Chat input. field streaming: bool = False# Whether to stream the results or not. __call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str# Check Cache and run the LLM on the given prompt and input. async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Run the LLM on the given prompt and input.
Run the LLM on the given prompt and input. async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. async apredict(text: str, *, stop: Optional[Sequence[str]] = None) → str# Predict text from text. async apredict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage# Predict message from messages. classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model# Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data. Default values are respected, but no other validation is performed. Behaves as if Config.extra = ‘allow’ was set since it adds all passed values copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model# Duplicate a model, optionally choose which fields to include, exclude and change. Parameters include – fields to include in new model exclude – fields to exclude from new model, as with values this takes precedence over include update – values to change/add in the new model. Note: the data is not validated before creating the new model: you should trust this data deep – set to True to make a deep copy of the model Returns new model instance dict(**kwargs: Any) → Dict#
Returns new model instance dict(**kwargs: Any) → Dict# Return a dictionary of the LLM. generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Run the LLM on the given prompt and input. generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. get_num_tokens(text: str) → int# Get the number of tokens present in the text. get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int# Get the number of tokens in the message. get_token_ids(text: str) → List[int]# Get the token IDs using the tiktoken package. json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode# Generate a JSON representation of the model, include and exclude arguments as per dict(). encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().
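A sketch of constructing the chat wrapper with the prefix_messages field documented above; the role/content dict layout for prefix_messages is assumed from the OpenAI chat schema, and both API keys are assumed to be set in the environment.

.. code-block:: python

    from langchain.llms import PromptLayerOpenAIChat

    # Assumes OPENAI_API_KEY and PROMPTLAYER_API_KEY are set. The role/content
    # dict layout for prefix_messages is assumed from the OpenAI chat schema.
    llm = PromptLayerOpenAIChat(
        model_name="gpt-3.5-turbo",
        prefix_messages=[
            {"role": "system", "content": "You are a terse assistant."},
        ],
    )
    print(llm("Summarize the plot of Hamlet in one sentence."))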
encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps(). predict(text: str, *, stop: Optional[Sequence[str]] = None) → str# Predict text from text. predict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage# Predict message from messages. save(file_path: Union[pathlib.Path, str]) → None# Save the LLM. Parameters file_path – Path to file to save the LLM to. Example: .. code-block:: python llm.save(file_path=”path/llm.yaml”) classmethod update_forward_refs(**localns: Any) → None# Try to update ForwardRefs on fields based on this Model, globalns and localns. pydantic model langchain.llms.RWKV[source]# Wrapper around RWKV language models. To use, you should have the rwkv python package installed, the pre-trained model file, and the model’s config information. Example from langchain.llms import RWKV model = RWKV(model="./models/rwkv-3b-fp16.bin", strategy="cpu fp32") # Simplest invocation response = model("Once upon a time, ") Validators raise_deprecation » all fields set_verbose » verbose validate_environment » all fields field CHUNK_LEN: int = 256# Batch size for prompt processing. field max_tokens_per_generation: int = 256# Maximum number of tokens to generate. field model: str [Required]# Path to the pre-trained RWKV model file. field penalty_alpha_frequency: float = 0.4# Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model’s likelihood to repeat the same
line verbatim. field penalty_alpha_presence: float = 0.4# Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. field rwkv_verbose: bool = True# Print debug information. field strategy: str = 'cpu fp32'# The strategy to use for loading the RWKV model, e.g. 'cpu fp32'. field temperature: float = 1.0# The temperature to use for sampling. field tokens_path: str [Required]# Path to the RWKV tokens file. field top_p: float = 0.5# The top-p value to use for sampling. field verbose: bool [Optional]# Whether to print out response text. __call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str# Check Cache and run the LLM on the given prompt and input. async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Run the LLM on the given prompt and input. async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. async apredict(text: str, *, stop: Optional[Sequence[str]] = None) → str#
Predict text from text. async apredict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage# Predict message from messages. classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model# Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data. Default values are respected, but no other validation is performed. Behaves as if Config.extra = ‘allow’ was set since it adds all passed values copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model# Duplicate a model, optionally choose which fields to include, exclude and change. Parameters include – fields to include in new model exclude – fields to exclude from new model, as with values this takes precedence over include update – values to change/add in the new model. Note: the data is not validated before creating the new model: you should trust this data deep – set to True to make a deep copy of the model Returns new model instance dict(**kwargs: Any) → Dict# Return a dictionary of the LLM. generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Run the LLM on the given prompt and input.
Run the LLM on the given prompt and input. generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. get_num_tokens(text: str) → int# Get the number of tokens present in the text. get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int# Get the number of tokens in the message. get_token_ids(text: str) → List[int]# Get the token present in the text. json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode# Generate a JSON representation of the model, include and exclude arguments as per dict(). encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps(). predict(text: str, *, stop: Optional[Sequence[str]] = None) → str# Predict text from text. predict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage# Predict message from messages. save(file_path: Union[pathlib.Path, str]) → None# Save the LLM. Parameters
Save the LLM. Parameters file_path – Path to file to save the LLM to. Example: .. code-block:: python llm.save(file_path=”path/llm.yaml”) classmethod update_forward_refs(**localns: Any) → None# Try to update ForwardRefs on fields based on this Model, globalns and localns. pydantic model langchain.llms.Replicate[source]# Wrapper around Replicate models. To use, you should have the replicate python package installed, and the environment variable REPLICATE_API_TOKEN set with your API token. You can find your token here: https://replicate.com/account The model param is required, but any other model parameters can also be passed in with the format input={model_param: value, …} Example Validators build_extra » all fields raise_deprecation » all fields set_verbose » verbose validate_environment » all fields field verbose: bool [Optional]# Whether to print out response text. __call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str# Check Cache and run the LLM on the given prompt and input. async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Run the LLM on the given prompt and input.
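The Example block for Replicate above is empty in this extract. A minimal sketch, assuming the replicate package is installed and REPLICATE_API_TOKEN is set; the model string is a placeholder of the owner/name:version form, and the input parameters are illustrative.

.. code-block:: python

    from langchain.llms import Replicate

    # Assumes the replicate package is installed and REPLICATE_API_TOKEN is set.
    # The model string is a placeholder of the form "owner/name:version"; the
    # input dict shows the documented input={model_param: value} format.
    llm = Replicate(
        model="<owner>/<model-name>:<version-id>",
        input={"temperature": 0.75, "max_length": 500},
    )
    print(llm("What is the capital of France?"))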
Run the LLM on the given prompt and input. async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. async apredict(text: str, *, stop: Optional[Sequence[str]] = None) → str# Predict text from text. async apredict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage# Predict message from messages. classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model# Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data. Default values are respected, but no other validation is performed. Behaves as if Config.extra = ‘allow’ was set since it adds all passed values copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model# Duplicate a model, optionally choose which fields to include, exclude and change. Parameters include – fields to include in new model exclude – fields to exclude from new model, as with values this takes precedence over include update – values to change/add in the new model. Note: the data is not validated before creating the new model: you should trust this data deep – set to True to make a deep copy of the model Returns new model instance dict(**kwargs: Any) → Dict#
Returns new model instance dict(**kwargs: Any) → Dict# Return a dictionary of the LLM. generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Run the LLM on the given prompt and input. generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. get_num_tokens(text: str) → int# Get the number of tokens present in the text. get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int# Get the number of tokens in the message. get_token_ids(text: str) → List[int]# Get the token present in the text. json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode# Generate a JSON representation of the model, include and exclude arguments as per dict(). encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().
encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps(). predict(text: str, *, stop: Optional[Sequence[str]] = None) → str# Predict text from text. predict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage# Predict message from messages. save(file_path: Union[pathlib.Path, str]) → None# Save the LLM. Parameters file_path – Path to file to save the LLM to. Example: .. code-block:: python llm.save(file_path=”path/llm.yaml”) classmethod update_forward_refs(**localns: Any) → None# Try to update ForwardRefs on fields based on this Model, globalns and localns. pydantic model langchain.llms.SagemakerEndpoint[source]# Wrapper around custom Sagemaker Inference Endpoints. To use, you must supply the endpoint name from your deployed Sagemaker model & the region where it is deployed. To authenticate, the AWS client uses the following methods to automatically load credentials: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html If a specific credential profile should be used, you must pass the name of the profile from the ~/.aws/credentials file that is to be used. Make sure the credentials / roles used have the required policies to access the Sagemaker endpoint. See: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html Validators raise_deprecation » all fields set_verbose » verbose validate_environment » all fields field content_handler: langchain.llms.sagemaker_endpoint.LLMContentHandler [Required]# The content handler class that provides an input and output transform functions to handle formats between LLM
The content handler class that provides an input and output transform functions to handle formats between LLM and the endpoint. field credentials_profile_name: Optional[str] = None# The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which has either access keys or role information specified. If not specified, the default credential profile or, if on an EC2 instance, credentials from IMDS will be used. See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html field endpoint_kwargs: Optional[Dict] = None# Optional attributes passed to the invoke_endpoint function. See `boto3`_. docs for more info. .. _boto3: <https://boto3.amazonaws.com/v1/documentation/api/latest/index.html> field endpoint_name: str = ''# The name of the endpoint from the deployed Sagemaker model. Must be unique within an AWS Region. field model_kwargs: Optional[Dict] = None# Key word arguments to pass to the model. field region_name: str = ''# The aws region where the Sagemaker model is deployed, eg. us-west-2. field verbose: bool [Optional]# Whether to print out response text. __call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str# Check Cache and run the LLM on the given prompt and input. async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Run the LLM on the given prompt and input.
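A minimal sketch of the SagemakerEndpoint wrapper and its required content handler, assuming a JSON-in/JSON-out endpoint. The endpoint name, region, credentials profile, and request/response payload layout are placeholders, and the shape of the transform methods is an assumed illustration of LLMContentHandler.

.. code-block:: python

    import json

    from langchain.llms import SagemakerEndpoint
    from langchain.llms.sagemaker_endpoint import LLMContentHandler


    class ContentHandler(LLMContentHandler):
        # The request/response JSON layout depends on the deployed model and
        # is assumed here purely for illustration.
        content_type = "application/json"
        accepts = "application/json"

        def transform_input(self, prompt: str, model_kwargs: dict) -> bytes:
            return json.dumps({"inputs": prompt, **model_kwargs}).encode("utf-8")

        def transform_output(self, output: bytes) -> str:
            response = json.loads(output.read().decode("utf-8"))
            return response[0]["generated_text"]


    llm = SagemakerEndpoint(
        endpoint_name="<your-endpoint-name>",
        region_name="us-west-2",
        credentials_profile_name="default",  # optional; omit to use the default chain
        model_kwargs={"temperature": 0.7},
        content_handler=ContentHandler(),
    )
    print(llm("Tell me a joke."))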
Run the LLM on the given prompt and input. async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. async apredict(text: str, *, stop: Optional[Sequence[str]] = None) → str# Predict text from text. async apredict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage# Predict message from messages. classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model# Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data. Default values are respected, but no other validation is performed. Behaves as if Config.extra = ‘allow’ was set since it adds all passed values copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model# Duplicate a model, optionally choose which fields to include, exclude and change. Parameters include – fields to include in new model exclude – fields to exclude from new model, as with values this takes precedence over include update – values to change/add in the new model. Note: the data is not validated before creating the new model: you should trust this data deep – set to True to make a deep copy of the model Returns new model instance dict(**kwargs: Any) → Dict#
Returns new model instance dict(**kwargs: Any) → Dict# Return a dictionary of the LLM. generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Run the LLM on the given prompt and input. generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. get_num_tokens(text: str) → int# Get the number of tokens present in the text. get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int# Get the number of tokens in the message. get_token_ids(text: str) → List[int]# Get the token present in the text. json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode# Generate a JSON representation of the model, include and exclude arguments as per dict(). encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().
encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps(). predict(text: str, *, stop: Optional[Sequence[str]] = None) → str# Predict text from text. predict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage# Predict message from messages. save(file_path: Union[pathlib.Path, str]) → None# Save the LLM. Parameters file_path – Path to file to save the LLM to. Example: .. code-block:: python llm.save(file_path=”path/llm.yaml”) classmethod update_forward_refs(**localns: Any) → None# Try to update ForwardRefs on fields based on this Model, globalns and localns. pydantic model langchain.llms.SelfHostedHuggingFaceLLM[source]# Wrapper around HuggingFace Pipeline API to run on self-hosted remote hardware. Supported hardware includes auto-launched instances on AWS, GCP, Azure, and Lambda, as well as servers specified by IP address and SSH credentials (such as on-prem, or another cloud like Paperspace, Coreweave, etc.). To use, you should have the runhouse python package installed. Only supports text-generation, text2text-generation and summarization for now. Example using from_model_id:from langchain.llms import SelfHostedHuggingFaceLLM import runhouse as rh gpu = rh.cluster(name="rh-a10x", instance_type="A100:1") hf = SelfHostedHuggingFaceLLM( model_id="google/flan-t5-large", task="text2text-generation", hardware=gpu )
hardware=gpu ) Example passing fn that generates a pipeline (bc the pipeline is not serializable):from langchain.llms import SelfHostedHuggingFaceLLM from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline import runhouse as rh def get_pipeline(): model_id = "gpt2" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) pipe = pipeline( "text-generation", model=model, tokenizer=tokenizer ) return pipe hf = SelfHostedHuggingFaceLLM( model_load_fn=get_pipeline, model_id="gpt2", hardware=gpu) Validators raise_deprecation » all fields set_verbose » verbose field device: int = 0# Device to use for inference. -1 for CPU, 0 for GPU, 1 for second GPU, etc. field hardware: Any = None# Remote hardware to send the inference function to. field inference_fn: Callable = <function _generate_text># Inference function to send to the remote hardware. field load_fn_kwargs: Optional[dict] = None# Key word arguments to pass to the model load function. field model_id: str = 'gpt2'# Hugging Face model_id to load the model. field model_kwargs: Optional[dict] = None# Key word arguments to pass to the model. field model_load_fn: Callable = <function _load_transformer># Function to load the model remotely on the server. field model_reqs: List[str] = ['./', 'transformers', 'torch']# Requirements to install on hardware to inference the model. field task: str = 'text-generation'# Hugging Face task (“text-generation”, “text2text-generation” or
Hugging Face task (“text-generation”, “text2text-generation” or “summarization”). field verbose: bool [Optional]# Whether to print out response text. __call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str# Check Cache and run the LLM on the given prompt and input. async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Run the LLM on the given prompt and input. async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. async apredict(text: str, *, stop: Optional[Sequence[str]] = None) → str# Predict text from text. async apredict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage# Predict message from messages. classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model# Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data. Default values are respected, but no other validation is performed. Behaves as if Config.extra = ‘allow’ was set since it adds all passed values
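Once constructed as in the examples above, the wrapper is invoked like any other LLM; a brief sketch reusing the hf instance from the earlier example:

# Assumes `hf` was built as in the SelfHostedHuggingFaceLLM examples above.
print(hf("What is the capital of France?"))

# Batch generation returns an LLMResult with one list of generations per prompt.
result = hf.generate(["Tell me a joke.", "Name three colours."])
print(result.generations[0][0].text)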
Behaves as if Config.extra = ‘allow’ was set since it adds all passed values copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model# Duplicate a model, optionally choose which fields to include, exclude and change. Parameters include – fields to include in new model exclude – fields to exclude from new model, as with values this takes precedence over include update – values to change/add in the new model. Note: the data is not validated before creating the new model: you should trust this data deep – set to True to make a deep copy of the model Returns new model instance dict(**kwargs: Any) → Dict# Return a dictionary of the LLM. classmethod from_pipeline(pipeline: Any, hardware: Any, model_reqs: Optional[List[str]] = None, device: int = 0, **kwargs: Any) → langchain.llms.base.LLM# Init the SelfHostedPipeline from a pipeline object or string. generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Run the LLM on the given prompt and input. generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult.
Take in a list of prompt values and return an LLMResult. get_num_tokens(text: str) → int# Get the number of tokens present in the text. get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int# Get the number of tokens in the message. get_token_ids(text: str) → List[int]# Get the token present in the text. json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode# Generate a JSON representation of the model, include and exclude arguments as per dict(). encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps(). predict(text: str, *, stop: Optional[Sequence[str]] = None) → str# Predict text from text. predict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage# Predict message from messages. save(file_path: Union[pathlib.Path, str]) → None# Save the LLM. Parameters file_path – Path to file to save the LLM to. Example: .. code-block:: python llm.save(file_path=”path/llm.yaml”) classmethod update_forward_refs(**localns: Any) → None# Try to update ForwardRefs on fields based on this Model, globalns and localns.
Try to update ForwardRefs on fields based on this Model, globalns and localns. pydantic model langchain.llms.SelfHostedPipeline[source]# Run model inference on self-hosted remote hardware. Supported hardware includes auto-launched instances on AWS, GCP, Azure, and Lambda, as well as servers specified by IP address and SSH credentials (such as on-prem, or another cloud like Paperspace, Coreweave, etc.). To use, you should have the runhouse python package installed. Example for custom pipeline and inference functions:from langchain.llms import SelfHostedPipeline from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline import runhouse as rh def load_pipeline(): tokenizer = AutoTokenizer.from_pretrained("gpt2") model = AutoModelForCausalLM.from_pretrained("gpt2") return pipeline( "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10 ) def inference_fn(pipeline, prompt, stop = None): return pipeline(prompt)[0]["generated_text"] gpu = rh.cluster(name="rh-a10x", instance_type="A100:1") llm = SelfHostedPipeline( model_load_fn=load_pipeline, hardware=gpu, model_reqs=model_reqs, inference_fn=inference_fn ) Example for <2GB model (can be serialized and sent directly to the server):from langchain.llms import SelfHostedPipeline import runhouse as rh gpu = rh.cluster(name="rh-a10x", instance_type="A100:1") my_model = ... llm = SelfHostedPipeline.from_pipeline( pipeline=my_model, hardware=gpu, model_reqs=["./", "torch", "transformers"],
hardware=gpu, model_reqs=["./", "torch", "transformers"], ) Example passing model path for larger models:from langchain.llms import SelfHostedPipeline import runhouse as rh import pickle from transformers import pipeline generator = pipeline(model="gpt2") rh.blob(pickle.dumps(generator), path="models/pipeline.pkl" ).save().to(gpu, path="models") llm = SelfHostedPipeline.from_pipeline( pipeline="models/pipeline.pkl", hardware=gpu, model_reqs=["./", "torch", "transformers"], ) Validators raise_deprecation » all fields set_verbose » verbose field hardware: Any = None# Remote hardware to send the inference function to. field inference_fn: Callable = <function _generate_text># Inference function to send to the remote hardware. field load_fn_kwargs: Optional[dict] = None# Key word arguments to pass to the model load function. field model_load_fn: Callable [Required]# Function to load the model remotely on the server. field model_reqs: List[str] = ['./', 'torch']# Requirements to install on hardware to inference the model. field verbose: bool [Optional]# Whether to print out response text. __call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str# Check Cache and run the LLM on the given prompt and input. async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input. async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. async apredict(text: str, *, stop: Optional[Sequence[str]] = None) → str# Predict text from text. async apredict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage# Predict message from messages. classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model# Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data. Default values are respected, but no other validation is performed. Behaves as if Config.extra = ‘allow’ was set since it adds all passed values copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model# Duplicate a model, optionally choose which fields to include, exclude and change. Parameters include – fields to include in new model exclude – fields to exclude from new model, as with values this takes precedence over include update – values to change/add in the new model. Note: the data is not validated before creating the new model: you should trust this data deep – set to True to make a deep copy of the model Returns new model instance dict(**kwargs: Any) → Dict#
Returns new model instance dict(**kwargs: Any) → Dict# Return a dictionary of the LLM. classmethod from_pipeline(pipeline: Any, hardware: Any, model_reqs: Optional[List[str]] = None, device: int = 0, **kwargs: Any) → langchain.llms.base.LLM[source]# Init the SelfHostedPipeline from a pipeline object or string. generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Run the LLM on the given prompt and input. generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. get_num_tokens(text: str) → int# Get the number of tokens present in the text. get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int# Get the number of tokens in the message. get_token_ids(text: str) → List[int]# Get the token present in the text.
Get the token present in the text. json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode# Generate a JSON representation of the model, include and exclude arguments as per dict(). encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps(). predict(text: str, *, stop: Optional[Sequence[str]] = None) → str# Predict text from text. predict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage# Predict message from messages. save(file_path: Union[pathlib.Path, str]) → None# Save the LLM. Parameters file_path – Path to file to save the LLM to. Example: .. code-block:: python llm.save(file_path=”path/llm.yaml”) classmethod update_forward_refs(**localns: Any) → None# Try to update ForwardRefs on fields based on this Model, globalns and localns. pydantic model langchain.llms.StochasticAI[source]# Wrapper around StochasticAI large language models. To use, you should have the environment variable STOCHASTICAI_API_KEY set with your API key. Example from langchain.llms import StochasticAI stochasticai = StochasticAI(api_url="") Validators build_extra » all fields
stochasticai = StochasticAI(api_url="") Validators build_extra » all fields raise_deprecation » all fields set_verbose » verbose validate_environment » all fields field api_url: str = ''# Model name to use. field model_kwargs: Dict[str, Any] [Optional]# Holds any model parameters valid for create call not explicitly specified. field verbose: bool [Optional]# Whether to print out response text. __call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str# Check Cache and run the LLM on the given prompt and input. async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Run the LLM on the given prompt and input. async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. async apredict(text: str, *, stop: Optional[Sequence[str]] = None) → str# Predict text from text. async apredict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage# Predict message from messages. classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model#
Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data. Default values are respected, but no other validation is performed. Behaves as if Config.extra = ‘allow’ was set since it adds all passed values copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model# Duplicate a model, optionally choose which fields to include, exclude and change. Parameters include – fields to include in new model exclude – fields to exclude from new model, as with values this takes precedence over include update – values to change/add in the new model. Note: the data is not validated before creating the new model: you should trust this data deep – set to True to make a deep copy of the model Returns new model instance dict(**kwargs: Any) → Dict# Return a dictionary of the LLM. generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Run the LLM on the given prompt and input. generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. get_num_tokens(text: str) → int# Get the number of tokens present in the text.
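Putting the StochasticAI fields above together, a hedged sketch of a call; the deployment URL is a placeholder for a model you have deployed, the API key must be valid, and the sampling parameters are illustrative only:

import os

from langchain.llms import StochasticAI

os.environ["STOCHASTICAI_API_KEY"] = "your-api-key"  # or export it in your shell

llm = StochasticAI(
    api_url="https://example.stochastic.ai/your-deployed-model",  # placeholder URL
    model_kwargs={"temperature": 0.5, "max_tokens": 64},
)
print(llm("Write a haiku about databases."))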
Get the number of tokens present in the text. get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int# Get the number of tokens in the message. get_token_ids(text: str) → List[int]# Get the token present in the text. json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode# Generate a JSON representation of the model, include and exclude arguments as per dict(). encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps(). predict(text: str, *, stop: Optional[Sequence[str]] = None) → str# Predict text from text. predict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage# Predict message from messages. save(file_path: Union[pathlib.Path, str]) → None# Save the LLM. Parameters file_path – Path to file to save the LLM to. Example: .. code-block:: python llm.save(file_path=”path/llm.yaml”) classmethod update_forward_refs(**localns: Any) → None# Try to update ForwardRefs on fields based on this Model, globalns and localns. pydantic model langchain.llms.VertexAI[source]# Wrapper around Google Vertex AI large language models. Validators raise_deprecation » all fields
set_verbose » verbose validate_environment » all fields field credentials: Any = None# The default custom credentials (google.auth.credentials.Credentials) to use. field location: str = 'us-central1'# The default location to use when making API calls. field max_output_tokens: int = 128# Token limit determines the maximum amount of text output from one prompt. field project: Optional[str] = None# The default GCP project to use when making Vertex API calls. field temperature: float = 0.0# Sampling temperature; it controls the degree of randomness in token selection. field top_k: int = 40# How the model selects tokens for output: the next token is selected from among the top_k most probable tokens. field top_p: float = 0.95# Tokens are selected from most probable to least probable until the sum of their probabilities equals the top_p value. field tuned_model_name: Optional[str] = None# The name of a tuned model; if provided, model_name is ignored. field verbose: bool [Optional]# Whether to print out response text. __call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str# Check Cache and run the LLM on the given prompt and input. async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Run the LLM on the given prompt and input.
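A short usage sketch for the fields above; it assumes a GCP project with the Vertex AI API enabled and application-default credentials available, and the project ID is a placeholder:

from langchain.llms import VertexAI

llm = VertexAI(
    project="my-gcp-project",  # placeholder project ID
    location="us-central1",
    temperature=0.2,
    max_output_tokens=256,
    top_k=40,
    top_p=0.95,
)
print(llm("Suggest a name for a bakery."))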
Run the LLM on the given prompt and input. async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. async apredict(text: str, *, stop: Optional[Sequence[str]] = None) → str# Predict text from text. async apredict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage# Predict message from messages. classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model# Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data. Default values are respected, but no other validation is performed. Behaves as if Config.extra = ‘allow’ was set since it adds all passed values copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model# Duplicate a model, optionally choose which fields to include, exclude and change. Parameters include – fields to include in new model exclude – fields to exclude from new model, as with values this takes precedence over include update – values to change/add in the new model. Note: the data is not validated before creating the new model: you should trust this data deep – set to True to make a deep copy of the model Returns new model instance dict(**kwargs: Any) → Dict#
Returns new model instance dict(**kwargs: Any) → Dict# Return a dictionary of the LLM. generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Run the LLM on the given prompt and input. generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. get_num_tokens(text: str) → int# Get the number of tokens present in the text. get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int# Get the number of tokens in the message. get_token_ids(text: str) → List[int]# Get the token present in the text. json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode# Generate a JSON representation of the model, include and exclude arguments as per dict(). encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().
encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps(). predict(text: str, *, stop: Optional[Sequence[str]] = None) → str# Predict text from text. predict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage# Predict message from messages. save(file_path: Union[pathlib.Path, str]) → None# Save the LLM. Parameters file_path – Path to file to save the LLM to. Example: .. code-block:: python llm.save(file_path=”path/llm.yaml”) classmethod update_forward_refs(**localns: Any) → None# Try to update ForwardRefs on fields based on this Model, globalns and localns. pydantic model langchain.llms.Writer[source]# Wrapper around Writer large language models. To use, you should have the environment variable WRITER_API_KEY and WRITER_ORG_ID set with your API key and organization ID respectively. Example from langchain import Writer writer = Writer(model_id="palmyra-base") Validators raise_deprecation » all fields set_verbose » verbose validate_environment » all fields field base_url: Optional[str] = None# Base url to use, if None decides based on model name. field best_of: Optional[int] = None# Generates this many completions server-side and returns the “best”. field logprobs: bool = False# Whether to return log probabilities. field max_tokens: Optional[int] = None# Maximum number of tokens to generate. field min_tokens: Optional[int] = None# Minimum number of tokens to generate. field model_id: str = 'palmyra-instruct'# Model name to use.
field model_id: str = 'palmyra-instruct'# Model name to use. field n: Optional[int] = None# How many completions to generate. field presence_penalty: Optional[float] = None# Penalizes repeated tokens regardless of frequency. field repetition_penalty: Optional[float] = None# Penalizes repeated tokens according to frequency. field stop: Optional[List[str]] = None# Sequences when completion generation will stop. field temperature: Optional[float] = None# What sampling temperature to use. field top_p: Optional[float] = None# Total probability mass of tokens to consider at each step. field verbose: bool [Optional]# Whether to print out response text. field writer_api_key: Optional[str] = None# Writer API key. field writer_org_id: Optional[str] = None# Writer organization ID. __call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str# Check Cache and run the LLM on the given prompt and input. async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Run the LLM on the given prompt and input. async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult.
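For illustration, a hedged sketch wiring the required environment variables and a few of the Writer fields above together; the key and organization ID are placeholders:

import os

from langchain.llms import Writer

os.environ["WRITER_API_KEY"] = "your-writer-api-key"
os.environ["WRITER_ORG_ID"] = "your-org-id"

llm = Writer(
    model_id="palmyra-instruct",
    temperature=0.7,
    max_tokens=128,
)
print(llm("Write a tagline for a coffee shop."))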
Take in a list of prompt values and return an LLMResult. async apredict(text: str, *, stop: Optional[Sequence[str]] = None) → str# Predict text from text. async apredict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage# Predict message from messages. classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model# Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data. Default values are respected, but no other validation is performed. Behaves as if Config.extra = ‘allow’ was set since it adds all passed values copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model# Duplicate a model, optionally choose which fields to include, exclude and change. Parameters include – fields to include in new model exclude – fields to exclude from new model, as with values this takes precedence over include update – values to change/add in the new model. Note: the data is not validated before creating the new model: you should trust this data deep – set to True to make a deep copy of the model Returns new model instance dict(**kwargs: Any) → Dict# Return a dictionary of the LLM. generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Run the LLM on the given prompt and input.
Run the LLM on the given prompt and input. generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. get_num_tokens(text: str) → int# Get the number of tokens present in the text. get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int# Get the number of tokens in the message. get_token_ids(text: str) → List[int]# Get the token present in the text. json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode# Generate a JSON representation of the model, include and exclude arguments as per dict(). encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps(). predict(text: str, *, stop: Optional[Sequence[str]] = None) → str# Predict text from text. predict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage# Predict message from messages. save(file_path: Union[pathlib.Path, str]) → None# Save the LLM. Parameters
file_path – Path to file to save the LLM to. Example: .. code-block:: python llm.save(file_path="path/llm.yaml") classmethod update_forward_refs(**localns: Any) → None# Try to update ForwardRefs on fields based on this Model, globalns and localns.
Vector Stores# Wrappers on top of vector stores. class langchain.vectorstores.AnalyticDB(connection_string: str, embedding_function: langchain.embeddings.base.Embeddings, collection_name: str = 'langchain', collection_metadata: Optional[dict] = None, pre_delete_collection: bool = False, logger: Optional[logging.Logger] = None)[source]# VectorStore implementation using AnalyticDB. AnalyticDB is a distributed, cloud-native database with full PostgreSQL syntax support. - connection_string is a Postgres connection string. - embedding_function is any embedding function implementing the langchain.embeddings.base.Embeddings interface. collection_name is the name of the collection to use (default: langchain). NOTE: This is not the name of the table, but the name of the collection. The tables will be created when initializing the store (if they do not already exist), so make sure the user has the right permissions to create tables. pre_delete_collection: if True, will delete the collection if it exists (default: False). Useful for testing. add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any) → List[str][source]# Run more texts through the embeddings and add to the vectorstore. Parameters texts – Iterable of strings to add to the vectorstore. metadatas – Optional list of metadatas associated with the texts. kwargs – vectorstore specific parameters Returns List of ids from adding the texts into the vectorstore. connect() → sqlalchemy.engine.base.Connection[source]# classmethod connection_string_from_db_params(driver: str, host: str, port: int, database: str, user: str, password: str) → str[source]# Return connection string from database parameters.
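A minimal sketch of building the connection string and loading a few texts; it assumes a reachable AnalyticDB (or PostgreSQL-compatible) instance and an OpenAI API key for the embeddings. All connection details are placeholders, and passing the connection string via the connection_string keyword is an assumption about how the **kwargs described here are consumed:

from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import AnalyticDB

# Placeholder connection details; adjust to your instance.
connection_string = AnalyticDB.connection_string_from_db_params(
    driver="psycopg2",
    host="localhost",
    port=5432,
    database="postgres",
    user="postgres",
    password="postgres",
)

db = AnalyticDB.from_texts(
    texts=["AnalyticDB speaks PostgreSQL.", "LangChain wraps it as a vector store."],
    embedding=OpenAIEmbeddings(),
    connection_string=connection_string,
)
print(db.similarity_search("What protocol does AnalyticDB speak?", k=1))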
Return connection string from database parameters. create_collection() → None[source]# create_tables_if_not_exists() → None[source]# delete_collection() → None[source]# drop_tables() → None[source]# classmethod from_documents(documents: List[langchain.schema.Document], embedding: langchain.embeddings.base.Embeddings, collection_name: str = 'langchain', ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any) → langchain.vectorstores.analyticdb.AnalyticDB[source]# Return VectorStore initialized from documents and embeddings. Postgres connection string is required Either pass it as a parameter or set the PGVECTOR_CONNECTION_STRING environment variable. classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = 'langchain', ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any) → langchain.vectorstores.analyticdb.AnalyticDB[source]# Return VectorStore initialized from texts and embeddings. Postgres connection string is required Either pass it as a parameter or set the PGVECTOR_CONNECTION_STRING environment variable. get_collection(session: sqlalchemy.orm.session.Session) → Optional[langchain.vectorstores.analyticdb.CollectionStore][source]# classmethod get_connection_string(kwargs: Dict[str, Any]) → str[source]# similarity_search(query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any) → List[langchain.schema.Document][source]# Run similarity search with AnalyticDB with distance. Parameters query (str) – Query text to search for. k (int) – Number of results to return. Defaults to 4.
k (int) – Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]) – Filter by metadata. Defaults to None. Returns List of Documents most similar to the query. similarity_search_by_vector(embedding: List[float], k: int = 4, filter: Optional[dict] = None, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs most similar to embedding vector. Parameters embedding – Embedding to look up documents similar to. k – Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]) – Filter by metadata. Defaults to None. Returns List of Documents most similar to the query vector. similarity_search_with_score(query: str, k: int = 4, filter: Optional[dict] = None) → List[Tuple[langchain.schema.Document, float]][source]# Return docs most similar to query. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]) – Filter by metadata. Defaults to None. Returns List of Documents most similar to the query and score for each similarity_search_with_score_by_vector(embedding: List[float], k: int = 4, filter: Optional[dict] = None) → List[Tuple[langchain.schema.Document, float]][source]# class langchain.vectorstores.Annoy(embedding_function: Callable, index: Any, metric: str, docstore: langchain.docstore.base.Docstore, index_to_docstore_id: Dict[int, str])[source]# Wrapper around Annoy vector database. To use, you should have the annoy python package installed. Example from langchain import Annoy
Example from langchain import Annoy db = Annoy(embedding_function, index, docstore, index_to_docstore_id) add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any) → List[str][source]# Run more texts through the embeddings and add to the vectorstore. Parameters texts – Iterable of strings to add to the vectorstore. metadatas – Optional list of metadatas associated with the texts. kwargs – vectorstore specific parameters Returns List of ids from adding the texts into the vectorstore. classmethod from_embeddings(text_embeddings: List[Tuple[str, List[float]]], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, metric: str = 'angular', trees: int = 100, n_jobs: int = - 1, **kwargs: Any) → langchain.vectorstores.annoy.Annoy[source]# Construct Annoy wrapper from embeddings. Parameters text_embeddings – List of tuples of (text, embedding) embedding – Embedding function to use. metadatas – List of metadata dictionaries to associate with documents. metric – Metric to use for indexing. Defaults to “angular”. trees – Number of trees to use for indexing. Defaults to 100. n_jobs – Number of jobs to use for indexing. Defaults to -1 This is a user friendly interface that: Creates an in memory docstore with provided embeddings Initializes the Annoy database This is intended to be a quick way to get started. Example from langchain import Annoy from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings))
text_embedding_pairs = list(zip(texts, text_embeddings)) db = Annoy.from_embeddings(text_embedding_pairs, embeddings) classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, metric: str = 'angular', trees: int = 100, n_jobs: int = - 1, **kwargs: Any) → langchain.vectorstores.annoy.Annoy[source]# Construct Annoy wrapper from raw documents. Parameters texts – List of documents to index. embedding – Embedding function to use. metadatas – List of metadata dictionaries to associate with documents. metric – Metric to use for indexing. Defaults to “angular”. trees – Number of trees to use for indexing. Defaults to 100. n_jobs – Number of jobs to use for indexing. Defaults to -1. This is a user friendly interface that: Embeds documents. Creates an in memory docstore Initializes the Annoy database This is intended to be a quick way to get started. Example from langchain import Annoy from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() index = Annoy.from_texts(texts, embeddings) classmethod load_local(folder_path: str, embeddings: langchain.embeddings.base.Embeddings) → langchain.vectorstores.annoy.Annoy[source]# Load Annoy index, docstore, and index_to_docstore_id to disk. Parameters folder_path – folder path to load index, docstore, and index_to_docstore_id from. embeddings – Embeddings to use when generating queries.
and index_to_docstore_id from. embeddings – Embeddings to use when generating queries. max_marginal_relevance_search(query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. fetch_k – Number of Documents to fetch to pass to MMR algorithm. lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns List of Documents selected by maximal marginal relevance. max_marginal_relevance_search_by_vector(embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Parameters embedding – Embedding to look up documents similar to. fetch_k – Number of Documents to fetch to pass to MMR algorithm. k – Number of Documents to return. Defaults to 4. lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns List of Documents selected by maximal marginal relevance.
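A brief sketch of persisting an Annoy index with save_local and reloading it with load_local; the folder path is arbitrary and OpenAIEmbeddings assumes an OpenAI API key:

from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Annoy

embeddings = OpenAIEmbeddings()
db = Annoy.from_texts(["hello world", "goodbye world"], embeddings)

db.save_local("my_annoy_index")  # writes the index, docstore, and id mapping to this folder
reloaded = Annoy.load_local("my_annoy_index", embeddings=embeddings)
print(reloaded.similarity_search("hello", k=1))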
Defaults to 0.5. Returns List of Documents selected by maximal marginal relevance. process_index_results(idxs: List[int], dists: List[float]) → List[Tuple[langchain.schema.Document, float]][source]# Turns annoy results into a list of documents and scores. Parameters idxs – List of indices of the documents in the index. dists – List of distances of the documents in the index. Returns List of Documents and scores. save_local(folder_path: str, prefault: bool = False) → None[source]# Save Annoy index, docstore, and index_to_docstore_id to disk. Parameters folder_path – folder path to save index, docstore, and index_to_docstore_id to. prefault – Whether to pre-load the index into memory. similarity_search(query: str, k: int = 4, search_k: int = - 1, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs most similar to query. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. search_k – inspect up to search_k nodes which defaults to n_trees * n if not provided Returns List of Documents most similar to the query. similarity_search_by_index(docstore_index: int, k: int = 4, search_k: int = - 1, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs most similar to docstore_index. Parameters docstore_index – Index of document in docstore k – Number of Documents to return. Defaults to 4. search_k – inspect up to search_k nodes which defaults to n_trees * n if not provided Returns List of Documents most similar to the embedding.
to n_trees * n if not provided Returns List of Documents most similar to the embedding. similarity_search_by_vector(embedding: List[float], k: int = 4, search_k: int = - 1, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs most similar to embedding vector. Parameters embedding – Embedding to look up documents similar to. k – Number of Documents to return. Defaults to 4. search_k – inspect up to search_k nodes which defaults to n_trees * n if not provided Returns List of Documents most similar to the embedding. similarity_search_with_score(query: str, k: int = 4, search_k: int = - 1) → List[Tuple[langchain.schema.Document, float]][source]# Return docs most similar to query. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. search_k – inspect up to search_k nodes which defaults to n_trees * n if not provided Returns List of Documents most similar to the query and score for each similarity_search_with_score_by_index(docstore_index: int, k: int = 4, search_k: int = - 1) → List[Tuple[langchain.schema.Document, float]][source]# Return docs most similar to query. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. search_k – inspect up to search_k nodes which defaults to n_trees * n if not provided Returns List of Documents most similar to the query and score for each
Returns List of Documents most similar to the query and score for each similarity_search_with_score_by_vector(embedding: List[float], k: int = 4, search_k: int = - 1) → List[Tuple[langchain.schema.Document, float]][source]# Return docs most similar to query. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. search_k – inspect up to search_k nodes which defaults to n_trees * n if not provided Returns List of Documents most similar to the query and score for each class langchain.vectorstores.AtlasDB(name: str, embedding_function: Optional[langchain.embeddings.base.Embeddings] = None, api_key: Optional[str] = None, description: str = 'A description for your project', is_public: bool = True, reset_project_if_exists: bool = False)[source]# Wrapper around Atlas: Nomic’s neural database and rhizomatic instrument. To use, you should have the nomic python package installed. Example from langchain.vectorstores import AtlasDB from langchain.embeddings.openai import OpenAIEmbeddings embeddings = OpenAIEmbeddings() vectorstore = AtlasDB("my_project", embeddings.embed_query) add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, refresh: bool = True, **kwargs: Any) → List[str][source]# Run more texts through the embeddings and add to the vectorstore. Parameters texts (Iterable[str]) – Texts to add to the vectorstore. metadatas (Optional[List[dict]], optional) – Optional list of metadatas. ids (Optional[List[str]]) – An optional list of ids.
refresh (bool) – Whether or not to refresh indices with the updated data. Default True. Returns List of IDs of the added texts. Return type List[str] create_index(**kwargs: Any) → Any[source]# Creates an index in your project. See https://docs.nomic.ai/atlas_api.html#nomic.project.AtlasProject.create_index for full details. classmethod from_documents(documents: List[langchain.schema.Document], embedding: Optional[langchain.embeddings.base.Embeddings] = None, ids: Optional[List[str]] = None, name: Optional[str] = None, api_key: Optional[str] = None, persist_directory: Optional[str] = None, description: str = 'A description for your project', is_public: bool = True, reset_project_if_exists: bool = False, index_kwargs: Optional[dict] = None, **kwargs: Any) → langchain.vectorstores.atlas.AtlasDB[source]# Create an AtlasDB vectorstore from a list of documents. Parameters name (str) – Name of the collection to create. api_key (str) – Your Nomic API key. documents (List[Document]) – List of documents to add to the vectorstore. embedding (Optional[Embeddings]) – Embedding function. Defaults to None. ids (Optional[List[str]]) – Optional list of document IDs. If None, ids will be auto-created. description (str) – A description for your project. is_public (bool) – Whether your project is publicly accessible. True by default. reset_project_if_exists (bool) – Whether to reset this project if it already exists. Default False. Generally useful during development and testing. index_kwargs (Optional[dict]) – Dict of kwargs for index creation.
See https://docs.nomic.ai/atlas_api.html Returns Nomic’s neural database and finest rhizomatic instrument Return type AtlasDB classmethod from_texts(texts: List[str], embedding: Optional[langchain.embeddings.base.Embeddings] = None, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, name: Optional[str] = None, api_key: Optional[str] = None, description: str = 'A description for your project', is_public: bool = True, reset_project_if_exists: bool = False, index_kwargs: Optional[dict] = None, **kwargs: Any) → langchain.vectorstores.atlas.AtlasDB[source]# Create an AtlasDB vectorstore from raw documents. Parameters texts (List[str]) – The list of texts to ingest. name (str) – Name of the project to create. api_key (str) – Your Nomic API key. embedding (Optional[Embeddings]) – Embedding function. Defaults to None. metadatas (Optional[List[dict]]) – List of metadatas. Defaults to None. ids (Optional[List[str]]) – Optional list of document IDs. If None, ids will be auto-created. description (str) – A description for your project. is_public (bool) – Whether your project is publicly accessible. True by default. reset_project_if_exists (bool) – Whether to reset this project if it already exists. Default False. Generally useful during development and testing. index_kwargs (Optional[dict]) – Dict of kwargs for index creation. See https://docs.nomic.ai/atlas_api.html Returns Nomic’s neural database and finest rhizomatic instrument Return type AtlasDB
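A hedged sketch of from_texts as documented above; the project name is arbitrary and the Nomic API key is a placeholder:

from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import AtlasDB

db = AtlasDB.from_texts(
    texts=["Atlas is Nomic's neural database.", "LangChain exposes it as a vector store."],
    embedding=OpenAIEmbeddings(),
    name="my_project",             # arbitrary project name
    api_key="your-nomic-api-key",  # placeholder
    description="Demo project",
)
print(db.similarity_search("What is Atlas?", k=1))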
Returns Nomic’s neural database and finest rhizomatic instrument Return type AtlasDB similarity_search(query: str, k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]# Run similarity search with AtlasDB Parameters query (str) – Query text to search for. k (int) – Number of results to return. Defaults to 4. Returns List of documents most similar to the query text. Return type List[Document] class langchain.vectorstores.Chroma(collection_name: str = 'langchain', embedding_function: Optional[Embeddings] = None, persist_directory: Optional[str] = None, client_settings: Optional[chromadb.config.Settings] = None, collection_metadata: Optional[Dict] = None, client: Optional[chromadb.Client] = None)[source]# Wrapper around ChromaDB embeddings platform. To use, you should have the chromadb python package installed. Example from langchain.vectorstores import Chroma from langchain.embeddings.openai import OpenAIEmbeddings embeddings = OpenAIEmbeddings() vectorstore = Chroma("langchain_store", embeddings.embed_query) add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any) → List[str][source]# Run more texts through the embeddings and add to the vectorstore. Parameters texts (Iterable[str]) – Texts to add to the vectorstore. metadatas (Optional[List[dict]], optional) – Optional list of metadatas. ids (Optional[List[str]], optional) – Optional list of IDs. Returns List of IDs of the added texts. Return type List[str] delete_collection() → None[source]# Delete the collection.
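A short sketch of the persistence flow described below for from_texts/from_documents: build a collection with a persist_directory, query it, then call persist() so it can be reloaded later. The directory path is arbitrary and OpenAIEmbeddings assumes an OpenAI API key:

from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma

texts = ["Chroma stores embeddings locally.", "LangChain wraps Chroma as a vector store."]
db = Chroma.from_texts(
    texts,
    OpenAIEmbeddings(),
    collection_name="demo",
    persist_directory="./chroma_db",  # arbitrary path
)
print(db.similarity_search("Where does Chroma store embeddings?", k=1))
db.persist()  # flush to disk so the collection can be reloaded from persist_directory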
classmethod from_documents(documents: List[Document], embedding: Optional[Embeddings] = None, ids: Optional[List[str]] = None, collection_name: str = 'langchain', persist_directory: Optional[str] = None, client_settings: Optional[chromadb.config.Settings] = None, client: Optional[chromadb.Client] = None, **kwargs: Any) → Chroma[source]# Create a Chroma vectorstore from a list of documents. If a persist_directory is specified, the collection will be persisted there. Otherwise, the data will be ephemeral in-memory. Parameters collection_name (str) – Name of the collection to create. persist_directory (Optional[str]) – Directory to persist the collection. ids (Optional[List[str]]) – List of document IDs. Defaults to None. documents (List[Document]) – List of documents to add to the vectorstore. embedding (Optional[Embeddings]) – Embedding function. Defaults to None. client_settings (Optional[chromadb.config.Settings]) – Chroma client settings. Returns Chroma vectorstore. Return type Chroma classmethod from_texts(texts: List[str], embedding: Optional[Embeddings] = None, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, collection_name: str = 'langchain', persist_directory: Optional[str] = None, client_settings: Optional[chromadb.config.Settings] = None, client: Optional[chromadb.Client] = None, **kwargs: Any) → Chroma[source]# Create a Chroma vectorstore from raw documents. If a persist_directory is specified, the collection will be persisted there. Otherwise, the data will be ephemeral in-memory. Parameters
texts (List[str]) – List of texts to add to the collection. collection_name (str) – Name of the collection to create. persist_directory (Optional[str]) – Directory to persist the collection. embedding (Optional[Embeddings]) – Embedding function. Defaults to None. metadatas (Optional[List[dict]]) – List of metadatas. Defaults to None. ids (Optional[List[str]]) – List of document IDs. Defaults to None. client_settings (Optional[chromadb.config.Settings]) – Chroma client settings Returns Chroma vectorstore. Return type Chroma get(include: Optional[List[str]] = None) → Dict[str, Any][source]# Gets the collection. Parameters include (Optional[List[str]]) – List of fields to include from db. Defaults to None. max_marginal_relevance_search(query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, str]] = None, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. :param query: Text to look up documents similar to. :param k: Number of Documents to return. Defaults to 4. :param fetch_k: Number of Documents to fetch to pass to MMR algorithm. :param lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Parameters filter (Optional[Dict[str, str]]) – Filter by metadata. Defaults to None. Returns
List of Documents selected by maximal marginal relevance. max_marginal_relevance_search_by_vector(embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, str]] = None, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. :param embedding: Embedding to look up documents similar to. :param k: Number of Documents to return. Defaults to 4. :param fetch_k: Number of Documents to fetch to pass to MMR algorithm. :param lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Parameters filter (Optional[Dict[str, str]]) – Filter by metadata. Defaults to None. Returns List of Documents selected by maximal marginal relevance. persist() → None[source]# Persist the collection. This can be used to explicitly persist the data to disk. It will also be called automatically when the object is destroyed. similarity_search(query: str, k: int = 4, filter: Optional[Dict[str, str]] = None, **kwargs: Any) → List[langchain.schema.Document][source]# Run similarity search with Chroma. Parameters query (str) – Query text to search for. k (int) – Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]) – Filter by metadata. Defaults to None. Returns
List of documents most similar to the query text. Return type List[Document] similarity_search_by_vector(embedding: List[float], k: int = 4, filter: Optional[Dict[str, str]] = None, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs most similar to embedding vector. :param embedding: Embedding to look up documents similar to. :type embedding: str :param k: Number of Documents to return. Defaults to 4. :type k: int :param filter: Filter by metadata. Defaults to None. :type filter: Optional[Dict[str, str]] Returns List of Documents most similar to the query vector. similarity_search_with_score(query: str, k: int = 4, filter: Optional[Dict[str, str]] = None, **kwargs: Any) → List[Tuple[langchain.schema.Document, float]][source]# Run similarity search with Chroma with distance. Parameters query (str) – Query text to search for. k (int) – Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]) – Filter by metadata. Defaults to None. Returns List of documents most similar to the query text with distance in float. Return type List[Tuple[Document, float]] update_document(document_id: str, document: langchain.schema.Document) → None[source]# Update a document in the collection. Parameters document_id (str) – ID of the document to update. document (Document) – Document to update.
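Putting the Chroma pieces above together, a hedged sketch of building a persisted collection and querying it; the directory, texts, and metadata filter are assumptions for illustration:

from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma

embeddings = OpenAIEmbeddings()
db = Chroma.from_texts(
    ["doc about cats", "doc about dogs", "doc about birds"],
    embeddings,
    metadatas=[{"animal": "cat"}, {"animal": "dog"}, {"animal": "bird"}],
    collection_name="animals",
    persist_directory="./chroma_db",  # hypothetical path; the collection is persisted here
)
hits = db.similarity_search("feline pets", k=2, filter={"animal": "cat"})
diverse = db.max_marginal_relevance_search("pets", k=2, fetch_k=10, lambda_mult=0.5)
db.persist()  # flush the collection to disk explicitly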
class langchain.vectorstores.DeepLake(dataset_path: str = './deeplake/', token: Optional[str] = None, embedding_function: Optional[langchain.embeddings.base.Embeddings] = None, read_only: Optional[bool] = False, ingestion_batch_size: int = 1024, num_workers: int = 0, verbose: bool = True, **kwargs: Any)[source]# Wrapper around Deep Lake, a data lake for deep learning applications. We implement naive similarity search and filtering for fast prototyping, but it can be extended with Tensor Query Language (TQL) for production use cases over billion rows. Why Deep Lake? Not only stores embeddings, but also the original data with version control. Serverless, doesn’t require another service and can be used with major cloud providers (S3, GCS, etc.) More than just a multi-modal vector store. You can use the dataset to fine-tune your own LLM models. To use, you should have the deeplake python package installed. Example from langchain.vectorstores import DeepLake from langchain.embeddings.openai import OpenAIEmbeddings embeddings = OpenAIEmbeddings() vectorstore = DeepLake("langchain_store", embeddings.embed_query) add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any) → List[str][source]# Run more texts through the embeddings and add to the vectorstore. Parameters texts (Iterable[str]) – Texts to add to the vectorstore. metadatas (Optional[List[dict]], optional) – Optional list of metadatas. ids (Optional[List[str]], optional) – Optional list of IDs. Returns List of IDs of the added texts. Return type
List[str] delete(ids: Any[List[str], None] = None, filter: Any[Dict[str, str], None] = None, delete_all: Any[bool, None] = None) → bool[source]# Delete the entities in the dataset Parameters ids (Optional[List[str]], optional) – The document_ids to delete. Defaults to None. filter (Optional[Dict[str, str]], optional) – The filter to delete by. Defaults to None. delete_all (Optional[bool], optional) – Whether to drop the dataset. Defaults to None. delete_dataset() → None[source]# Delete the collection. classmethod force_delete_by_path(path: str) → None[source]# Force delete dataset by path classmethod from_texts(texts: List[str], embedding: Optional[langchain.embeddings.base.Embeddings] = None, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, dataset_path: str = './deeplake/', **kwargs: Any) → langchain.vectorstores.deeplake.DeepLake[source]# Create a Deep Lake dataset from raw documents. If a dataset_path is specified, the dataset will be persisted in that location, otherwise by default at ./deeplake Parameters path (str, pathlib.Path) – The full path to the dataset. Can be: Deep Lake cloud path of the form hub://username/dataset_name. To write to Deep Lake cloud datasets, ensure that you are logged in to Deep Lake (use ‘activeloop login’ from command line) AWS S3 path of the form s3://bucketname/path/to/dataset. Credentials are required in either the environment Google Cloud Storage path of the form gcs://bucketname/path/to/dataset. Credentials are required
in either the environment Local file system path of the form ./path/to/dataset or ~/path/to/dataset or path/to/dataset. In-memory path of the form mem://path/to/dataset which doesn’t save the dataset, but keeps it in memory instead. Should be used only for testing as it does not persist. documents (List[Document]) – List of documents to add. embedding (Optional[Embeddings]) – Embedding function. Defaults to None. metadatas (Optional[List[dict]]) – List of metadatas. Defaults to None. ids (Optional[List[str]]) – List of document IDs. Defaults to None. Returns Deep Lake dataset. Return type DeepLake max_marginal_relevance_search(query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. :param query: Text to look up documents similar to. :param k: Number of Documents to return. Defaults to 4. :param fetch_k: Number of Documents to fetch to pass to MMR algorithm. :param lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns List of Documents selected by maximal marginal relevance. max_marginal_relevance_search_by_vector(embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. :param embedding: Embedding to look up documents similar to. :param k: Number of Documents to return. Defaults to 4. :param fetch_k: Number of Documents to fetch to pass to MMR algorithm. :param lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns List of Documents selected by maximal marginal relevance. persist() → None[source]# Persist the collection. similarity_search(query: str, k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs most similar to query. Parameters query – text to embed and run the query on. k – Number of Documents to return. Defaults to 4. query – Text to look up documents similar to. embedding – Embedding function to use. Defaults to None. k – Number of Documents to return. Defaults to 4. distance_metric – L2 for Euclidean, L1 for Nuclear, max L-infinity distance, cos for cosine similarity, ‘dot’ for dot product. Defaults to L2. filter – Attribute filter by metadata example {‘key’: ‘value’}. Defaults to None. maximal_marginal_relevance – Whether to use maximal marginal relevance. Defaults to False. fetch_k – Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. return_score – Whether to return the score. Defaults to False. Returns List of Documents most similar to the query vector.
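A minimal sketch of the DeepLake workflow described above, assuming the deeplake package is installed and using the default local ./deeplake/ path; the texts and the optional search kwargs are illustrative:

from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import DeepLake

db = DeepLake.from_texts(
    ["alpha", "beta", "gamma"],
    embedding=OpenAIEmbeddings(),
    dataset_path="./deeplake/",  # local dataset; could also be a hub:// or s3:// path
)
# distance_metric is one of the optional similarity_search kwargs documented above.
docs = db.similarity_search("alpha", k=2, distance_metric="cos")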
similarity_search_by_vector(embedding: List[float], k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs most similar to embedding vector. Parameters embedding – Embedding to look up documents similar to. k – Number of Documents to return. Defaults to 4. Returns List of Documents most similar to the query vector. similarity_search_with_score(query: str, distance_metric: str = 'L2', k: int = 4, filter: Optional[Dict[str, str]] = None) → List[Tuple[langchain.schema.Document, float]][source]# Run similarity search with Deep Lake with distance returned. Parameters query (str) – Query text to search for. distance_metric – L2 for Euclidean, L1 for Nuclear, max L-infinity distance, cos for cosine similarity, ‘dot’ for dot product. Defaults to L2. k (int) – Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]) – Filter by metadata. Defaults to None. Returns List of documents most similar to the query text with distance in float. Return type List[Tuple[Document, float]] class langchain.vectorstores.DocArrayHnswSearch(doc_index: BaseDocIndex, embedding: langchain.embeddings.base.Embeddings)[source]# Wrapper around HnswLib storage. To use it, you should have the docarray package with version >=0.32.0 installed. You can install it with pip install “langchain[docarray]”.
classmethod from_params(embedding: langchain.embeddings.base.Embeddings, work_dir: str, n_dim: int, dist_metric: Literal['cosine', 'ip', 'l2'] = 'cosine', max_elements: int = 1024, index: bool = True, ef_construction: int = 200, ef: int = 10, M: int = 16, allow_replace_deleted: bool = True, num_threads: int = 1, **kwargs: Any) → langchain.vectorstores.docarray.hnsw.DocArrayHnswSearch[source]# Initialize DocArrayHnswSearch store. Parameters embedding (Embeddings) – Embedding function. work_dir (str) – path to the location where all the data will be stored. n_dim (int) – dimension of an embedding. dist_metric (str) – Distance metric for DocArrayHnswSearch can be one of: “cosine”, “ip”, and “l2”. Defaults to “cosine”. max_elements (int) – Maximum number of vectors that can be stored. Defaults to 1024. index (bool) – Whether an index should be built for this field. Defaults to True. ef_construction (int) – defines a construction time/accuracy trade-off. Defaults to 200. ef (int) – parameter controlling query time/accuracy trade-off. Defaults to 10. M (int) – parameter that defines the maximum number of outgoing connections in the graph. Defaults to 16. allow_replace_deleted (bool) – Enables replacing of deleted elements with new added ones. Defaults to True. num_threads (int) – Sets the number of cpu threads to use. Defaults to 1.
**kwargs – Other keyword arguments to be passed to the get_doc_cls method. classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, work_dir: Optional[str] = None, n_dim: Optional[int] = None, **kwargs: Any) → langchain.vectorstores.docarray.hnsw.DocArrayHnswSearch[source]# Create a DocArrayHnswSearch store and insert data. Parameters texts (List[str]) – Text data. embedding (Embeddings) – Embedding function. metadatas (Optional[List[dict]]) – Metadata for each text if it exists. Defaults to None. work_dir (str) – path to the location where all the data will be stored. n_dim (int) – dimension of an embedding. **kwargs – Other keyword arguments to be passed to the __init__ method. Returns DocArrayHnswSearch Vector Store class langchain.vectorstores.DocArrayInMemorySearch(doc_index: BaseDocIndex, embedding: langchain.embeddings.base.Embeddings)[source]# Wrapper around in-memory storage for exact search. To use it, you should have the docarray package with version >=0.32.0 installed. You can install it with pip install “langchain[docarray]”. classmethod from_params(embedding: langchain.embeddings.base.Embeddings, metric: Literal['cosine_sim', 'euclidian_dist', 'sgeuclidean_dist'] = 'cosine_sim', **kwargs: Any) → langchain.vectorstores.docarray.in_memory.DocArrayInMemorySearch[source]# Initialize DocArrayInMemorySearch store. Parameters embedding (Embeddings) – Embedding function.
metric (str) – metric for exact nearest-neighbor search. Can be one of: “cosine_sim”, “euclidean_dist” and “sqeuclidean_dist”. Defaults to “cosine_sim”. **kwargs – Other keyword arguments to be passed to the get_doc_cls method. classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[Dict[Any, Any]]] = None, **kwargs: Any) → langchain.vectorstores.docarray.in_memory.DocArrayInMemorySearch[source]# Create a DocArrayInMemorySearch store and insert data. Parameters texts (List[str]) – Text data. embedding (Embeddings) – Embedding function. metadatas (Optional[List[Dict[Any, Any]]]) – Metadata for each text if it exists. Defaults to None. metric (str) – metric for exact nearest-neighbor search. Can be one of: “cosine_sim”, “euclidean_dist” and “sqeuclidean_dist”. Defaults to “cosine_sim”. Returns DocArrayInMemorySearch Vector Store class langchain.vectorstores.ElasticVectorSearch(elasticsearch_url: str, index_name: str, embedding: langchain.embeddings.base.Embeddings, *, ssl_verify: Optional[Dict[str, Any]] = None)[source]# Wrapper around Elasticsearch as a vector database. To connect to an Elasticsearch instance that does not require login credentials, pass the Elasticsearch URL and index name along with the embedding object to the constructor. Example from langchain import ElasticVectorSearch from langchain.embeddings import OpenAIEmbeddings embedding = OpenAIEmbeddings() elastic_vector_search = ElasticVectorSearch( elasticsearch_url="http://localhost:9200", index_name="test_index",
embedding=embedding ) To connect to an Elasticsearch instance that requires login credentials, including Elastic Cloud, use the Elasticsearch URL format https://username:password@es_host:9243. For example, to connect to Elastic Cloud, create the Elasticsearch URL with the required authentication details and pass it to the ElasticVectorSearch constructor as the named parameter elasticsearch_url. You can obtain your Elastic Cloud URL and login credentials by logging in to the Elastic Cloud console at https://cloud.elastic.co, selecting your deployment, and navigating to the “Deployments” page. To obtain your Elastic Cloud password for the default “elastic” user: Log in to the Elastic Cloud console at https://cloud.elastic.co Go to “Security” > “Users” Locate the “elastic” user and click “Edit” Click “Reset password” Follow the prompts to reset the password The format for Elastic Cloud URLs is https://username:password@cluster_id.region_id.gcp.cloud.es.io:9243. Example from langchain import ElasticVectorSearch from langchain.embeddings import OpenAIEmbeddings embedding = OpenAIEmbeddings() elastic_host = "cluster_id.region_id.gcp.cloud.es.io" elasticsearch_url = f"https://username:password@{elastic_host}:9243" elastic_vector_search = ElasticVectorSearch( elasticsearch_url=elasticsearch_url, index_name="test_index", embedding=embedding ) Parameters elasticsearch_url (str) – The URL for the Elasticsearch instance. index_name (str) – The name of the Elasticsearch index for the embeddings. embedding (Embeddings) – An object that provides the ability to embed text.
It should be an instance of a class that subclasses the Embeddings abstract base class, such as OpenAIEmbeddings() Raises ValueError – If the elasticsearch python package is not installed. add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, refresh_indices: bool = True, **kwargs: Any) → List[str][source]# Run more texts through the embeddings and add to the vectorstore. Parameters texts – Iterable of strings to add to the vectorstore. metadatas – Optional list of metadatas associated with the texts. refresh_indices – bool to refresh ElasticSearch indices Returns List of ids from adding the texts into the vectorstore. classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, elasticsearch_url: Optional[str] = None, index_name: Optional[str] = None, refresh_indices: bool = True, **kwargs: Any) → langchain.vectorstores.elastic_vector_search.ElasticVectorSearch[source]# Construct ElasticVectorSearch wrapper from raw documents. This is a user-friendly interface that: Embeds documents. Creates a new index for the embeddings in the Elasticsearch instance. Adds the documents to the newly created Elasticsearch index. This is intended to be a quick way to get started. Example from langchain import ElasticVectorSearch from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() elastic_vector_search = ElasticVectorSearch.from_texts( texts, embeddings, elasticsearch_url="http://localhost:9200" )
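Continuing that example, a hedged sketch of adding documents and querying the resulting store; it assumes an Elasticsearch instance reachable at http://localhost:9200, an OpenAI key for embeddings, and illustrative texts and index name:

from langchain import ElasticVectorSearch
from langchain.embeddings import OpenAIEmbeddings

texts = ["Elasticsearch stores vectors.", "Similarity search returns documents."]
store = ElasticVectorSearch.from_texts(
    texts,
    OpenAIEmbeddings(),
    elasticsearch_url="http://localhost:9200",
    index_name="test_index",
)
store.add_texts(["More documents can be appended later."])
results = store.similarity_search("how are vectors stored?", k=2)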
similarity_search(query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs most similar to query. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. Returns List of Documents most similar to the query. similarity_search_with_score(query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any) → List[Tuple[langchain.schema.Document, float]][source]# Return docs most similar to query. :param query: Text to look up documents similar to. :param k: Number of Documents to return. Defaults to 4. Returns List of Documents most similar to the query. class langchain.vectorstores.FAISS(embedding_function: typing.Callable, index: typing.Any, docstore: langchain.docstore.base.Docstore, index_to_docstore_id: typing.Dict[int, str], relevance_score_fn: typing.Optional[typing.Callable[[float], float]] = <function _default_relevance_score_fn>, normalize_L2: bool = False)[source]# Wrapper around FAISS vector database. To use, you should have the faiss python package installed. Example from langchain import FAISS faiss = FAISS(embedding_function, index, docstore, index_to_docstore_id) add_embeddings(text_embeddings: Iterable[Tuple[str, List[float]]], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any) → List[str][source]# Run more texts through the embeddings and add to the vectorstore. Parameters
text_embeddings – Iterable pairs of string and embedding to add to the vectorstore. metadatas – Optional list of metadatas associated with the texts. ids – Optional list of unique IDs. Returns List of ids from adding the texts into the vectorstore. add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any) → List[str][source]# Run more texts through the embeddings and add to the vectorstore. Parameters texts – Iterable of strings to add to the vectorstore. metadatas – Optional list of metadatas associated with the texts. ids – Optional list of unique IDs. Returns List of ids from adding the texts into the vectorstore. classmethod from_embeddings(text_embeddings: List[Tuple[str, List[float]]], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any) → langchain.vectorstores.faiss.FAISS[source]# Construct FAISS wrapper from raw documents. This is a user-friendly interface that: Embeds documents. Creates an in-memory docstore Initializes the FAISS database This is intended to be a quick way to get started. Example from langchain import FAISS from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) faiss = FAISS.from_embeddings(text_embedding_pairs, embeddings)
classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any) → langchain.vectorstores.faiss.FAISS[source]# Construct FAISS wrapper from raw documents. This is a user-friendly interface that: Embeds documents. Creates an in-memory docstore Initializes the FAISS database This is intended to be a quick way to get started. Example from langchain import FAISS from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() faiss = FAISS.from_texts(texts, embeddings) classmethod load_local(folder_path: str, embeddings: langchain.embeddings.base.Embeddings, index_name: str = 'index') → langchain.vectorstores.faiss.FAISS[source]# Load FAISS index, docstore, and index_to_docstore_id from disk. Parameters folder_path – folder path to load index, docstore, and index_to_docstore_id from. embeddings – Embeddings to use when generating queries index_name – for saving with a specific index file name max_marginal_relevance_search(query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. fetch_k – Number of Documents to fetch to pass to MMR algorithm.
lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns List of Documents selected by maximal marginal relevance. max_marginal_relevance_search_by_vector(embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Parameters embedding – Embedding to look up documents similar to. k – Number of Documents to return. Defaults to 4. fetch_k – Number of Documents to fetch to pass to MMR algorithm. lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns List of Documents selected by maximal marginal relevance. merge_from(target: langchain.vectorstores.faiss.FAISS) → None[source]# Merge another FAISS object with the current one. Add the target FAISS to the current one. Parameters target – FAISS object you wish to merge into the current one Returns None. save_local(folder_path: str, index_name: str = 'index') → None[source]# Save FAISS index, docstore, and index_to_docstore_id to disk. Parameters folder_path – folder path to save index, docstore, and index_to_docstore_id to. index_name – for saving with a specific index file name
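A short sketch of the save_local / load_local round trip described above; the folder name and texts are placeholders, and it assumes the faiss package and an OpenAI key are available:

from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

embeddings = OpenAIEmbeddings()
db = FAISS.from_texts(["hello world", "goodbye world"], embeddings)
db.save_local("faiss_index")  # writes index, docstore, and index_to_docstore_id

# Later, reload the same index from disk with the same embedding function.
restored = FAISS.load_local("faiss_index", embeddings)
docs = restored.similarity_search("hello", k=1)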
similarity_search(query: str, k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs most similar to query. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. Returns List of Documents most similar to the query. similarity_search_by_vector(embedding: List[float], k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs most similar to embedding vector. Parameters embedding – Embedding to look up documents similar to. k – Number of Documents to return. Defaults to 4. Returns List of Documents most similar to the embedding. similarity_search_with_score(query: str, k: int = 4) → List[Tuple[langchain.schema.Document, float]][source]# Return docs most similar to query. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. Returns List of Documents most similar to the query and score for each similarity_search_with_score_by_vector(embedding: List[float], k: int = 4) → List[Tuple[langchain.schema.Document, float]][source]# Return docs most similar to query. Parameters embedding – Embedding vector to look up documents similar to. k – Number of Documents to return. Defaults to 4. Returns List of Documents most similar to the query and score for each
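And a small sketch of merge_from combined with scored search, under the same assumptions as the previous FAISS sketch:

from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

embeddings = OpenAIEmbeddings()
db_a = FAISS.from_texts(["first corpus"], embeddings)
db_b = FAISS.from_texts(["second corpus"], embeddings)
db_a.merge_from(db_b)  # db_a now also contains the documents from db_b
scored = db_a.similarity_search_with_score("corpus", k=2)  # list of (Document, score) pairs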
class langchain.vectorstores.LanceDB(connection: Any, embedding: langchain.embeddings.base.Embeddings, vector_key: Optional[str] = 'vector', id_key: Optional[str] = 'id', text_key: Optional[str] = 'text')[source]# Wrapper around LanceDB vector database. To use, you should have the lancedb python package installed. Example db = lancedb.connect('./lancedb') table = db.open_table('my_table') vectorstore = LanceDB(table, embedding_function) vectorstore.add_texts(['text1', 'text2']) result = vectorstore.similarity_search('text1') add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any) → List[str][source]# Turn texts into embeddings and add them to the database Parameters texts – Iterable of strings to add to the vectorstore. metadatas – Optional list of metadatas associated with the texts. ids – Optional list of ids to associate with the texts. Returns List of ids of the added texts. classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, connection: Any = None, vector_key: Optional[str] = 'vector', id_key: Optional[str] = 'id', text_key: Optional[str] = 'text', **kwargs: Any) → langchain.vectorstores.lancedb.LanceDB[source]# Return VectorStore initialized from texts and embeddings. similarity_search(query: str, k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]# Return documents most similar to the query Parameters
query – String to query the vectorstore with. k – Number of documents to return. Returns List of documents most similar to the query. class langchain.vectorstores.Milvus(embedding_function: langchain.embeddings.base.Embeddings, collection_name: str = 'LangChainCollection', connection_args: Optional[dict[str, Any]] = None, consistency_level: str = 'Session', index_params: Optional[dict] = None, search_params: Optional[dict] = None, drop_old: Optional[bool] = False)[source]# Wrapper around the Milvus vector database. add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, timeout: Optional[int] = None, batch_size: int = 1000, **kwargs: Any) → List[str][source]# Insert text data into Milvus. Inserting data when the collection has not been made yet will result in creating a new Collection. The data of the first entity decides the schema of the new collection, the dim is extracted from the first embedding and the columns are decided by the first metadata dict. Metadata keys will need to be present for all inserted values. At the moment there is no None equivalent in Milvus. Parameters texts (Iterable[str]) – The texts to embed, it is assumed that they all fit in memory. metadatas (Optional[List[dict]]) – Metadata dicts attached to each of the texts. Defaults to None. timeout (Optional[int]) – Timeout for each batch insert. Defaults to None. batch_size (int, optional) – Batch size to use for insertion. Defaults to 1000. Raises MilvusException – Failure to add texts Returns The resulting keys for each inserted element. Return type
List[str] classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = 'LangChainCollection', connection_args: dict[str, Any] = {'host': 'localhost', 'password': '', 'port': '19530', 'secure': False, 'user': ''}, consistency_level: str = 'Session', index_params: Optional[dict] = None, search_params: Optional[dict] = None, drop_old: bool = False, **kwargs: Any) → langchain.vectorstores.milvus.Milvus[source]# Create a Milvus collection, index it with HNSW, and insert data. Parameters texts (List[str]) – Text data. embedding (Embeddings) – Embedding function. metadatas (Optional[List[dict]]) – Metadata for each text if it exists. Defaults to None. collection_name (str, optional) – Collection name to use. Defaults to “LangChainCollection”. connection_args (dict[str, Any], optional) – Connection args to use. Defaults to DEFAULT_MILVUS_CONNECTION. consistency_level (str, optional) – Which consistency level to use. Defaults to “Session”. index_params (Optional[dict], optional) – Which index_params to use. Defaults to None. search_params (Optional[dict], optional) – Which search params to use. Defaults to None. drop_old (Optional[bool], optional) – Whether to drop the collection with that name if it exists. Defaults to False. Returns Milvus Vector Store Return type Milvus
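A rough sketch of from_texts against a locally running Milvus instance; the connection_args mirror the documented defaults, while the host, port, texts, and metadata fields are assumptions for illustration:

from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Milvus

db = Milvus.from_texts(
    ["Milvus is a vector database.", "It supports filtered search."],
    OpenAIEmbeddings(),
    metadatas=[{"source": "intro"}, {"source": "features"}],
    collection_name="LangChainCollection",
    connection_args={"host": "localhost", "port": "19530"},
    drop_old=True,  # recreate the collection if one with this name already exists
)
docs = db.similarity_search("vector database", k=2)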
max_marginal_relevance_search(query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any) → List[langchain.schema.Document][source]# Perform a search and return results that are reordered by MMR. Parameters query (str) – The text being searched. k (int, optional) – How many results to give. Defaults to 4. fetch_k (int, optional) – Total results to select k from. Defaults to 20. lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5 param (dict, optional) – The search params for the specified index. Defaults to None. expr (str, optional) – Filtering expression. Defaults to None. timeout (int, optional) – How long to wait before timeout error. Defaults to None. kwargs – Collection.search() keyword arguments. Returns Document results for search. Return type List[Document] max_marginal_relevance_search_by_vector(embedding: list[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any) → List[langchain.schema.Document][source]# Perform a search and return results that are reordered by MMR. Parameters embedding (str) – The embedding vector being searched.
k (int, optional) – How many results to give. Defaults to 4. fetch_k (int, optional) – Total results to select k from. Defaults to 20. lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5 param (dict, optional) – The search params for the specified index. Defaults to None. expr (str, optional) – Filtering expression. Defaults to None. timeout (int, optional) – How long to wait before timeout error. Defaults to None. kwargs – Collection.search() keyword arguments. Returns Document results for search. Return type List[Document] similarity_search(query: str, k: int = 4, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any) → List[langchain.schema.Document][source]# Perform a similarity search against the query string. Parameters query (str) – The text to search. k (int, optional) – How many results to return. Defaults to 4. param (dict, optional) – The search params for the index type. Defaults to None. expr (str, optional) – Filtering expression. Defaults to None. timeout (int, optional) – How long to wait before timeout error. Defaults to None. kwargs – Collection.search() keyword arguments. Returns Document results for search. Return type List[Document]
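Continuing the earlier Milvus sketch, a hedged example of filtered search via expr and the MMR variant described above; db is the store built in that sketch, and the source field in the expression is a hypothetical metadata key:

# Plain similarity search restricted by a Milvus boolean expression (assumed field name).
docs = db.similarity_search("vector database", k=2, expr='source == "features"')

# MMR re-ranking for diversity over a larger candidate pool, with the same filter.
diverse_docs = db.max_marginal_relevance_search(
    "vector database", k=2, fetch_k=10, lambda_mult=0.5, expr='source == "features"'
)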
similarity_search_by_vector(embedding: List[float], k: int = 4, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any) → List[langchain.schema.Document][source]# Perform a similarity search against the query string. Parameters embedding (List[float]) – The embedding vector to search. k (int, optional) – How many results to return. Defaults to 4. param (dict, optional) – The search params for the index type. Defaults to None. expr (str, optional) – Filtering expression. Defaults to None. timeout (int, optional) – How long to wait before timeout error. Defaults to None. kwargs – Collection.search() keyword arguments. Returns Document results for search. Return type List[Document] similarity_search_with_score(query: str, k: int = 4, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any) → List[Tuple[langchain.schema.Document, float]][source]# Perform a search on a query string and return results with score. For more information about the search parameters, take a look at the pymilvus documentation found here: https://milvus.io/api-reference/pymilvus/v2.2.6/Collection/search().md Parameters query (str) – The text being searched. k (int, optional) – The amount of results to return. Defaults to 4. param (dict) – The search params for the specified index. Defaults to None. expr (str, optional) – Filtering expression. Defaults to None.
timeout (int, optional) – How long to wait before timeout error. Defaults to None. kwargs – Collection.search() keyword arguments. Return type List[float], List[Tuple[Document, any, any]] similarity_search_with_score_by_vector(embedding: List[float], k: int = 4, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any) → List[Tuple[langchain.schema.Document, float]][source]# Perform a search on a query string and return results with score. For more information about the search parameters, take a look at the pymilvus documentation found here: https://milvus.io/api-reference/pymilvus/v2.2.6/Collection/search().md Parameters embedding (List[float]) – The embedding vector being searched. k (int, optional) – The amount of results to return. Defaults to 4. param (dict) – The search params for the specified index. Defaults to None. expr (str, optional) – Filtering expression. Defaults to None. timeout (int, optional) – How long to wait before timeout error. Defaults to None. kwargs – Collection.search() keyword arguments. Returns Result doc and score. Return type List[Tuple[Document, float]] class langchain.vectorstores.MyScale(embedding: langchain.embeddings.base.Embeddings, config: Optional[langchain.vectorstores.myscale.MyScaleSettings] = None, **kwargs: Any)[source]# Wrapper around MyScale vector database. You need a clickhouse-connect python package, and a valid account to connect to MyScale. MyScale can not only search with simple vector indexes,
it also supports complex queries with multiple conditions, constraints and even sub-queries. For more information, please visit the [MyScale official site](https://docs.myscale.com/en/overview/) add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, batch_size: int = 32, ids: Optional[Iterable[str]] = None, **kwargs: Any) → List[str][source]# Run more texts through the embeddings and add to the vectorstore. Parameters texts – Iterable of strings to add to the vectorstore. ids – Optional list of ids to associate with the texts. batch_size – Batch size of insertion metadata – Optional column data to be inserted Returns List of ids from adding the texts into the vectorstore. drop() → None[source]# Helper function: Drop data escape_str(value: str) → str[source]# classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[Dict[Any, Any]]] = None, config: Optional[langchain.vectorstores.myscale.MyScaleSettings] = None, text_ids: Optional[Iterable[str]] = None, batch_size: int = 32, **kwargs: Any) → langchain.vectorstores.myscale.MyScale[source]# Create MyScale wrapper with existing texts Parameters embedding_function (Embeddings) – Function to extract text embedding texts (Iterable[str]) – List or tuple of strings to be added config (MyScaleSettings, Optional) – MyScale configuration text_ids (Optional[Iterable], optional) – IDs for the texts. Defaults to None. batch_size (int, optional) – Batch size when transmitting data to MyScale. Defaults to 32.
metadata (List[dict], optional) – Metadata for the texts. Defaults to None. Other keyword arguments will pass into [clickhouse-connect](https://clickhouse.com/docs/en/integrations/python#clickhouse-connect-driver-api) Returns MyScale Index property metadata_column: str# similarity_search(query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any) → List[langchain.schema.Document][source]# Perform a similarity search with MyScale Parameters query (str) – query string k (int, optional) – Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional) – where condition string. Defaults to None. NOTE – Please do not let end users fill this in, and always be aware of SQL injection. When dealing with metadatas, remember to use {self.metadata_column}.attribute instead of attribute alone. The default name for it is metadata. Returns List of Documents Return type List[Document] similarity_search_by_vector(embedding: List[float], k: int = 4, where_str: Optional[str] = None, **kwargs: Any) → List[langchain.schema.Document][source]# Perform a similarity search with MyScale by vector Parameters query (str) – query string k (int, optional) – Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional) – where condition string. Defaults to None. NOTE – Please do not let end users fill this in, and always be aware of SQL injection. When dealing with metadatas, remember to use {self.metadata_column}.attribute instead of attribute alone. The default name for it is metadata. Returns List of (Document, similarity) Return type
List[Document] similarity_search_with_relevance_scores(query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any) → List[Tuple[langchain.schema.Document, float]][source]# Perform a similarity search with MyScale Parameters query (str) – query string k (int, optional) – Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional) – where condition string. Defaults to None. NOTE – Please do not let end users fill this in, and always be aware of SQL injection. When dealing with metadatas, remember to use {self.metadata_column}.attribute instead of attribute alone. The default name for it is metadata. Returns List of documents Return type List[Document] pydantic settings langchain.vectorstores.MyScaleSettings[source]# MyScale Client Configuration Attribute: myscale_host (str) : A URL to connect to MyScale backend. Defaults to ‘localhost’. myscale_port (int) : URL port to connect with HTTP. Defaults to 8443. username (str) : Username to login. Defaults to None. password (str) : Password to login. Defaults to None. index_type (str) : index type string. index_param (dict) : index build parameter. database (str) : Database name to find the table. Defaults to ‘default’. table (str) : Table name to operate on. Defaults to ‘vector_table’. metric (str) : Metric to compute distance, supported are (‘l2’, ‘cosine’, ‘ip’). Defaults to ‘cosine’. column_map (Dict) : Column type map to project column name onto langchain semantics. Must have keys: text, id, vector,
must be same size to number of columns. For example: .. code-block:: python { ‘id’: ‘text_id’, ‘vector’: ‘text_embedding’, ‘text’: ‘text_plain’, ‘metadata’: ‘metadata_dictionary_in_json’, } Defaults to identity map.
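As a rough configuration sketch: the constructor field names below are inferred from the attribute list above and should be checked against your installed version, and the endpoint, credentials, and column names are placeholders, not documented values:

from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import MyScale, MyScaleSettings

# Hypothetical MyScale endpoint and credentials; field names mirror the attributes above.
config = MyScaleSettings(
    host="msc-example.region.aws.myscale.com",
    port=8443,
    username="default",
    password="secret",
    metric="cosine",
    column_map={
        "id": "text_id",
        "vector": "text_embedding",
        "text": "text_plain",
        "metadata": "metadata_dictionary_in_json",
    },
)
store = MyScale(embedding=OpenAIEmbeddings(), config=config)
store.add_texts(["MyScale supports SQL-style filters."], metadatas=[{"topic": "sql"}])
docs = store.similarity_search("filters", k=4)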