Source code for langchain.llms.beam
"""Wrapper around Beam API."""
import base64
import json
import logging
import subprocess
import textwrap
import time
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
DEFAULT_NUM_TRIES = 10
DEFAULT_SLEEP_TIME = 4
class Beam(LLM):
"""Wrapper around the Beam API for the gpt2 large language model.
To use, you should have the ``beam-sdk`` python package installed,
and the environment variable ``BEAM_CLIENT_ID`` set with your client id
and ``BEAM_CLIENT_SECRET`` set with your client secret. Information on how
to get these is available here: https://docs.beam.cloud/account/api-keys.
The wrapper can then be called as follows, where the name, cpu, memory, gpu,
python version, and python packages can be updated accordingly. Once deployed,
the instance can be called.
Example:
.. code-block:: python
llm = Beam(model_name="gpt2",
name="langchain-gpt2",
cpu=8,
memory="32Gi",
gpu="A10G",
python_version="python3.8",
python_packages=[
"diffusers[torch]>=0.10",
"transformers",
"torch",
"pillow",
"accelerate",
"safetensors",
"xformers",],
max_length=50)
llm._deploy()
call_result = llm._call(input)
"""
model_name: str = ""
name: str = ""
cpu: str = ""
memory: str = ""
gpu: str = ""
python_version: str = ""
python_packages: List[str] = []
max_length: str = ""
url: str = ""
"""model endpoint to use"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not
explicitly specified."""
beam_client_id: str = ""
beam_client_secret: str = ""
app_id: Optional[str] = None
class Config:
"""Configuration for this pydantic config."""
extra = Extra.forbid
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transfered to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
beam_client_id = get_from_dict_or_env(
values, "beam_client_id", "BEAM_CLIENT_ID"
)
beam_client_secret = get_from_dict_or_env(
values, "beam_client_secret", "BEAM_CLIENT_SECRET"
)
values["beam_client_id"] = beam_client_id
values["beam_client_secret"] = beam_client_secret
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model_name": self.model_name,
"name": self.name,
"cpu": self.cpu,
"memory": self.memory,
"gpu": self.gpu,
"python_version": self.python_version,
"python_packages": self.python_packages,
"max_length": self.max_length,
"model_kwargs": self.model_kwargs,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "beam"
def app_creation(self) -> None:
"""Creates a Python file which will contain your Beam app definition."""
script = textwrap.dedent(
"""\
import beam
# The environment your code will run on
app = beam.App(
name="{name}",
cpu={cpu},
memory="{memory}",
gpu="{gpu}",
python_version="{python_version}",
python_packages={python_packages},
)
app.Trigger.RestAPI(
inputs={{"prompt": beam.Types.String(), "max_length": beam.Types.String()}},
outputs={{"text": beam.Types.String()}},
handler="run.py:beam_langchain",
)
"""
)
script_name = "app.py"
with open(script_name, "w") as file:
file.write(
script.format(
name=self.name,
cpu=self.cpu,
memory=self.memory,
gpu=self.gpu,
python_version=self.python_version,
python_packages=self.python_packages,
)
)
def run_creation(self) -> None:
"""Creates a Python file which will be deployed on beam."""
script = textwrap.dedent(
"""
import os
import transformers
from transformers import GPT2LMHeadModel, GPT2Tokenizer
model_name = "{model_name}"
def beam_langchain(**inputs):
prompt = inputs["prompt"]
length = inputs["max_length"]
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
model = GPT2LMHeadModel.from_pretrained(model_name)
encodedPrompt = tokenizer.encode(prompt, return_tensors='pt')
outputs = model.generate(encodedPrompt, max_length=int(length),
do_sample=True, pad_token_id=tokenizer.eos_token_id)
output = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(output)
return {{"text": output}}
"""
)
script_name = "run.py"
with open(script_name, "w") as file:
file.write(script.format(model_name=self.model_name))
def _deploy(self) -> str:
"""Call to Beam."""
try:
import beam # type: ignore
if beam.__path__ == "":
raise ImportError
except ImportError:
raise ImportError(
"Could not import beam python package. "
"Please install it with `curl "
"https://raw.githubusercontent.com/slai-labs"
"/get-beam/main/get-beam.sh -sSfL | sh`."
)
self.app_creation()
self.run_creation()
process = subprocess.run(
"beam deploy app.py", shell=True, capture_output=True, text=True
)
if process.returncode == 0:
output = process.stdout
logger.info(output)
lines = output.split("\n")
for line in lines:
if line.startswith(" i Send requests to: https://apps.beam.cloud/"):
self.app_id = line.split("/")[-1]
self.url = line.split(":")[1].strip()
return self.app_id
raise ValueError(
f"""Failed to retrieve the appID from the deployment output.
Deployment output: {output}"""
)
else:
raise ValueError(f"Deployment failed. Error: {process.stderr}")
@property
def authorization(self) -> str:
if self.beam_client_id:
credential_str = self.beam_client_id + ":" + self.beam_client_secret
else:
credential_str = self.beam_client_secret
return base64.b64encode(credential_str.encode()).decode()
def _call(
self,
prompt: str,
stop: Optional[list] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to Beam."""
url = "https://apps.beam.cloud/" + self.app_id if self.app_id else self.url
payload = {"prompt": prompt, "max_length": self.max_length}
payload.update(kwargs)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Authorization": "Basic " + self.authorization,
"Connection": "keep-alive",
"Content-Type": "application/json",
}
for _ in range(DEFAULT_NUM_TRIES):
request = requests.post(url, headers=headers, data=json.dumps(payload))
if request.status_code == 200:
return request.json()["text"]
time.sleep(DEFAULT_SLEEP_TIME)
logger.warning("Unable to successfully call model.")
return ""
https://api.python.langchain.com/en/latest/_modules/langchain/llms/beam.html
Source code for langchain.llms.gpt4all
"""Wrapper for the GPT4All model."""
from functools import partial
from typing import Any, Dict, List, Mapping, Optional, Set
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
class GPT4All(LLM):
r"""Wrapper around GPT4All language models.
To use, you should have the ``gpt4all`` python package installed, the
pre-trained model file, and the model's config information.
Example:
.. code-block:: python
from langchain.llms import GPT4All
model = GPT4All(model="./models/gpt4all-model.bin", n_ctx=512, n_threads=8)
# Simplest invocation
response = model("Once upon a time, ")
"""
model: str
"""Path to the pre-trained GPT4All model file."""
backend: Optional[str] = Field(None, alias="backend")
n_ctx: int = Field(512, alias="n_ctx")
"""Token context window."""
n_parts: int = Field(-1, alias="n_parts")
"""Number of parts to split the model into.
If -1, the number of parts is automatically determined."""
seed: int = Field(0, alias="seed")
"""Seed. If -1, a random seed is used."""
f16_kv: bool = Field(False, alias="f16_kv")
"""Use half-precision for key/value cache."""
logits_all: bool = Field(False, alias="logits_all")
"""Return logits for all tokens, not just the last token."""
vocab_only: bool = Field(False, alias="vocab_only")
"""Only load the vocabulary, no weights."""
use_mlock: bool = Field(False, alias="use_mlock")
"""Force system to keep model in RAM."""
embedding: bool = Field(False, alias="embedding")
"""Use embedding mode only."""
n_threads: Optional[int] = Field(4, alias="n_threads")
"""Number of threads to use."""
n_predict: Optional[int] = 256
"""The maximum number of tokens to generate."""
temp: Optional[float] = 0.8
"""The temperature to use for sampling."""
top_p: Optional[float] = 0.95
"""The top-p value to use for sampling."""
top_k: Optional[int] = 40
"""The top-k value to use for sampling."""
echo: Optional[bool] = False
"""Whether to echo the prompt."""
stop: Optional[List[str]] = []
"""A list of strings to stop generation when encountered."""
repeat_last_n: Optional[int] = 64
"Last n tokens to penalize"
repeat_penalty: Optional[float] = 1.3
"""The penalty to apply to repeated tokens."""
n_batch: int = Field(1, alias="n_batch")
"""Batch size for prompt processing."""
streaming: bool = False
"""Whether to stream the results or not."""
context_erase: float = 0.5
"""Leave (n_ctx * context_erase) tokens
starting from beginning if the context has run out."""
allow_download: bool = False
"""If model does not exist in ~/.cache/gpt4all/, download it."""
client: Any = None #: :meta private:
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@staticmethod
def _model_param_names() -> Set[str]:
return {
"n_ctx",
"n_predict",
"top_k",
"top_p",
"temp",
"n_batch",
"repeat_penalty",
"repeat_last_n",
"context_erase",
}
def _default_params(self) -> Dict[str, Any]:
return {
"n_ctx": self.n_ctx,
"n_predict": self.n_predict,
"top_k": self.top_k,
"top_p": self.top_p,
"temp": self.temp,
"n_batch": self.n_batch,
"repeat_penalty": self.repeat_penalty,
"repeat_last_n": self.repeat_last_n,
"context_erase": self.context_erase,
}
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in the environment."""
try:
from gpt4all import GPT4All as GPT4AllModel
except ImportError:
raise ImportError(
"Could not import gpt4all python package. "
"Please install it with `pip install gpt4all`."
)
full_path = values["model"]
model_path, delimiter, model_name = full_path.rpartition("/")
model_path += delimiter
values["client"] = GPT4AllModel(
model_name,
model_path=model_path or None,
model_type=values["backend"],
allow_download=values["allow_download"],
)
if values["n_threads"] is not None:
# set n_threads
values["client"].model.set_thread_count(values["n_threads"])
try:
values["backend"] = values["client"].model_type
except AttributeError:
# The below is for compatibility with GPT4All Python bindings <= 0.2.3.
values["backend"] = values["client"].model.model_type
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model,
**self._default_params(),
**{
k: v for k, v in self.__dict__.items() if k in self._model_param_names()
},
}
@property
def _llm_type(self) -> str:
"""Return the type of llm."""
return "gpt4all"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
r"""Call out to GPT4All's generate method.
Args:
prompt: The prompt to pass into the model.
stop: A list of strings to stop generation when encountered.
Returns:
The string generated by the model.
Example:
.. code-block:: python
prompt = "Once upon a time, "
response = model(prompt, n_predict=55)
"""
text_callback = None
if run_manager:
text_callback = partial(run_manager.on_llm_new_token, verbose=self.verbose)
text = ""
params = {**self._default_params(), **kwargs}
for token in self.client.generate(prompt, **params):
if text_callback:
text_callback(token)
text += token
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
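A minimal local-inference sketch based on the docstring above; the model path is a placeholder and the ``gpt4all`` package is assumed to be installed.

from langchain.llms import GPT4All

# Placeholder path to a locally downloaded model file (allow_download is False by default).
model = GPT4All(model="./models/gpt4all-model.bin", n_ctx=512, n_threads=8)

# Generation kwargs such as n_predict are merged into the params passed to generate().
response = model("Once upon a time, ", n_predict=55)
print(response)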
https://api.python.langchain.com/en/latest/_modules/langchain/llms/gpt4all.html
Source code for langchain.llms.mosaicml
"""Wrapper around MosaicML APIs."""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
INSTRUCTION_KEY = "### Instruction:"
RESPONSE_KEY = "### Response:"
INTRO_BLURB = (
"Below is an instruction that describes a task. "
"Write a response that appropriately completes the request."
)
PROMPT_FOR_GENERATION_FORMAT = """{intro}
{instruction_key}
{instruction}
{response_key}
""".format(
intro=INTRO_BLURB,
instruction_key=INSTRUCTION_KEY,
instruction="{instruction}",
response_key=RESPONSE_KEY,
)
class MosaicML(LLM):
"""Wrapper around MosaicML's LLM inference service.
To use, you should have the
environment variable ``MOSAICML_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.llms import MosaicML
endpoint_url = (
"https://models.hosted-on.mosaicml.hosting/mpt-7b-instruct/v1/predict"
)
mosaic_llm = MosaicML(
endpoint_url=endpoint_url,
mosaicml_api_token="my-api-key"
)
"""
endpoint_url: str = (
"https://models.hosted-on.mosaicml.hosting/mpt-7b-instruct/v1/predict"
)
"""Endpoint URL to use."""
inject_instruction_format: bool = False
"""Whether to inject the instruction format into the prompt."""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
retry_sleep: float = 1.0
"""How long to try sleeping for if a rate limit is encountered"""
mosaicml_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
mosaicml_api_token = get_from_dict_or_env(
values, "mosaicml_api_token", "MOSAICML_API_TOKEN"
)
values["mosaicml_api_token"] = mosaicml_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "mosaic"
def _transform_prompt(self, prompt: str) -> str:
"""Transform prompt."""
if self.inject_instruction_format:
prompt = PROMPT_FOR_GENERATION_FORMAT.format(
instruction=prompt,
)
return prompt
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
is_retry: bool = False,
**kwargs: Any,
) -> str:
"""Call out to a MosaicML LLM inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = mosaic_llm("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
prompt = self._transform_prompt(prompt)
payload = {"input_strings": [prompt]}
payload.update(_model_kwargs)
payload.update(kwargs)
# HTTP headers for authorization
headers = {
"Authorization": f"{self.mosaicml_api_token}",
"Content-Type": "application/json",
}
# send request
try:
response = requests.post(self.endpoint_url, headers=headers, json=payload)
except requests.exceptions.RequestException as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
try:
parsed_response = response.json()
if "error" in parsed_response:
# if we get rate limited, try sleeping for 1 second
if (
not is_retry
and "rate limit exceeded" in parsed_response["error"].lower()
):
import time
time.sleep(self.retry_sleep)
return self._call(prompt, stop, run_manager, is_retry=True)
raise ValueError(
f"Error raised by inference API: {parsed_response['error']}"
)
# The inference API has changed a couple of times, so we add some handling
# to be robust to multiple response formats.
if isinstance(parsed_response, dict):
if "data" in parsed_response:
output_item = parsed_response["data"]
elif "output" in parsed_response:
output_item = parsed_response["output"]
else:
raise ValueError(
f"No key data or output in response: {parsed_response}"
)
if isinstance(output_item, list):
text = output_item[0]
else:
text = output_item
elif isinstance(parsed_response, list):
first_item = parsed_response[0]
if isinstance(first_item, str):
text = first_item
elif isinstance(first_item, dict):
if "output" in parsed_response:
text = first_item["output"]
else:
raise ValueError(
f"No key data or output in response: {parsed_response}"
)
else:
raise ValueError(f"Unexpected response format: {parsed_response}")
else:
raise ValueError(f"Unexpected response type: {parsed_response}")
text = text[len(prompt) :]
except requests.exceptions.JSONDecodeError as e:
raise ValueError(
f"Error raised by inference API: {e}.\nResponse: {response.text}"
)
# TODO: replace when MosaicML supports custom stop tokens natively
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
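A minimal usage sketch against the default mpt-7b-instruct endpoint; the API token is a placeholder.

import os

from langchain.llms import MosaicML

os.environ["MOSAICML_API_TOKEN"] = "<your-api-token>"  # placeholder

# inject_instruction_format wraps the prompt in the instruction template defined above.
mosaic_llm = MosaicML(inject_instruction_format=True)
print(mosaic_llm("Tell me a joke."))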
https://api.python.langchain.com/en/latest/_modules/langchain/llms/mosaicml.html
Source code for langchain.llms.self_hosted_hugging_face
"""Wrapper around HuggingFace Pipeline API to run on self-hosted remote hardware."""
import importlib.util
import logging
from typing import Any, Callable, List, Mapping, Optional
from pydantic import Extra
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.self_hosted import SelfHostedPipeline
from langchain.llms.utils import enforce_stop_tokens
DEFAULT_MODEL_ID = "gpt2"
DEFAULT_TASK = "text-generation"
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")
logger = logging.getLogger(__name__)
def _generate_text(
pipeline: Any,
prompt: str,
*args: Any,
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> str:
"""Inference function to send to the remote hardware.
Accepts a Hugging Face pipeline (or more likely,
a key pointing to such a pipeline on the cluster's object store)
and returns generated text.
"""
response = pipeline(prompt, *args, **kwargs)
if pipeline.task == "text-generation":
# Text generation return includes the starter text.
text = response[0]["generated_text"][len(prompt) :]
elif pipeline.task == "text2text-generation":
text = response[0]["generated_text"]
elif pipeline.task == "summarization":
text = response[0]["summary_text"]
else:
raise ValueError(
f"Got invalid task {pipeline.task}, "
f"currently only {VALID_TASKS} are supported"
)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
def _load_transformer(
model_id: str = DEFAULT_MODEL_ID,
task: str = DEFAULT_TASK,
device: int = 0,
model_kwargs: Optional[dict] = None,
) -> Any:
"""Inference function to send to the remote hardware.
Accepts a huggingface model_id and returns a pipeline for the task.
"""
from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer
from transformers import pipeline as hf_pipeline
_model_kwargs = model_kwargs or {}
tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs)
try:
if task == "text-generation":
model = AutoModelForCausalLM.from_pretrained(model_id, **_model_kwargs)
elif task in ("text2text-generation", "summarization"):
model = AutoModelForSeq2SeqLM.from_pretrained(model_id, **_model_kwargs)
else:
raise ValueError(
f"Got invalid task {task}, "
f"currently only {VALID_TASKS} are supported"
)
except ImportError as e:
raise ValueError(
f"Could not load the {task} model due to missing dependencies."
) from e
if importlib.util.find_spec("torch") is not None:
import torch
cuda_device_count = torch.cuda.device_count()
if device < -1 or (device >= cuda_device_count):
raise ValueError(
f"Got device=={device}, "
f"device is required to be within [-1, {cuda_device_count})"
)
if device < 0 and cuda_device_count > 0:
logger.warning(
"Device has %d GPUs available. "
"Provide device={deviceId} to `from_model_id` to use available"
"GPUs for execution. deviceId is -1 for CPU and "
"can be a positive integer associated with CUDA device id.",
cuda_device_count,
)
pipeline = hf_pipeline(
task=task,
model=model,
tokenizer=tokenizer,
device=device,
model_kwargs=_model_kwargs,
)
if pipeline.task not in VALID_TASKS:
raise ValueError(
f"Got invalid task {pipeline.task}, "
f"currently only {VALID_TASKS} are supported"
)
return pipeline
class SelfHostedHuggingFaceLLM(SelfHostedPipeline):
"""Wrapper around HuggingFace Pipeline API to run on self-hosted remote hardware.
Supported hardware includes auto-launched instances on AWS, GCP, Azure,
and Lambda, as well as servers specified
by IP address and SSH credentials (such as on-prem, or another cloud
like Paperspace, Coreweave, etc.).
To use, you should have the ``runhouse`` python package installed.
Only supports `text-generation`, `text2text-generation` and `summarization` for now.
Example using from_model_id:
.. code-block:: python
from langchain.llms import SelfHostedHuggingFaceLLM
import runhouse as rh
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
hf = SelfHostedHuggingFaceLLM(
model_id="google/flan-t5-large", task="text2text-generation",
hardware=gpu
)
Example passing fn that generates a pipeline (bc the pipeline is not serializable):
.. code-block:: python
from langchain.llms import SelfHostedHuggingFaceLLM
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import runhouse as rh
def get_pipeline():
model_id = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
pipe = pipeline(
"text-generation", model=model, tokenizer=tokenizer
)
return pipe
hf = SelfHostedHuggingFaceLLM(
model_load_fn=get_pipeline, model_id="gpt2", hardware=gpu)
"""
model_id: str = DEFAULT_MODEL_ID
"""Hugging Face model_id to load the model."""
task: str = DEFAULT_TASK
"""Hugging Face task ("text-generation", "text2text-generation" or
"summarization")."""
device: int = 0
"""Device to use for inference. -1 for CPU, 0 for GPU, 1 for second GPU, etc."""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
hardware: Any
"""Remote hardware to send the inference function to."""
model_reqs: List[str] = ["./", "transformers", "torch"]
"""Requirements to install on hardware to inference the model."""
model_load_fn: Callable = _load_transformer
"""Function to load the model remotely on the server."""
inference_fn: Callable = _generate_text #: :meta private:
"""Inference function to send to the remote hardware."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def __init__(self, **kwargs: Any):
"""Construct the pipeline remotely using an auxiliary function.
The load function needs to be importable to be imported
and run on the server, i.e. in a module and not a REPL or closure.
Then, initialize the remote inference function.
"""
load_fn_kwargs = {
"model_id": kwargs.get("model_id", DEFAULT_MODEL_ID),
"task": kwargs.get("task", DEFAULT_TASK),
"device": kwargs.get("device", 0),
"model_kwargs": kwargs.get("model_kwargs", None),
}
super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_id": self.model_id},
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
return "selfhosted_huggingface_pipeline"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
return self.client(
pipeline=self.pipeline_ref, prompt=prompt, stop=stop, **kwargs
)
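A minimal remote-inference sketch mirroring the docstring above; it assumes ``runhouse`` is installed and that the named GPU cluster can be launched in your cloud account.

import runhouse as rh

from langchain.llms import SelfHostedHuggingFaceLLM

# Auto-launched GPU cluster (name and instance type are illustrative).
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")

hf = SelfHostedHuggingFaceLLM(
    model_id="google/flan-t5-large",
    task="text2text-generation",
    hardware=gpu,
)
print(hf("Translate to German: How are you?"))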
https://api.python.langchain.com/en/latest/_modules/langchain/llms/self_hosted_hugging_face.html
Source code for langchain.llms.writer
"""Wrapper around Writer APIs."""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
class Writer(LLM):
"""Wrapper around Writer large language models.
To use, you should have the environment variable ``WRITER_API_KEY`` and
``WRITER_ORG_ID`` set with your API key and organization ID respectively.
Example:
.. code-block:: python
from langchain import Writer
writer = Writer(model_id="palmyra-base")
"""
writer_org_id: Optional[str] = None
"""Writer organization ID."""
model_id: str = "palmyra-instruct"
"""Model name to use."""
min_tokens: Optional[int] = None
"""Minimum number of tokens to generate."""
max_tokens: Optional[int] = None
"""Maximum number of tokens to generate."""
temperature: Optional[float] = None
"""What sampling temperature to use."""
top_p: Optional[float] = None
"""Total probability mass of tokens to consider at each step."""
stop: Optional[List[str]] = None
"""Sequences when completion generation will stop."""
presence_penalty: Optional[float] = None
"""Penalizes repeated tokens regardless of frequency."""
repetition_penalty: Optional[float] = None
"""Penalizes repeated tokens according to frequency."""
best_of: Optional[int] = None
"""Generates this many completions server-side and returns the "best"."""
logprobs: bool = False
"""Whether to return log probabilities."""
n: Optional[int] = None
"""How many completions to generate."""
writer_api_key: Optional[str] = None
"""Writer API key."""
base_url: Optional[str] = None
"""Base url to use, if None decides based on model name."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and organization id exist in environment."""
writer_api_key = get_from_dict_or_env(
values, "writer_api_key", "WRITER_API_KEY"
)
values["writer_api_key"] = writer_api_key
writer_org_id = get_from_dict_or_env(values, "writer_org_id", "WRITER_ORG_ID")
values["writer_org_id"] = writer_org_id
return values
@property
def _default_params(self) -> Mapping[str, Any]:
"""Get the default parameters for calling Writer API."""
return {
"minTokens": self.min_tokens,
"maxTokens": self.max_tokens,
"temperature": self.temperature,
"topP": self.top_p,
"stop": self.stop,
"presencePenalty": self.presence_penalty,
"repetitionPenalty": self.repetition_penalty,
"bestOf": self.best_of,
"logprobs": self.logprobs,
"n": self.n,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_id": self.model_id, "writer_org_id": self.writer_org_id},
**self._default_params,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "writer"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Writer's completions endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = writer("Tell me a joke.")
"""
if self.base_url is not None:
base_url = self.base_url
else:
base_url = (
"https://enterprise-api.writer.com/llm"
f"/organization/{self.writer_org_id}"
f"/model/{self.model_id}/completions"
)
params = {**self._default_params, **kwargs}
response = requests.post(
url=base_url,
headers={
"Authorization": f"{self.writer_api_key}",
"Content-Type": "application/json",
"Accept": "application/json",
},
json={"prompt": prompt, **params},
)
text = response.text
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
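A minimal usage sketch; the API key and organization id are placeholders that the validator above reads from the environment.

import os

from langchain.llms import Writer

os.environ["WRITER_API_KEY"] = "<your-api-key>"  # placeholder
os.environ["WRITER_ORG_ID"] = "<your-org-id>"    # placeholder

writer = Writer(model_id="palmyra-base", max_tokens=64)
print(writer("Tell me a joke."))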
https://api.python.langchain.com/en/latest/_modules/langchain/llms/writer.html
Source code for langchain.llms.modal
"""Wrapper around Modal API."""
import logging
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
class Modal(LLM):
"""Wrapper around Modal large language models.
To use, you should have the ``modal-client`` python package installed.
Any parameters that are valid to be passed to the call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.llms import Modal
modal = Modal(endpoint_url="")
"""
endpoint_url: str = ""
"""model endpoint to use"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not
explicitly specified."""
class Config:
"""Configuration for this pydantic config."""
extra = Extra.forbid
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transfered to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"endpoint_url": self.endpoint_url},
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "modal"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to Modal endpoint."""
params = self.model_kwargs or {}
params = {**params, **kwargs}
response = requests.post(
url=self.endpoint_url,
headers={
"Content-Type": "application/json",
},
json={"prompt": prompt, **params},
)
try:
response_json = response.json()
text = response_json["prompt"]
except KeyError:
raise ValueError("LangChain requires 'prompt' key in response.")
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
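A minimal usage sketch; the web endpoint URL is a placeholder for a Modal deployment whose JSON response contains the generated text under a ``prompt`` key, as _call above expects.

from langchain.llms import Modal

# Placeholder URL of a deployed Modal web endpoint.
modal_llm = Modal(endpoint_url="<your-web-endpoint-url>")
print(modal_llm("What is the capital of France?", stop=["\n"]))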
https://api.python.langchain.com/en/latest/_modules/langchain/llms/modal.html
Source code for langchain.llms.huggingface_hub
"""Wrapper around HuggingFace APIs."""
from typing import Any, Dict, List, Mapping, Optional
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
DEFAULT_REPO_ID = "gpt2"
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")
class HuggingFaceHub(LLM):
"""Wrapper around HuggingFaceHub models.
To use, you should have the ``huggingface_hub`` python package installed, and the
environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Only supports `text-generation`, `text2text-generation` and `summarization` for now.
Example:
.. code-block:: python
from langchain.llms import HuggingFaceHub
hf = HuggingFaceHub(repo_id="gpt2", huggingfacehub_api_token="my-api-key")
"""
client: Any #: :meta private:
repo_id: str = DEFAULT_REPO_ID
"""Model name to use."""
task: Optional[str] = None
"""Task to call the model with.
Should be a task that returns `generated_text` or `summary_text`."""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
huggingfacehub_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingfacehub_api_token = get_from_dict_or_env(
values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
)
try:
from huggingface_hub.inference_api import InferenceApi
repo_id = values["repo_id"]
client = InferenceApi(
repo_id=repo_id,
token=huggingfacehub_api_token,
task=values.get("task"),
)
if client.task not in VALID_TASKS:
raise ValueError(
f"Got invalid task {client.task}, "
f"currently only {VALID_TASKS} are supported"
)
values["client"] = client
except ImportError:
raise ValueError(
"Could not import huggingface_hub python package. "
"Please install it with `pip install huggingface_hub`."
)
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"repo_id": self.repo_id, "task": self.task},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "huggingface_hub"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to HuggingFace Hub's inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = hf("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
params = {**_model_kwargs, **kwargs}
response = self.client(inputs=prompt, params=params)
if "error" in response:
raise ValueError(f"Error raised by inference API: {response['error']}")
if self.client.task == "text-generation":
# Text generation return includes the starter text.
text = response[0]["generated_text"][len(prompt) :]
elif self.client.task == "text2text-generation":
text = response[0]["generated_text"]
elif self.client.task == "summarization":
text = response[0]["summary_text"]
else:
raise ValueError(
f"Got invalid task {self.client.task}, "
f"currently only {VALID_TASKS} are supported"
)
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text
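A minimal usage sketch; the repo id and generation kwargs are illustrative, and the token is a placeholder (it can also come from the HUGGINGFACEHUB_API_TOKEN environment variable).

from langchain.llms import HuggingFaceHub

hf = HuggingFaceHub(
    repo_id="google/flan-t5-xl",
    task="text2text-generation",
    model_kwargs={"temperature": 0.5, "max_length": 64},
    huggingfacehub_api_token="<your-api-token>",  # placeholder
)
print(hf("Translate English to German: How old are you?"))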
https://api.python.langchain.com/en/latest/_modules/langchain/llms/huggingface_hub.html
Source code for langchain.llms.deepinfra
"""Wrapper around DeepInfra APIs."""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
DEFAULT_MODEL_ID = "google/flan-t5-xl"
class DeepInfra(LLM):
"""Wrapper around DeepInfra deployed models.
To use, you should have the ``requests`` python package installed, and the
environment variable ``DEEPINFRA_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Only supports `text-generation` and `text2text-generation` for now.
Example:
.. code-block:: python
from langchain.llms import DeepInfra
di = DeepInfra(model_id="google/flan-t5-xl",
deepinfra_api_token="my-api-key")
"""
model_id: str = DEFAULT_MODEL_ID
model_kwargs: Optional[dict] = None
deepinfra_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
deepinfra_api_token = get_from_dict_or_env(
values, "deepinfra_api_token", "DEEPINFRA_API_TOKEN"
)
values["deepinfra_api_token"] = deepinfra_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_id": self.model_id},
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "deepinfra"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to DeepInfra's inference API endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = di("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
_model_kwargs = {**_model_kwargs, **kwargs}
# HTTP headers for authorization
headers = {
"Authorization": f"bearer {self.deepinfra_api_token}",
"Content-Type": "application/json",
}
try:
res = requests.post(
f"https://api.deepinfra.com/v1/inference/{self.model_id}",
headers=headers,
json={"input": prompt, **_model_kwargs},
)
except requests.exceptions.RequestException as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
if res.status_code != 200:
raise ValueError(
"Error raised by inference API HTTP code: %s, %s"
% (res.status_code, res.text)
)
try:
t = res.json()
text = t["results"][0]["generated_text"]
except requests.exceptions.JSONDecodeError as e:
raise ValueError(
f"Error raised by inference API: {e}.\nResponse: {res.text}"
)
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
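A minimal usage sketch; the token is a placeholder and model_kwargs holds illustrative parameters that _call merges into the request body.

import os

from langchain.llms import DeepInfra

os.environ["DEEPINFRA_API_TOKEN"] = "<your-api-token>"  # placeholder

di = DeepInfra(model_id="google/flan-t5-xl", model_kwargs={"temperature": 0.7})
print(di("Tell me a joke."))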
https://api.python.langchain.com/en/latest/_modules/langchain/llms/deepinfra.html
Source code for langchain.llms.clarifai
"""Wrapper around Clarifai's APIs."""
import logging
from typing import Any, Dict, List, Optional
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class Clarifai(LLM):
"""Wrapper around Clarifai's large language models.
To use, you should have an account on the Clarifai platform,
the ``clarifai`` python package installed, and the
environment variable ``CLARIFAI_PAT_KEY`` set with your PAT key,
or pass it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.llms import Clarifai
clarifai_llm = Clarifai(clarifai_pat_key=CLARIFAI_PAT_KEY, \
user_id=USER_ID, app_id=APP_ID, model_id=MODEL_ID)
"""
stub: Any #: :meta private:
metadata: Any
userDataObject: Any
model_id: Optional[str] = None
"""Model id to use."""
model_version_id: Optional[str] = None
"""Model version id to use."""
app_id: Optional[str] = None
"""Clarifai application id to use."""
user_id: Optional[str] = None
"""Clarifai user id to use."""
clarifai_pat_key: Optional[str] = None
api_base: str = "https://api.clarifai.com"
stop: Optional[List[str]] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that we have all required info to access Clarifai
platform and python package exists in environment."""
values["clarifai_pat_key"] = get_from_dict_or_env(
values, "clarifai_pat_key", "CLARIFAI_PAT_KEY"
)
user_id = values.get("user_id")
app_id = values.get("app_id")
model_id = values.get("model_id")
if values["clarifai_pat_key"] is None:
raise ValueError("Please provide a clarifai_pat_key.")
if user_id is None:
raise ValueError("Please provide a user_id.")
if app_id is None:
raise ValueError("Please provide a app_id.")
if model_id is None:
raise ValueError("Please provide a model_id.")
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Cohere API."""
return {}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model_id": self.model_id}}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "clarifai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any
) -> str:
"""Call out to Clarfai's PostModelOutputs endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = clarifai_llm("Tell me a joke.")
"""
try:
from clarifai.auth.helper import ClarifaiAuthHelper
from clarifai.client import create_stub
from clarifai_grpc.grpc.api import (
resources_pb2,
service_pb2,
)
from clarifai_grpc.grpc.api.status import status_code_pb2
except ImportError:
raise ImportError(
"Could not import clarifai python package. "
"Please install it with `pip install clarifai`."
)
auth = ClarifaiAuthHelper(
user_id=self.user_id,
app_id=self.app_id,
pat=self.clarifai_pat_key,
base=self.api_base,
)
self.userDataObject = auth.get_user_app_id_proto()
self.stub = create_stub(auth)
params = self._default_params
if self.stop is not None and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop is not None:
params["stop_sequences"] = self.stop
else:
params["stop_sequences"] = stop
# The userDataObject is created in the overview and
# is required when using a PAT
# If version_id is None, it defaults to the latest model version
post_model_outputs_request = service_pb2.PostModelOutputsRequest(
user_app_id=self.userDataObject,
model_id=self.model_id,
version_id=self.model_version_id,
inputs=[
resources_pb2.Input(
data=resources_pb2.Data(text=resources_pb2.Text(raw=prompt))
)
],
)
post_model_outputs_response = self.stub.PostModelOutputs(
post_model_outputs_request
)
if post_model_outputs_response.status.code != status_code_pb2.SUCCESS:
logger.error(post_model_outputs_response.status)
raise Exception(
"Post model outputs failed, status: "
+ post_model_outputs_response.status.description
)
text = post_model_outputs_response.outputs[0].data.text.raw
# In order to make this consistent with other endpoints, we strip them.
if stop is not None or self.stop is not None:
text = enforce_stop_tokens(text, params["stop_sequences"])
return text
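A minimal usage sketch; all identifiers are placeholders taken from your Clarifai account, app, and model.

from langchain.llms import Clarifai

clarifai_llm = Clarifai(
    clarifai_pat_key="<your-pat-key>",
    user_id="<user-id>",
    app_id="<app-id>",
    model_id="<model-id>",
)
print(clarifai_llm("Tell me a joke."))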
https://api.python.langchain.com/en/latest/_modules/langchain/llms/clarifai.html
Source code for langchain.llms.cerebriumai
"""Wrapper around CerebriumAI API."""
import logging
from typing import Any, Dict, List, Mapping, Optional
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class CerebriumAI(LLM):
"""Wrapper around CerebriumAI large language models.
To use, you should have the ``cerebrium`` python package installed, and the
environment variable ``CEREBRIUMAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.llms import CerebriumAI
cerebrium = CerebriumAI(endpoint_url="")
"""
endpoint_url: str = ""
"""model endpoint to use"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not
explicitly specified."""
cerebriumai_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic config."""
extra = Extra.forbid
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transfered to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
cerebriumai_api_key = get_from_dict_or_env(
values, "cerebriumai_api_key", "CEREBRIUMAI_API_KEY"
)
values["cerebriumai_api_key"] = cerebriumai_api_key
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"endpoint_url": self.endpoint_url},
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "cerebriumai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to CerebriumAI endpoint."""
try:
from cerebrium import model_api_request
except ImportError:
raise ValueError(
"Could not import cerebrium python package. "
"Please install it with `pip install cerebrium`."
)
params = self.model_kwargs or {}
response = model_api_request(
self.endpoint_url,
{"prompt": prompt, **params, **kwargs},
self.cerebriumai_api_key,
)
text = response["data"]["result"]
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
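A minimal usage sketch; the endpoint URL and API key are placeholders, and max_length is an illustrative extra parameter that build_extra routes into model_kwargs.

import os

from langchain.llms import CerebriumAI

os.environ["CEREBRIUMAI_API_KEY"] = "<your-api-key>"  # placeholder

cerebrium = CerebriumAI(endpoint_url="<your-endpoint-url>", max_length=100)
print(cerebrium("Tell me a joke."))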
https://api.python.langchain.com/en/latest/_modules/langchain/llms/cerebriumai.html
Source code for langchain.llms.forefrontai
"""Wrapper around ForefrontAI APIs."""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
class ForefrontAI(LLM):
"""Wrapper around ForefrontAI large language models.
To use, you should have the environment variable ``FOREFRONTAI_API_KEY``
set with your API key.
Example:
.. code-block:: python
from langchain.llms import ForefrontAI
forefrontai = ForefrontAI(endpoint_url="")
"""
endpoint_url: str = ""
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
length: int = 256
"""The maximum number of tokens to generate in the completion."""
top_p: float = 1.0
"""Total probability mass of tokens to consider at each step."""
top_k: int = 40
"""The number of highest probability vocabulary tokens to
keep for top-k-filtering."""
repetition_penalty: int = 1
"""Penalizes repeated tokens according to frequency."""
forefrontai_api_key: Optional[str] = None
base_url: Optional[str] = None
"""Base url to use, if None decides based on model name."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
forefrontai_api_key = get_from_dict_or_env(
values, "forefrontai_api_key", "FOREFRONTAI_API_KEY"
)
values["forefrontai_api_key"] = forefrontai_api_key
return values
@property
def _default_params(self) -> Mapping[str, Any]:
"""Get the default parameters for calling ForefrontAI API."""
return {
"temperature": self.temperature,
"length": self.length,
"top_p": self.top_p,
"top_k": self.top_k,
"repetition_penalty": self.repetition_penalty,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"endpoint_url": self.endpoint_url}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "forefrontai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to ForefrontAI's complete endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = forefrontai("Tell me a joke.")
"""
response = requests.post(
url=self.endpoint_url,
headers={
"Authorization": f"Bearer {self.forefrontai_api_key}",
"Content-Type": "application/json",
},
json={"text": prompt, **self._default_params, **kwargs},
)
response_json = response.json()
text = response_json["result"][0]["completion"]
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
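A minimal usage sketch; the endpoint URL and API key are placeholders for values from your ForefrontAI dashboard.

import os

from langchain.llms import ForefrontAI

os.environ["FOREFRONTAI_API_KEY"] = "<your-api-key>"  # placeholder

forefrontai = ForefrontAI(endpoint_url="<your-model-endpoint-url>", length=128)
print(forefrontai("Tell me a joke."))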
https://api.python.langchain.com/en/latest/_modules/langchain/llms/forefrontai.html
Source code for langchain.llms.fake
"""Fake LLM wrapper for testing purposes."""
from typing import Any, List, Mapping, Optional
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import LLM
class FakeListLLM(LLM):
"""Fake LLM wrapper for testing purposes."""
responses: List
i: int = 0
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "fake-list"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Return next response"""
response = self.responses[self.i]
self.i += 1
return response
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Return next response"""
response = self.responses[self.i]
self.i += 1
return response
@property
def _identifying_params(self) -> Mapping[str, Any]:
return {"responses": self.responses}
https://api.python.langchain.com/en/latest/_modules/langchain/llms/fake.html
Source code for langchain.llms.aviary
"""Wrapper around Aviary"""
import dataclasses
import os
from typing import Any, Dict, List, Mapping, Optional, Union, cast
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
TIMEOUT = 60
@dataclasses.dataclass
class AviaryBackend:
backend_url: str
bearer: str
def __post_init__(self) -> None:
self.header = {"Authorization": self.bearer}
@classmethod
def from_env(cls) -> "AviaryBackend":
aviary_url = os.getenv("AVIARY_URL")
assert aviary_url, "AVIARY_URL must be set"
aviary_token = os.getenv("AVIARY_TOKEN", "")
bearer = f"Bearer {aviary_token}" if aviary_token else ""
aviary_url += "/" if not aviary_url.endswith("/") else ""
return cls(aviary_url, bearer)
def get_models() -> List[str]:
"""List available models"""
backend = AviaryBackend.from_env()
request_url = backend.backend_url + "-/routes"
response = requests.get(request_url, headers=backend.header, timeout=TIMEOUT)
try:
result = response.json()
except requests.JSONDecodeError as e:
raise RuntimeError(
f"Error decoding JSON from {request_url}. Text response: {response.text}"
) from e
result = sorted(
[k.lstrip("/").replace("--", "/") for k in result.keys() if "--" in k]
)
return result
def get_completions(
model: str,
prompt: str,
use_prompt_format: bool = True,
version: str = "",
) -> Dict[str, Union[str, float, int]]:
"""Get completions from Aviary models."""
backend = AviaryBackend.from_env()
url = backend.backend_url + model.replace("/", "--") + "/" + version + "query"
response = requests.post(
url,
headers=backend.header,
json={"prompt": prompt, "use_prompt_format": use_prompt_format},
timeout=TIMEOUT,
)
try:
return response.json()
except requests.JSONDecodeError as e:
raise RuntimeError(
f"Error decoding JSON from {url}. Text response: {response.text}"
) from e
[docs]class Aviary(LLM):
"""Allow you to use an Aviary.
Aviary is a backend for hosted models. You can
find out more about aviary at
http://github.com/ray-project/aviary
To get a list of the models supported on an
aviary, follow the instructions on the web site to
install the aviary CLI and then use:
`aviary models`
AVIARY_URL and AVIARY_TOKEN environment variables must be set.
Example:
.. code-block:: python
from langchain.llms import Aviary
os.environ["AVIARY_URL"] = "<URL>"
os.environ["AVIARY_TOKEN"] = "<TOKEN>"
light = Aviary(model='amazon/LightGPT')
output = light('How do you make fried rice?')
"""
model: str = "amazon/LightGPT"
aviary_url: Optional[str] = None
aviary_token: Optional[str] = None
# If True the prompt template for the model will be ignored.
use_prompt_format: bool = True
# API version to use for Aviary
version: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
aviary_url = get_from_dict_or_env(values, "aviary_url", "AVIARY_URL")
aviary_token = get_from_dict_or_env(values, "aviary_token", "AVIARY_TOKEN")
# Set env variables for the aviary sdk
os.environ["AVIARY_URL"] = aviary_url
os.environ["AVIARY_TOKEN"] = aviary_token
try:
aviary_models = get_models()
except requests.exceptions.RequestException as e:
raise ValueError(e)
model = values.get("model")
if model and model not in aviary_models:
raise ValueError(f"{aviary_url} does not support model {values['model']}.")
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model_name": self.model,
"aviary_url": self.aviary_url,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return f"aviary-{self.model.replace('/', '-')}"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Aviary
Args:
prompt: The prompt to pass into the model.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = aviary("Tell me a joke.")
"""
kwargs = {"use_prompt_format": self.use_prompt_format}
if self.version:
kwargs["version"] = self.version
output = get_completions(
model=self.model,
prompt=prompt,
**kwargs,
)
text = cast(str, output["generated_text"])
if stop:
text = enforce_stop_tokens(text, stop)
return text
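A usage sketch for the Aviary wrapper above (not part of the library source). It assumes a reachable Aviary backend; the URL and token are placeholders, and constructing the wrapper will call get_models() against that backend to validate the model name:
import os
from langchain.llms import Aviary

os.environ["AVIARY_URL"] = "http://localhost:8000"  # placeholder backend URL
os.environ["AVIARY_TOKEN"] = ""                     # optional bearer token

llm = Aviary(model="amazon/LightGPT")
print(llm("How do you make fried rice?", stop=["\n\n"]))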
https://api.python.langchain.com/en/latest/_modules/langchain/llms/aviary.html
Source code for langchain.llms.openllm
"""Wrapper around OpenLLM APIs."""
from __future__ import annotations
import copy
import json
import logging
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Literal,
Optional,
TypedDict,
Union,
overload,
)
from pydantic import PrivateAttr
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import LLM
if TYPE_CHECKING:
import openllm
ServerType = Literal["http", "grpc"]
class IdentifyingParams(TypedDict):
model_name: str
model_id: Optional[str]
server_url: Optional[str]
server_type: Optional[ServerType]
embedded: bool
llm_kwargs: Dict[str, Any]
logger = logging.getLogger(__name__)
[docs]class OpenLLM(LLM):
"""Wrapper for accessing OpenLLM, supporting both in-process model
instance and remote OpenLLM servers.
To use, you should have the openllm library installed:
.. code-block:: bash
pip install openllm
Learn more at: https://github.com/bentoml/openllm
Example running an LLM model locally managed by OpenLLM:
.. code-block:: python
from langchain.llms import OpenLLM
llm = OpenLLM(
model_name='flan-t5',
model_id='google/flan-t5-large',
)
llm("What is the difference between a duck and a goose?")
For all available supported models, you can run 'openllm models'.
If you have an OpenLLM server running, you can also use it remotely:
.. code-block:: python
from langchain.llms import OpenLLM
llm = OpenLLM(server_url='http://localhost:3000')
llm("What is the difference between a duck and a goose?")
"""
model_name: Optional[str] = None
"""Model name to use. See 'openllm models' for all available models."""
model_id: Optional[str] = None
"""Model Id to use. If not provided, will use the default model for the model name.
See 'openllm models' for all available model variants."""
server_url: Optional[str] = None
"""Optional server URL that currently runs a LLMServer with 'openllm start'."""
server_type: ServerType = "http"
"""Optional server type. Either 'http' or 'grpc'."""
embedded: bool = True
"""Initialize this LLM instance in current process by default. Should
only set to False when using in conjunction with BentoML Service."""
llm_kwargs: Dict[str, Any]
"""Key word arguments to be passed to openllm.LLM"""
_runner: Optional[openllm.LLMRunner] = PrivateAttr(default=None)
_client: Union[
openllm.client.HTTPClient, openllm.client.GrpcClient, None
] = PrivateAttr(default=None)
class Config:
extra = "forbid"
@overload
def __init__(
self,
model_name: Optional[str] = ...,
*,
model_id: Optional[str] = ...,
embedded: Literal[True, False] = ...,
**llm_kwargs: Any,
) -> None:
...
@overload
def __init__(
self,
*,
server_url: str = ...,
server_type: Literal["grpc", "http"] = ...,
**llm_kwargs: Any,
) -> None:
...
def __init__(
self,
model_name: Optional[str] = None,
*,
model_id: Optional[str] = None,
server_url: Optional[str] = None,
server_type: Literal["grpc", "http"] = "http",
embedded: bool = True,
**llm_kwargs: Any,
):
try:
import openllm
except ImportError as e:
raise ImportError(
"Could not import openllm. Make sure to install it with "
"'pip install openllm.'"
) from e
llm_kwargs = llm_kwargs or {}
if server_url is not None:
logger.debug("'server_url' is provided, returning an openllm.Client")
assert (
model_id is None and model_name is None
), "'server_url' and {'model_id', 'model_name'} are mutually exclusive"
client_cls = (
openllm.client.HTTPClient
if server_type == "http"
else openllm.client.GrpcClient
)
client = client_cls(server_url)
super().__init__(
**{
"server_url": server_url,
"server_type": server_type,
"llm_kwargs": llm_kwargs,
}
)
self._runner = None # type: ignore
self._client = client
else:
assert model_name is not None, "Must provide 'model_name' or 'server_url'"
# Since the LLMs are relatively huge, we don't actually want to convert the
# Runner with embedded when running the server. Instead, we only set
# init_local here so that LangChain users can still use the LLM
# in-process. For BentoML users, setting embedded=False is the expected
# behaviour to invoke the runners remotely.
runner = openllm.Runner(
model_name=model_name,
model_id=model_id,
init_local=embedded,
**llm_kwargs,
)
super().__init__(
**{
"model_name": model_name,
"model_id": model_id,
"embedded": embedded,
"llm_kwargs": llm_kwargs,
}
)
self._client = None # type: ignore
self._runner = runner
@property
def runner(self) -> openllm.LLMRunner:
"""
Get the underlying openllm.LLMRunner instance for integration with BentoML.
Example:
.. code-block:: python
llm = OpenLLM(
model_name='flan-t5',
model_id='google/flan-t5-large',
embedded=False,
)
tools = load_tools(["serpapi", "llm-math"], llm=llm)
agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION
)
svc = bentoml.Service("langchain-openllm", runners=[llm.runner])
@svc.api(input=Text(), output=Text())
def chat(input_text: str):
return agent.run(input_text)
"""
if self._runner is None:
raise ValueError("OpenLLM must be initialized locally with 'model_name'")
return self._runner
@property
def _identifying_params(self) -> IdentifyingParams:
"""Get the identifying parameters."""
if self._client is not None:
self.llm_kwargs.update(self._client.configuration)
model_name = self._client.model_name
model_id = self._client.model_id
else:
if self._runner is None:
raise ValueError("Runner must be initialized.")
model_name = self.model_name
model_id = self.model_id
try:
self.llm_kwargs.update(
json.loads(self._runner.identifying_params["configuration"])
)
except (TypeError, json.JSONDecodeError):
pass
return IdentifyingParams(
server_url=self.server_url,
server_type=self.server_type,
embedded=self.embedded,
llm_kwargs=self.llm_kwargs,
model_name=model_name,
model_id=model_id,
)
@property
def _llm_type(self) -> str:
return "openllm_client" if self._client else "openllm"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> str:
try:
import openllm
except ImportError as e:
raise ImportError(
"Could not import openllm. Make sure to install it with "
"'pip install openllm'."
) from e
copied = copy.deepcopy(self.llm_kwargs)
copied.update(kwargs)
config = openllm.AutoConfig.for_model(
self._identifying_params["model_name"], **copied
)
if self._client:
return self._client.query(prompt, **config.model_dump(flatten=True))
else:
assert self._runner is not None
return self._runner(prompt, **config.model_dump(flatten=True))
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
try:
import openllm
except ImportError as e:
raise ImportError(
"Could not import openllm. Make sure to install it with "
"'pip install openllm'."
) from e
copied = copy.deepcopy(self.llm_kwargs)
copied.update(kwargs)
config = openllm.AutoConfig.for_model(
self._identifying_params["model_name"], **copied
)
if self._client:
return await self._client.acall(
"generate", prompt, **config.model_dump(flatten=True)
)
else:
assert self._runner is not None
(
prompt,
generate_kwargs,
postprocess_kwargs,
) = self._runner.llm.sanitize_parameters(prompt, **kwargs)
generated_result = await self._runner.generate.async_run(
prompt, **generate_kwargs
)
return self._runner.llm.postprocess_generate(
prompt, generated_result, **postprocess_kwargs
)
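A sketch of wiring the OpenLLM wrapper into a simple LLMChain (not part of the library source). It assumes an OpenLLM server started locally with `openllm start`; the URL is a placeholder:
from langchain import LLMChain, PromptTemplate
from langchain.llms import OpenLLM

llm = OpenLLM(server_url="http://localhost:3000", server_type="http")
prompt = PromptTemplate.from_template("Q: {question}\nA:")
chain = LLMChain(llm=llm, prompt=prompt)
print(chain.run(question="What is the difference between a duck and a goose?"))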
https://api.python.langchain.com/en/latest/_modules/langchain/llms/openllm.html
Source code for langchain.llms.huggingface_endpoint
"""Wrapper around HuggingFace APIs."""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")
[docs]class HuggingFaceEndpoint(LLM):
"""Wrapper around HuggingFaceHub Inference Endpoints.
To use, you should have the ``huggingface_hub`` python package installed, and the
environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Only supports `text-generation`, `text2text-generation` and `summarization` for now.
Example:
.. code-block:: python
from langchain.llms import HuggingFaceEndpoint
endpoint_url = (
"https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud"
)
hf = HuggingFaceEndpoint(
endpoint_url=endpoint_url,
huggingfacehub_api_token="my-api-key"
)
"""
endpoint_url: str = ""
"""Endpoint URL to use."""
task: Optional[str] = None
"""Task to call the model with.
Should be a task that returns `generated_text` or `summary_text`."""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
huggingfacehub_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingfacehub_api_token = get_from_dict_or_env(
values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
)
try:
from huggingface_hub.hf_api import HfApi
try:
HfApi(
endpoint="https://huggingface.co", # Can be a Private Hub endpoint.
token=huggingfacehub_api_token,
).whoami()
except Exception as e:
raise ValueError(
"Could not authenticate with huggingface_hub. "
"Please check your API token."
) from e
except ImportError:
raise ValueError(
"Could not import huggingface_hub python package. "
"Please install it with `pip install huggingface_hub`."
)
values["huggingfacehub_api_token"] = huggingfacehub_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url, "task": self.task},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "huggingface_endpoint"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to HuggingFace Hub's inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = hf("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
# payload samples
params = {**_model_kwargs, **kwargs}
parameter_payload = {"inputs": prompt, "parameters": params}
# HTTP headers for authorization
headers = {
"Authorization": f"Bearer {self.huggingfacehub_api_token}",
"Content-Type": "application/json",
}
# send request
try:
response = requests.post(
self.endpoint_url, headers=headers, json=parameter_payload
)
except requests.exceptions.RequestException as e: # This is the correct syntax
raise ValueError(f"Error raised by inference endpoint: {e}")
generated_text = response.json()
if "error" in generated_text:
raise ValueError(
f"Error raised by inference API: {generated_text['error']}"
)
if self.task == "text-generation":
# Text generation return includes the starter text.
text = generated_text[0]["generated_text"][len(prompt) :]
elif self.task == "text2text-generation":
text = generated_text[0]["generated_text"]
elif self.task == "summarization":
text = generated_text[0]["summary_text"]
else:
raise ValueError(
f"Got invalid task {self.task}, "
f"currently only {VALID_TASKS} are supported"
)
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text
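A usage sketch for HuggingFaceEndpoint with a text-generation task (not part of the library source). The endpoint URL and token are placeholders, and the model_kwargs shown are common text-generation parameters that are forwarded as the "parameters" field of the payload:
from langchain.llms import HuggingFaceEndpoint

hf = HuggingFaceEndpoint(
    endpoint_url="https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud",  # placeholder
    huggingfacehub_api_token="my-api-key",  # placeholder
    task="text-generation",
    model_kwargs={"max_new_tokens": 64, "temperature": 0.7},
)
print(hf("Tell me a joke.", stop=["\n\n"]))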
https://api.python.langchain.com/en/latest/_modules/langchain/llms/huggingface_endpoint.html
Source code for langchain.llms.azureml_endpoint
"""Wrapper around AzureML Managed Online Endpoint API."""
import json
import urllib.request
from abc import abstractmethod
from typing import Any, Dict, List, Mapping, Optional
from pydantic import BaseModel, validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
class AzureMLEndpointClient(object):
"""Wrapper around AzureML Managed Online Endpoint Client."""
def __init__(
self, endpoint_url: str, endpoint_api_key: str, deployment_name: str
) -> None:
"""Initialize the class."""
if not endpoint_api_key:
raise ValueError("A key should be provided to invoke the endpoint")
self.endpoint_url = endpoint_url
self.endpoint_api_key = endpoint_api_key
self.deployment_name = deployment_name
def call(self, body: bytes) -> bytes:
"""call."""
# The azureml-model-deployment header will force the request to go to a
# specific deployment. Remove this header to have the request observe the
# endpoint traffic rules.
headers = {
"Content-Type": "application/json",
"Authorization": ("Bearer " + self.endpoint_api_key),
"azureml-model-deployment": self.deployment_name,
}
req = urllib.request.Request(self.endpoint_url, body, headers)
response = urllib.request.urlopen(req, timeout=50)
result = response.read()
return result
class ContentFormatterBase:
"""A handler class to transform request and response of
AzureML endpoint to match with required schema.
"""
"""
Example:
.. code-block:: python
class ContentFormatter(ContentFormatterBase):
content_type = "application/json"
accepts = "application/json"
def format_request_payload(
self,
prompt: str,
model_kwargs: Dict
) -> bytes:
input_str = json.dumps(
{
"inputs": {"input_string": [prompt]},
"parameters": model_kwargs,
}
)
return str.encode(input_str)
def format_response_payload(self, output: str) -> str:
response_json = json.loads(output)
return response_json[0]["0"]
"""
content_type: Optional[str] = "application/json"
"""The MIME type of the input data passed to the endpoint"""
accepts: Optional[str] = "application/json"
"""The MIME type of the response data returned form the endpoint"""
@abstractmethod
def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes:
"""Formats the request body according to the input schema of
the model. Returns bytes or seekable file like object in the
format specified in the content_type request header.
"""
@abstractmethod
def format_response_payload(self, output: bytes) -> str:
"""Formats the response body according to the output
schema of the model. Returns the data type that is
received from the response.
"""
class OSSContentFormatter(ContentFormatterBase):
"""Content handler for LLMs from the OSS catalog."""
def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes:
input_str = json.dumps(
{"inputs": {"input_string": [prompt]}, "parameters": model_kwargs}
)
return str.encode(input_str)
def format_response_payload(self, output: bytes) -> str:
response_json = json.loads(output)
return response_json[0]["0"]
class HFContentFormatter(ContentFormatterBase):
"""Content handler for LLMs from the HuggingFace catalog."""
def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes:
input_str = json.dumps({"inputs": [prompt], "parameters": model_kwargs})
return str.encode(input_str)
def format_response_payload(self, output: bytes) -> str:
response_json = json.loads(output)
return response_json[0][0]["generated_text"]
class DollyContentFormatter(ContentFormatterBase):
"""Content handler for the Dolly-v2-12b model"""
def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes:
input_str = json.dumps(
{"input_data": {"input_string": [prompt]}, "parameters": model_kwargs}
)
return str.encode(input_str)
def format_response_payload(self, output: bytes) -> str:
response_json = json.loads(output)
return response_json[0]
[docs]class AzureMLOnlineEndpoint(LLM, BaseModel):
"""Wrapper around Azure ML Hosted models using Managed Online Endpoints.
Example:
.. code-block:: python
azure_llm = AzureMLOnlineEndpoint(
endpoint_url="https://<your-endpoint>.<your_region>.inference.ml.azure.com/score",
endpoint_api_key="my-api-key",
deployment_name="my-deployment-name",
content_formatter=content_formatter,
)
""" # noqa: E501
endpoint_url: str = ""
"""URL of pre-existing Endpoint. Should be passed to constructor or specified as
env var `AZUREML_ENDPOINT_URL`."""
endpoint_api_key: str = ""
"""Authentication Key for Endpoint. Should be passed to constructor or specified as
env var `AZUREML_ENDPOINT_API_KEY`."""
deployment_name: str = ""
"""Deployment Name for Endpoint. Should be passed to constructor or specified as
env var `AZUREML_DEPLOYMENT_NAME`."""
http_client: Any = None #: :meta private:
content_formatter: Any = None
"""The content formatter that provides an input and output
transform function to handle formats between the LLM and
the endpoint"""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
@validator("http_client", always=True, allow_reuse=True)
@classmethod
def validate_client(cls, field_value: Any, values: Dict) -> AzureMLEndpointClient:
"""Validate that api key and python package exists in environment."""
endpoint_key = get_from_dict_or_env(
values, "endpoint_api_key", "AZUREML_ENDPOINT_API_KEY"
)
endpoint_url = get_from_dict_or_env(
values, "endpoint_url", "AZUREML_ENDPOINT_URL"
)
deployment_name = get_from_dict_or_env(
values, "deployment_name", "AZUREML_DEPLOYMENT_NAME"
)
http_client = AzureMLEndpointClient(endpoint_url, endpoint_key, deployment_name)
return http_client
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"deployment_name": self.deployment_name},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "azureml_endpoint"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any
) -> str:
"""Call out to an AzureML Managed Online endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = azureml_model("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
body = self.content_formatter.format_request_payload(prompt, _model_kwargs)
endpoint_response = self.http_client.call(body)
response = self.content_formatter.format_response_payload(endpoint_response)
return response
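A usage sketch combining AzureMLOnlineEndpoint with the OSSContentFormatter defined above (not part of the library source). The endpoint URL, key and deployment name are placeholders, and model_kwargs is passed to the formatter as the "parameters" field; the exact parameter names depend on the deployed model:
from langchain.llms.azureml_endpoint import AzureMLOnlineEndpoint, OSSContentFormatter

llm = AzureMLOnlineEndpoint(
    endpoint_url="https://my-endpoint.eastus.inference.ml.azure.com/score",  # placeholder
    endpoint_api_key="my-api-key",            # placeholder
    deployment_name="my-deployment-name",     # placeholder
    content_formatter=OSSContentFormatter(),
    model_kwargs={"temperature": 0.8, "max_new_tokens": 100},  # example parameters
)
print(llm("Tell me a joke."))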
https://api.python.langchain.com/en/latest/_modules/langchain/llms/azureml_endpoint.html
Source code for langchain.llms.promptlayer_openai
"""PromptLayer wrapper."""
import datetime
from typing import Any, List, Optional
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms import OpenAI, OpenAIChat
from langchain.schema import LLMResult
[docs]class PromptLayerOpenAI(OpenAI):
"""Wrapper around OpenAI large language models.
To use, you should have the ``openai`` and ``promptlayer`` python
package installed, and the environment variable ``OPENAI_API_KEY``
and ``PROMPTLAYER_API_KEY`` set with your openAI API key and
promptlayer key respectively.
All parameters that can be passed to the OpenAI LLM can also
be passed here. The PromptLayerOpenAI LLM adds two optional
parameters:
``pl_tags``: List of strings to tag the request with.
``return_pl_id``: If True, the PromptLayer request ID will be
returned in the ``generation_info`` field of the
``Generation`` object.
Example:
.. code-block:: python
from langchain.llms import PromptLayerOpenAI
openai = PromptLayerOpenAI(model_name="text-davinci-003")
"""
pl_tags: Optional[List[str]]
return_pl_id: Optional[bool] = False
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Call OpenAI generate and then call PromptLayer API to log the request."""
from promptlayer.utils import get_api_key, promptlayer_api_request
request_start_time = datetime.datetime.now().timestamp()
generated_responses = super()._generate(prompts, stop, run_manager)
request_end_time = datetime.datetime.now().timestamp()
for i in range(len(prompts)):
prompt = prompts[i]
generation = generated_responses.generations[i][0]
resp = {
"text": generation.text,
"llm_output": generated_responses.llm_output,
}
params = {**self._identifying_params, **kwargs}
pl_request_id = promptlayer_api_request(
"langchain.PromptLayerOpenAI",
"langchain",
[prompt],
params,
self.pl_tags,
resp,
request_start_time,
request_end_time,
get_api_key(),
return_pl_id=self.return_pl_id,
)
if self.return_pl_id:
if generation.generation_info is None or not isinstance(
generation.generation_info, dict
):
generation.generation_info = {}
generation.generation_info["pl_request_id"] = pl_request_id
return generated_responses
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
from promptlayer.utils import get_api_key, promptlayer_api_request_async
request_start_time = datetime.datetime.now().timestamp()
generated_responses = await super()._agenerate(prompts, stop, run_manager)
request_end_time = datetime.datetime.now().timestamp()
for i in range(len(prompts)):
prompt = prompts[i]
generation = generated_responses.generations[i][0]
resp = {
"text": generation.text,
"llm_output": generated_responses.llm_output,
}
params = {**self._identifying_params, **kwargs}
pl_request_id = await promptlayer_api_request_async(
"langchain.PromptLayerOpenAI.async",
"langchain",
[prompt],
params,
self.pl_tags,
resp,
request_start_time,
request_end_time,
get_api_key(),
return_pl_id=self.return_pl_id,
)
if self.return_pl_id:
if generation.generation_info is None or not isinstance(
generation.generation_info, dict
):
generation.generation_info = {}
generation.generation_info["pl_request_id"] = pl_request_id
return generated_responses
[docs]class PromptLayerOpenAIChat(OpenAIChat):
"""Wrapper around OpenAI large language models.
To use, you should have the ``openai`` and ``promptlayer`` python
package installed, and the environment variable ``OPENAI_API_KEY``
and ``PROMPTLAYER_API_KEY`` set with your openAI API key and
promptlayer key respectively.
All parameters that can be passed to the OpenAIChat LLM can also
be passed here. The PromptLayerOpenAIChat adds two optional
parameters:
``pl_tags``: List of strings to tag the request with.
``return_pl_id``: If True, the PromptLayer request ID will be
returned in the ``generation_info`` field of the
``Generation`` object.
Example:
.. code-block:: python
from langchain.llms import PromptLayerOpenAIChat
openaichat = PromptLayerOpenAIChat(model_name="gpt-3.5-turbo")
"""
pl_tags: Optional[List[str]]
return_pl_id: Optional[bool] = False
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Call OpenAI generate and then call PromptLayer API to log the request."""
from promptlayer.utils import get_api_key, promptlayer_api_request
request_start_time = datetime.datetime.now().timestamp()
generated_responses = super()._generate(prompts, stop, run_manager)
request_end_time = datetime.datetime.now().timestamp()
for i in range(len(prompts)):
prompt = prompts[i]
generation = generated_responses.generations[i][0]
resp = {
"text": generation.text,
"llm_output": generated_responses.llm_output,
}
params = {**self._identifying_params, **kwargs}
pl_request_id = promptlayer_api_request(
"langchain.PromptLayerOpenAIChat",
"langchain",
[prompt],
params,
self.pl_tags,
resp,
request_start_time,
request_end_time,
get_api_key(),
return_pl_id=self.return_pl_id,
)
if self.return_pl_id:
if generation.generation_info is None or not isinstance(
generation.generation_info, dict
):
generation.generation_info = {}
generation.generation_info["pl_request_id"] = pl_request_id
return generated_responses
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
from promptlayer.utils import get_api_key, promptlayer_api_request_async
request_start_time = datetime.datetime.now().timestamp()
generated_responses = await super()._agenerate(prompts, stop, run_manager)
request_end_time = datetime.datetime.now().timestamp()
for i in range(len(prompts)):
prompt = prompts[i]
generation = generated_responses.generations[i][0]
resp = {
"text": generation.text,
"llm_output": generated_responses.llm_output,
}
params = {**self._identifying_params, **kwargs}
pl_request_id = await promptlayer_api_request_async(
"langchain.PromptLayerOpenAIChat.async",
"langchain",
[prompt],
params,
self.pl_tags,
resp,
request_start_time,
request_end_time,
get_api_key(),
return_pl_id=self.return_pl_id,
)
if self.return_pl_id:
if generation.generation_info is None or not isinstance(
generation.generation_info, dict
):
generation.generation_info = {}
generation.generation_info["pl_request_id"] = pl_request_id
return generated_responses
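A usage sketch for PromptLayerOpenAI showing how the PromptLayer request id surfaces in generation_info when return_pl_id=True (not part of the library source). It assumes the openai and promptlayer packages are installed; both API keys are placeholders:
import os
from langchain.llms import PromptLayerOpenAI

os.environ["OPENAI_API_KEY"] = "sk-..."        # placeholder
os.environ["PROMPTLAYER_API_KEY"] = "pl-..."   # placeholder

llm = PromptLayerOpenAI(model_name="text-davinci-003", pl_tags=["demo"], return_pl_id=True)
result = llm.generate(["Tell me a joke."])
generation = result.generations[0][0]
print(generation.text)
print(generation.generation_info["pl_request_id"])  # present because return_pl_id=True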
https://api.python.langchain.com/en/latest/_modules/langchain/llms/promptlayer_openai.html
Source code for langchain.llms.anyscale
"""Wrapper around Anyscale"""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
[docs]class Anyscale(LLM):
"""Wrapper around Anyscale Services.
To use, you should have the environment variables ``ANYSCALE_SERVICE_URL``,
``ANYSCALE_SERVICE_ROUTE`` and ``ANYSCALE_SERVICE_TOKEN`` set with your Anyscale
Service, or pass them as named parameters to the constructor.
Example:
.. code-block:: python
from langchain.llms import Anyscale
anyscale = Anyscale(anyscale_service_url="SERVICE_URL",
anyscale_service_route="SERVICE_ROUTE",
anyscale_service_token="SERVICE_TOKEN")
# Use Ray for distributed processing
import ray
prompt_list=[]
@ray.remote
def send_query(llm, prompt):
resp = llm(prompt)
return resp
futures = [send_query.remote(anyscale, prompt) for prompt in prompt_list]
results = ray.get(futures)
"""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model. Reserved for future use"""
anyscale_service_url: Optional[str] = None
anyscale_service_route: Optional[str] = None
anyscale_service_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
anyscale_service_url = get_from_dict_or_env(
values, "anyscale_service_url", "ANYSCALE_SERVICE_URL"
)
anyscale_service_route = get_from_dict_or_env(
values, "anyscale_service_route", "ANYSCALE_SERVICE_ROUTE"
)
anyscale_service_token = get_from_dict_or_env(
values, "anyscale_service_token", "ANYSCALE_SERVICE_TOKEN"
)
if anyscale_service_url.endswith("/"):
anyscale_service_url = anyscale_service_url[:-1]
if not anyscale_service_route.startswith("/"):
anyscale_service_route = "/" + anyscale_service_route
try:
anyscale_service_endpoint = f"{anyscale_service_url}/-/routes"
headers = {"Authorization": f"Bearer {anyscale_service_token}"}
requests.get(anyscale_service_endpoint, headers=headers)
except requests.exceptions.RequestException as e:
raise ValueError(e)
values["anyscale_service_url"] = anyscale_service_url
values["anyscale_service_route"] = anyscale_service_route
values["anyscale_service_token"] = anyscale_service_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"anyscale_service_url": self.anyscale_service_url,
"anyscale_service_route": self.anyscale_service_route,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "anyscale"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Anyscale Service endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = anyscale("Tell me a joke.")
"""
anyscale_service_endpoint = (
f"{self.anyscale_service_url}{self.anyscale_service_route}"
)
headers = {"Authorization": f"Bearer {self.anyscale_service_token}"}
body = {"prompt": prompt}
resp = requests.post(anyscale_service_endpoint, headers=headers, json=body)
if resp.status_code != 200:
raise ValueError(
f"Error returned by service, status code {resp.status_code}"
)
text = resp.text
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to the Anyscale service.
text = enforce_stop_tokens(text, stop)
return text
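A usage sketch for the Anyscale wrapper configured purely through environment variables (not part of the library source). It assumes a reachable Anyscale Service; the URL, route and token are placeholders, and constructing the wrapper probes the service's /-/routes endpoint:
import os
from langchain.llms import Anyscale

os.environ["ANYSCALE_SERVICE_URL"] = "https://my-service.anyscale.dev"  # placeholder
os.environ["ANYSCALE_SERVICE_ROUTE"] = "/generate"                      # placeholder
os.environ["ANYSCALE_SERVICE_TOKEN"] = "my-token"                       # placeholder

llm = Anyscale()  # values are picked up from the environment
print(llm("Tell me a joke.", stop=["\n"]))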
https://api.python.langchain.com/en/latest/_modules/langchain/llms/anyscale.html
Source code for langchain.llms.octoai_endpoint
"""Wrapper around OctoAI APIs."""
from typing import Any, Dict, List, Mapping, Optional
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
[docs]class OctoAIEndpoint(LLM):
"""Wrapper around OctoAI Inference Endpoints.
OctoAIEndpoint is a class to interact with OctoAI
Compute Service large language model endpoints.
To use, you should have the ``octoai`` python package installed, and the
environment variable ``OCTOAI_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.llms.octoai_endpoint import OctoAIEndpoint
OctoAIEndpoint(
octoai_api_token="octoai-api-key",
endpoint_url="https://mpt-7b-demo-kk0powt97tmb.octoai.cloud/generate",
model_kwargs={
"max_new_tokens": 200,
"temperature": 0.75,
"top_p": 0.95,
"repetition_penalty": 1,
"seed": None,
"stop": [],
},
)
"""
endpoint_url: Optional[str] = None
"""Endpoint URL to use."""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
octoai_api_token: Optional[str] = None
"""OCTOAI API Token"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator(allow_reuse=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
octoai_api_token = get_from_dict_or_env(
values, "octoai_api_token", "OCTOAI_API_TOKEN"
)
values["endpoint_url"] = get_from_dict_or_env(
values, "endpoint_url", "ENDPOINT_URL"
)
values["octoai_api_token"] = octoai_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "octoai_endpoint"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to OctoAI's inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
"""
_model_kwargs = self.model_kwargs or {}
# Prepare the payload JSON
parameter_payload = {"inputs": prompt, "parameters": _model_kwargs}
try:
# Initialize the OctoAI client
from octoai import client
octoai_client = client.Client(token=self.octoai_api_token)
# Send the request using the OctoAI client
resp_json = octoai_client.infer(self.endpoint_url, parameter_payload)
text = resp_json["generated_text"]
except Exception as e:
# Handle any errors raised by the inference endpoint
raise ValueError(f"Error raised by the inference endpoint: {e}") from e
if stop is not None:
# Apply stop tokens when making calls to OctoAI
text = enforce_stop_tokens(text, stop)
return text
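A usage sketch for OctoAIEndpoint (not part of the library source). It assumes the octoai python package is installed; the token and endpoint URL are placeholders, and model_kwargs is sent as the "parameters" field of the payload:
from langchain.llms.octoai_endpoint import OctoAIEndpoint

llm = OctoAIEndpoint(
    octoai_api_token="my-octoai-api-token",   # placeholder
    endpoint_url="https://mpt-7b-demo-kk0powt97tmb.octoai.cloud/generate",
    model_kwargs={"max_new_tokens": 128, "temperature": 0.75},
)
print(llm("Explain what an inference endpoint is in one sentence."))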
https://api.python.langchain.com/en/latest/_modules/langchain/llms/octoai_endpoint.html
Source code for langchain.llms.ctransformers
"""Wrapper around the C Transformers library."""
from typing import Any, Dict, Optional, Sequence
from pydantic import root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
[docs]class CTransformers(LLM):
"""Wrapper around the C Transformers LLM interface.
To use, you should have the ``ctransformers`` python package installed.
See https://github.com/marella/ctransformers
Example:
.. code-block:: python
from langchain.llms import CTransformers
llm = CTransformers(model="/path/to/ggml-gpt-2.bin", model_type="gpt2")
"""
client: Any #: :meta private:
model: str
"""The path to a model file or directory or the name of a Hugging Face Hub
model repo."""
model_type: Optional[str] = None
"""The model type."""
model_file: Optional[str] = None
"""The name of the model file in repo or directory."""
config: Optional[Dict[str, Any]] = None
"""The config parameters.
See https://github.com/marella/ctransformers#config"""
lib: Optional[str] = None
"""The path to a shared library or one of `avx2`, `avx`, `basic`."""
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model,
"model_type": self.model_type,
"model_file": self.model_file,
"config": self.config,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "ctransformers"
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that ``ctransformers`` package is installed."""
try:
from ctransformers import AutoModelForCausalLM
except ImportError:
raise ImportError(
"Could not import `ctransformers` package. "
"Please install it with `pip install ctransformers`"
)
config = values["config"] or {}
values["client"] = AutoModelForCausalLM.from_pretrained(
values["model"],
model_type=values["model_type"],
model_file=values["model_file"],
lib=values["lib"],
**config,
)
return values
def _call(
self,
prompt: str,
stop: Optional[Sequence[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Generate text from a prompt.
Args:
prompt: The prompt to generate text from.
stop: A list of sequences to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
response = llm("Tell me a joke.")
"""
text = []
_run_manager = run_manager or CallbackManagerForLLMRun.get_noop_manager()
for chunk in self.client(prompt, stop=stop, stream=True):
text.append(chunk)
_run_manager.on_llm_new_token(chunk, verbose=self.verbose)
return "".join(text)
https://api.python.langchain.com/en/latest/_modules/langchain/llms/ctransformers.html
Source code for langchain.llms.vertexai
"""Wrapper around Google VertexAI models."""
import asyncio
from concurrent.futures import Executor, ThreadPoolExecutor
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional
from pydantic import BaseModel, root_validator
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utilities.vertexai import (
init_vertexai,
raise_vertex_import_error,
)
if TYPE_CHECKING:
from vertexai.language_models._language_models import _LanguageModel
def is_codey_model(model_name: str) -> bool:
return "code" in model_name
class _VertexAICommon(BaseModel):
client: "_LanguageModel" = None #: :meta private:
model_name: str
"Model name to use."
temperature: float = 0.0
"Sampling temperature, it controls the degree of randomness in token selection."
max_output_tokens: int = 128
"Token limit determines the maximum amount of text output from one prompt."
top_p: float = 0.95
"Tokens are selected from most probable to least until the sum of their "
"probabilities equals the top-p value. Top-p is ignored for Codey models."
top_k: int = 40
"How the model selects tokens for output, the next token is selected from "
"among the top-k most probable tokens. Top-k is ignored for Codey models."
stop: Optional[List[str]] = None
"Optional list of stop words to use when generating."
project: Optional[str] = None
"The default GCP project to use when making Vertex API calls."
location: str = "us-central1"
"The default location to use when making API calls."
credentials: Any = None
"The default custom credentials (google.auth.credentials.Credentials) to use "
"when making API calls. If not provided, credentials will be ascertained from "
"the environment."
request_parallelism: int = 5
"The amount of parallelism allowed for requests issued to VertexAI models. "
"Default is 5."
task_executor: ClassVar[Optional[Executor]] = None
@property
def is_codey_model(self) -> bool:
return is_codey_model(self.model_name)
@property
def _default_params(self) -> Dict[str, Any]:
if self.is_codey_model:
return {
"temperature": self.temperature,
"max_output_tokens": self.max_output_tokens,
}
else:
return {
"temperature": self.temperature,
"max_output_tokens": self.max_output_tokens,
"top_k": self.top_k,
"top_p": self.top_p,
}
def _predict(
self, prompt: str, stop: Optional[List[str]] = None, **kwargs: Any
) -> str:
params = {**self._default_params, **kwargs}
res = self.client.predict(prompt, **params)
return self._enforce_stop_words(res.text, stop)
def _enforce_stop_words(self, text: str, stop: Optional[List[str]] = None) -> str:
if stop is None and self.stop is not None:
stop = self.stop
if stop:
return enforce_stop_tokens(text, stop)
return text
@property
def _llm_type(self) -> str:
return "vertexai"
@classmethod
def _get_task_executor(cls, request_parallelism: int = 5) -> Executor:
if cls.task_executor is None:
cls.task_executor = ThreadPoolExecutor(max_workers=request_parallelism)
return cls.task_executor
@classmethod
def _try_init_vertexai(cls, values: Dict) -> None:
allowed_params = ["project", "location", "credentials"]
params = {k: v for k, v in values.items() if k in allowed_params}
init_vertexai(**params)
return None
[docs]class VertexAI(_VertexAICommon, LLM):
"""Wrapper around Google Vertex AI large language models."""
model_name: str = "text-bison"
"The name of the Vertex AI large language model."
tuned_model_name: Optional[str] = None
"The name of a tuned model. If provided, model_name is ignored."
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in environment."""
cls._try_init_vertexai(values)
tuned_model_name = values.get("tuned_model_name")
model_name = values["model_name"]
try:
if tuned_model_name or not is_codey_model(model_name):
from vertexai.preview.language_models import TextGenerationModel
if tuned_model_name:
values["client"] = TextGenerationModel.get_tuned_model(
tuned_model_name
)
else:
values["client"] = TextGenerationModel.from_pretrained(model_name)
else:
from vertexai.preview.language_models import CodeGenerationModel
values["client"] = CodeGenerationModel.from_pretrained(model_name)
except ImportError:
raise_vertex_import_error()
return values
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call Vertex model to get predictions based on the prompt.
Args:
prompt: The prompt to pass into the model.
stop: A list of stop words (optional).
run_manager: A callback manager for async interaction with LLMs.
Returns:
The string generated by the model.
"""
return await asyncio.wrap_future(
self._get_task_executor().submit(self._predict, prompt, stop)
)
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call Vertex model to get predictions based on the prompt.
Args:
prompt: The prompt to pass into the model.
stop: A list of stop words (optional).
run_manager: A CallbackManager for the LLM run, optional.
Returns:
The string generated by the model.
"""
return self._predict(prompt, stop, **kwargs)
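A usage sketch for the VertexAI wrapper (not part of the library source). It assumes the Vertex AI SDK is installed and Google Cloud credentials are available in the environment; the project id is a placeholder:
from langchain.llms import VertexAI

llm = VertexAI(
    model_name="text-bison",
    project="my-gcp-project",   # placeholder GCP project
    location="us-central1",
    temperature=0.2,
    max_output_tokens=256,
)
print(llm("Write a haiku about mountains."))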
https://api.python.langchain.com/en/latest/_modules/langchain/llms/vertexai.html
Source code for langchain.llms.ai21
"""Wrapper around AI21 APIs."""
from typing import Any, Dict, List, Optional
import requests
from pydantic import BaseModel, Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
class AI21PenaltyData(BaseModel):
"""Parameters for AI21 penalty data."""
scale: int = 0
applyToWhitespaces: bool = True
applyToPunctuations: bool = True
applyToNumbers: bool = True
applyToStopwords: bool = True
applyToEmojis: bool = True
[docs]class AI21(LLM):
"""Wrapper around AI21 large language models.
To use, you should have the environment variable ``AI21_API_KEY``
set with your API key.
Example:
.. code-block:: python
from langchain.llms import AI21
ai21 = AI21(model="j2-jumbo-instruct")
"""
model: str = "j2-jumbo-instruct"
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
maxTokens: int = 256
"""The maximum number of tokens to generate in the completion."""
minTokens: int = 0
"""The minimum number of tokens to generate in the completion."""
topP: float = 1.0
"""Total probability mass of tokens to consider at each step."""
presencePenalty: AI21PenaltyData = AI21PenaltyData()
"""Penalizes repeated tokens."""
countPenalty: AI21PenaltyData = AI21PenaltyData()
"""Penalizes repeated tokens according to count."""
frequencyPenalty: AI21PenaltyData = AI21PenaltyData()
"""Penalizes repeated tokens according to frequency."""
numResults: int = 1
"""How many completions to generate for each prompt."""
logitBias: Optional[Dict[str, float]] = None
"""Adjust the probability of specific tokens being generated."""
ai21_api_key: Optional[str] = None
stop: Optional[List[str]] = None
base_url: Optional[str] = None
"""Base url to use, if None decides based on model name."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
ai21_api_key = get_from_dict_or_env(values, "ai21_api_key", "AI21_API_KEY")
values["ai21_api_key"] = ai21_api_key
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling AI21 API."""
return {
"temperature": self.temperature,
"maxTokens": self.maxTokens,
"minTokens": self.minTokens,
"topP": self.topP,
"presencePenalty": self.presencePenalty.dict(),
"countPenalty": self.countPenalty.dict(),
"frequencyPenalty": self.frequencyPenalty.dict(),
"numResults": self.numResults,
"logitBias": self.logitBias,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "ai21"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to AI21's complete endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = ai21("Tell me a joke.")
"""
if self.stop is not None and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop is not None:
stop = self.stop
elif stop is None:
stop = []
if self.base_url is not None:
base_url = self.base_url
else:
if self.model in ("j1-grande-instruct",):
base_url = "https://api.ai21.com/studio/v1/experimental"
else:
base_url = "https://api.ai21.com/studio/v1"
params = {**self._default_params, **kwargs}
response = requests.post(
url=f"{base_url}/{self.model}/complete",
headers={"Authorization": f"Bearer {self.ai21_api_key}"},
json={"prompt": prompt, "stopSequences": stop, **params},
)
if response.status_code != 200:
optional_detail = response.json().get("error")
raise ValueError(
f"AI21 /complete call failed with status code {response.status_code}."
f" Details: {optional_detail}"
)
response_json = response.json()
return response_json["completions"][0]["data"]["text"]
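A usage sketch for the AI21 wrapper (not part of the library source). The API key is a placeholder; note that the parameter names (maxTokens, topP, and so on) mirror the AI21 REST API rather than the usual snake_case:
from langchain.llms import AI21

llm = AI21(
    ai21_api_key="my-api-key",   # or set the AI21_API_KEY environment variable
    model="j2-jumbo-instruct",
    temperature=0.7,
    maxTokens=100,
)
print(llm("Tell me a joke.", stop=["##"]))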
https://api.python.langchain.com/en/latest/_modules/langchain/llms/ai21.html
Source code for langchain.llms.pipelineai
"""Wrapper around Pipeline Cloud API."""
import logging
from typing import Any, Dict, List, Mapping, Optional
from pydantic import BaseModel, Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
[docs]class PipelineAI(LLM, BaseModel):
"""Wrapper around PipelineAI large language models.
To use, you should have the ``pipeline-ai`` python package installed,
and the environment variable ``PIPELINE_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain import PipelineAI
pipeline = PipelineAI(pipeline_key="")
"""
pipeline_key: str = ""
"""The id or tag of the target pipeline"""
pipeline_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any pipeline parameters valid for `create` call not
explicitly specified."""
pipeline_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic config."""
extra = Extra.forbid
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("pipeline_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transfered to pipeline_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["pipeline_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
pipeline_api_key = get_from_dict_or_env(
values, "pipeline_api_key", "PIPELINE_API_KEY"
)
values["pipeline_api_key"] = pipeline_api_key
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"pipeline_key": self.pipeline_key},
**{"pipeline_kwargs": self.pipeline_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "pipeline_ai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to Pipeline Cloud endpoint."""
try:
from pipeline import PipelineCloud
except ImportError:
raise ValueError(
"Could not import pipeline-ai python package. "
"Please install it with `pip install pipeline-ai`."
)
client = PipelineCloud(token=self.pipeline_api_key)
params = self.pipeline_kwargs or {}
params = {**params, **kwargs}
run = client.run_pipeline(self.pipeline_key, [prompt, params])
try:
text = run.result_preview[0][0]
except AttributeError:
raise AttributeError(
f"A pipeline run should have a `result_preview` attribute."
f"Run was: {run}"
)
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the pipeline parameters
text = enforce_stop_tokens(text, stop)
return text
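A usage sketch for PipelineAI (not part of the library source). It assumes the pipeline-ai package is installed; the API key and pipeline tag below are placeholders for your own Pipeline Cloud resources:
import os
from langchain import PipelineAI

os.environ["PIPELINE_API_KEY"] = "my-api-key"   # placeholder

llm = PipelineAI(pipeline_key="public/gpt-j:base")  # placeholder pipeline id or tag
print(llm("Tell me a joke."))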
https://api.python.langchain.com/en/latest/_modules/langchain/llms/pipelineai.html
Source code for langchain.llms.human
from typing import Any, Callable, List, Mapping, Optional
from pydantic import Field
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
def _display_prompt(prompt: str) -> None:
"""Displays the given prompt to the user."""
print(f"\n{prompt}")
def _collect_user_input(
separator: Optional[str] = None, stop: Optional[List[str]] = None
) -> str:
"""Collects and returns user input as a single string."""
separator = separator or "\n"
lines = []
while True:
line = input()
if not line:
break
lines.append(line)
if stop and any(seq in line for seq in stop):
break
# Combine all lines into a single string
multi_line_input = separator.join(lines)
return multi_line_input
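# Illustration: with stop=["q"] and separator="\n", typing "hello" then "quit" ends the
# loop at "quit" (it contains "q") and returns "hello\nquit"; the stop sequence itself is
# trimmed afterwards by enforce_stop_tokens in HumanInputLLM._call.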
[docs]class HumanInputLLM(LLM):
"""
A LLM wrapper which returns user input as the response.
"""
input_func: Callable = Field(default_factory=lambda: _collect_user_input)
prompt_func: Callable[[str], None] = Field(default_factory=lambda: _display_prompt)
separator: str = "\n"
input_kwargs: Mapping[str, Any] = {}
prompt_kwargs: Mapping[str, Any] = {}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""
Returns an empty dictionary as there are no identifying parameters.
"""
return {}
@property
def _llm_type(self) -> str:
"""Returns the type of LLM."""
return "human-input"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""
Displays the prompt to the user and returns their input as a response.
Args:
prompt (str): The prompt to be displayed to the user.
stop (Optional[List[str]]): A list of stop strings.
run_manager (Optional[CallbackManagerForLLMRun]): Currently not used.
Returns:
str: The user's input as a response.
"""
self.prompt_func(prompt, **self.prompt_kwargs)
user_input = self.input_func(
separator=self.separator, stop=stop, **self.input_kwargs
)
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the human themselves
user_input = enforce_stop_tokens(user_input, stop)
return user_input
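A minimal usage sketch for the class above; the banner printed around the prompt is purely illustrative and not required by the class:

from langchain.llms.human import HumanInputLLM

# Print the prompt between visible markers so it is easy to spot in a terminal session.
llm = HumanInputLLM(
    prompt_func=lambda prompt: print(f"\n====PROMPT====\n{prompt}\n====END====")
)
response = llm("What is the capital of France?", stop=["\n"])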
Source code for langchain.llms.bedrock
import json
from typing import Any, Dict, List, Mapping, Optional
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
class LLMInputOutputAdapter:
"""Adapter class to prepare the inputs from Langchain to a format
that LLM model expects. Also, provides helper function to extract
the generated text from the model response."""
@classmethod
def prepare_input(
cls, provider: str, prompt: str, model_kwargs: Dict[str, Any]
) -> Dict[str, Any]:
input_body = {**model_kwargs}
if provider == "anthropic" or provider == "ai21":
input_body["prompt"] = prompt
elif provider == "amazon":
input_body = dict()
input_body["inputText"] = prompt
input_body["textGenerationConfig"] = {**model_kwargs}
else:
input_body["inputText"] = prompt
if provider == "anthropic" and "max_tokens_to_sample" not in input_body:
input_body["max_tokens_to_sample"] = 50
return input_body
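# Illustration of the provider-specific shapes produced above:
#   prepare_input("anthropic", "Hi", {})  -> {"prompt": "Hi", "max_tokens_to_sample": 50}
#   prepare_input("amazon", "Hi", {"maxTokenCount": 64})
#       -> {"inputText": "Hi", "textGenerationConfig": {"maxTokenCount": 64}}
# ("maxTokenCount" is an assumed example parameter; model_kwargs are passed through unchanged)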
@classmethod
def prepare_output(cls, provider: str, response: Any) -> str:
if provider == "anthropic":
response_body = json.loads(response.get("body").read().decode())
return response_body.get("completion")
else:
response_body = json.loads(response.get("body").read())
if provider == "ai21":
return response_body.get("completions")[0].get("data").get("text")
else:
return response_body.get("results")[0].get("outputText")
[docs]class Bedrock(LLM):
"""LLM provider to invoke Bedrock models.
To authenticate, the AWS client uses the following methods to
automatically load credentials:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific credential profile should be used, you must pass
the name of the profile from the ~/.aws/credentials file that is to be used.
Make sure the credentials / roles used have the required policies to
access the Bedrock service.
"""
"""
Example:
.. code-block:: python
from bedrock_langchain.bedrock_llm import BedrockLLM
llm = BedrockLLM(
credentials_profile_name="default",
model_id="amazon.titan-tg1-large"
)
"""
client: Any #: :meta private:
region_name: Optional[str] = None
"""The aws region e.g., `us-west-2`. Fallsback to AWS_DEFAULT_REGION env variable
or region specified in ~/.aws/config in case it is not provided here.
"""
credentials_profile_name: Optional[str] = None
"""The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
has either access keys or role information specified.
If not specified, the default credential profile or, if on an EC2 instance,
credentials from IMDS will be used.
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
"""
model_id: str
"""Id of the model to call, e.g., amazon.titan-tg1-large, this is
equivalent to the modelId property in the list-foundation-models api"""
model_kwargs: Optional[Dict] = None
"""Key word arguments to pass to the model."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that AWS credentials to and python package exists in environment."""
# Skip creating new client if passed in constructor
if values["client"] is not None:
return values
try:
import boto3
if values["credentials_profile_name"] is not None:
session = boto3.Session(profile_name=values["credentials_profile_name"])
else:
# use default credentials
session = boto3.Session()
client_params = {}
if values["region_name"]:
client_params["region_name"] = values["region_name"]
values["client"] = session.client("bedrock", **client_params)
except ImportError:
raise ModuleNotFoundError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
"profile name are valid."
) from e
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "amazon_bedrock"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Bedrock service model.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = llm("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
provider = self.model_id.split(".")[0]
params = {**_model_kwargs, **kwargs}
input_body = LLMInputOutputAdapter.prepare_input(provider, prompt, params)
body = json.dumps(input_body)
accept = "application/json"
contentType = "application/json"
try:
response = self.client.invoke_model(
body=body, modelId=self.model_id, accept=accept, contentType=contentType
)
text = LLMInputOutputAdapter.prepare_output(provider, response)
except Exception as e:
raise ValueError(f"Error raised by bedrock service: {e}")
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
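A minimal usage sketch for the class above, assuming boto3 is installed and the named credential profile can access the Bedrock service; the model id and kwargs below are placeholders:

from langchain.llms.bedrock import Bedrock

llm = Bedrock(
    credentials_profile_name="default",    # profile from ~/.aws/credentials
    model_id="amazon.titan-tg1-large",     # any id returned by the list-foundation-models API
    model_kwargs={"temperature": 0.5},     # assumed provider parameter
)
print(llm("Tell me a joke."))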
Source code for langchain.llms.databricks
import os
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, List, Optional
import requests
from pydantic import BaseModel, Extra, Field, PrivateAttr, root_validator, validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
__all__ = ["Databricks"]
class _DatabricksClientBase(BaseModel, ABC):
"""A base JSON API client that talks to Databricks."""
api_url: str
api_token: str
def post_raw(self, request: Any) -> Any:
headers = {"Authorization": f"Bearer {self.api_token}"}
response = requests.post(self.api_url, headers=headers, json=request)
# TODO: error handling and automatic retries
if not response.ok:
raise ValueError(f"HTTP {response.status_code} error: {response.text}")
return response.json()
@abstractmethod
def post(self, request: Any) -> Any:
...
class _DatabricksServingEndpointClient(_DatabricksClientBase):
"""An API client that talks to a Databricks serving endpoint."""
host: str
endpoint_name: str
@root_validator(pre=True)
def set_api_url(cls, values: Dict[str, Any]) -> Dict[str, Any]:
if "api_url" not in values:
host = values["host"]
endpoint_name = values["endpoint_name"]
api_url = f"https://{host}/serving-endpoints/{endpoint_name}/invocations"
values["api_url"] = api_url
return values
def post(self, request: Any) -> Any:
# See https://docs.databricks.com/machine-learning/model-serving/score-model-serving-endpoints.html
wrapped_request = {"dataframe_records": [request]}
response = self.post_raw(wrapped_request)["predictions"]
# For a single-record query, the result is not a list.
if isinstance(response, list):
response = response[0]
return response
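# Illustration (hypothetical values): a request {"prompt": "Hello", "stop": ["\n"]} is sent
# as {"dataframe_records": [{"prompt": "Hello", "stop": ["\n"]}]} to
# https://<host>/serving-endpoints/<endpoint_name>/invocations, and for a single-record
# query the first element of the returned "predictions" list is unwrapped.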
class _DatabricksClusterDriverProxyClient(_DatabricksClientBase):
"""An API client that talks to a Databricks cluster driver proxy app."""
host: str
cluster_id: str
cluster_driver_port: str
@root_validator(pre=True)
def set_api_url(cls, values: Dict[str, Any]) -> Dict[str, Any]:
if "api_url" not in values:
host = values["host"]
cluster_id = values["cluster_id"]
port = values["cluster_driver_port"]
api_url = f"https://{host}/driver-proxy-api/o/0/{cluster_id}/{port}"
values["api_url"] = api_url
return values
def post(self, request: Any) -> Any:
return self.post_raw(request)
def get_repl_context() -> Any:
"""Gets the notebook REPL context if running inside a Databricks notebook.
Raises an error if not running inside a Databricks notebook.
"""
try:
from dbruntime.databricks_repl_context import get_context
return get_context()
except ImportError:
raise ValueError(
"Cannot access dbruntime, not running inside a Databricks notebook."
)
def get_default_host() -> str:
"""Gets the default Databricks workspace hostname.
Raises an error if the hostname cannot be automatically determined.
"""
host = os.getenv("DATABRICKS_HOST")
if not host:
try:
host = get_repl_context().browserHostName
if not host:
raise ValueError("context doesn't contain browserHostName.")
except Exception as e:
raise ValueError(
"host was not set and cannot be automatically inferred. Set "
f"environment variable 'DATABRICKS_HOST'. Received error: {e}"
)
# TODO: support Databricks CLI profile
# str.lstrip removes a set of characters, not a prefix, so strip the scheme explicitly.
if host.startswith("https://"):
    host = host[len("https://"):]
elif host.startswith("http://"):
    host = host[len("http://"):]
host = host.rstrip("/")
return host
def get_default_api_token() -> str:
"""Gets the default Databricks personal access token.
Raises an error if the token cannot be automatically determined.
"""
if api_token := os.getenv("DATABRICKS_TOKEN"):
return api_token
try:
api_token = get_repl_context().apiToken
if not api_token:
raise ValueError("context doesn't contain apiToken.")
except Exception as e:
raise ValueError(
"api_token was not set and cannot be automatically inferred. Set "
f"environment variable 'DATABRICKS_TOKEN'. Received error: {e}"
)
# TODO: support Databricks CLI profile
return api_token
[docs]class Databricks(LLM):
"""LLM wrapper around a Databricks serving endpoint or a cluster driver proxy app.
It supports two endpoint types:
* **Serving endpoint** (recommended for both production and development).
We assume that an LLM was registered and deployed to a serving endpoint.
To wrap it as an LLM you must have "Can Query" permission to the endpoint.
Set ``endpoint_name`` accordingly and do not set ``cluster_id`` and
``cluster_driver_port``.
The expected model signature is:
* inputs::
[{"name": "prompt", "type": "string"},
{"name": "stop", "type": "list[string]"}]
* outputs: ``[{"type": "string"}]``
* **Cluster driver proxy app** (recommended for interactive development).
One can load an LLM on a Databricks interactive cluster and start a local HTTP
server on the driver node to serve the model at ``/`` using HTTP POST method
with JSON input/output.
Please use a port number between ``[3000, 8000]`` and let the server listen to
the driver IP address or simply ``0.0.0.0`` instead of localhost only.
To wrap it as an LLM you must have "Can Attach To" permission to the cluster.
Set ``cluster_id`` and ``cluster_driver_port`` and do not set ``endpoint_name``.
The expected server schema (using JSON schema) is:
* inputs::
{"type": "object",
"properties": {
"prompt": {"type": "string"},
"stop": {"type": "array", "items": {"type": "string"}}},
"required": ["prompt"]}`
* outputs: ``{"type": "string"}``
If the endpoint model signature is different or you want to set extra params,
you can use `transform_input_fn` and `transform_output_fn` to apply necessary
transformations before and after the query.
"""
host: str = Field(default_factory=get_default_host)
"""Databricks workspace hostname.
If not provided, the default value is determined by
* the ``DATABRICKS_HOST`` environment variable if present, or
* the hostname of the current Databricks workspace if running inside
a Databricks notebook attached to an interactive cluster in "single user"
or "no isolation shared" mode.
"""
api_token: str = Field(default_factory=get_default_api_token)
"""Databricks personal access token.
If not provided, the default value is determined by
* the ``DATABRICKS_TOKEN`` environment variable if present, or
* an automatically generated temporary token if running inside a Databricks
notebook attached to an interactive cluster in "single user" or
"no isolation shared" mode.
"""
endpoint_name: Optional[str] = None
"""Name of the model serving endpont.
You must specify the endpoint name to connect to a model serving endpoint.
You must not set both ``endpoint_name`` and ``cluster_id``.
"""
cluster_id: Optional[str] = None
"""ID of the cluster if connecting to a cluster driver proxy app.
If neither ``endpoint_name`` nor ``cluster_id`` is provided and the code runs
inside a Databricks notebook attached to an interactive cluster in "single user"
or "no isolation shared" mode, the current cluster ID is used as default.
You must not set both ``endpoint_name`` and ``cluster_id``.
"""
cluster_driver_port: Optional[str] = None