Columns: id (string, 14-16 chars), text (string, 36-2.73k chars), source (string, 49-117 chars)
88484d1ed57e-1
f16_kv: bool = Field(True, alias="f16_kv") """Use half-precision for key/value cache.""" logits_all: bool = Field(False, alias="logits_all") """Return logits for all tokens, not just the last token.""" vocab_only: bool = Field(False, alias="vocab_only") """Only load the vocabulary, no weights.""" use_mlock: bool = Field(False, alias="use_mlock") """Force system to keep model in RAM.""" n_threads: Optional[int] = Field(None, alias="n_threads") """Number of threads to use. If None, the number of threads is automatically determined.""" n_batch: Optional[int] = Field(8, alias="n_batch") """Number of tokens to process in parallel. Should be a number between 1 and n_ctx.""" n_gpu_layers: Optional[int] = Field(None, alias="n_gpu_layers") """Number of layers to be loaded into gpu memory. Default None.""" suffix: Optional[str] = Field(None) """A suffix to append to the generated text. If None, no suffix is appended.""" max_tokens: Optional[int] = 256 """The maximum number of tokens to generate.""" temperature: Optional[float] = 0.8 """The temperature to use for sampling.""" top_p: Optional[float] = 0.95 """The top-p value to use for sampling.""" logprobs: Optional[int] = Field(None) """The number of logprobs to return. If None, no logprobs are returned.""" echo: Optional[bool] = False """Whether to echo the prompt.""" stop: Optional[List[str]] = []
https://python.langchain.com/en/latest/_modules/langchain/llms/llamacpp.html
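Taken together with the field defaults above, a minimal instantiation could look like the hypothetical sketch below. The model path is a placeholder and the parameter values are illustrative only, not recommended settings.

# Hypothetical sketch: constructing LlamaCpp with a few of the fields documented above.
from langchain.llms import LlamaCpp

llm = LlamaCpp(
    model_path="/path/to/local/llama/model.bin",  # placeholder path to a local model file
    n_gpu_layers=32,    # layers offloaded to GPU; requires a GPU build of llama-cpp-python
    n_batch=8,          # tokens processed in parallel (between 1 and n_ctx)
    max_tokens=256,     # generation length cap
    temperature=0.8,    # sampling temperature
    top_p=0.95,         # nucleus sampling
    stop=["\n\n"],      # default stop sequences
)
print(llm("Q: Name the planets in the solar system. A:"))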
88484d1ed57e-2
"""Whether to echo the prompt.""" stop: Optional[List[str]] = [] """A list of strings to stop generation when encountered.""" repeat_penalty: Optional[float] = 1.1 """The penalty to apply to repeated tokens.""" top_k: Optional[int] = 40 """The top-k value to use for sampling.""" last_n_tokens_size: Optional[int] = 64 """The number of tokens to look back when applying the repeat_penalty.""" use_mmap: Optional[bool] = True """Whether to keep the model loaded in RAM""" streaming: bool = True """Whether to stream the results, token by token.""" @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that llama-cpp-python library is installed.""" model_path = values["model_path"] model_param_names = [ "lora_path", "lora_base", "n_ctx", "n_parts", "seed", "f16_kv", "logits_all", "vocab_only", "use_mlock", "n_threads", "n_batch", "use_mmap", "last_n_tokens_size", ] model_params = {k: values[k] for k in model_param_names} # For backwards compatibility, only include if non-null. if values["n_gpu_layers"] is not None: model_params["n_gpu_layers"] = values["n_gpu_layers"] try: from llama_cpp import Llama values["client"] = Llama(model_path, **model_params) except ImportError: raise ModuleNotFoundError(
https://python.langchain.com/en/latest/_modules/langchain/llms/llamacpp.html
88484d1ed57e-3
except ImportError: raise ModuleNotFoundError( "Could not import llama-cpp-python library. " "Please install the llama-cpp-python library to " "use this model: pip install llama-cpp-python" ) except Exception as e: raise ValueError( f"Could not load Llama model from path: {model_path}. " f"Received error {e}" ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling llama_cpp.""" return { "suffix": self.suffix, "max_tokens": self.max_tokens, "temperature": self.temperature, "top_p": self.top_p, "logprobs": self.logprobs, "echo": self.echo, "stop_sequences": self.stop, # key here is convention among LLM classes "repeat_penalty": self.repeat_penalty, "top_k": self.top_k, } @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model_path": self.model_path}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "llama.cpp" def _get_parameters(self, stop: Optional[List[str]] = None) -> Dict[str, Any]: """ Performs sanity check, preparing parameters in the format needed by llama_cpp. Args: stop (Optional[List[str]]): List of stop sequences for llama_cpp. Returns: Dictionary containing the combined parameters. """
https://python.langchain.com/en/latest/_modules/langchain/llms/llamacpp.html
88484d1ed57e-4
Returns: Dictionary containing the combined parameters. """ # Raise error if stop sequences are in both input and default params if self.stop and stop is not None: raise ValueError("`stop` found in both the input and default params.") params = self._default_params # llama_cpp expects the "stop" key, not "stop_sequences", so we remove it: params.pop("stop_sequences") # then set it to the configured value, or default to an empty list: params["stop"] = self.stop or stop or [] return params def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call the Llama model and return the output. Args: prompt: The prompt to use for generation. stop: A list of strings to stop generation when encountered. Returns: The generated text. Example: .. code-block:: python from langchain.llms import LlamaCpp llm = LlamaCpp(model_path="/path/to/local/llama/model.bin") llm("This is a prompt.") """ if self.streaming: # If streaming is enabled, we use the stream # method that yields tokens as they are generated # and return the combined string from the first choice's text: combined_text_output = "" for token in self.stream(prompt=prompt, stop=stop, run_manager=run_manager): combined_text_output += token["choices"][0]["text"] return combined_text_output else: params = self._get_parameters(stop) result = self.client(prompt=prompt, **params)
https://python.langchain.com/en/latest/_modules/langchain/llms/llamacpp.html
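As a usage note on `_get_parameters` above: stop sequences may come from the instance default or from the call, but supplying both raises an error. The sketch below is hypothetical; the model path is a placeholder.

# Hypothetical sketch of the stop-sequence rules enforced by _get_parameters.
from langchain.llms import LlamaCpp

llm = LlamaCpp(model_path="/path/to/local/llama/model.bin")  # placeholder path

# A per-call stop sequence is fine when the instance default is empty:
print(llm("Q: What is 2 + 2?\nA:", stop=["\n"]))

# Setting stop both on the instance and on the call raises ValueError:
llm_with_default = LlamaCpp(model_path="/path/to/local/llama/model.bin", stop=["\n"])
# llm_with_default("some prompt", stop=["###"])  # ValueError: `stop` found in both ...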
88484d1ed57e-5
result = self.client(prompt=prompt, **params) return result["choices"][0]["text"] [docs] def stream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> Generator[Dict, None, None]: """Yields result objects as they are generated in real time. BETA: this is a beta feature while we figure out the right abstraction. Once that happens, this interface could change. It also calls the callback manager's on_llm_new_token event with similar parameters to the OpenAI LLM class method of the same name. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: A generator representing the stream of tokens being generated. Yields: Dictionary-like objects containing a string token and metadata. See llama-cpp-python docs and below for more. Example: .. code-block:: python from langchain.llms import LlamaCpp llm = LlamaCpp( model_path="/path/to/local/model.bin", temperature=0.5 ) for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'", stop=["'","\n"]): result = chunk["choices"][0] print(result["text"], end='', flush=True) """ params = self._get_parameters(stop) result = self.client(prompt=prompt, stream=True, **params) for chunk in result: token = chunk["choices"][0]["text"]
https://python.langchain.com/en/latest/_modules/langchain/llms/llamacpp.html
88484d1ed57e-6
for chunk in result: token = chunk["choices"][0]["text"] log_probs = chunk["choices"][0].get("logprobs", None) if run_manager: run_manager.on_llm_new_token( token=token, verbose=self.verbose, log_probs=log_probs ) yield chunk By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 28, 2023.
https://python.langchain.com/en/latest/_modules/langchain/llms/llamacpp.html
ce229b158366-0
Source code for langchain.llms.huggingface_hub """Wrapper around HuggingFace APIs.""" from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env DEFAULT_REPO_ID = "gpt2" VALID_TASKS = ("text2text-generation", "text-generation", "summarization") [docs]class HuggingFaceHub(LLM): """Wrapper around HuggingFaceHub models. To use, you should have the ``huggingface_hub`` python package installed, and the environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass it as a named parameter to the constructor. Only supports `text-generation`, `text2text-generation` and `summarization` for now. Example: .. code-block:: python from langchain.llms import HuggingFaceHub hf = HuggingFaceHub(repo_id="gpt2", huggingfacehub_api_token="my-api-key") """ client: Any #: :meta private: repo_id: str = DEFAULT_REPO_ID """Model name to use.""" task: Optional[str] = None """Task to call the model with. Should be a task that returns `generated_text` or `summary_text`.""" model_kwargs: Optional[dict] = None """Key word arguments to pass to the model.""" huggingfacehub_api_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid
https://python.langchain.com/en/latest/_modules/langchain/llms/huggingface_hub.html
ce229b158366-1
"""Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" huggingfacehub_api_token = get_from_dict_or_env( values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN" ) try: from huggingface_hub.inference_api import InferenceApi repo_id = values["repo_id"] client = InferenceApi( repo_id=repo_id, token=huggingfacehub_api_token, task=values.get("task"), ) if client.task not in VALID_TASKS: raise ValueError( f"Got invalid task {client.task}, " f"currently only {VALID_TASKS} are supported" ) values["client"] = client except ImportError: raise ValueError( "Could not import huggingface_hub python package. " "Please install it with `pip install huggingface_hub`." ) return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { **{"repo_id": self.repo_id, "task": self.task}, **{"model_kwargs": _model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "huggingface_hub" def _call( self, prompt: str, stop: Optional[List[str]] = None,
https://python.langchain.com/en/latest/_modules/langchain/llms/huggingface_hub.html
ce229b158366-2
self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to HuggingFace Hub's inference endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = hf("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} response = self.client(inputs=prompt, params=_model_kwargs) if "error" in response: raise ValueError(f"Error raised by inference API: {response['error']}") if self.client.task == "text-generation": # Text generation return includes the starter text. text = response[0]["generated_text"][len(prompt) :] elif self.client.task == "text2text-generation": text = response[0]["generated_text"] elif self.client.task == "summarization": text = response[0]["summary_text"] else: raise ValueError( f"Got invalid task {self.client.task}, " f"currently only {VALID_TASKS} are supported" ) if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to huggingface_hub. text = enforce_stop_tokens(text, stop) return text By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 28, 2023.
https://python.langchain.com/en/latest/_modules/langchain/llms/huggingface_hub.html
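For reference, a minimal call to the HuggingFaceHub wrapper above could look like the following sketch. The API token is a placeholder, and `max_new_tokens` is only one example of an entry that `model_kwargs` might carry through to the Inference API.

# Hypothetical usage sketch for HuggingFaceHub; requires a valid API token.
from langchain.llms import HuggingFaceHub

hf = HuggingFaceHub(
    repo_id="gpt2",                        # a text-generation model (one of VALID_TASKS)
    model_kwargs={"max_new_tokens": 64},   # forwarded as `params` to InferenceApi
    huggingfacehub_api_token="hf_...",     # placeholder; or set HUGGINGFACEHUB_API_TOKEN
)
# For text-generation models the prompt is stripped from the front of the output,
# and stop sequences are enforced client-side via enforce_stop_tokens.
print(hf("Tell me a joke.", stop=["\n\n"]))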
96f6236f2f35-0
Source code for langchain.llms.human from typing import Any, Callable, List, Mapping, Optional from pydantic import Field from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens def _display_prompt(prompt: str) -> None: """Displays the given prompt to the user.""" print(f"\n{prompt}") def _collect_user_input( separator: Optional[str] = None, stop: Optional[List[str]] = None ) -> str: """Collects and returns user input as a single string.""" separator = separator or "\n" lines = [] while True: line = input() if not line: break lines.append(line) if stop and any(seq in line for seq in stop): break # Combine all lines into a single string multi_line_input = separator.join(lines) return multi_line_input [docs]class HumanInputLLM(LLM): """ A LLM wrapper which returns user input as the response. """ input_func: Callable = Field(default_factory=lambda: _collect_user_input) prompt_func: Callable[[str], None] = Field(default_factory=lambda: _display_prompt) separator: str = "\n" input_kwargs: Mapping[str, Any] = {} prompt_kwargs: Mapping[str, Any] = {} @property def _identifying_params(self) -> Mapping[str, Any]: """ Returns an empty dictionary as there are no identifying parameters. """ return {} @property def _llm_type(self) -> str: """Returns the type of LLM.""" return "human-input"
https://python.langchain.com/en/latest/_modules/langchain/llms/human.html
96f6236f2f35-1
"""Returns the type of LLM.""" return "human-input" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """ Displays the prompt to the user and returns their input as a response. Args: prompt (str): The prompt to be displayed to the user. stop (Optional[List[str]]): A list of stop strings. run_manager (Optional[CallbackManagerForLLMRun]): Currently not used. Returns: str: The user's input as a response. """ self.prompt_func(prompt, **self.prompt_kwargs) user_input = self.input_func( separator=self.separator, stop=stop, **self.input_kwargs ) if stop is not None: # I believe this is required since the stop tokens # are not enforced by the human themselves user_input = enforce_stop_tokens(user_input, stop) return user_input By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 28, 2023.
https://python.langchain.com/en/latest/_modules/langchain/llms/human.html
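A short, runnable illustration of the wrapper above: the prompt is printed to stdout and whatever the operator types (terminated by an empty line) is returned as the "completion", which makes it handy for manually stepping through chains and agents.

# Runnable sketch: the human plays the role of the model.
from langchain.llms.human import HumanInputLLM

llm = HumanInputLLM()
answer = llm("You are the LLM. What is the capital of France?")
print(f"'Model' answered: {answer}")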
731a3cbc06a8-0
Source code for langchain.llms.databricks import os from abc import ABC, abstractmethod from typing import Any, Callable, Dict, List, Optional import requests from pydantic import BaseModel, Extra, Field, PrivateAttr, root_validator, validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM __all__ = ["Databricks"] class _DatabricksClientBase(BaseModel, ABC): """A base JSON API client that talks to Databricks.""" api_url: str api_token: str def post_raw(self, request: Any) -> Any: headers = {"Authorization": f"Bearer {self.api_token}"} response = requests.post(self.api_url, headers=headers, json=request) # TODO: error handling and automatic retries if not response.ok: raise ValueError(f"HTTP {response.status_code} error: {response.text}") return response.json() @abstractmethod def post(self, request: Any) -> Any: ... class _DatabricksServingEndpointClient(_DatabricksClientBase): """An API client that talks to a Databricks serving endpoint.""" host: str endpoint_name: str @root_validator(pre=True) def set_api_url(cls, values: Dict[str, Any]) -> Dict[str, Any]: if "api_url" not in values: host = values["host"] endpoint_name = values["endpoint_name"] api_url = f"https://{host}/serving-endpoints/{endpoint_name}/invocations" values["api_url"] = api_url return values def post(self, request: Any) -> Any:
https://python.langchain.com/en/latest/_modules/langchain/llms/databricks.html
731a3cbc06a8-1
return values def post(self, request: Any) -> Any: # See https://docs.databricks.com/machine-learning/model-serving/score-model-serving-endpoints.html wrapped_request = {"dataframe_records": [request]} response = self.post_raw(wrapped_request)["predictions"] # For a single-record query, the result is not a list. if isinstance(response, list): response = response[0] return response class _DatabricksClusterDriverProxyClient(_DatabricksClientBase): """An API client that talks to a Databricks cluster driver proxy app.""" host: str cluster_id: str cluster_driver_port: str @root_validator(pre=True) def set_api_url(cls, values: Dict[str, Any]) -> Dict[str, Any]: if "api_url" not in values: host = values["host"] cluster_id = values["cluster_id"] port = values["cluster_driver_port"] api_url = f"https://{host}/driver-proxy-api/o/0/{cluster_id}/{port}" values["api_url"] = api_url return values def post(self, request: Any) -> Any: return self.post_raw(request) def get_repl_context() -> Any: """Gets the notebook REPL context if running inside a Databricks notebook. Returns None otherwise. """ try: from dbruntime.databricks_repl_context import get_context return get_context() except ImportError: raise ValueError( "Cannot access dbruntime, not running inside a Databricks notebook." ) def get_default_host() -> str: """Gets the default Databricks workspace hostname.
https://python.langchain.com/en/latest/_modules/langchain/llms/databricks.html
731a3cbc06a8-2
def get_default_host() -> str: """Gets the default Databricks workspace hostname. Raises an error if the hostname cannot be automatically determined. """ host = os.getenv("DATABRICKS_HOST") if not host: try: host = get_repl_context().browserHostName if not host: raise ValueError("context doesn't contain browserHostName.") except Exception as e: raise ValueError( "host was not set and cannot be automatically inferred. Set " f"environment variable 'DATABRICKS_HOST'. Received error: {e}" ) # TODO: support Databricks CLI profile host = host.lstrip("https://").lstrip("http://").rstrip("/") return host def get_default_api_token() -> str: """Gets the default Databricks personal access token. Raises an error if the token cannot be automatically determined. """ if api_token := os.getenv("DATABRICKS_API_TOKEN"): return api_token try: api_token = get_repl_context().apiToken if not api_token: raise ValueError("context doesn't contain apiToken.") except Exception as e: raise ValueError( "api_token was not set and cannot be automatically inferred. Set " f"environment variable 'DATABRICKS_API_TOKEN'. Received error: {e}" ) # TODO: support Databricks CLI profile return api_token [docs]class Databricks(LLM): """LLM wrapper around a Databricks serving endpoint or a cluster driver proxy app. It supports two endpoint types: * **Serving endpoint** (recommended for both production and development).
https://python.langchain.com/en/latest/_modules/langchain/llms/databricks.html
731a3cbc06a8-3
* **Serving endpoint** (recommended for both production and development). We assume that an LLM was registered and deployed to a serving endpoint. To wrap it as an LLM you must have "Can Query" permission to the endpoint. Set ``endpoint_name`` accordingly and do not set ``cluster_id`` and ``cluster_driver_port``. The expected model signature is: * inputs:: [{"name": "prompt", "type": "string"}, {"name": "stop", "type": "list[string]"}] * outputs: ``[{"type": "string"}]`` * **Cluster driver proxy app** (recommended for interactive development). One can load an LLM on a Databricks interactive cluster and start a local HTTP server on the driver node to serve the model at ``/`` using HTTP POST method with JSON input/output. Please use a port number between ``[3000, 8000]`` and let the server listen to the driver IP address or simply ``0.0.0.0`` instead of localhost only. To wrap it as an LLM you must have "Can Attach To" permission to the cluster. Set ``cluster_id`` and ``cluster_driver_port`` and do not set ``endpoint_name``. The expected server schema (using JSON schema) is: * inputs:: {"type": "object", "properties": { "prompt": {"type": "string"}, "stop": {"type": "array", "items": {"type": "string"}}}, "required": ["prompt"]}` * outputs: ``{"type": "string"}`` If the endpoint model signature is different or you want to set extra params,
https://python.langchain.com/en/latest/_modules/langchain/llms/databricks.html
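Matching the two endpoint types described above, the wrapper can be pointed either at a serving endpoint or at a cluster driver proxy app. The sketch below is hypothetical: the endpoint name, cluster ID, and port are placeholders, and host/token are assumed to come from the environment or a Databricks notebook context.

# Hypothetical sketch: the two ways to construct the Databricks LLM wrapper.
from langchain.llms.databricks import Databricks

# 1) Model serving endpoint (host and token inferred from DATABRICKS_HOST /
#    DATABRICKS_API_TOKEN or from the notebook REPL context).
llm = Databricks(endpoint_name="my-llm-endpoint")  # placeholder endpoint name

# 2) Cluster driver proxy app serving JSON at "/" on a driver port.
llm_proxy = Databricks(
    cluster_id="0123-456789-abcdefgh",   # placeholder cluster ID
    cluster_driver_port="7777",          # port in the recommended [3000, 8000] range
)

print(llm("What is Apache Spark?", stop=["\n\n"]))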
731a3cbc06a8-4
If the endpoint model signature is different or you want to set extra params, you can use `transform_input_fn` and `transform_output_fn` to apply necessary transformations before and after the query. """ host: str = Field(default_factory=get_default_host) """Databricks workspace hostname. If not provided, the default value is determined by * the ``DATABRICKS_HOST`` environment variable if present, or * the hostname of the current Databricks workspace if running inside a Databricks notebook attached to an interactive cluster in "single user" or "no isolation shared" mode. """ api_token: str = Field(default_factory=get_default_api_token) """Databricks personal access token. If not provided, the default value is determined by * the ``DATABRICKS_API_TOKEN`` environment variable if present, or * an automatically generated temporary token if running inside a Databricks notebook attached to an interactive cluster in "single user" or "no isolation shared" mode. """ endpoint_name: Optional[str] = None """Name of the model serving endpoint. You must specify the endpoint name to connect to a model serving endpoint. You must not set both ``endpoint_name`` and ``cluster_id``. """ cluster_id: Optional[str] = None """ID of the cluster if connecting to a cluster driver proxy app. If neither ``endpoint_name`` nor ``cluster_id`` is provided and the code runs inside a Databricks notebook attached to an interactive cluster in "single user" or "no isolation shared" mode, the current cluster ID is used as default. You must not set both ``endpoint_name`` and ``cluster_id``.
https://python.langchain.com/en/latest/_modules/langchain/llms/databricks.html
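To make the `transform_input_fn` / `transform_output_fn` hooks described above concrete, here is a hypothetical sketch that applies a simple prompt template before the request and strips whitespace from the response; the endpoint name is a placeholder.

# Hypothetical sketch of the transform hooks; assumes the wrapped model accepts
# a {"prompt": ..., "stop": ...} request as described above.
from langchain.llms.databricks import Databricks

def transform_input(**request):
    # request contains prompt, stop, and any model_kwargs entries
    request["prompt"] = f"Answer concisely.\n\nQuestion: {request['prompt']}\nAnswer:"
    return request

def transform_output(response):
    return str(response).strip()

llm = Databricks(
    endpoint_name="my-llm-endpoint",      # placeholder
    model_kwargs={"temperature": 0.1},    # extra params merged into each request
    transform_input_fn=transform_input,
    transform_output_fn=transform_output,
)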
731a3cbc06a8-5
You must not set both ``endpoint_name`` and ``cluster_id``. """ cluster_driver_port: Optional[str] = None """The port number used by the HTTP server running on the cluster driver node. The server should listen on the driver IP address or simply ``0.0.0.0`` to connect. We recommend the server using a port number between ``[3000, 8000]``. """ model_kwargs: Optional[Dict[str, Any]] = None """Extra parameters to pass to the endpoint.""" transform_input_fn: Optional[Callable] = None """A function that transforms ``{prompt, stop, **kwargs}`` into a JSON-compatible request object that the endpoint accepts. For example, you can apply a prompt template to the input prompt. """ transform_output_fn: Optional[Callable[..., str]] = None """A function that transforms the output from the endpoint to the generated text. """ _client: _DatabricksClientBase = PrivateAttr() class Config: extra = Extra.forbid underscore_attrs_are_private = True @validator("cluster_id", always=True) def set_cluster_id(cls, v: Any, values: Dict[str, Any]) -> Optional[str]: if v and values["endpoint_name"]: raise ValueError("Cannot set both endpoint_name and cluster_id.") elif values["endpoint_name"]: return None elif v: return v else: try: if v := get_repl_context().clusterId: return v raise ValueError("Context doesn't contain clusterId.") except Exception as e: raise ValueError( "Neither endpoint_name nor cluster_id was set. "
https://python.langchain.com/en/latest/_modules/langchain/llms/databricks.html
731a3cbc06a8-6
raise ValueError( "Neither endpoint_name nor cluster_id was set. " "And the cluster_id cannot be automatically determined. Received" f" error: {e}" ) @validator("cluster_driver_port", always=True) def set_cluster_driver_port(cls, v: Any, values: Dict[str, Any]) -> Optional[str]: if v and values["endpoint_name"]: raise ValueError("Cannot set both endpoint_name and cluster_driver_port.") elif values["endpoint_name"]: return None elif v is None: raise ValueError( "Must set cluster_driver_port to connect to a cluster driver." ) elif int(v) <= 0: raise ValueError(f"Invalid cluster_driver_port: {v}") else: return v @validator("model_kwargs", always=True) def set_model_kwargs(cls, v: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]: if v: assert "prompt" not in v, "model_kwargs must not contain key 'prompt'" assert "stop" not in v, "model_kwargs must not contain key 'stop'" return v def __init__(self, **data: Any): super().__init__(**data) if self.endpoint_name: self._client = _DatabricksServingEndpointClient( host=self.host, api_token=self.api_token, endpoint_name=self.endpoint_name, ) elif self.cluster_id and self.cluster_driver_port: self._client = _DatabricksClusterDriverProxyClient( host=self.host, api_token=self.api_token, cluster_id=self.cluster_id, cluster_driver_port=self.cluster_driver_port, ) else:
https://python.langchain.com/en/latest/_modules/langchain/llms/databricks.html
731a3cbc06a8-7
cluster_driver_port=self.cluster_driver_port, ) else: raise ValueError( "Must specify either endpoint_name or cluster_id/cluster_driver_port." ) @property def _llm_type(self) -> str: """Return type of llm.""" return "databricks" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Queries the LLM endpoint with the given prompt and stop sequence.""" # TODO: support callbacks request = {"prompt": prompt, "stop": stop} if self.model_kwargs: request.update(self.model_kwargs) if self.transform_input_fn: request = self.transform_input_fn(**request) response = self._client.post(request) if self.transform_output_fn: response = self.transform_output_fn(response) return response By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 28, 2023.
https://python.langchain.com/en/latest/_modules/langchain/llms/databricks.html
c1c401014d58-0
Source code for langchain.llms.gpt4all """Wrapper for the GPT4All model.""" from functools import partial from typing import Any, Dict, List, Mapping, Optional, Set from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens [docs]class GPT4All(LLM): r"""Wrapper around GPT4All language models. To use, you should have the ``gpt4all`` python package installed, the pre-trained model file, and the model's config information. Example: .. code-block:: python from langchain.llms import GPT4All model = GPT4All(model="./models/gpt4all-model.bin", n_ctx=512, n_threads=8) # Simplest invocation response = model("Once upon a time, ") """ model: str """Path to the pre-trained GPT4All model file.""" backend: Optional[str] = Field(None, alias="backend") n_ctx: int = Field(512, alias="n_ctx") """Token context window.""" n_parts: int = Field(-1, alias="n_parts") """Number of parts to split the model into. If -1, the number of parts is automatically determined.""" seed: int = Field(0, alias="seed") """Seed. If -1, a random seed is used.""" f16_kv: bool = Field(False, alias="f16_kv") """Use half-precision for key/value cache.""" logits_all: bool = Field(False, alias="logits_all")
https://python.langchain.com/en/latest/_modules/langchain/llms/gpt4all.html
c1c401014d58-1
logits_all: bool = Field(False, alias="logits_all") """Return logits for all tokens, not just the last token.""" vocab_only: bool = Field(False, alias="vocab_only") """Only load the vocabulary, no weights.""" use_mlock: bool = Field(False, alias="use_mlock") """Force system to keep model in RAM.""" embedding: bool = Field(False, alias="embedding") """Use embedding mode only.""" n_threads: Optional[int] = Field(4, alias="n_threads") """Number of threads to use.""" n_predict: Optional[int] = 256 """The maximum number of tokens to generate.""" temp: Optional[float] = 0.8 """The temperature to use for sampling.""" top_p: Optional[float] = 0.95 """The top-p value to use for sampling.""" top_k: Optional[int] = 40 """The top-k value to use for sampling.""" echo: Optional[bool] = False """Whether to echo the prompt.""" stop: Optional[List[str]] = [] """A list of strings to stop generation when encountered.""" repeat_last_n: Optional[int] = 64 "Last n tokens to penalize" repeat_penalty: Optional[float] = 1.3 """The penalty to apply to repeated tokens.""" n_batch: int = Field(1, alias="n_batch") """Batch size for prompt processing.""" streaming: bool = False """Whether to stream the results or not.""" context_erase: float = 0.5 """Leave (n_ctx * context_erase) tokens starting from beginning if the context has run out."""
https://python.langchain.com/en/latest/_modules/langchain/llms/gpt4all.html
c1c401014d58-2
starting from beginning if the context has run out.""" client: Any = None #: :meta private: class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @staticmethod def _model_param_names() -> Set[str]: return { "n_ctx", "n_predict", "top_k", "top_p", "temp", "n_batch", "repeat_penalty", "repeat_last_n", "context_erase", } def _default_params(self) -> Dict[str, Any]: return { "n_ctx": self.n_ctx, "n_predict": self.n_predict, "top_k": self.top_k, "top_p": self.top_p, "temp": self.temp, "n_batch": self.n_batch, "repeat_penalty": self.repeat_penalty, "repeat_last_n": self.repeat_last_n, "context_erase": self.context_erase, } @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that the python package exists in the environment.""" try: from gpt4all import GPT4All as GPT4AllModel full_path = values["model"] model_path, delimiter, model_name = full_path.rpartition("/") model_path += delimiter values["client"] = GPT4AllModel( model_name=model_name, model_path=model_path or None, model_type=values["backend"], allow_download=False, ) values["backend"] = values["client"].model.model_type except ImportError: raise ValueError(
https://python.langchain.com/en/latest/_modules/langchain/llms/gpt4all.html
c1c401014d58-3
except ImportError: raise ValueError( "Could not import gpt4all python package. " "Please install it with `pip install gpt4all`." ) return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { "model": self.model, **self._default_params(), **{ k: v for k, v in self.__dict__.items() if k in self._model_param_names() }, } @property def _llm_type(self) -> str: """Return the type of llm.""" return "gpt4all" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: r"""Call out to GPT4All's generate method. Args: prompt: The prompt to pass into the model. stop: A list of strings to stop generation when encountered. Returns: The string generated by the model. Example: .. code-block:: python prompt = "Once upon a time, " response = model(prompt, n_predict=55) """ text_callback = None if run_manager: text_callback = partial(run_manager.on_llm_new_token, verbose=self.verbose) text = "" for token in self.client.generate(prompt, **self._default_params()): if text_callback: text_callback(token) text += token if stop is not None: text = enforce_stop_tokens(text, stop) return text
https://python.langchain.com/en/latest/_modules/langchain/llms/gpt4all.html
c1c401014d58-4
text = enforce_stop_tokens(text, stop) return text By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 28, 2023.
https://python.langchain.com/en/latest/_modules/langchain/llms/gpt4all.html
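Mirroring the docstring example above, a local invocation could look like the following sketch. The model path is a placeholder, and the file must already exist because the validator constructs the client with allow_download=False.

# Hypothetical local usage of the GPT4All wrapper.
from langchain.llms import GPT4All

llm = GPT4All(
    model="./models/gpt4all-model.bin",  # placeholder path to a local model file
    n_threads=8,
    temp=0.7,        # note: this wrapper uses `temp`, not `temperature`
    n_predict=128,   # maximum number of generated tokens
)
# Stop sequences are applied client-side with enforce_stop_tokens after generation.
print(llm("Once upon a time, ", stop=["\n\n"]))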
a50dee76f169-0
Source code for langchain.llms.fake """Fake LLM wrapper for testing purposes.""" from typing import Any, List, Mapping, Optional from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM [docs]class FakeListLLM(LLM): """Fake LLM wrapper for testing purposes.""" responses: List i: int = 0 @property def _llm_type(self) -> str: """Return type of llm.""" return "fake-list" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """First try to lookup in queries, else return 'foo' or 'bar'.""" response = self.responses[self.i] self.i += 1 return response @property def _identifying_params(self) -> Mapping[str, Any]: return {"responses": self.responses} By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 28, 2023.
https://python.langchain.com/en/latest/_modules/langchain/llms/fake.html
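Because FakeListLLM simply replays its `responses` list in order, it works well as a deterministic stand-in for a real model in unit tests. A runnable sketch:

# Runnable sketch: FakeListLLM returns canned responses in order.
from langchain.llms.fake import FakeListLLM

llm = FakeListLLM(responses=["first canned answer", "second canned answer"])
assert llm("any prompt") == "first canned answer"
assert llm("another prompt") == "second canned answer"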
0e25f02aa3ae-0
Source code for langchain.llms.promptlayer_openai """PromptLayer wrapper.""" import datetime from typing import List, Optional from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.llms import OpenAI, OpenAIChat from langchain.schema import LLMResult [docs]class PromptLayerOpenAI(OpenAI): """Wrapper around OpenAI large language models. To use, you should have the ``openai`` and ``promptlayer`` python package installed, and the environment variable ``OPENAI_API_KEY`` and ``PROMPTLAYER_API_KEY`` set with your openAI API key and promptlayer key respectively. All parameters that can be passed to the OpenAI LLM can also be passed here. The PromptLayerOpenAI LLM adds two optional parameters: ``pl_tags``: List of strings to tag the request with. ``return_pl_id``: If True, the PromptLayer request ID will be returned in the ``generation_info`` field of the ``Generation`` object. Example: .. code-block:: python from langchain.llms import PromptLayerOpenAI openai = PromptLayerOpenAI(model_name="text-davinci-003") """ pl_tags: Optional[List[str]] return_pl_id: Optional[bool] = False def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> LLMResult: """Call OpenAI generate and then call PromptLayer API to log the request."""
https://python.langchain.com/en/latest/_modules/langchain/llms/promptlayer_openai.html
0e25f02aa3ae-1
"""Call OpenAI generate and then call PromptLayer API to log the request.""" from promptlayer.utils import get_api_key, promptlayer_api_request request_start_time = datetime.datetime.now().timestamp() generated_responses = super()._generate(prompts, stop, run_manager) request_end_time = datetime.datetime.now().timestamp() for i in range(len(prompts)): prompt = prompts[i] generation = generated_responses.generations[i][0] resp = { "text": generation.text, "llm_output": generated_responses.llm_output, } pl_request_id = promptlayer_api_request( "langchain.PromptLayerOpenAI", "langchain", [prompt], self._identifying_params, self.pl_tags, resp, request_start_time, request_end_time, get_api_key(), return_pl_id=self.return_pl_id, ) if self.return_pl_id: if generation.generation_info is None or not isinstance( generation.generation_info, dict ): generation.generation_info = {} generation.generation_info["pl_request_id"] = pl_request_id return generated_responses async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, ) -> LLMResult: from promptlayer.utils import get_api_key, promptlayer_api_request_async request_start_time = datetime.datetime.now().timestamp() generated_responses = await super()._agenerate(prompts, stop, run_manager) request_end_time = datetime.datetime.now().timestamp() for i in range(len(prompts)):
https://python.langchain.com/en/latest/_modules/langchain/llms/promptlayer_openai.html
0e25f02aa3ae-2
for i in range(len(prompts)): prompt = prompts[i] generation = generated_responses.generations[i][0] resp = { "text": generation.text, "llm_output": generated_responses.llm_output, } pl_request_id = await promptlayer_api_request_async( "langchain.PromptLayerOpenAI.async", "langchain", [prompt], self._identifying_params, self.pl_tags, resp, request_start_time, request_end_time, get_api_key(), return_pl_id=self.return_pl_id, ) if self.return_pl_id: if generation.generation_info is None or not isinstance( generation.generation_info, dict ): generation.generation_info = {} generation.generation_info["pl_request_id"] = pl_request_id return generated_responses [docs]class PromptLayerOpenAIChat(OpenAIChat): """Wrapper around OpenAI large language models. To use, you should have the ``openai`` and ``promptlayer`` python package installed, and the environment variable ``OPENAI_API_KEY`` and ``PROMPTLAYER_API_KEY`` set with your openAI API key and promptlayer key respectively. All parameters that can be passed to the OpenAIChat LLM can also be passed here. The PromptLayerOpenAIChat adds two optional parameters: ``pl_tags``: List of strings to tag the request with. ``return_pl_id``: If True, the PromptLayer request ID will be returned in the ``generation_info`` field of the ``Generation`` object. Example: .. code-block:: python
https://python.langchain.com/en/latest/_modules/langchain/llms/promptlayer_openai.html
0e25f02aa3ae-3
``Generation`` object. Example: .. code-block:: python from langchain.llms import PromptLayerOpenAIChat openaichat = PromptLayerOpenAIChat(model_name="gpt-3.5-turbo") """ pl_tags: Optional[List[str]] return_pl_id: Optional[bool] = False def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> LLMResult: """Call OpenAI generate and then call PromptLayer API to log the request.""" from promptlayer.utils import get_api_key, promptlayer_api_request request_start_time = datetime.datetime.now().timestamp() generated_responses = super()._generate(prompts, stop, run_manager) request_end_time = datetime.datetime.now().timestamp() for i in range(len(prompts)): prompt = prompts[i] generation = generated_responses.generations[i][0] resp = { "text": generation.text, "llm_output": generated_responses.llm_output, } pl_request_id = promptlayer_api_request( "langchain.PromptLayerOpenAIChat", "langchain", [prompt], self._identifying_params, self.pl_tags, resp, request_start_time, request_end_time, get_api_key(), return_pl_id=self.return_pl_id, ) if self.return_pl_id: if generation.generation_info is None or not isinstance( generation.generation_info, dict ): generation.generation_info = {}
https://python.langchain.com/en/latest/_modules/langchain/llms/promptlayer_openai.html
0e25f02aa3ae-4
generation.generation_info, dict ): generation.generation_info = {} generation.generation_info["pl_request_id"] = pl_request_id return generated_responses async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, ) -> LLMResult: from promptlayer.utils import get_api_key, promptlayer_api_request_async request_start_time = datetime.datetime.now().timestamp() generated_responses = await super()._agenerate(prompts, stop, run_manager) request_end_time = datetime.datetime.now().timestamp() for i in range(len(prompts)): prompt = prompts[i] generation = generated_responses.generations[i][0] resp = { "text": generation.text, "llm_output": generated_responses.llm_output, } pl_request_id = await promptlayer_api_request_async( "langchain.PromptLayerOpenAIChat.async", "langchain", [prompt], self._identifying_params, self.pl_tags, resp, request_start_time, request_end_time, get_api_key(), return_pl_id=self.return_pl_id, ) if self.return_pl_id: if generation.generation_info is None or not isinstance( generation.generation_info, dict ): generation.generation_info = {} generation.generation_info["pl_request_id"] = pl_request_id return generated_responses By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 28, 2023.
https://python.langchain.com/en/latest/_modules/langchain/llms/promptlayer_openai.html
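Building on the docstring example above, the sketch below shows how the PromptLayer request ID surfaces in `generation_info` when `return_pl_id=True`. It is hypothetical in that it assumes OPENAI_API_KEY and PROMPTLAYER_API_KEY are set in the environment.

# Hypothetical sketch; requires OPENAI_API_KEY and PROMPTLAYER_API_KEY.
from langchain.llms import PromptLayerOpenAI

llm = PromptLayerOpenAI(
    model_name="text-davinci-003",
    pl_tags=["langchain-demo"],   # tags attached to the PromptLayer request
    return_pl_id=True,            # surface the request ID on each Generation
)

result = llm.generate(["Tell me a joke."])
generation = result.generations[0][0]
print(generation.text)
print(generation.generation_info["pl_request_id"])  # set because return_pl_id=True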
c9337a5a8a6d-0
Source code for langchain.llms.forefrontai """Wrapper around ForefrontAI APIs.""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env [docs]class ForefrontAI(LLM): """Wrapper around ForefrontAI large language models. To use, you should have the environment variable ``FOREFRONTAI_API_KEY`` set with your API key. Example: .. code-block:: python from langchain.llms import ForefrontAI forefrontai = ForefrontAI(endpoint_url="") """ endpoint_url: str = "" """Model name to use.""" temperature: float = 0.7 """What sampling temperature to use.""" length: int = 256 """The maximum number of tokens to generate in the completion.""" top_p: float = 1.0 """Total probability mass of tokens to consider at each step.""" top_k: int = 40 """The number of highest probability vocabulary tokens to keep for top-k-filtering.""" repetition_penalty: int = 1 """Penalizes repeated tokens according to frequency.""" forefrontai_api_key: Optional[str] = None base_url: Optional[str] = None """Base url to use, if None decides based on model name.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict:
https://python.langchain.com/en/latest/_modules/langchain/llms/forefrontai.html
c9337a5a8a6d-1
@root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key exists in environment.""" forefrontai_api_key = get_from_dict_or_env( values, "forefrontai_api_key", "FOREFRONTAI_API_KEY" ) values["forefrontai_api_key"] = forefrontai_api_key return values @property def _default_params(self) -> Mapping[str, Any]: """Get the default parameters for calling ForefrontAI API.""" return { "temperature": self.temperature, "length": self.length, "top_p": self.top_p, "top_k": self.top_k, "repetition_penalty": self.repetition_penalty, } @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"endpoint_url": self.endpoint_url}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "forefrontai" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to ForefrontAI's complete endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = ForefrontAI("Tell me a joke.") """ response = requests.post( url=self.endpoint_url,
https://python.langchain.com/en/latest/_modules/langchain/llms/forefrontai.html
c9337a5a8a6d-2
""" response = requests.post( url=self.endpoint_url, headers={ "Authorization": f"Bearer {self.forefrontai_api_key}", "Content-Type": "application/json", }, json={"text": prompt, **self._default_params}, ) response_json = response.json() text = response_json["result"][0]["completion"] if stop is not None: # I believe this is required since the stop tokens # are not enforced by the model parameters text = enforce_stop_tokens(text, stop) return text By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 28, 2023.
https://python.langchain.com/en/latest/_modules/langchain/llms/forefrontai.html
55be8921338c-0
Source code for langchain.llms.beam """Wrapper around Beam API.""" import base64 import json import logging import subprocess import textwrap import time from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) DEFAULT_NUM_TRIES = 10 DEFAULT_SLEEP_TIME = 4 [docs]class Beam(LLM): """Wrapper around Beam API for gpt2 large language model. To use, you should have the ``beam-sdk`` python package installed, and the environment variable ``BEAM_CLIENT_ID`` set with your client id and ``BEAM_CLIENT_SECRET`` set with your client secret. Information on how to get these is available here: https://docs.beam.cloud/account/api-keys. The wrapper can then be called as follows, where the name, cpu, memory, gpu, python version, and python packages can be updated accordingly. Once deployed, the instance can be called. llm = Beam(model_name="gpt2", name="langchain-gpt2", cpu=8, memory="32Gi", gpu="A10G", python_version="python3.8", python_packages=[ "diffusers[torch]>=0.10", "transformers", "torch", "pillow", "accelerate", "safetensors", "xformers",], max_length=50) llm._deploy() call_result = llm._call(input) """
https://python.langchain.com/en/latest/_modules/langchain/llms/beam.html
55be8921338c-1
llm._deploy() call_result = llm._call(input) """ model_name: str = "" name: str = "" cpu: str = "" memory: str = "" gpu: str = "" python_version: str = "" python_packages: List[str] = [] max_length: str = "" url: str = "" """model endpoint to use""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" beam_client_id: str = "" beam_client_secret: str = "" app_id: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""{field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment."""
https://python.langchain.com/en/latest/_modules/langchain/llms/beam.html
55be8921338c-2
"""Validate that api key and python package exists in environment.""" beam_client_id = get_from_dict_or_env( values, "beam_client_id", "BEAM_CLIENT_ID" ) beam_client_secret = get_from_dict_or_env( values, "beam_client_secret", "BEAM_CLIENT_SECRET" ) values["beam_client_id"] = beam_client_id values["beam_client_secret"] = beam_client_secret return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { "model_name": self.model_name, "name": self.name, "cpu": self.cpu, "memory": self.memory, "gpu": self.gpu, "python_version": self.python_version, "python_packages": self.python_packages, "max_length": self.max_length, "model_kwargs": self.model_kwargs, } @property def _llm_type(self) -> str: """Return type of llm.""" return "beam" [docs] def app_creation(self) -> None: """Creates a Python file which will contain your Beam app definition.""" script = textwrap.dedent( """\ import beam # The environment your code will run on app = beam.App( name="{name}", cpu={cpu}, memory="{memory}", gpu="{gpu}", python_version="{python_version}", python_packages={python_packages}, ) app.Trigger.RestAPI( inputs={{"prompt": beam.Types.String(), "max_length": beam.Types.String()}}, outputs={{"text": beam.Types.String()}},
https://python.langchain.com/en/latest/_modules/langchain/llms/beam.html
55be8921338c-3
outputs={{"text": beam.Types.String()}}, handler="run.py:beam_langchain", ) """ ) script_name = "app.py" with open(script_name, "w") as file: file.write( script.format( name=self.name, cpu=self.cpu, memory=self.memory, gpu=self.gpu, python_version=self.python_version, python_packages=self.python_packages, ) ) [docs] def run_creation(self) -> None: """Creates a Python file which will be deployed on beam.""" script = textwrap.dedent( """ import os import transformers from transformers import GPT2LMHeadModel, GPT2Tokenizer model_name = "{model_name}" def beam_langchain(**inputs): prompt = inputs["prompt"] length = inputs["max_length"] tokenizer = GPT2Tokenizer.from_pretrained(model_name) model = GPT2LMHeadModel.from_pretrained(model_name) encodedPrompt = tokenizer.encode(prompt, return_tensors='pt') outputs = model.generate(encodedPrompt, max_length=int(length), do_sample=True, pad_token_id=tokenizer.eos_token_id) output = tokenizer.decode(outputs[0], skip_special_tokens=True) print(output) return {{"text": output}} """ ) script_name = "run.py" with open(script_name, "w") as file: file.write(script.format(model_name=self.model_name)) def _deploy(self) -> str: """Call to Beam.""" try: import beam # type: ignore if beam.__path__ == "": raise ImportError except ImportError:
https://python.langchain.com/en/latest/_modules/langchain/llms/beam.html
55be8921338c-4
if beam.__path__ == "": raise ImportError except ImportError: raise ImportError( "Could not import beam python package. " "Please install it with `curl " "https://raw.githubusercontent.com/slai-labs" "/get-beam/main/get-beam.sh -sSfL | sh`." ) self.app_creation() self.run_creation() process = subprocess.run( "beam deploy app.py", shell=True, capture_output=True, text=True ) if process.returncode == 0: output = process.stdout logger.info(output) lines = output.split("\n") for line in lines: if line.startswith(" i Send requests to: https://apps.beam.cloud/"): self.app_id = line.split("/")[-1] self.url = line.split(":")[1].strip() return self.app_id raise ValueError( f"""Failed to retrieve the appID from the deployment output. Deployment output: {output}""" ) else: raise ValueError(f"Deployment failed. Error: {process.stderr}") @property def authorization(self) -> str: if self.beam_client_id: credential_str = self.beam_client_id + ":" + self.beam_client_secret else: credential_str = self.beam_client_secret return base64.b64encode(credential_str.encode()).decode() def _call( self, prompt: str, stop: Optional[list] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call to Beam."""
https://python.langchain.com/en/latest/_modules/langchain/llms/beam.html
55be8921338c-5
) -> str: """Call to Beam.""" url = "https://apps.beam.cloud/" + self.app_id if self.app_id else self.url payload = {"prompt": prompt, "max_length": self.max_length} headers = { "Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Authorization": "Basic " + self.authorization, "Connection": "keep-alive", "Content-Type": "application/json", } for _ in range(DEFAULT_NUM_TRIES): request = requests.post(url, headers=headers, data=json.dumps(payload)) if request.status_code == 200: return request.json()["text"] time.sleep(DEFAULT_SLEEP_TIME) logger.warning("Unable to successfully call model.") return "" By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 28, 2023.
https://python.langchain.com/en/latest/_modules/langchain/llms/beam.html
69df329ba2a2-0
Source code for langchain.output_parsers.retry from __future__ import annotations from typing import TypeVar from langchain.base_language import BaseLanguageModel from langchain.chains.llm import LLMChain from langchain.prompts.base import BasePromptTemplate from langchain.prompts.prompt import PromptTemplate from langchain.schema import ( BaseOutputParser, OutputParserException, PromptValue, ) NAIVE_COMPLETION_RETRY = """Prompt: {prompt} Completion: {completion} Above, the Completion did not satisfy the constraints given in the Prompt. Please try again:""" NAIVE_COMPLETION_RETRY_WITH_ERROR = """Prompt: {prompt} Completion: {completion} Above, the Completion did not satisfy the constraints given in the Prompt. Details: {error} Please try again:""" NAIVE_RETRY_PROMPT = PromptTemplate.from_template(NAIVE_COMPLETION_RETRY) NAIVE_RETRY_WITH_ERROR_PROMPT = PromptTemplate.from_template( NAIVE_COMPLETION_RETRY_WITH_ERROR ) T = TypeVar("T") [docs]class RetryOutputParser(BaseOutputParser[T]): """Wraps a parser and tries to fix parsing errors. Does this by passing the original prompt and the completion to another LLM, and telling it the completion did not satisfy criteria in the prompt. """ parser: BaseOutputParser[T] retry_chain: LLMChain [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, parser: BaseOutputParser[T], prompt: BasePromptTemplate = NAIVE_RETRY_PROMPT, ) -> RetryOutputParser[T]: chain = LLMChain(llm=llm, prompt=prompt)
https://python.langchain.com/en/latest/_modules/langchain/output_parsers/retry.html
69df329ba2a2-1
chain = LLMChain(llm=llm, prompt=prompt) return cls(parser=parser, retry_chain=chain) [docs] def parse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T: try: parsed_completion = self.parser.parse(completion) except OutputParserException: new_completion = self.retry_chain.run( prompt=prompt_value.to_string(), completion=completion ) parsed_completion = self.parser.parse(new_completion) return parsed_completion [docs] def parse(self, completion: str) -> T: raise NotImplementedError( "This OutputParser can only be called by the `parse_with_prompt` method." ) [docs] def get_format_instructions(self) -> str: return self.parser.get_format_instructions() @property def _type(self) -> str: return "retry" [docs]class RetryWithErrorOutputParser(BaseOutputParser[T]): """Wraps a parser and tries to fix parsing errors. Does this by passing the original prompt, the completion, AND the error that was raised to another language model and telling it that the completion did not work, and raised the given error. Differs from RetryOutputParser in that this implementation provides the error that was raised back to the LLM, which in theory should give it more information on how to fix it. """ parser: BaseOutputParser[T] retry_chain: LLMChain [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, parser: BaseOutputParser[T], prompt: BasePromptTemplate = NAIVE_RETRY_WITH_ERROR_PROMPT, ) -> RetryWithErrorOutputParser[T]:
https://python.langchain.com/en/latest/_modules/langchain/output_parsers/retry.html
69df329ba2a2-2
) -> RetryWithErrorOutputParser[T]: chain = LLMChain(llm=llm, prompt=prompt) return cls(parser=parser, retry_chain=chain) [docs] def parse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T: try: parsed_completion = self.parser.parse(completion) except OutputParserException as e: new_completion = self.retry_chain.run( prompt=prompt_value.to_string(), completion=completion, error=repr(e) ) parsed_completion = self.parser.parse(new_completion) return parsed_completion [docs] def parse(self, completion: str) -> T: raise NotImplementedError( "This OutputParser can only be called by the `parse_with_prompt` method." ) [docs] def get_format_instructions(self) -> str: return self.parser.get_format_instructions() @property def _type(self) -> str: return "retry_with_error" By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 28, 2023.
https://python.langchain.com/en/latest/_modules/langchain/output_parsers/retry.html
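As a concrete illustration of the retry parsers above, the sketch below wraps a PydanticOutputParser and re-asks the model via `parse_with_prompt` when parsing fails. It is a sketch under assumptions: the OpenAI LLM is a placeholder (any BaseLanguageModel works, and it requires an API key), and the schema and bad completion are invented for the example.

# Hypothetical sketch: retrying a failed parse by re-prompting the model.
from pydantic import BaseModel, Field
from langchain.llms import OpenAI  # placeholder LLM; requires OPENAI_API_KEY
from langchain.output_parsers import PydanticOutputParser, RetryWithErrorOutputParser
from langchain.prompts import PromptTemplate

class Action(BaseModel):
    tool: str = Field(description="tool to use")
    tool_input: str = Field(description="input for the tool")

base_parser = PydanticOutputParser(pydantic_object=Action)
prompt = PromptTemplate.from_template(
    "Answer as JSON.\n{format_instructions}\nQuestion: {query}"
)
prompt_value = prompt.format_prompt(
    format_instructions=base_parser.get_format_instructions(),
    query="Who is Ada Lovelace?",
)

retry_parser = RetryWithErrorOutputParser.from_llm(llm=OpenAI(temperature=0), parser=base_parser)
bad_completion = '{"tool": "search"}'  # missing tool_input, so the base parse fails
action = retry_parser.parse_with_prompt(bad_completion, prompt_value)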
ff0e4c8a6257-0
Source code for langchain.output_parsers.fix from __future__ import annotations from typing import TypeVar from langchain.base_language import BaseLanguageModel from langchain.chains.llm import LLMChain from langchain.output_parsers.prompts import NAIVE_FIX_PROMPT from langchain.prompts.base import BasePromptTemplate from langchain.schema import BaseOutputParser, OutputParserException T = TypeVar("T") [docs]class OutputFixingParser(BaseOutputParser[T]): """Wraps a parser and tries to fix parsing errors.""" parser: BaseOutputParser[T] retry_chain: LLMChain [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, parser: BaseOutputParser[T], prompt: BasePromptTemplate = NAIVE_FIX_PROMPT, ) -> OutputFixingParser[T]: chain = LLMChain(llm=llm, prompt=prompt) return cls(parser=parser, retry_chain=chain) [docs] def parse(self, completion: str) -> T: try: parsed_completion = self.parser.parse(completion) except OutputParserException as e: new_completion = self.retry_chain.run( instructions=self.parser.get_format_instructions(), completion=completion, error=repr(e), ) parsed_completion = self.parser.parse(new_completion) return parsed_completion [docs] def get_format_instructions(self) -> str: return self.parser.get_format_instructions() @property def _type(self) -> str: return "output_fixing" By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 28, 2023.
https://python.langchain.com/en/latest/_modules/langchain/output_parsers/fix.html
b63653fb4007-0
Source code for langchain.output_parsers.regex from __future__ import annotations import re from typing import Dict, List, Optional from langchain.schema import BaseOutputParser [docs]class RegexParser(BaseOutputParser): """Class to parse the output into a dictionary.""" regex: str output_keys: List[str] default_output_key: Optional[str] = None @property def _type(self) -> str: """Return the type key.""" return "regex_parser" [docs] def parse(self, text: str) -> Dict[str, str]: """Parse the output of an LLM call.""" match = re.search(self.regex, text) if match: return {key: match.group(i + 1) for i, key in enumerate(self.output_keys)} else: if self.default_output_key is None: raise ValueError(f"Could not parse output: {text}") else: return { key: text if key == self.default_output_key else "" for key in self.output_keys } By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 28, 2023.
Source code for langchain.output_parsers.regex_dict from __future__ import annotations import re from typing import Dict, Optional from langchain.schema import BaseOutputParser [docs]class RegexDictParser(BaseOutputParser): """Class to parse the output into a dictionary.""" regex_pattern: str = r"{}:\s?([^.'\n']*)\.?" # : :meta private: output_key_to_format: Dict[str, str] no_update_value: Optional[str] = None @property def _type(self) -> str: """Return the type key.""" return "regex_dict_parser" [docs] def parse(self, text: str) -> Dict[str, str]: """Parse the output of an LLM call.""" result = {} for output_key, expected_format in self.output_key_to_format.items(): specific_regex = self.regex_pattern.format(re.escape(expected_format)) matches = re.findall(specific_regex, text) if not matches: raise ValueError( f"No match found for output key: {output_key} with expected format \ {expected_format} on text {text}" ) elif len(matches) > 1: raise ValueError( f"Multiple matches found for output key: {output_key} with \ expected format {expected_format} on text {text}" ) elif ( self.no_update_value is not None and matches[0] == self.no_update_value ): continue else: result[output_key] = matches[0] return result By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 28, 2023.
Source code for langchain.output_parsers.structured from __future__ import annotations from typing import Any, List from pydantic import BaseModel from langchain.output_parsers.format_instructions import STRUCTURED_FORMAT_INSTRUCTIONS from langchain.output_parsers.json import parse_and_check_json_markdown from langchain.schema import BaseOutputParser line_template = '\t"{name}": {type} // {description}' [docs]class ResponseSchema(BaseModel): name: str description: str def _get_sub_string(schema: ResponseSchema) -> str: return line_template.format( name=schema.name, description=schema.description, type="string" ) [docs]class StructuredOutputParser(BaseOutputParser): response_schemas: List[ResponseSchema] [docs] @classmethod def from_response_schemas( cls, response_schemas: List[ResponseSchema] ) -> StructuredOutputParser: return cls(response_schemas=response_schemas) [docs] def get_format_instructions(self) -> str: schema_str = "\n".join( [_get_sub_string(schema) for schema in self.response_schemas] ) return STRUCTURED_FORMAT_INSTRUCTIONS.format(format=schema_str) [docs] def parse(self, text: str) -> Any: expected_keys = [rs.name for rs in self.response_schemas] return parse_and_check_json_markdown(text, expected_keys) @property def _type(self) -> str: return "structured" By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 28, 2023.
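A sketch of the typical StructuredOutputParser flow, assuming the model has been told (via get_format_instructions) to reply with a fenced JSON markdown block. The schema names and the completion string below are illustrative.

```python
from langchain.output_parsers import ResponseSchema, StructuredOutputParser

response_schemas = [
    ResponseSchema(name="answer", description="answer to the user's question"),
    ResponseSchema(name="source", description="source used to answer the question"),
]
parser = StructuredOutputParser.from_response_schemas(response_schemas)

# Inject these instructions into the prompt so the model emits a fenced JSON block.
print(parser.get_format_instructions())

completion = '```json\n{"answer": "Paris", "source": "https://en.wikipedia.org/wiki/Paris"}\n```'
print(parser.parse(completion))
# {'answer': 'Paris', 'source': 'https://en.wikipedia.org/wiki/Paris'}
```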
Source code for langchain.output_parsers.pydantic import json import re from typing import Type, TypeVar from pydantic import BaseModel, ValidationError from langchain.output_parsers.format_instructions import PYDANTIC_FORMAT_INSTRUCTIONS from langchain.schema import BaseOutputParser, OutputParserException T = TypeVar("T", bound=BaseModel) [docs]class PydanticOutputParser(BaseOutputParser[T]): pydantic_object: Type[T] [docs] def parse(self, text: str) -> T: try: # Greedy search for 1st json candidate. match = re.search( r"\{.*\}", text.strip(), re.MULTILINE | re.IGNORECASE | re.DOTALL ) json_str = "" if match: json_str = match.group() json_object = json.loads(json_str, strict=False) return self.pydantic_object.parse_obj(json_object) except (json.JSONDecodeError, ValidationError) as e: name = self.pydantic_object.__name__ msg = f"Failed to parse {name} from completion {text}. Got: {e}" raise OutputParserException(msg) [docs] def get_format_instructions(self) -> str: schema = self.pydantic_object.schema() # Remove extraneous fields. reduced_schema = schema if "title" in reduced_schema: del reduced_schema["title"] if "type" in reduced_schema: del reduced_schema["type"] # Ensure json in context is well-formed with double quotes. schema_str = json.dumps(reduced_schema) return PYDANTIC_FORMAT_INSTRUCTIONS.format(schema=schema_str) @property def _type(self) -> str:
@property def _type(self) -> str: return "pydantic" By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 28, 2023.
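A minimal sketch of PydanticOutputParser on its own; the Joke model and the completion text are assumptions. Because parse greedily grabs the first {...} span, leading chatter before the JSON is tolerated.

```python
from pydantic import BaseModel, Field

from langchain.output_parsers import PydanticOutputParser


class Joke(BaseModel):
    setup: str = Field(description="question to set up a joke")
    punchline: str = Field(description="answer to resolve the joke")


parser = PydanticOutputParser(pydantic_object=Joke)

# Returns JSON-schema based instructions to include in the prompt.
print(parser.get_format_instructions())

completion = 'Sure! {"setup": "Why did the chicken cross the road?", "punchline": "To get to the other side."}'
joke = parser.parse(completion)  # -> Joke(setup='Why did the chicken...', punchline='To get to the other side.')
```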
Source code for langchain.output_parsers.rail_parser from __future__ import annotations from typing import Any, Dict from langchain.schema import BaseOutputParser [docs]class GuardrailsOutputParser(BaseOutputParser): guard: Any @property def _type(self) -> str: return "guardrails" [docs] @classmethod def from_rail(cls, rail_file: str, num_reasks: int = 1) -> GuardrailsOutputParser: try: from guardrails import Guard except ImportError: raise ValueError( "guardrails-ai package not installed. " "Install it by running `pip install guardrails-ai`." ) return cls(guard=Guard.from_rail(rail_file, num_reasks=num_reasks)) [docs] @classmethod def from_rail_string( cls, rail_str: str, num_reasks: int = 1 ) -> GuardrailsOutputParser: try: from guardrails import Guard except ImportError: raise ValueError( "guardrails-ai package not installed. " "Install it by running `pip install guardrails-ai`." ) return cls(guard=Guard.from_rail_string(rail_str, num_reasks=num_reasks)) [docs] def get_format_instructions(self) -> str: return self.guard.raw_prompt.format_instructions [docs] def parse(self, text: str) -> Dict: return self.guard.parse(text) By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 28, 2023.
Source code for langchain.output_parsers.list from __future__ import annotations from abc import abstractmethod from typing import List from langchain.schema import BaseOutputParser [docs]class ListOutputParser(BaseOutputParser): """Class to parse the output of an LLM call to a list.""" @property def _type(self) -> str: return "list" [docs] @abstractmethod def parse(self, text: str) -> List[str]: """Parse the output of an LLM call.""" [docs]class CommaSeparatedListOutputParser(ListOutputParser): """Parse out comma separated lists.""" [docs] def get_format_instructions(self) -> str: return ( "Your response should be a list of comma separated values, " "eg: `foo, bar, baz`" ) [docs] def parse(self, text: str) -> List[str]: """Parse the output of an LLM call.""" return text.strip().split(", ") By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 28, 2023.
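The comma-separated list parser is a plain string split, so the model must be instructed (via get_format_instructions) to answer in exactly that format. A short sketch with illustrative values:

```python
from langchain.output_parsers import CommaSeparatedListOutputParser

parser = CommaSeparatedListOutputParser()
print(parser.get_format_instructions())  # tells the model to answer as `foo, bar, baz`
print(parser.parse("red, green, blue"))  # ['red', 'green', 'blue']
```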
Source code for langchain.chat_models.vertexai """Wrapper around Google VertexAI chat-based models.""" from dataclasses import dataclass, field from typing import Dict, List, Optional from pydantic import root_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.chat_models.base import BaseChatModel from langchain.llms.vertexai import _VertexAICommon from langchain.schema import ( AIMessage, BaseMessage, ChatGeneration, ChatResult, HumanMessage, SystemMessage, ) from langchain.utilities.vertexai import raise_vertex_import_error @dataclass class _MessagePair: """InputOutputTextPair represents a pair of input and output texts.""" question: HumanMessage answer: AIMessage @dataclass class _ChatHistory: """InputOutputTextPair represents a pair of input and output texts.""" history: List[_MessagePair] = field(default_factory=list) system_message: Optional[SystemMessage] = None def _parse_chat_history(history: List[BaseMessage]) -> _ChatHistory: """Parse a sequence of messages into history. A sequence should be either (SystemMessage, HumanMessage, AIMessage, HumanMessage, AIMessage, ...) or (HumanMessage, AIMessage, HumanMessage, AIMessage, ...). Args: history: The list of messages to re-create the history of the chat. Returns: A parsed chat history. Raises: ValueError: If a sequence of message is odd, or a human message is not followed by a message from AI (e.g., Human, Human, AI or AI, AI, Human). """ if not history:
""" if not history: return _ChatHistory() first_message = history[0] system_message = first_message if isinstance(first_message, SystemMessage) else None chat_history = _ChatHistory(system_message=system_message) messages_left = history[1:] if system_message else history if len(messages_left) % 2 != 0: raise ValueError( f"Amount of messages in history should be even, got {len(messages_left)}!" ) for question, answer in zip(messages_left[::2], messages_left[1::2]): if not isinstance(question, HumanMessage) or not isinstance(answer, AIMessage): raise ValueError( "A human message should follow a bot one, " f"got {question.type}, {answer.type}." ) chat_history.history.append(_MessagePair(question=question, answer=answer)) return chat_history [docs]class ChatVertexAI(_VertexAICommon, BaseChatModel): """Wrapper around Vertex AI large language models.""" model_name: str = "chat-bison" @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that the python package exists in environment.""" cls._try_init_vertexai(values) try: from vertexai.preview.language_models import ChatModel except ImportError: raise_vertex_import_error() values["client"] = ChatModel.from_pretrained(values["model_name"]) return values def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> ChatResult:
) -> ChatResult: """Generate next turn in the conversation. Args: messages: The history of the conversation as a list of messages. stop: The list of stop words (optional). run_manager: The Callbackmanager for LLM run, it's not used at the moment. Returns: The ChatResult that contains outputs generated by the model. Raises: ValueError: if the last message in the list is not from human. """ if not messages: raise ValueError( "You should provide at least one message to start the chat!" ) question = messages[-1] if not isinstance(question, HumanMessage): raise ValueError( f"Last message in the list should be from human, got {question.type}." ) history = _parse_chat_history(messages[:-1]) context = history.system_message.content if history.system_message else None chat = self.client.start_chat(context=context, **self._default_params) for pair in history.history: chat._history.append((pair.question.content, pair.answer.content)) response = chat.send_message(question.content) text = self._enforce_stop_words(response.text, stop) return ChatResult(generations=[ChatGeneration(message=AIMessage(content=text))]) async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, ) -> ChatResult: raise NotImplementedError( """Vertex AI doesn't support async requests at the moment.""" ) By Harrison Chase © Copyright 2023, Harrison Chase.
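A hedged usage sketch for ChatVertexAI. As _parse_chat_history enforces, prior turns must alternate Human/AI pairs after an optional leading SystemMessage, and the final message must come from the human. A Google Cloud project with Vertex AI enabled is assumed, and the messages are illustrative.

```python
from langchain.chat_models import ChatVertexAI
from langchain.schema import AIMessage, HumanMessage, SystemMessage

chat = ChatVertexAI()  # defaults to the "chat-bison" model

messages = [
    SystemMessage(content="You are a helpful travel assistant."),
    HumanMessage(content="I like beaches. Where should I go?"),
    AIMessage(content="You could consider Bali or the Maldives."),
    HumanMessage(content="Which one is cheaper in June?"),  # last message must be human
]
response = chat(messages)  # returns an AIMessage with the model's next turn
print(response.content)
```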
Source code for langchain.chat_models.openai """OpenAI chat wrapper.""" from __future__ import annotations import logging import sys from typing import ( TYPE_CHECKING, Any, Callable, Dict, List, Mapping, Optional, Tuple, Union, ) from pydantic import Extra, Field, root_validator from tenacity import ( before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.chat_models.base import BaseChatModel from langchain.schema import ( AIMessage, BaseMessage, ChatGeneration, ChatMessage, ChatResult, HumanMessage, SystemMessage, ) from langchain.utils import get_from_dict_or_env if TYPE_CHECKING: import tiktoken logger = logging.getLogger(__name__) def _import_tiktoken() -> Any: try: import tiktoken except ImportError: raise ValueError( "Could not import tiktoken python package. " "This is needed in order to calculate get_token_ids. " "Please install it with `pip install tiktoken`." ) return tiktoken def _create_retry_decorator(llm: ChatOpenAI) -> Callable[[Any], Any]: import openai min_seconds = 1 max_seconds = 60 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards return retry(
return retry( reraise=True, stop=stop_after_attempt(llm.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type(openai.error.Timeout) | retry_if_exception_type(openai.error.APIError) | retry_if_exception_type(openai.error.APIConnectionError) | retry_if_exception_type(openai.error.RateLimitError) | retry_if_exception_type(openai.error.ServiceUnavailableError) ), before_sleep=before_sleep_log(logger, logging.WARNING), ) async def acompletion_with_retry(llm: ChatOpenAI, **kwargs: Any) -> Any: """Use tenacity to retry the async completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator async def _completion_with_retry(**kwargs: Any) -> Any: # Use OpenAI's async api https://github.com/openai/openai-python#async-api return await llm.client.acreate(**kwargs) return await _completion_with_retry(**kwargs) def _convert_dict_to_message(_dict: dict) -> BaseMessage: role = _dict["role"] if role == "user": return HumanMessage(content=_dict["content"]) elif role == "assistant": return AIMessage(content=_dict["content"]) elif role == "system": return SystemMessage(content=_dict["content"]) else: return ChatMessage(content=_dict["content"], role=role) def _convert_message_to_dict(message: BaseMessage) -> dict: if isinstance(message, ChatMessage): message_dict = {"role": message.role, "content": message.content} elif isinstance(message, HumanMessage):
elif isinstance(message, HumanMessage): message_dict = {"role": "user", "content": message.content} elif isinstance(message, AIMessage): message_dict = {"role": "assistant", "content": message.content} elif isinstance(message, SystemMessage): message_dict = {"role": "system", "content": message.content} else: raise ValueError(f"Got unknown type {message}") if "name" in message.additional_kwargs: message_dict["name"] = message.additional_kwargs["name"] return message_dict [docs]class ChatOpenAI(BaseChatModel): """Wrapper around OpenAI Chat large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``OPENAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.chat_models import ChatOpenAI openai = ChatOpenAI(model_name="gpt-3.5-turbo") """ client: Any #: :meta private: model_name: str = Field(default="gpt-3.5-turbo", alias="model") """Model name to use.""" temperature: float = 0.7 """What sampling temperature to use.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" openai_api_key: Optional[str] = None """Base URL path for API requests, leave blank if not using a proxy or service emulator."""
leave blank if not using a proxy or service emulator.""" openai_api_base: Optional[str] = None openai_organization: Optional[str] = None # to support explicit proxy for OpenAI openai_proxy: Optional[str] = None request_timeout: Optional[Union[float, Tuple[float, float]]] = None """Timeout for requests to OpenAI completion API. Default is 600 seconds.""" max_retries: int = 6 """Maximum number of retries to make when generating.""" streaming: bool = False """Whether to stream the results or not.""" n: int = 1 """Number of chat completions to generate for each prompt.""" max_tokens: Optional[int] = None """Maximum number of tokens to generate.""" class Config: """Configuration for this pydantic object.""" extra = Extra.ignore allow_population_by_field_name = True @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = cls.all_required_field_names() extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") if field_name not in all_required_field_names: logger.warning( f"""WARNING! {field_name} is not default parameter. {field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) invalid_model_kwargs = all_required_field_names.intersection(extra.keys()) if invalid_model_kwargs:
invalid_model_kwargs = all_required_field_names.intersection(extra.keys()) if invalid_model_kwargs: raise ValueError( f"Parameters {invalid_model_kwargs} should be specified explicitly. " f"Instead they were passed in as part of `model_kwargs` parameter." ) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" openai_api_key = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY" ) openai_organization = get_from_dict_or_env( values, "openai_organization", "OPENAI_ORGANIZATION", default="", ) openai_api_base = get_from_dict_or_env( values, "openai_api_base", "OPENAI_API_BASE", default="", ) openai_proxy = get_from_dict_or_env( values, "openai_proxy", "OPENAI_PROXY", default="", ) try: import openai except ImportError: raise ValueError( "Could not import openai python package. " "Please install it with `pip install openai`." ) openai.api_key = openai_api_key if openai_organization: openai.organization = openai_organization if openai_api_base: openai.api_base = openai_api_base if openai_proxy: openai.proxy = {"http": openai_proxy, "https": openai_proxy} # type: ignore[assignment] # noqa: E501 try:
try: values["client"] = openai.ChatCompletion except AttributeError: raise ValueError( "`openai` has no `ChatCompletion` attribute, this is likely " "due to an old version of the openai package. Try upgrading it " "with `pip install --upgrade openai`." ) if values["n"] < 1: raise ValueError("n must be at least 1.") if values["n"] > 1 and values["streaming"]: raise ValueError("n must be 1 when streaming.") return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling OpenAI API.""" return { "model": self.model_name, "request_timeout": self.request_timeout, "max_tokens": self.max_tokens, "stream": self.streaming, "n": self.n, "temperature": self.temperature, **self.model_kwargs, } def _create_retry_decorator(self) -> Callable[[Any], Any]: import openai min_seconds = 1 max_seconds = 60 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards return retry( reraise=True, stop=stop_after_attempt(self.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type(openai.error.Timeout) | retry_if_exception_type(openai.error.APIError) | retry_if_exception_type(openai.error.APIConnectionError)
| retry_if_exception_type(openai.error.APIConnectionError) | retry_if_exception_type(openai.error.RateLimitError) | retry_if_exception_type(openai.error.ServiceUnavailableError) ), before_sleep=before_sleep_log(logger, logging.WARNING), ) [docs] def completion_with_retry(self, **kwargs: Any) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = self._create_retry_decorator() @retry_decorator def _completion_with_retry(**kwargs: Any) -> Any: return self.client.create(**kwargs) return _completion_with_retry(**kwargs) def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict: overall_token_usage: dict = {} for output in llm_outputs: if output is None: # Happens in streaming continue token_usage = output["token_usage"] for k, v in token_usage.items(): if k in overall_token_usage: overall_token_usage[k] += v else: overall_token_usage[k] = v return {"token_usage": overall_token_usage, "model_name": self.model_name} def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> ChatResult: message_dicts, params = self._create_message_dicts(messages, stop) if self.streaming: inner_completion = "" role = "assistant" params["stream"] = True for stream_resp in self.completion_with_retry( messages=message_dicts, **params ):
messages=message_dicts, **params ): role = stream_resp["choices"][0]["delta"].get("role", role) token = stream_resp["choices"][0]["delta"].get("content", "") inner_completion += token if run_manager: run_manager.on_llm_new_token(token) message = _convert_dict_to_message( {"content": inner_completion, "role": role} ) return ChatResult(generations=[ChatGeneration(message=message)]) response = self.completion_with_retry(messages=message_dicts, **params) return self._create_chat_result(response) def _create_message_dicts( self, messages: List[BaseMessage], stop: Optional[List[str]] ) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]: params: Dict[str, Any] = {**{"model": self.model_name}, **self._default_params} if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop message_dicts = [_convert_message_to_dict(m) for m in messages] return message_dicts, params def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult: generations = [] for res in response["choices"]: message = _convert_dict_to_message(res["message"]) gen = ChatGeneration(message=message) generations.append(gen) llm_output = {"token_usage": response["usage"], "model_name": self.model_name} return ChatResult(generations=generations, llm_output=llm_output) async def _agenerate( self, messages: List[BaseMessage],
async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, ) -> ChatResult: message_dicts, params = self._create_message_dicts(messages, stop) if self.streaming: inner_completion = "" role = "assistant" params["stream"] = True async for stream_resp in await acompletion_with_retry( self, messages=message_dicts, **params ): role = stream_resp["choices"][0]["delta"].get("role", role) token = stream_resp["choices"][0]["delta"].get("content", "") inner_completion += token if run_manager: await run_manager.on_llm_new_token(token) message = _convert_dict_to_message( {"content": inner_completion, "role": role} ) return ChatResult(generations=[ChatGeneration(message=message)]) else: response = await acompletion_with_retry( self, messages=message_dicts, **params ) return self._create_chat_result(response) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _llm_type(self) -> str: """Return type of chat model.""" return "openai-chat" def _get_encoding_model(self) -> Tuple[str, tiktoken.Encoding]: tiktoken_ = _import_tiktoken() model = self.model_name if model == "gpt-3.5-turbo":
if model == "gpt-3.5-turbo": # gpt-3.5-turbo may change over time. # Returning num tokens assuming gpt-3.5-turbo-0301. model = "gpt-3.5-turbo-0301" elif model == "gpt-4": # gpt-4 may change over time. # Returning num tokens assuming gpt-4-0314. model = "gpt-4-0314" # Returns the number of tokens used by a list of messages. try: encoding = tiktoken_.encoding_for_model(model) except KeyError: logger.warning("Warning: model not found. Using cl100k_base encoding.") model = "cl100k_base" encoding = tiktoken_.get_encoding(model) return model, encoding [docs] def get_token_ids(self, text: str) -> List[int]: """Get the tokens present in the text with tiktoken package.""" # tiktoken NOT supported for Python 3.7 or below if sys.version_info[1] <= 7: return super().get_token_ids(text) _, encoding_model = self._get_encoding_model() return encoding_model.encode(text) [docs] def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int: """Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package. Official documentation: https://github.com/openai/openai-cookbook/blob/ main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb""" if sys.version_info[1] <= 7:
if sys.version_info[1] <= 7: return super().get_num_tokens_from_messages(messages) model, encoding = self._get_encoding_model() if model == "gpt-3.5-turbo-0301": # every message follows <im_start>{role/name}\n{content}<im_end>\n tokens_per_message = 4 # if there's a name, the role is omitted tokens_per_name = -1 elif model == "gpt-4-0314": tokens_per_message = 3 tokens_per_name = 1 else: raise NotImplementedError( f"get_num_tokens_from_messages() is not presently implemented " f"for model {model}." "See https://github.com/openai/openai-python/blob/main/chatml.md for " "information on how messages are converted to tokens." ) num_tokens = 0 messages_dict = [_convert_message_to_dict(m) for m in messages] for message in messages_dict: num_tokens += tokens_per_message for key, value in message.items(): num_tokens += len(encoding.encode(value)) if key == "name": num_tokens += tokens_per_name # every reply is primed with <im_start>assistant num_tokens += 3 return num_tokens By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 28, 2023.
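A brief usage sketch for ChatOpenAI covering a plain call and the tiktoken-based token count. It assumes OPENAI_API_KEY is set and tiktoken is installed; the model name and messages are illustrative.

```python
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage

chat = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)

messages = [
    SystemMessage(content="You translate English to French."),
    HumanMessage(content="I love programming."),
]
response = chat(messages)  # AIMessage, e.g. content="J'adore la programmation."
print(response.content)

# Uses the tiktoken encoding plus the per-message overhead implemented above.
print(chat.get_num_tokens_from_messages(messages))
```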
Source code for langchain.chat_models.anthropic from typing import Any, Dict, List, Optional from pydantic import Extra from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.chat_models.base import BaseChatModel from langchain.llms.anthropic import _AnthropicCommon from langchain.schema import ( AIMessage, BaseMessage, ChatGeneration, ChatMessage, ChatResult, HumanMessage, SystemMessage, ) [docs]class ChatAnthropic(BaseChatModel, _AnthropicCommon): r"""Wrapper around Anthropic's large language model. To use, you should have the ``anthropic`` python package installed, and the environment variable ``ANTHROPIC_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Example: .. code-block:: python import anthropic from langchain.llms import Anthropic model = ChatAnthropic(model="<model_name>", anthropic_api_key="my-api-key") """ class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @property def _llm_type(self) -> str: """Return type of chat model.""" return "anthropic-chat" def _convert_one_message_to_text(self, message: BaseMessage) -> str: if isinstance(message, ChatMessage): message_text = f"\n\n{message.role.capitalize()}: {message.content}" elif isinstance(message, HumanMessage): message_text = f"{self.HUMAN_PROMPT} {message.content}" elif isinstance(message, AIMessage):
elif isinstance(message, AIMessage): message_text = f"{self.AI_PROMPT} {message.content}" elif isinstance(message, SystemMessage): message_text = f"{self.HUMAN_PROMPT} <admin>{message.content}</admin>" else: raise ValueError(f"Got unknown type {message}") return message_text def _convert_messages_to_text(self, messages: List[BaseMessage]) -> str: """Format a list of strings into a single string with necessary newlines. Args: messages (List[BaseMessage]): List of BaseMessage to combine. Returns: str: Combined string with necessary newlines. """ return "".join( self._convert_one_message_to_text(message) for message in messages ) def _convert_messages_to_prompt(self, messages: List[BaseMessage]) -> str: """Format a list of messages into a full prompt for the Anthropic model Args: messages (List[BaseMessage]): List of BaseMessage to combine. Returns: str: Combined string with necessary HUMAN_PROMPT and AI_PROMPT tags. """ if not self.AI_PROMPT: raise NameError("Please ensure the anthropic package is loaded") if not isinstance(messages[-1], AIMessage): messages.append(AIMessage(content="")) text = self._convert_messages_to_text(messages) return ( text.rstrip() ) # trim off the trailing ' ' that might come from the "Assistant: " def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> ChatResult:
) -> ChatResult: prompt = self._convert_messages_to_prompt(messages) params: Dict[str, Any] = {"prompt": prompt, **self._default_params} if stop: params["stop_sequences"] = stop if self.streaming: completion = "" stream_resp = self.client.completion_stream(**params) for data in stream_resp: delta = data["completion"][len(completion) :] completion = data["completion"] if run_manager: run_manager.on_llm_new_token( delta, ) else: response = self.client.completion(**params) completion = response["completion"] message = AIMessage(content=completion) return ChatResult(generations=[ChatGeneration(message=message)]) async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, ) -> ChatResult: prompt = self._convert_messages_to_prompt(messages) params: Dict[str, Any] = {"prompt": prompt, **self._default_params} if stop: params["stop_sequences"] = stop if self.streaming: completion = "" stream_resp = await self.client.acompletion_stream(**params) async for data in stream_resp: delta = data["completion"][len(completion) :] completion = data["completion"] if run_manager: await run_manager.on_llm_new_token( delta, ) else: response = await self.client.acompletion(**params) completion = response["completion"] message = AIMessage(content=completion)
completion = response["completion"] message = AIMessage(content=completion) return ChatResult(generations=[ChatGeneration(message=message)]) [docs] def get_num_tokens(self, text: str) -> int: """Calculate number of tokens.""" if not self.count_tokens: raise NameError("Please ensure the anthropic package is loaded") return self.count_tokens(text) By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 28, 2023.
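A usage sketch for ChatAnthropic; under the hood the message list is flattened into a single "\n\nHuman: ... \n\nAssistant:" prompt as shown above. It assumes ANTHROPIC_API_KEY is set, and the model name here is an assumption.

```python
from langchain.chat_models import ChatAnthropic
from langchain.schema import HumanMessage

chat = ChatAnthropic(model="claude-v1", temperature=0)
response = chat([HumanMessage(content="Summarize the plot of Hamlet in one sentence.")])
print(response.content)
```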
Source code for langchain.chat_models.google_palm """Wrapper around Google's PaLM Chat API.""" from __future__ import annotations import logging from typing import TYPE_CHECKING, Any, Callable, Dict, List, Mapping, Optional from pydantic import BaseModel, root_validator from tenacity import ( before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.chat_models.base import BaseChatModel from langchain.schema import ( AIMessage, BaseMessage, ChatGeneration, ChatMessage, ChatResult, HumanMessage, SystemMessage, ) from langchain.utils import get_from_dict_or_env if TYPE_CHECKING: import google.generativeai as genai logger = logging.getLogger(__name__) class ChatGooglePalmError(Exception): pass def _truncate_at_stop_tokens( text: str, stop: Optional[List[str]], ) -> str: """Truncates text at the earliest stop token found.""" if stop is None: return text for stop_token in stop: stop_token_idx = text.find(stop_token) if stop_token_idx != -1: text = text[:stop_token_idx] return text def _response_to_result( response: genai.types.ChatResponse, stop: Optional[List[str]], ) -> ChatResult: """Converts a PaLM API response into a LangChain ChatResult.""" if not response.candidates: raise ChatGooglePalmError("ChatResponse must have at least one candidate.")
raise ChatGooglePalmError("ChatResponse must have at least one candidate.") generations: List[ChatGeneration] = [] for candidate in response.candidates: author = candidate.get("author") if author is None: raise ChatGooglePalmError(f"ChatResponse must have an author: {candidate}") content = _truncate_at_stop_tokens(candidate.get("content", ""), stop) if content is None: raise ChatGooglePalmError(f"ChatResponse must have a content: {candidate}") if author == "ai": generations.append( ChatGeneration(text=content, message=AIMessage(content=content)) ) elif author == "human": generations.append( ChatGeneration( text=content, message=HumanMessage(content=content), ) ) else: generations.append( ChatGeneration( text=content, message=ChatMessage(role=author, content=content), ) ) return ChatResult(generations=generations) def _messages_to_prompt_dict( input_messages: List[BaseMessage], ) -> genai.types.MessagePromptDict: """Converts a list of LangChain messages into a PaLM API MessagePrompt structure.""" import google.generativeai as genai context: str = "" examples: List[genai.types.MessageDict] = [] messages: List[genai.types.MessageDict] = [] remaining = list(enumerate(input_messages)) while remaining: index, input_message = remaining.pop(0) if isinstance(input_message, SystemMessage): if index != 0: raise ChatGooglePalmError("System message must be first input message.")
raise ChatGooglePalmError("System message must be first input message.") context = input_message.content elif isinstance(input_message, HumanMessage) and input_message.example: if messages: raise ChatGooglePalmError( "Message examples must come before other messages." ) _, next_input_message = remaining.pop(0) if isinstance(next_input_message, AIMessage) and next_input_message.example: examples.extend( [ genai.types.MessageDict( author="human", content=input_message.content ), genai.types.MessageDict( author="ai", content=next_input_message.content ), ] ) else: raise ChatGooglePalmError( "Human example message must be immediately followed by an " " AI example response." ) elif isinstance(input_message, AIMessage) and input_message.example: raise ChatGooglePalmError( "AI example message must be immediately preceded by a Human " "example message." ) elif isinstance(input_message, AIMessage): messages.append( genai.types.MessageDict(author="ai", content=input_message.content) ) elif isinstance(input_message, HumanMessage): messages.append( genai.types.MessageDict(author="human", content=input_message.content) ) elif isinstance(input_message, ChatMessage): messages.append( genai.types.MessageDict( author=input_message.role, content=input_message.content ) ) else: raise ChatGooglePalmError( "Messages without an explicit role not supported by PaLM API." ) return genai.types.MessagePromptDict( context=context, examples=examples,
return genai.types.MessagePromptDict( context=context, examples=examples, messages=messages, ) def _create_retry_decorator() -> Callable[[Any], Any]: """Returns a tenacity retry decorator, preconfigured to handle PaLM exceptions""" import google.api_core.exceptions multiplier = 2 min_seconds = 1 max_seconds = 60 max_retries = 10 return retry( reraise=True, stop=stop_after_attempt(max_retries), wait=wait_exponential(multiplier=multiplier, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type(google.api_core.exceptions.ResourceExhausted) | retry_if_exception_type(google.api_core.exceptions.ServiceUnavailable) | retry_if_exception_type(google.api_core.exceptions.GoogleAPIError) ), before_sleep=before_sleep_log(logger, logging.WARNING), ) def chat_with_retry(llm: ChatGooglePalm, **kwargs: Any) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator() @retry_decorator def _chat_with_retry(**kwargs: Any) -> Any: return llm.client.chat(**kwargs) return _chat_with_retry(**kwargs) async def achat_with_retry(llm: ChatGooglePalm, **kwargs: Any) -> Any: """Use tenacity to retry the async completion call.""" retry_decorator = _create_retry_decorator() @retry_decorator async def _achat_with_retry(**kwargs: Any) -> Any: # Use OpenAI's async api https://github.com/openai/openai-python#async-api return await llm.client.chat_async(**kwargs)
return await llm.client.chat_async(**kwargs) return await _achat_with_retry(**kwargs) [docs]class ChatGooglePalm(BaseChatModel, BaseModel): """Wrapper around Google's PaLM Chat API. To use you must have the google.generativeai Python package installed and either: 1. The ``GOOGLE_API_KEY``` environment varaible set with your API key, or 2. Pass your API key using the google_api_key kwarg to the ChatGoogle constructor. Example: .. code-block:: python from langchain.chat_models import ChatGooglePalm chat = ChatGooglePalm() """ client: Any #: :meta private: model_name: str = "models/chat-bison-001" """Model name to use.""" google_api_key: Optional[str] = None temperature: Optional[float] = None """Run inference with this temperature. Must by in the closed interval [0.0, 1.0].""" top_p: Optional[float] = None """Decode using nucleus sampling: consider the smallest set of tokens whose probability sum is at least top_p. Must be in the closed interval [0.0, 1.0].""" top_k: Optional[int] = None """Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive.""" n: int = 1 """Number of chat completions to generate for each prompt. Note that the API may not return the full n completions if duplicates are generated.""" @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate api key, python package exists, temperature, top_p, and top_k."""
"""Validate api key, python package exists, temperature, top_p, and top_k.""" google_api_key = get_from_dict_or_env( values, "google_api_key", "GOOGLE_API_KEY" ) try: import google.generativeai as genai genai.configure(api_key=google_api_key) except ImportError: raise ChatGooglePalmError( "Could not import google.generativeai python package. " "Please install it with `pip install google-generativeai`" ) values["client"] = genai if values["temperature"] is not None and not 0 <= values["temperature"] <= 1: raise ValueError("temperature must be in the range [0.0, 1.0]") if values["top_p"] is not None and not 0 <= values["top_p"] <= 1: raise ValueError("top_p must be in the range [0.0, 1.0]") if values["top_k"] is not None and values["top_k"] <= 0: raise ValueError("top_k must be positive") return values def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> ChatResult: prompt = _messages_to_prompt_dict(messages) response: genai.types.ChatResponse = chat_with_retry( self, model=self.model_name, prompt=prompt, temperature=self.temperature, top_p=self.top_p, top_k=self.top_k, candidate_count=self.n, ) return _response_to_result(response, stop)
candidate_count=self.n, ) return _response_to_result(response, stop) async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, ) -> ChatResult: prompt = _messages_to_prompt_dict(messages) response: genai.types.ChatResponse = await achat_with_retry( self, model=self.model_name, prompt=prompt, temperature=self.temperature, top_p=self.top_p, top_k=self.top_k, candidate_count=self.n, ) return _response_to_result(response, stop) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { "model_name": self.model_name, "temperature": self.temperature, "top_p": self.top_p, "top_k": self.top_k, "n": self.n, } @property def _llm_type(self) -> str: return "google-palm-chat" By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 28, 2023.
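A usage sketch for ChatGooglePalm, which maps a leading SystemMessage to the PaLM "context" field and example message pairs to "examples". It assumes GOOGLE_API_KEY is set and the google-generativeai package is installed; the messages are illustrative.

```python
from langchain.chat_models import ChatGooglePalm
from langchain.schema import HumanMessage, SystemMessage

chat = ChatGooglePalm(temperature=0.2)

messages = [
    SystemMessage(content="You answer every question as a haiku."),  # becomes the PaLM "context"
    HumanMessage(content="Describe autumn."),
]
response = chat(messages)
print(response.content)
```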
Source code for langchain.chat_models.azure_openai """Azure OpenAI chat wrapper.""" from __future__ import annotations import logging from typing import Any, Dict, Mapping from pydantic import root_validator from langchain.chat_models.openai import ChatOpenAI from langchain.schema import ChatResult from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) [docs]class AzureChatOpenAI(ChatOpenAI): """Wrapper around Azure OpenAI Chat Completion API. To use this class you must have a deployed model on Azure OpenAI. Use `deployment_name` in the constructor to refer to the "Model deployment name" in the Azure portal. In addition, you should have the ``openai`` python package installed, and the following environment variables set or passed in constructor in lower case: - ``OPENAI_API_TYPE`` (default: ``azure``) - ``OPENAI_API_KEY`` - ``OPENAI_API_BASE`` - ``OPENAI_API_VERSION`` - ``OPENAI_PROXY`` For exmaple, if you have `gpt-35-turbo` deployed, with the deployment name `35-turbo-dev`, the constructor should look like: .. code-block:: python AzureChatOpenAI( deployment_name="35-turbo-dev", openai_api_version="2023-03-15-preview", ) Be aware the API version may change. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. """ deployment_name: str = "" openai_api_type: str = "azure" openai_api_base: str = ""
openai_api_base: str = "" openai_api_version: str = "" openai_api_key: str = "" openai_organization: str = "" openai_proxy: str = "" @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" openai_api_key = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY", ) openai_api_base = get_from_dict_or_env( values, "openai_api_base", "OPENAI_API_BASE", ) openai_api_version = get_from_dict_or_env( values, "openai_api_version", "OPENAI_API_VERSION", ) openai_api_type = get_from_dict_or_env( values, "openai_api_type", "OPENAI_API_TYPE", ) openai_organization = get_from_dict_or_env( values, "openai_organization", "OPENAI_ORGANIZATION", default="", ) openai_proxy = get_from_dict_or_env( values, "openai_proxy", "OPENAI_PROXY", default="", ) try: import openai openai.api_type = openai_api_type openai.api_base = openai_api_base openai.api_version = openai_api_version openai.api_key = openai_api_key if openai_organization: openai.organization = openai_organization if openai_proxy:
openai.organization = openai_organization if openai_proxy: openai.proxy = {"http": openai_proxy, "https": openai_proxy} # type: ignore[assignment] # noqa: E501 except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." ) try: values["client"] = openai.ChatCompletion except AttributeError: raise ValueError( "`openai` has no `ChatCompletion` attribute, this is likely " "due to an old version of the openai package. Try upgrading it " "with `pip install --upgrade openai`." ) if values["n"] < 1: raise ValueError("n must be at least 1.") if values["n"] > 1 and values["streaming"]: raise ValueError("n must be 1 when streaming.") return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling OpenAI API.""" return { **super()._default_params, "engine": self.deployment_name, } @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**self._default_params} @property def _llm_type(self) -> str: return "azure-openai-chat" def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult: for res in response["choices"]: if res.get("finish_reason", None) == "content_filter": raise ValueError(
if res.get("finish_reason", None) == "content_filter": raise ValueError( "Azure has not provided the response due to a content" " filter being triggered" ) return super()._create_chat_result(response) By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 28, 2023.
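A usage sketch for AzureChatOpenAI; the deployment name, endpoint, API version, and key below are placeholders for your own Azure OpenAI resource, not values taken from the source.

```python
from langchain.chat_models import AzureChatOpenAI
from langchain.schema import HumanMessage

chat = AzureChatOpenAI(
    deployment_name="35-turbo-dev",                               # your "Model deployment name"
    openai_api_base="https://<your-resource>.openai.azure.com/",
    openai_api_version="2023-03-15-preview",
    openai_api_key="<your-azure-openai-key>",
)
print(chat([HumanMessage(content="Hello!")]).content)
```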
Source code for langchain.chat_models.promptlayer_openai """PromptLayer wrapper.""" import datetime from typing import Any, List, Mapping, Optional from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.chat_models import ChatOpenAI from langchain.schema import BaseMessage, ChatResult [docs]class PromptLayerChatOpenAI(ChatOpenAI): """Wrapper around OpenAI Chat large language models and PromptLayer. To use, you should have the ``openai`` and ``promptlayer`` python package installed, and the environment variable ``OPENAI_API_KEY`` and ``PROMPTLAYER_API_KEY`` set with your openAI API key and promptlayer key respectively. All parameters that can be passed to the OpenAI LLM can also be passed here. The PromptLayerChatOpenAI adds to optional parameters: ``pl_tags``: List of strings to tag the request with. ``return_pl_id``: If True, the PromptLayer request ID will be returned in the ``generation_info`` field of the ``Generation`` object. Example: .. code-block:: python from langchain.chat_models import PromptLayerChatOpenAI openai = PromptLayerChatOpenAI(model_name="gpt-3.5-turbo") """ pl_tags: Optional[List[str]] return_pl_id: Optional[bool] = False def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> ChatResult:
) -> ChatResult: """Call ChatOpenAI generate and then call PromptLayer API to log the request.""" from promptlayer.utils import get_api_key, promptlayer_api_request request_start_time = datetime.datetime.now().timestamp() generated_responses = super()._generate(messages, stop, run_manager) request_end_time = datetime.datetime.now().timestamp() message_dicts, params = super()._create_message_dicts(messages, stop) for i, generation in enumerate(generated_responses.generations): response_dict, params = super()._create_message_dicts( [generation.message], stop ) pl_request_id = promptlayer_api_request( "langchain.PromptLayerChatOpenAI", "langchain", message_dicts, params, self.pl_tags, response_dict, request_start_time, request_end_time, get_api_key(), return_pl_id=self.return_pl_id, ) if self.return_pl_id: if generation.generation_info is None or not isinstance( generation.generation_info, dict ): generation.generation_info = {} generation.generation_info["pl_request_id"] = pl_request_id return generated_responses async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, ) -> ChatResult: """Call ChatOpenAI agenerate and then call PromptLayer to log.""" from promptlayer.utils import get_api_key, promptlayer_api_request_async request_start_time = datetime.datetime.now().timestamp() generated_responses = await super()._agenerate(messages, stop, run_manager)
generated_responses = await super()._agenerate(messages, stop, run_manager) request_end_time = datetime.datetime.now().timestamp() message_dicts, params = super()._create_message_dicts(messages, stop) for i, generation in enumerate(generated_responses.generations): response_dict, params = super()._create_message_dicts( [generation.message], stop ) pl_request_id = await promptlayer_api_request_async( "langchain.PromptLayerChatOpenAI.async", "langchain", message_dicts, params, self.pl_tags, response_dict, request_start_time, request_end_time, get_api_key(), return_pl_id=self.return_pl_id, ) if self.return_pl_id: if generation.generation_info is None or not isinstance( generation.generation_info, dict ): generation.generation_info = {} generation.generation_info["pl_request_id"] = pl_request_id return generated_responses @property def _llm_type(self) -> str: return "promptlayer-openai-chat" @property def _identifying_params(self) -> Mapping[str, Any]: return { **super()._identifying_params, "pl_tags": self.pl_tags, "return_pl_id": self.return_pl_id, } By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 28, 2023.
Concepts#
These are concepts and terminology commonly used when developing LLM applications. It contains references to external papers or sources where the concept was first introduced, as well as to places in LangChain where the concept is used.

Chain of Thought#
Chain of Thought (CoT) is a prompting technique used to encourage the model to generate a series of intermediate reasoning steps. A less formal way to induce this behavior is to include “Let’s think step-by-step” in the prompt.
Chain-of-Thought Paper
Step-by-Step Paper

Action Plan Generation#
Action Plan Generation is a prompting technique that uses a language model to generate actions to take. The results of these actions can then be fed back into the language model to generate a subsequent action.
WebGPT Paper
SayCan Paper

ReAct#
ReAct is a prompting technique that combines Chain-of-Thought prompting with action plan generation. This induces the model to think about what action to take, then take it.
Paper
LangChain Example

Self-ask#
Self-ask is a prompting method that builds on top of chain-of-thought prompting. In this method, the model explicitly asks itself follow-up questions, which are then answered by an external search engine.
Paper
LangChain Example

Prompt Chaining#
Prompt Chaining is combining multiple LLM calls, with the output of one step being the input to the next.
PromptChainer Paper
Language Model Cascades
ICE Primer Book
Socratic Models

Memetic Proxy#
Memetic Proxy is encouraging the LLM to respond in a certain way by framing the discussion in a context that the model knows of and that will result in that type of response. For example, as a conversation between a student and a teacher.
Paper

Self Consistency#
Self Consistency is a decoding strategy that samples a diverse set of reasoning paths and then selects the most consistent answer. It is most effective when combined with chain-of-thought prompting.
Paper

Inception#
Inception is also called First Person Instruction. It encourages the model to think a certain way by including the start of the model’s response in the prompt.
Example

MemPrompt#
MemPrompt maintains a memory of errors and user feedback, and uses them to prevent repetition of mistakes.
Paper
Quickstart Guide#
This tutorial gives you a quick walkthrough of building an end-to-end language model application with LangChain.

Installation#
To get started, install LangChain with the following command:
pip install langchain
# or
conda install langchain -c conda-forge

Environment Setup#
Using LangChain will usually require integrations with one or more model providers, data stores, APIs, etc.
For this example, we will be using OpenAI’s APIs, so we will first need to install their SDK:
pip install openai
We will then need to set the environment variable in the terminal.
export OPENAI_API_KEY="..."
Alternatively, you could do this from inside the Jupyter notebook (or Python script):
import os
os.environ["OPENAI_API_KEY"] = "..."
If you want to set the API key dynamically, you can use the openai_api_key parameter when initializing the OpenAI class, for instance with each user’s API key.
from langchain.llms import OpenAI
llm = OpenAI(openai_api_key="OPENAI_API_KEY")

Building a Language Model Application: LLMs#
Now that we have installed LangChain and set up our environment, we can start building our language model application.
LangChain provides many modules that can be used to build language model applications. Modules can be combined to create more complex applications, or be used individually for simple applications. LLMs: Get predictions from a language model# The most basic building block of LangChain is calling an LLM on some input. Let’s walk through a simple example of how to do this. For this purpose, let’s pretend we are building a service that generates a company name based on what the company makes. In order to do this, we first need to import the LLM wrapper. from langchain.llms import OpenAI We can then initialize the wrapper with any arguments. In this example, we probably want the outputs to be MORE random, so we’ll initialize it with a HIGH temperature. llm = OpenAI(temperature=0.9) We can now call it on some input! text = "What would be a good company name for a company that makes colorful socks?" print(llm(text)) Feetful of Fun For more details on how to use LLMs within LangChain, see the LLM getting started guide. Prompt Templates: Manage prompts for LLMs# Calling an LLM is a great first step, but it’s just the beginning. Normally when you use an LLM in an application, you are not sending user input directly to the LLM. Instead, you are probably taking user input and constructing a prompt, and then sending that to the LLM. For example, in the previous example, the text we passed in was hardcoded to ask for a name for a company that made colorful socks. In this imaginary service, what we would want to do is take only the user input describing what the company does, and then format the prompt with that information. This is easy to do with LangChain! First lets define the prompt template:
### Prompt Templates: Manage prompts for LLMs

Calling an LLM is a great first step, but it's just the beginning. Normally when you use an LLM in an application, you are not sending user input directly to the LLM. Instead, you are probably taking user input, constructing a prompt from it, and then sending that to the LLM.

For example, in the previous example, the text we passed in was hardcoded to ask for a name for a company that made colorful socks. In this imaginary service, what we would want to do is take only the user input describing what the company does, and then format the prompt with that information.

This is easy to do with LangChain! First, let's define the prompt template:

    from langchain.prompts import PromptTemplate

    prompt = PromptTemplate(
        input_variables=["product"],
        template="What is a good name for a company that makes {product}?",
    )

Let's now see how this works! We can call the .format method to format it.

    print(prompt.format(product="colorful socks"))

    What is a good name for a company that makes colorful socks?

For more details, check out the getting started guide for prompts.

### Chains: Combine LLMs and prompts in multi-step workflows

Up until now, we've worked with the PromptTemplate and LLM primitives by themselves. But of course, a real application is not just one primitive, but rather a combination of them.

A chain in LangChain is made up of links, which can be either primitives like LLMs or other chains.

The most fundamental type of chain is an LLMChain, which consists of a PromptTemplate and an LLM.

Extending the previous example, we can construct an LLMChain which takes user input, formats it with a PromptTemplate, and then passes the formatted prompt to an LLM.

    from langchain.prompts import PromptTemplate
    from langchain.llms import OpenAI

    llm = OpenAI(temperature=0.9)
    prompt = PromptTemplate(
        input_variables=["product"],
        template="What is a good name for a company that makes {product}?",
    )

We can now create a very simple chain that will take user input, format the prompt with it, and then send it to the LLM:

    from langchain.chains import LLMChain
    chain = LLMChain(llm=llm, prompt=prompt)

Now we can run that chain, only specifying the product!

    chain.run("colorful socks")
    # -> '\n\nSocktastic!'

There we go! There's the first chain: an LLM Chain. This is one of the simpler types of chains, but understanding how it works will set you up well for working with more complex chains. For more details, check out the getting started guide for chains.
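The chain above is a single step. To illustrate the multi-step workflows mentioned in this section's title, here is a hedged sketch that feeds the generated company name into a second prompt using a SimpleSequentialChain. The slogan prompt and variable names are assumptions made for this example, not something the original guide defines.

```python
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain, SimpleSequentialChain

llm = OpenAI(temperature=0.9)

# Step 1: propose a company name for a given product.
name_chain = LLMChain(
    llm=llm,
    prompt=PromptTemplate(
        input_variables=["product"],
        template="What is a good name for a company that makes {product}?",
    ),
)

# Step 2: write a slogan for whatever name the first step produced.
slogan_chain = LLMChain(
    llm=llm,
    prompt=PromptTemplate(
        input_variables=["company_name"],
        template="Write a catchy slogan for the company {company_name}.",
    ),
)

# SimpleSequentialChain pipes the output of each chain into the next one.
overall_chain = SimpleSequentialChain(chains=[name_chain, slogan_chain], verbose=True)
overall_chain.run("colorful socks")
```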
### Agents: Dynamically Call Chains Based on User Input

So far, the chains we've looked at run in a predetermined order. Agents do not: they use an LLM to determine which actions to take and in what order. An action can be either using a tool and observing its output, or returning to the user.

When used correctly, agents can be extremely powerful. In this tutorial, we show you how to easily use agents through the simplest, highest-level API.

In order to load agents, you should understand the following concepts:

- Tool: A function that performs a specific duty. This can be things like: Google Search, database lookup, Python REPL, other chains. The interface for a tool is currently a function that is expected to take a string as input and return a string as output (see the minimal custom-tool sketch after this list).
- LLM: The language model powering the agent.
- Agent: The agent to use. This should be a string that references a supported agent class. Because this notebook focuses on the simplest, highest-level API, this only covers using the standard supported agents. If you want to implement a custom agent, see the documentation for custom agents (coming soon).

Agents: For a list of supported agents and their specifications, see here.

Tools: For a list of predefined tools and their specifications, see here.
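To make the string-in, string-out tool interface concrete, here is a minimal sketch of wrapping an ordinary Python function as a Tool. The function, its name, and its description are invented for illustration; the walkthrough below only uses the predefined serpapi and llm-math tools.

```python
from langchain.agents import Tool

def word_count(text: str) -> str:
    # A tool receives a string and must return a string.
    return str(len(text.split()))

word_count_tool = Tool(
    name="Word Counter",
    func=word_count,
    description="Useful for counting how many words are in a piece of text.",
)

# A tool defined this way could be appended to the `tools` list that is
# passed to initialize_agent in the example below.
print(word_count_tool.run("how many words are in this sentence"))
```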
For this example, you will also need to install the SerpAPI Python package.

    pip install google-search-results

And set the appropriate environment variables.

    import os
    os.environ["SERPAPI_API_KEY"] = "..."

Now we can get started!

    from langchain.agents import load_tools
    from langchain.agents import initialize_agent
    from langchain.agents import AgentType
    from langchain.llms import OpenAI

    # First, let's load the language model we're going to use to control the agent.
    llm = OpenAI(temperature=0)

    # Next, let's load some tools to use. Note that the `llm-math` tool uses an LLM, so we need to pass that in.
    tools = load_tools(["serpapi", "llm-math"], llm=llm)

    # Finally, let's initialize an agent with the tools, the language model, and the type of agent we want to use.
    agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)

    # Now let's test it out!
    agent.run("What was the high temperature in SF yesterday in Fahrenheit? What is that number raised to the .023 power?")

    > Entering new AgentExecutor chain...
    I need to find the temperature first, then use the calculator to raise it to the .023 power.
    Action: Search
    Action Input: "High temperature in SF yesterday"
    Observation: San Francisco Temperature Yesterday. Maximum temperature yesterday: 57 °F (at 1:56 pm) Minimum temperature yesterday: 49 °F (at 1:56 am) Average temperature ...
    Thought: I now have the temperature, so I can use the calculator to raise it to the .023 power.
    Action: Calculator
    Action Input: 57^.023
    Observation: Answer: 1.0974509573251117
    Thought: I now know the final answer
    Final Answer: The high temperature in SF yesterday in Fahrenheit raised to the .023 power is 1.0974509573251117.

    > Finished chain.

### Memory: Add State to Chains and Agents

So far, all the chains and agents we've gone through have been stateless. But often, you may want a chain or agent to have some concept of "memory" so that it can remember information about its previous interactions. The clearest and simplest example of this is when designing a chatbot: you want it to remember previous messages so it can use that context to have a better conversation. This would be a type of "short-term memory". On the more complex side, you could imagine a chain/agent remembering key pieces of information over time, which would be a form of "long-term memory". For more concrete ideas on the latter, see this awesome paper.

LangChain provides several specially created chains just for this purpose. This notebook walks through using one of those chains (the ConversationChain) with two different types of memory.

By default, the ConversationChain has a simple type of memory that remembers all previous inputs/outputs and adds them to the context that is passed. Let's take a look at using this chain (setting verbose=True so we can see the prompt).

    from langchain import OpenAI, ConversationChain

    llm = OpenAI(temperature=0)
    conversation = ConversationChain(llm=llm, verbose=True)

    output = conversation.predict(input="Hi there!")
    print(output)

    > Entering new chain...
    Prompt after formatting:
    The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.

    Current conversation:

    Human: Hi there!
    AI:
    > Finished chain.
    ' Hello! How are you today?'

    output = conversation.predict(input="I'm doing well! Just having a conversation with an AI.")
    print(output)

    > Entering new chain...
    Prompt after formatting:
    The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.

    Current conversation:

    Human: Hi there!
    AI:  Hello! How are you today?
    Human: I'm doing well! Just having a conversation with an AI.
    AI:

    > Finished chain.
    " That's great! What would you like to talk about?"
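The ConversationChain above relies on its default buffer memory, which keeps every previous exchange. As a sketch of a second memory type, the snippet below swaps in a windowed buffer so only the most recent exchanges are retained; the k value and the example inputs are arbitrary choices for illustration.

```python
from langchain import OpenAI, ConversationChain
from langchain.memory import ConversationBufferWindowMemory

llm = OpenAI(temperature=0)

# Keep only the two most recent exchanges in the prompt context.
windowed_conversation = ConversationChain(
    llm=llm,
    memory=ConversationBufferWindowMemory(k=2),
    verbose=True,
)

windowed_conversation.predict(input="Hi there!")
windowed_conversation.predict(input="I'm planning a trip to Japan.")
windowed_conversation.predict(input="What was the first thing I said?")
```

With a small window, earlier turns eventually fall out of the prompt, which keeps token usage bounded at the cost of forgetting older context.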
## Building a Language Model Application: Chat Models

Similarly, you can use chat models instead of LLMs. Chat models are a variation on language models. While chat models use language models under the hood, the interface they expose is a bit different: rather than exposing a "text in, text out" API, they expose an interface where "chat messages" are the inputs and outputs.

Chat model APIs are fairly new, so we are still figuring out the correct abstractions.

### Get Message Completions from a Chat Model

You can get chat completions by passing one or more messages to the chat model. The response will be a message. The types of messages currently supported in LangChain are AIMessage, HumanMessage, SystemMessage, and ChatMessage (ChatMessage takes in an arbitrary role parameter). Most of the time, you'll just be dealing with HumanMessage, AIMessage, and SystemMessage.

    from langchain.chat_models import ChatOpenAI
    from langchain.schema import (
        AIMessage,
        HumanMessage,
        SystemMessage
    )

    chat = ChatOpenAI(temperature=0)

You can get completions by passing in a single message.

    chat([HumanMessage(content="Translate this sentence from English to French. I love programming.")])
    # -> AIMessage(content="J'aime programmer.", additional_kwargs={})

You can also pass in multiple messages for OpenAI's gpt-3.5-turbo and gpt-4 models.

    messages = [
        SystemMessage(content="You are a helpful assistant that translates English to French."),
        HumanMessage(content="I love programming.")
    ]
    chat(messages)
    # -> AIMessage(content="J'aime programmer.", additional_kwargs={})

You can go one step further and generate completions for multiple sets of messages using generate. This returns an LLMResult with an additional message parameter:

    batch_messages = [
        [
            SystemMessage(content="You are a helpful assistant that translates English to French."),
            HumanMessage(content="I love programming.")
        ],
        [
            SystemMessage(content="You are a helpful assistant that translates English to French."),
            HumanMessage(content="I love artificial intelligence.")
        ],
    ]
    result = chat.generate(batch_messages)
    result
    # -> LLMResult(generations=[[ChatGeneration(text="J'aime programmer.", generation_info=None, message=AIMessage(content="J'aime programmer.", additional_kwargs={}))], [ChatGeneration(text="J'aime l'intelligence artificielle.", generation_info=None, message=AIMessage(content="J'aime l'intelligence artificielle.", additional_kwargs={}))]], llm_output={'token_usage': {'prompt_tokens': 57, 'completion_tokens': 20, 'total_tokens': 77}})
You can recover things like token usage from this LLMResult:

    result.llm_output['token_usage']
    # -> {'prompt_tokens': 57, 'completion_tokens': 20, 'total_tokens': 77}

### Chat Prompt Templates

Similar to LLMs, you can make use of templating by using a MessagePromptTemplate. You can build a ChatPromptTemplate from one or more MessagePromptTemplates. You can use ChatPromptTemplate's format_prompt method, which returns a PromptValue that you can convert to a string or to Message objects, depending on whether you want to use the formatted value as input to an LLM or a chat model.

For convenience, there is a from_template method exposed on the template. If you were to use this template, this is what it would look like:

    from langchain.chat_models import ChatOpenAI
    from langchain.prompts.chat import (
        ChatPromptTemplate,
        SystemMessagePromptTemplate,
        HumanMessagePromptTemplate,
    )

    chat = ChatOpenAI(temperature=0)

    template = "You are a helpful assistant that translates {input_language} to {output_language}."
    system_message_prompt = SystemMessagePromptTemplate.from_template(template)
    human_template = "{text}"
    human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)

    chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])

    # get a chat completion from the formatted messages
    chat(chat_prompt.format_prompt(input_language="English", output_language="French", text="I love programming.").to_messages())
    # -> AIMessage(content="J'aime programmer.", additional_kwargs={})
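To make the PromptValue distinction concrete, here is a small sketch that renders the same formatted prompt both ways: as a single string for a plain LLM and as message objects for a chat model. It reuses the chat_prompt defined above; only the print calls are new.

```python
prompt_value = chat_prompt.format_prompt(
    input_language="English", output_language="French", text="I love programming."
)

# As one string, suitable for a "text in, text out" LLM.
print(prompt_value.to_string())

# As a list of SystemMessage/HumanMessage objects, suitable for a chat model.
print(prompt_value.to_messages())
```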
### Chains with Chat Models

The LLMChain discussed in the above section can be used with chat models as well:

    from langchain.chat_models import ChatOpenAI
    from langchain import LLMChain
    from langchain.prompts.chat import (
        ChatPromptTemplate,
        SystemMessagePromptTemplate,
        HumanMessagePromptTemplate,
    )

    chat = ChatOpenAI(temperature=0)

    template = "You are a helpful assistant that translates {input_language} to {output_language}."
    system_message_prompt = SystemMessagePromptTemplate.from_template(template)
    human_template = "{text}"
    human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
    chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])

    chain = LLMChain(llm=chat, prompt=chat_prompt)
    chain.run(input_language="English", output_language="French", text="I love programming.")
    # -> "J'aime programmer."

### Agents with Chat Models

Agents can also be used with chat models. You can initialize one using AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION as the agent type.

    from langchain.agents import load_tools
    from langchain.agents import initialize_agent
    from langchain.agents import AgentType
    from langchain.chat_models import ChatOpenAI
    from langchain.llms import OpenAI

    # First, let's load the language model we're going to use to control the agent.
    chat = ChatOpenAI(temperature=0)

    # Next, let's load some tools to use. Note that the `llm-math` tool uses an LLM, so we need to pass that in.
    llm = OpenAI(temperature=0)
    tools = load_tools(["serpapi", "llm-math"], llm=llm)

    # Finally, let's initialize an agent with the tools, the language model, and the type of agent we want to use.
    agent = initialize_agent(tools, chat, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True)

    # Now let's test it out!
    agent.run("Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?")

    > Entering new AgentExecutor chain...
    Thought: I need to use a search engine to find Olivia Wilde's boyfriend and a calculator to raise his age to the 0.23 power.
    Action:
    {
      "action": "Search",
      "action_input": "Olivia Wilde boyfriend"
    }
    Observation: Sudeikis and Wilde's relationship ended in November 2020. Wilde was publicly served with court documents regarding child custody while she was presenting Don't Worry Darling at CinemaCon 2022. In January 2021, Wilde began dating singer Harry Styles after meeting during the filming of Don't Worry Darling.
    Thought: I need to use a search engine to find Harry Styles' current age.
    Action:
    {
      "action": "Search",
      "action_input": "Harry Styles age"
    }
    Observation: 29 years
    Thought: Now I need to calculate 29 raised to the 0.23 power.
    Action:
    {
      "action": "Calculator",
      "action_input": "29^0.23"
    }
    Observation: Answer: 2.169459462491557
    Thought: I now know the final answer.
    Final Answer: 2.169459462491557

    > Finished chain.

    '2.169459462491557'
### Memory: Add State to Chains and Agents

You can use Memory with chains and agents initialized with chat models. The main difference between this and Memory for LLMs is that rather than trying to condense all previous messages into a string, we can keep them as their own unique memory objects.

    from langchain.prompts import (
        ChatPromptTemplate,
        MessagesPlaceholder,
        SystemMessagePromptTemplate,
        HumanMessagePromptTemplate
    )
    from langchain.chains import ConversationChain
    from langchain.chat_models import ChatOpenAI
    from langchain.memory import ConversationBufferMemory

    prompt = ChatPromptTemplate.from_messages([
        SystemMessagePromptTemplate.from_template("The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know."),
        MessagesPlaceholder(variable_name="history"),
        HumanMessagePromptTemplate.from_template("{input}")
    ])

    llm = ChatOpenAI(temperature=0)
    memory = ConversationBufferMemory(return_messages=True)
    conversation = ConversationChain(memory=memory, prompt=prompt, llm=llm)

    conversation.predict(input="Hi there!")
    # -> 'Hello! How can I assist you today?'

    conversation.predict(input="I'm doing well! Just having a conversation with an AI.")
    # -> "That sounds like fun! I'm happy to chat with you. Is there anything specific you'd like to talk about?"
    conversation.predict(input="Tell me about yourself.")
    # -> "Sure! I am an AI language model created by OpenAI. I was trained on a large dataset of text from the internet, which allows me to understand and generate human-like language. I can answer questions, provide information, and even have conversations like this one. Is there anything else you'd like to know about me?"
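Because the memory was created with return_messages=True, the history is kept as message objects rather than being condensed into one string, and you can inspect it directly. This is a minimal illustrative sketch that reuses the memory object from the example above, assuming the standard ConversationBufferMemory attributes.

```python
# The history is stored as a list of HumanMessage/AIMessage objects,
# not as a single condensed string.
for message in memory.chat_memory.messages:
    print(type(message).__name__, ":", message.content)

# load_memory_variables returns the same messages under the "history" key,
# ready to be injected into the MessagesPlaceholder defined earlier.
print(memory.load_memory_variables({})["history"])
```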
https://python.langchain.com/en/latest/getting_started/getting_started.html
# Tutorials

This is a collection of LangChain tutorials, mostly on YouTube.

The ⛓ icon marks a new video [last update 2023-05-15]

## LangChain AI Handbook by James Briggs and Francisco Ingham

## LangChain Tutorials by Edrick

- ⛓ LangChain, Chroma DB, OpenAI Beginner Guide | ChatGPT with your PDF
- LangChain Crash Course: Build an AutoGPT app in 25 minutes by Nicholas Renotte
- LangChain Crash Course - Build apps with language models by Patrick Loeber
- LangChain Explained in 13 Minutes | QuickStart Tutorial for Beginners by Rabbitmetrics

## LangChain for Gen AI and LLMs by James Briggs

- #1 Getting Started with GPT-3 vs. Open Source LLMs
- #2 Prompt Templates for GPT 3.5 and other LLMs
- #3 LLM Chains using GPT 3.5 and other LLMs
- #4 Chatbot Memory for Chat-GPT, Davinci + other LLMs
- #5 Chat with OpenAI in LangChain
- ⛓ #6 Fixing LLM Hallucinations with Retrieval Augmentation in LangChain
- ⛓ #7 LangChain Agents Deep Dive with GPT 3.5
- ⛓ #8 Create Custom Tools for Chatbots in LangChain
- ⛓ #9 Build Conversational Agents with Vector DBs

## LangChain 101 by Data Independent

- What Is LangChain? - LangChain + ChatGPT Overview
- Quickstart Guide
- Beginner Guide To 7 Essential Concepts
- OpenAI + Wolfram Alpha
- Ask Questions On Your Custom (or Private) Files
- Connect Google Drive Files To OpenAI
- YouTube Transcripts + OpenAI
- Question A 300 Page Book (w/ OpenAI + Pinecone)
- Workaround OpenAI's Token Limit With Chain Types
- Build Your Own OpenAI + LangChain Web App in 23 Minutes
- Working With The New ChatGPT API
- OpenAI + LangChain Wrote Me 100 Custom Sales Emails
- Structured Output From OpenAI (Clean Dirty Data)
- Connect OpenAI To +5,000 Tools (LangChain + Zapier)
- Use LLMs To Extract Data From Text (Expert Mode)
- ⛓ Extract Insights From Interview Transcripts Using LLMs
- ⛓ 5 Levels Of LLM Summarizing: Novice to Expert

## LangChain How to and guides by Sam Witteveen

- LangChain Basics - LLMs & PromptTemplates with Colab
- LangChain Basics - Tools and Chains
- ChatGPT API Announcement & Code Walkthrough with LangChain
- Conversations with Memory (explanation & code walkthrough)
- Chat with Flan20B
- Using Hugging Face Models locally (code walkthrough)
- PAL: Program-aided Language Models with LangChain code
- Building a Summarization System with LangChain and GPT-3 - Part 1
- Building a Summarization System with LangChain and GPT-3 - Part 2
- Microsoft's Visual ChatGPT using LangChain
- LangChain Agents - Joining Tools and Chains with Decisions
- Comparing LLMs with LangChain
- Using Constitutional AI in LangChain
- Talking to Alpaca with LangChain - Creating an Alpaca Chatbot
- Talk to your CSV & Excel with LangChain
- BabyAGI: Discover the Power of Task-Driven Autonomous Agents!
- Improve your BabyAGI with LangChain
- ⛓ Master PDF Chat with LangChain - Your essential guide to queries on documents
- ⛓ Using LangChain with DuckDuckGO Wikipedia & PythonREPL Tools
https://python.langchain.com/en/latest/getting_started/tutorials.html