Columns:
  repo_id             string (length 15–86)
  file_path           string (length 28–180)
  content             string (length 1–1.75M)
  __index_level_0__   int64 (values 0–0)
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/features/__init__.py
# flake8: noqa

__all__ = [
    "Audio",
    "Array2D",
    "Array3D",
    "Array4D",
    "Array5D",
    "ClassLabel",
    "Features",
    "Sequence",
    "Value",
    "Image",
    "Translation",
    "TranslationVariableLanguages",
]

from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
0
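The `features/__init__.py` row above only re-exports the public feature types. As a quick orientation, here is a minimal sketch (not part of the source file) of how those exported classes are typically combined into a `Features` mapping; the column names and label names are made up for illustration.

```python
from datasets import Audio, ClassLabel, Features, Sequence, Value

# Hypothetical schema built only from classes listed in __all__ above.
features = Features(
    {
        "audio": Audio(sampling_rate=16_000),
        "label": ClassLabel(names=["music", "speech"]),
        "tags": Sequence(Value("string")),
    }
)
print(features)
```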
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/features/audio.py
import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.download_config import DownloadConfig from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType _ffmpeg_warned, _librosa_warned, _audioread_warned = False, False, False @dataclass class Audio: """Audio [`Feature`] to extract audio data from an audio file. Input: The Audio feature accepts as input: - A `str`: Absolute path to the audio file (i.e. random access is allowed). - A `dict` with the keys: - `path`: String with relative path of the audio file to the archive file. - `bytes`: Bytes content of the audio file. This is useful for archived files with sequential access. - A `dict` with the keys: - `path`: String with relative path of the audio file to the archive file. - `array`: Array containing the audio sample - `sampling_rate`: Integer corresponding to the sampling rate of the audio sample. This is useful for archived files with sequential access. Args: sampling_rate (`int`, *optional*): Target sampling rate. If `None`, the native sampling rate is used. mono (`bool`, defaults to `True`): Whether to convert the audio signal to mono by averaging samples across channels. decode (`bool`, defaults to `True`): Whether to decode the audio data. If `False`, returns the underlying dictionary in the format `{"path": audio_path, "bytes": audio_bytes}`. Example: ```py >>> from datasets import load_dataset, Audio >>> ds = load_dataset("PolyAI/minds14", name="en-US", split="train") >>> ds = ds.cast_column("audio", Audio(sampling_rate=16000)) >>> ds[0]["audio"] {'array': array([ 2.3443763e-05, 2.1729663e-04, 2.2145823e-04, ..., 3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', 'sampling_rate': 16000} ``` """ sampling_rate: Optional[int] = None mono: bool = True decode: bool = True id: Optional[str] = None # Automatically constructed dtype: ClassVar[str] = "dict" pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()}) _type: str = field(default="Audio", init=False, repr=False) def __call__(self): return self.pa_type def encode_example(self, value: Union[str, bytes, dict]) -> dict: """Encode example into a format for Arrow. Args: value (`str` or `dict`): Data passed as input to Audio feature. Returns: `dict` """ try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. 
except ImportError as err: raise ImportError("To support encoding audio data, please install 'soundfile'.") from err if isinstance(value, str): return {"bytes": None, "path": value} elif isinstance(value, bytes): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes buffer = BytesIO() sf.write(buffer, value["array"], value["sampling_rate"], format="wav") return {"bytes": buffer.getvalue(), "path": None} elif value.get("path") is not None and os.path.isfile(value["path"]): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith("pcm"): # "PCM" only has raw audio bytes if value.get("sampling_rate") is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object") if value.get("bytes"): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767 else: bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767 buffer = BytesIO(bytes()) sf.write(buffer, bytes_value, value["sampling_rate"], format="wav") return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get("path")} elif value.get("bytes") is not None or value.get("path") is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get("bytes"), "path": value.get("path")} else: raise ValueError( f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}." ) def decode_example( self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None ) -> dict: """Decode example audio file into audio data. Args: value (`dict`): A dictionary with keys: - `path`: String with relative audio file path. - `bytes`: Bytes of the audio file. token_per_repo_id (`dict`, *optional*): To access and decode audio files from private repositories on the Hub, you can pass a dictionary repo_id (`str`) -> token (`bool` or `str`) Returns: `dict` """ if not self.decode: raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.") path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None) if path is None and file is None: raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.") try: import librosa import soundfile as sf except ImportError as err: raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err audio_format = xsplitext(path)[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, " 'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, " 'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. 
' ) if file is None: token_per_repo_id = token_per_repo_id or {} source_url = path.split("::")[-1] try: repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"] token = token_per_repo_id[repo_id] except (ValueError, KeyError): token = None download_config = DownloadConfig(token=token) with xopen(path, "rb", download_config=download_config) as f: array, sampling_rate = sf.read(f) else: array, sampling_rate = sf.read(file) array = array.T if self.mono: array = librosa.to_mono(array) if self.sampling_rate and self.sampling_rate != sampling_rate: array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate) sampling_rate = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]: """If in the decodable state, raise an error, otherwise flatten the feature into a dictionary.""" from .features import Value if self.decode: raise ValueError("Cannot flatten a decoded Audio feature.") return { "bytes": Value("binary"), "path": Value("string"), } def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray: """Cast an Arrow array to the Audio arrow storage type. The Arrow types that can be converted to the Audio pyarrow storage type are: - `pa.string()` - it must contain the "path" data - `pa.binary()` - it must contain the audio bytes - `pa.struct({"bytes": pa.binary()})` - `pa.struct({"path": pa.string()})` - `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter Args: storage (`Union[pa.StringArray, pa.StructArray]`): PyArrow array to cast. Returns: `pa.StructArray`: Array in the Audio arrow storage type, that is `pa.struct({"bytes": pa.binary(), "path": pa.string()})` """ if pa.types.is_string(storage.type): bytes_array = pa.array([None] * len(storage), type=pa.binary()) storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_binary(storage.type): path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"): storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()]) elif pa.types.is_struct(storage.type): if storage.type.get_field_index("bytes") >= 0: bytes_array = storage.field("bytes") else: bytes_array = pa.array([None] * len(storage), type=pa.binary()) if storage.type.get_field_index("path") >= 0: path_array = storage.field("path") else: path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null()) return array_cast(storage, self.pa_type) def embed_storage(self, storage: pa.StructArray) -> pa.StructArray: """Embed audio files into the Arrow array. Args: storage (`pa.StructArray`): PyArrow array to embed. Returns: `pa.StructArray`: Array in the Audio arrow storage type, that is `pa.struct({"bytes": pa.binary(), "path": pa.string()})`. 
""" @no_op_if_value_is_null def path_to_bytes(path): with xopen(path, "rb") as f: bytes_ = f.read() return bytes_ bytes_array = pa.array( [ (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None for x in storage.to_pylist() ], type=pa.binary(), ) path_array = pa.array( [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()], type=pa.string(), ) storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()) return array_cast(storage, self.pa_type)
0
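To make the encode/decode path of the `Audio` feature above concrete, here is a minimal, hedged sketch (not from the source file). It assumes `soundfile` and `librosa` are installed and that `sample.wav` is a hypothetical local file.

```python
from datasets import Audio

audio_feature = Audio(sampling_rate=16_000, mono=True)

# encode_example() on a plain path only records the path; the bytes stay None.
encoded = audio_feature.encode_example("sample.wav")
# -> {'bytes': None, 'path': 'sample.wav'}

# decode_example() reads the file, averages channels to mono, and resamples to 16 kHz.
decoded = audio_feature.decode_example(encoded)
print(decoded["sampling_rate"], decoded["array"].shape)
```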
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/features/features.py
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """ This class handle features definition in datasets and some utilities to display table type.""" import copy import json import re import sys from collections.abc import Iterable, Mapping from collections.abc import Sequence as SequenceABC from dataclasses import InitVar, dataclass, field, fields from functools import reduce, wraps from operator import mul from typing import Any, Callable, ClassVar, Dict, List, Optional, Tuple, Union from typing import Sequence as Sequence_ import numpy as np import pandas as pd import pyarrow as pa import pyarrow.compute as pc import pyarrow.types from pandas.api.extensions import ExtensionArray as PandasExtensionArray from pandas.api.extensions import ExtensionDtype as PandasExtensionDtype from .. import config from ..naming import camelcase_to_snakecase, snakecase_to_camelcase from ..table import array_cast from ..utils import logging from ..utils.py_utils import asdict, first_non_null_value, zip_dict from .audio import Audio from .image import Image, encode_pil_image from .translation import Translation, TranslationVariableLanguages logger = logging.get_logger(__name__) def _arrow_to_datasets_dtype(arrow_type: pa.DataType) -> str: """ _arrow_to_datasets_dtype takes a pyarrow.DataType and converts it to a datasets string dtype. 
In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))` """ if pyarrow.types.is_null(arrow_type): return "null" elif pyarrow.types.is_boolean(arrow_type): return "bool" elif pyarrow.types.is_int8(arrow_type): return "int8" elif pyarrow.types.is_int16(arrow_type): return "int16" elif pyarrow.types.is_int32(arrow_type): return "int32" elif pyarrow.types.is_int64(arrow_type): return "int64" elif pyarrow.types.is_uint8(arrow_type): return "uint8" elif pyarrow.types.is_uint16(arrow_type): return "uint16" elif pyarrow.types.is_uint32(arrow_type): return "uint32" elif pyarrow.types.is_uint64(arrow_type): return "uint64" elif pyarrow.types.is_float16(arrow_type): return "float16" # pyarrow dtype is "halffloat" elif pyarrow.types.is_float32(arrow_type): return "float32" # pyarrow dtype is "float" elif pyarrow.types.is_float64(arrow_type): return "float64" # pyarrow dtype is "double" elif pyarrow.types.is_time32(arrow_type): return f"time32[{pa.type_for_alias(str(arrow_type)).unit}]" elif pyarrow.types.is_time64(arrow_type): return f"time64[{pa.type_for_alias(str(arrow_type)).unit}]" elif pyarrow.types.is_timestamp(arrow_type): if arrow_type.tz is None: return f"timestamp[{arrow_type.unit}]" elif arrow_type.tz: return f"timestamp[{arrow_type.unit}, tz={arrow_type.tz}]" else: raise ValueError(f"Unexpected timestamp object {arrow_type}.") elif pyarrow.types.is_date32(arrow_type): return "date32" # pyarrow dtype is "date32[day]" elif pyarrow.types.is_date64(arrow_type): return "date64" # pyarrow dtype is "date64[ms]" elif pyarrow.types.is_duration(arrow_type): return f"duration[{arrow_type.unit}]" elif pyarrow.types.is_decimal128(arrow_type): return f"decimal128({arrow_type.precision}, {arrow_type.scale})" elif pyarrow.types.is_decimal256(arrow_type): return f"decimal256({arrow_type.precision}, {arrow_type.scale})" elif pyarrow.types.is_binary(arrow_type): return "binary" elif pyarrow.types.is_large_binary(arrow_type): return "large_binary" elif pyarrow.types.is_string(arrow_type): return "string" elif pyarrow.types.is_large_string(arrow_type): return "large_string" else: raise ValueError(f"Arrow type {arrow_type} does not have a datasets dtype equivalent.") def string_to_arrow(datasets_dtype: str) -> pa.DataType: """ string_to_arrow takes a datasets string dtype and converts it to a pyarrow.DataType. In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))` This is necessary because the datasets.Value() primitive type is constructed using a string dtype Value(dtype=str) But Features.type (via `get_nested_type()` expects to resolve Features into a pyarrow Schema, which means that each Value() must be able to resolve into a corresponding pyarrow.DataType, which is the purpose of this function. """ def _dtype_error_msg(dtype, pa_dtype, examples=None, urls=None): msg = f"{dtype} is not a validly formatted string representation of the pyarrow {pa_dtype} type." if examples: examples = ", ".join(examples[:-1]) + " or " + examples[-1] if len(examples) > 1 else examples[0] msg += f"\nValid examples include: {examples}." if urls: urls = ", ".join(urls[:-1]) + " and " + urls[-1] if len(urls) > 1 else urls[0] msg += f"\nFor more insformation, see: {urls}." 
return msg if datasets_dtype in pa.__dict__: return pa.__dict__[datasets_dtype]() if (datasets_dtype + "_") in pa.__dict__: return pa.__dict__[datasets_dtype + "_"]() timestamp_matches = re.search(r"^timestamp\[(.*)\]$", datasets_dtype) if timestamp_matches: timestamp_internals = timestamp_matches.group(1) internals_matches = re.search(r"^(s|ms|us|ns),\s*tz=([a-zA-Z0-9/_+\-:]*)$", timestamp_internals) if timestamp_internals in ["s", "ms", "us", "ns"]: return pa.timestamp(timestamp_internals) elif internals_matches: return pa.timestamp(internals_matches.group(1), internals_matches.group(2)) else: raise ValueError( _dtype_error_msg( datasets_dtype, "timestamp", examples=["timestamp[us]", "timestamp[us, tz=America/New_York"], urls=["https://arrow.apache.org/docs/python/generated/pyarrow.timestamp.html"], ) ) duration_matches = re.search(r"^duration\[(.*)\]$", datasets_dtype) if duration_matches: duration_internals = duration_matches.group(1) if duration_internals in ["s", "ms", "us", "ns"]: return pa.duration(duration_internals) else: raise ValueError( _dtype_error_msg( datasets_dtype, "duration", examples=["duration[s]", "duration[us]"], urls=["https://arrow.apache.org/docs/python/generated/pyarrow.duration.html"], ) ) time_matches = re.search(r"^time(.*)\[(.*)\]$", datasets_dtype) if time_matches: time_internals_bits = time_matches.group(1) if time_internals_bits == "32": time_internals_unit = time_matches.group(2) if time_internals_unit in ["s", "ms"]: return pa.time32(time_internals_unit) else: raise ValueError( f"{time_internals_unit} is not a valid unit for the pyarrow time32 type. Supported units: s (second) and ms (millisecond)." ) elif time_internals_bits == "64": time_internals_unit = time_matches.group(2) if time_internals_unit in ["us", "ns"]: return pa.time64(time_internals_unit) else: raise ValueError( f"{time_internals_unit} is not a valid unit for the pyarrow time64 type. Supported units: us (microsecond) and ns (nanosecond)." 
) else: raise ValueError( _dtype_error_msg( datasets_dtype, "time", examples=["time32[s]", "time64[us]"], urls=[ "https://arrow.apache.org/docs/python/generated/pyarrow.time32.html", "https://arrow.apache.org/docs/python/generated/pyarrow.time64.html", ], ) ) decimal_matches = re.search(r"^decimal(.*)\((.*)\)$", datasets_dtype) if decimal_matches: decimal_internals_bits = decimal_matches.group(1) if decimal_internals_bits == "128": decimal_internals_precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2)) if decimal_internals_precision_and_scale: precision = decimal_internals_precision_and_scale.group(1) scale = decimal_internals_precision_and_scale.group(2) return pa.decimal128(int(precision), int(scale)) else: raise ValueError( _dtype_error_msg( datasets_dtype, "decimal128", examples=["decimal128(10, 2)", "decimal128(4, -2)"], urls=["https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html"], ) ) elif decimal_internals_bits == "256": decimal_internals_precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2)) if decimal_internals_precision_and_scale: precision = decimal_internals_precision_and_scale.group(1) scale = decimal_internals_precision_and_scale.group(2) return pa.decimal256(int(precision), int(scale)) else: raise ValueError( _dtype_error_msg( datasets_dtype, "decimal256", examples=["decimal256(30, 2)", "decimal256(38, -4)"], urls=["https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html"], ) ) else: raise ValueError( _dtype_error_msg( datasets_dtype, "decimal", examples=["decimal128(12, 3)", "decimal256(40, 6)"], urls=[ "https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html", "https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html", ], ) ) raise ValueError( f"Neither {datasets_dtype} nor {datasets_dtype + '_'} seems to be a pyarrow data type. " f"Please make sure to use a correct data type, see: " f"https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions" ) def _cast_to_python_objects(obj: Any, only_1d_for_numpy: bool, optimize_list_casting: bool) -> Tuple[Any, bool]: """ Cast pytorch/tensorflow/pandas objects to python numpy array/lists. It works recursively. If `optimize_list_casting` is True, to avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be casted. If the first element needs to be casted, then all the elements of the list will be casted, otherwise they'll stay the same. This trick allows to cast objects that contain tokenizers outputs without iterating over every single token for example. Args: obj: the object (nested struct) to cast. only_1d_for_numpy (bool): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays. Indeed Arrow only support converting 1-dimensional array values. optimize_list_casting (bool): whether to optimize list casting by checking the first non-null element to see if it needs to be casted and if it doesn't, not checking the rest of the list elements. 
Returns: casted_obj: the casted object has_changed (bool): True if the object has been changed, False if it is identical """ if config.TF_AVAILABLE and "tensorflow" in sys.modules: import tensorflow as tf if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if config.JAX_AVAILABLE and "jax" in sys.modules: import jax.numpy as jnp if config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(obj, np.ndarray): if obj.ndim == 0: return obj[()], True elif not only_1d_for_numpy or obj.ndim == 1: return obj, False else: return ( [ _cast_to_python_objects( x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0] for x in obj ], True, ) elif config.TORCH_AVAILABLE and "torch" in sys.modules and isinstance(obj, torch.Tensor): if obj.ndim == 0: return obj.detach().cpu().numpy()[()], True elif not only_1d_for_numpy or obj.ndim == 1: return obj.detach().cpu().numpy(), True else: return ( [ _cast_to_python_objects( x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0] for x in obj.detach().cpu().numpy() ], True, ) elif config.TF_AVAILABLE and "tensorflow" in sys.modules and isinstance(obj, tf.Tensor): if obj.ndim == 0: return obj.numpy()[()], True elif not only_1d_for_numpy or obj.ndim == 1: return obj.numpy(), True else: return ( [ _cast_to_python_objects( x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0] for x in obj.numpy() ], True, ) elif config.JAX_AVAILABLE and "jax" in sys.modules and isinstance(obj, jnp.ndarray): if obj.ndim == 0: return np.asarray(obj)[()], True elif not only_1d_for_numpy or obj.ndim == 1: return np.asarray(obj), True else: return ( [ _cast_to_python_objects( x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0] for x in np.asarray(obj) ], True, ) elif config.PIL_AVAILABLE and "PIL" in sys.modules and isinstance(obj, PIL.Image.Image): return encode_pil_image(obj), True elif isinstance(obj, pd.Series): return ( _cast_to_python_objects( obj.tolist(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0], True, ) elif isinstance(obj, pd.DataFrame): return ( { key: _cast_to_python_objects( value, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0] for key, value in obj.to_dict("list").items() }, True, ) elif isinstance(obj, pd.Timestamp): return obj.to_pydatetime(), True elif isinstance(obj, pd.Timedelta): return obj.to_pytimedelta(), True elif isinstance(obj, Mapping): has_changed = not isinstance(obj, dict) output = {} for k, v in obj.items(): casted_v, has_changed_v = _cast_to_python_objects( v, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting ) has_changed |= has_changed_v output[k] = casted_v return output if has_changed else obj, has_changed elif hasattr(obj, "__array__"): return ( _cast_to_python_objects( obj.__array__(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0], True, ) elif isinstance(obj, (list, tuple)): if len(obj) > 0: for first_elmt in obj: if _check_non_null_non_empty_recursive(first_elmt): break casted_first_elmt, has_changed_first_elmt = _cast_to_python_objects( first_elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting ) if has_changed_first_elmt or not optimize_list_casting: return ( [ _cast_to_python_objects( elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0] for elmt in obj ], 
True, ) else: if isinstance(obj, (list, tuple)): return obj, False else: return list(obj), True else: return obj, False else: return obj, False def cast_to_python_objects(obj: Any, only_1d_for_numpy=False, optimize_list_casting=True) -> Any: """ Cast numpy/pytorch/tensorflow/pandas objects to python lists. It works recursively. If `optimize_list_casting` is True, To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be casted. If the first element needs to be casted, then all the elements of the list will be casted, otherwise they'll stay the same. This trick allows to cast objects that contain tokenizers outputs without iterating over every single token for example. Args: obj: the object (nested struct) to cast only_1d_for_numpy (bool, default ``False``): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays. Indeed Arrow only support converting 1-dimensional array values. optimize_list_casting (bool, default ``True``): whether to optimize list casting by checking the first non-null element to see if it needs to be casted and if it doesn't, not checking the rest of the list elements. Returns: casted_obj: the casted object """ return _cast_to_python_objects( obj, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0] @dataclass class Value: """ The `Value` dtypes are as follows: - `null` - `bool` - `int8` - `int16` - `int32` - `int64` - `uint8` - `uint16` - `uint32` - `uint64` - `float16` - `float32` (alias float) - `float64` (alias double) - `time32[(s|ms)]` - `time64[(us|ns)]` - `timestamp[(s|ms|us|ns)]` - `timestamp[(s|ms|us|ns), tz=(tzstring)]` - `date32` - `date64` - `duration[(s|ms|us|ns)]` - `decimal128(precision, scale)` - `decimal256(precision, scale)` - `binary` - `large_binary` - `string` - `large_string` Example: ```py >>> from datasets import Features >>> features = Features({'stars': Value(dtype='int32')}) >>> features {'stars': Value(dtype='int32', id=None)} ``` """ dtype: str id: Optional[str] = None # Automatically constructed pa_type: ClassVar[Any] = None _type: str = field(default="Value", init=False, repr=False) def __post_init__(self): if self.dtype == "double": # fix inferred type self.dtype = "float64" if self.dtype == "float": # fix inferred type self.dtype = "float32" self.pa_type = string_to_arrow(self.dtype) def __call__(self): return self.pa_type def encode_example(self, value): if pa.types.is_boolean(self.pa_type): return bool(value) elif pa.types.is_integer(self.pa_type): return int(value) elif pa.types.is_floating(self.pa_type): return float(value) elif pa.types.is_string(self.pa_type): return str(value) else: return value class _ArrayXD: def __post_init__(self): self.shape = tuple(self.shape) def __call__(self): pa_type = globals()[self.__class__.__name__ + "ExtensionType"](self.shape, self.dtype) return pa_type def encode_example(self, value): return value @dataclass class Array2D(_ArrayXD): """Create a two-dimensional array. Args: shape (`tuple`): The size of each dimension. dtype (`str`): The value of the data type. 
Example: ```py >>> from datasets import Features >>> features = Features({'x': Array2D(shape=(1, 3), dtype='int32')}) ``` """ shape: tuple dtype: str id: Optional[str] = None # Automatically constructed _type: str = field(default="Array2D", init=False, repr=False) @dataclass class Array3D(_ArrayXD): """Create a three-dimensional array. Args: shape (`tuple`): The size of each dimension. dtype (`str`): The value of the data type. Example: ```py >>> from datasets import Features >>> features = Features({'x': Array3D(shape=(1, 2, 3), dtype='int32')}) ``` """ shape: tuple dtype: str id: Optional[str] = None # Automatically constructed _type: str = field(default="Array3D", init=False, repr=False) @dataclass class Array4D(_ArrayXD): """Create a four-dimensional array. Args: shape (`tuple`): The size of each dimension. dtype (`str`): The value of the data type. Example: ```py >>> from datasets import Features >>> features = Features({'x': Array4D(shape=(1, 2, 2, 3), dtype='int32')}) ``` """ shape: tuple dtype: str id: Optional[str] = None # Automatically constructed _type: str = field(default="Array4D", init=False, repr=False) @dataclass class Array5D(_ArrayXD): """Create a five-dimensional array. Args: shape (`tuple`): The size of each dimension. dtype (`str`): The value of the data type. Example: ```py >>> from datasets import Features >>> features = Features({'x': Array5D(shape=(1, 2, 2, 3, 3), dtype='int32')}) ``` """ shape: tuple dtype: str id: Optional[str] = None # Automatically constructed _type: str = field(default="Array5D", init=False, repr=False) class _ArrayXDExtensionType(pa.PyExtensionType): ndims: Optional[int] = None def __init__(self, shape: tuple, dtype: str): if self.ndims is None or self.ndims <= 1: raise ValueError("You must instantiate an array type with a value for dim that is > 1") if len(shape) != self.ndims: raise ValueError(f"shape={shape} and ndims={self.ndims} don't match") for dim in range(1, self.ndims): if shape[dim] is None: raise ValueError(f"Support only dynamic size on first dimension. Got: {shape}") self.shape = tuple(shape) self.value_type = dtype self.storage_dtype = self._generate_dtype(self.value_type) pa.PyExtensionType.__init__(self, self.storage_dtype) def __reduce__(self): return self.__class__, ( self.shape, self.value_type, ) def __hash__(self): return hash((self.__class__, self.shape, self.value_type)) def __arrow_ext_class__(self): return ArrayExtensionArray def _generate_dtype(self, dtype): dtype = string_to_arrow(dtype) for d in reversed(self.shape): dtype = pa.list_(dtype) # Don't specify the size of the list, since fixed length list arrays have issues # being validated after slicing in pyarrow 0.17.1 return dtype def to_pandas_dtype(self): return PandasArrayExtensionDtype(self.value_type) class Array2DExtensionType(_ArrayXDExtensionType): ndims = 2 class Array3DExtensionType(_ArrayXDExtensionType): ndims = 3 class Array4DExtensionType(_ArrayXDExtensionType): ndims = 4 class Array5DExtensionType(_ArrayXDExtensionType): ndims = 5 def _is_zero_copy_only(pa_type: pa.DataType, unnest: bool = False) -> bool: """ When converting a pyarrow array to a numpy array, we must know whether this could be done in zero-copy or not. This function returns the value of the ``zero_copy_only`` parameter to pass to ``.to_numpy()``, given the type of the pyarrow array. 
# zero copy is available for all primitive types except booleans and temporal types (date, time, timestamp or duration) # primitive types are types for which the physical representation in arrow and in numpy # https://github.com/wesm/arrow/blob/c07b9b48cf3e0bbbab493992a492ae47e5b04cad/python/pyarrow/types.pxi#L821 # see https://arrow.apache.org/docs/python/generated/pyarrow.Array.html#pyarrow.Array.to_numpy # and https://issues.apache.org/jira/browse/ARROW-2871?jql=text%20~%20%22boolean%20to_numpy%22 """ def _unnest_pa_type(pa_type: pa.DataType) -> pa.DataType: if pa.types.is_list(pa_type): return _unnest_pa_type(pa_type.value_type) return pa_type if unnest: pa_type = _unnest_pa_type(pa_type) return pa.types.is_primitive(pa_type) and not (pa.types.is_boolean(pa_type) or pa.types.is_temporal(pa_type)) class ArrayExtensionArray(pa.ExtensionArray): def __array__(self): zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True) return self.to_numpy(zero_copy_only=zero_copy_only) def __getitem__(self, i): return self.storage[i] def to_numpy(self, zero_copy_only=True): storage: pa.ListArray = self.storage null_mask = storage.is_null().to_numpy(zero_copy_only=False) if self.type.shape[0] is not None: size = 1 null_indices = np.arange(len(storage))[null_mask] - np.arange(np.sum(null_mask)) for i in range(self.type.ndims): size *= self.type.shape[i] storage = storage.flatten() numpy_arr = storage.to_numpy(zero_copy_only=zero_copy_only) numpy_arr = numpy_arr.reshape(len(self) - len(null_indices), *self.type.shape) if len(null_indices): numpy_arr = np.insert(numpy_arr.astype(np.float64), null_indices, np.nan, axis=0) else: shape = self.type.shape ndims = self.type.ndims arrays = [] first_dim_offsets = np.array([off.as_py() for off in storage.offsets]) for i, is_null in enumerate(null_mask): if is_null: arrays.append(np.nan) else: storage_el = storage[i : i + 1] first_dim = first_dim_offsets[i + 1] - first_dim_offsets[i] # flatten storage for _ in range(ndims): storage_el = storage_el.flatten() numpy_arr = storage_el.to_numpy(zero_copy_only=zero_copy_only) arrays.append(numpy_arr.reshape(first_dim, *shape[1:])) if len(np.unique(np.diff(first_dim_offsets))) > 1: # ragged numpy_arr = np.empty(len(arrays), dtype=object) numpy_arr[:] = arrays else: numpy_arr = np.array(arrays) return numpy_arr def to_pylist(self): zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True) numpy_arr = self.to_numpy(zero_copy_only=zero_copy_only) if self.type.shape[0] is None and numpy_arr.dtype == object: return [arr.tolist() for arr in numpy_arr.tolist()] else: return numpy_arr.tolist() class PandasArrayExtensionDtype(PandasExtensionDtype): _metadata = "value_type" def __init__(self, value_type: Union["PandasArrayExtensionDtype", np.dtype]): self._value_type = value_type def __from_arrow__(self, array: Union[pa.Array, pa.ChunkedArray]): if isinstance(array, pa.ChunkedArray): array = array.type.wrap_array(pa.concat_arrays([chunk.storage for chunk in array.chunks])) zero_copy_only = _is_zero_copy_only(array.storage.type, unnest=True) numpy_arr = array.to_numpy(zero_copy_only=zero_copy_only) return PandasArrayExtensionArray(numpy_arr) @classmethod def construct_array_type(cls): return PandasArrayExtensionArray @property def type(self) -> type: return np.ndarray @property def kind(self) -> str: return "O" @property def name(self) -> str: return f"array[{self.value_type}]" @property def value_type(self) -> np.dtype: return self._value_type class PandasArrayExtensionArray(PandasExtensionArray): def 
__init__(self, data: np.ndarray, copy: bool = False): self._data = data if not copy else np.array(data) self._dtype = PandasArrayExtensionDtype(data.dtype) def __array__(self, dtype=None): """ Convert to NumPy Array. Note that Pandas expects a 1D array when dtype is set to object. But for other dtypes, the returned shape is the same as the one of ``data``. More info about pandas 1D requirement for PandasExtensionArray here: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.api.extensions.ExtensionArray.html#pandas.api.extensions.ExtensionArray """ if dtype == object: out = np.empty(len(self._data), dtype=object) for i in range(len(self._data)): out[i] = self._data[i] return out if dtype is None: return self._data else: return self._data.astype(dtype) def copy(self, deep: bool = False) -> "PandasArrayExtensionArray": return PandasArrayExtensionArray(self._data, copy=True) @classmethod def _from_sequence( cls, scalars, dtype: Optional[PandasArrayExtensionDtype] = None, copy: bool = False ) -> "PandasArrayExtensionArray": if len(scalars) > 1 and all( isinstance(x, np.ndarray) and x.shape == scalars[0].shape and x.dtype == scalars[0].dtype for x in scalars ): data = np.array(scalars, dtype=dtype if dtype is None else dtype.value_type, copy=copy) else: data = np.empty(len(scalars), dtype=object) data[:] = scalars return cls(data, copy=copy) @classmethod def _concat_same_type(cls, to_concat: Sequence_["PandasArrayExtensionArray"]) -> "PandasArrayExtensionArray": if len(to_concat) > 1 and all( va._data.shape == to_concat[0]._data.shape and va._data.dtype == to_concat[0]._data.dtype for va in to_concat ): data = np.vstack([va._data for va in to_concat]) else: data = np.empty(len(to_concat), dtype=object) data[:] = [va._data for va in to_concat] return cls(data, copy=False) @property def dtype(self) -> PandasArrayExtensionDtype: return self._dtype @property def nbytes(self) -> int: return self._data.nbytes def isna(self) -> np.ndarray: return np.array([pd.isna(arr).any() for arr in self._data]) def __setitem__(self, key: Union[int, slice, np.ndarray], value: Any) -> None: raise NotImplementedError() def __getitem__(self, item: Union[int, slice, np.ndarray]) -> Union[np.ndarray, "PandasArrayExtensionArray"]: if isinstance(item, int): return self._data[item] return PandasArrayExtensionArray(self._data[item], copy=False) def take( self, indices: Sequence_[int], allow_fill: bool = False, fill_value: bool = None ) -> "PandasArrayExtensionArray": indices: np.ndarray = np.asarray(indices, dtype=int) if allow_fill: fill_value = ( self.dtype.na_value if fill_value is None else np.asarray(fill_value, dtype=self.dtype.value_type) ) mask = indices == -1 if (indices < -1).any(): raise ValueError("Invalid value in `indices`, must be all >= -1 for `allow_fill` is True") elif len(self) > 0: pass elif not np.all(mask): raise IndexError("Invalid take for empty PandasArrayExtensionArray, must be all -1.") else: data = np.array([fill_value] * len(indices), dtype=self.dtype.value_type) return PandasArrayExtensionArray(data, copy=False) took = self._data.take(indices, axis=0) if allow_fill and mask.any(): took[mask] = [fill_value] * np.sum(mask) return PandasArrayExtensionArray(took, copy=False) def __len__(self) -> int: return len(self._data) def __eq__(self, other) -> np.ndarray: if not isinstance(other, PandasArrayExtensionArray): raise NotImplementedError(f"Invalid type to compare to: {type(other)}") return (self._data == other._data).all() def pandas_types_mapper(dtype): if isinstance(dtype, 
_ArrayXDExtensionType): return PandasArrayExtensionDtype(dtype.value_type) @dataclass class ClassLabel: """Feature type for integer class labels. There are 3 ways to define a `ClassLabel`, which correspond to the 3 arguments: * `num_classes`: Create 0 to (num_classes-1) labels. * `names`: List of label strings. * `names_file`: File containing the list of labels. Under the hood the labels are stored as integers. You can use negative integers to represent unknown/missing labels. Args: num_classes (`int`, *optional*): Number of classes. All labels must be < `num_classes`. names (`list` of `str`, *optional*): String names for the integer classes. The order in which the names are provided is kept. names_file (`str`, *optional*): Path to a file with names for the integer classes, one per line. Example: ```py >>> from datasets import Features >>> features = Features({'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'])}) >>> features {'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'], id=None)} ``` """ num_classes: InitVar[Optional[int]] = None # Pseudo-field: ignored by asdict/fields when converting to/from dict names: List[str] = None names_file: InitVar[Optional[str]] = None # Pseudo-field: ignored by asdict/fields when converting to/from dict id: Optional[str] = None # Automatically constructed dtype: ClassVar[str] = "int64" pa_type: ClassVar[Any] = pa.int64() _str2int: ClassVar[Dict[str, int]] = None _int2str: ClassVar[Dict[int, int]] = None _type: str = field(default="ClassLabel", init=False, repr=False) def __post_init__(self, num_classes, names_file): self.num_classes = num_classes self.names_file = names_file if self.names_file is not None and self.names is not None: raise ValueError("Please provide either names or names_file but not both.") # Set self.names if self.names is None: if self.names_file is not None: self.names = self._load_names_from_file(self.names_file) elif self.num_classes is not None: self.names = [str(i) for i in range(self.num_classes)] else: raise ValueError("Please provide either num_classes, names or names_file.") elif not isinstance(self.names, SequenceABC): raise TypeError(f"Please provide names as a list, is {type(self.names)}") # Set self.num_classes if self.num_classes is None: self.num_classes = len(self.names) elif self.num_classes != len(self.names): raise ValueError( "ClassLabel number of names do not match the defined num_classes. " f"Got {len(self.names)} names VS {self.num_classes} num_classes" ) # Prepare mappings self._int2str = [str(name) for name in self.names] self._str2int = {name: i for i, name in enumerate(self._int2str)} if len(self._int2str) != len(self._str2int): raise ValueError("Some label names are duplicated. Each label name should be unique.") def __call__(self): return self.pa_type def str2int(self, values: Union[str, Iterable]) -> Union[int, Iterable]: """Conversion class name `string` => `integer`. 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train") >>> ds.features["label"].str2int('neg') 0 ``` """ if not isinstance(values, str) and not isinstance(values, Iterable): raise ValueError( f"Values {values} should be a string or an Iterable (list, numpy array, pytorch, tensorflow tensors)" ) return_list = True if isinstance(values, str): values = [values] return_list = False output = [self._strval2int(value) for value in values] return output if return_list else output[0] def _strval2int(self, value: str) -> int: failed_parse = False value = str(value) # first attempt - raw string value int_value = self._str2int.get(value) if int_value is None: # second attempt - strip whitespace int_value = self._str2int.get(value.strip()) if int_value is None: # third attempt - convert str to int try: int_value = int(value) except ValueError: failed_parse = True else: if int_value < -1 or int_value >= self.num_classes: failed_parse = True if failed_parse: raise ValueError(f"Invalid string class label {value}") return int_value def int2str(self, values: Union[int, Iterable]) -> Union[str, Iterable]: """Conversion `integer` => class name `string`. Regarding unknown/missing labels: passing negative integers raises `ValueError`. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train") >>> ds.features["label"].int2str(0) 'neg' ``` """ if not isinstance(values, int) and not isinstance(values, Iterable): raise ValueError( f"Values {values} should be an integer or an Iterable (list, numpy array, pytorch, tensorflow tensors)" ) return_list = True if isinstance(values, int): values = [values] return_list = False for v in values: if not 0 <= v < self.num_classes: raise ValueError(f"Invalid integer class label {v:d}") output = [self._int2str[int(v)] for v in values] return output if return_list else output[0] def encode_example(self, example_data): if self.num_classes is None: raise ValueError( "Trying to use ClassLabel feature with undefined number of class. " "Please set ClassLabel.names or num_classes." ) # If a string is given, convert to associated integer if isinstance(example_data, str): example_data = self.str2int(example_data) # Allowing -1 to mean no label. if not -1 <= example_data < self.num_classes: raise ValueError(f"Class label {example_data:d} greater than configured num_classes {self.num_classes}") return example_data def cast_storage(self, storage: Union[pa.StringArray, pa.IntegerArray]) -> pa.Int64Array: """Cast an Arrow array to the `ClassLabel` arrow storage type. The Arrow types that can be converted to the `ClassLabel` pyarrow storage type are: - `pa.string()` - `pa.int()` Args: storage (`Union[pa.StringArray, pa.IntegerArray]`): PyArrow array to cast. Returns: `pa.Int64Array`: Array in the `ClassLabel` arrow storage type. 
""" if isinstance(storage, pa.IntegerArray) and len(storage) > 0: min_max = pc.min_max(storage).as_py() if min_max["max"] is not None and min_max["max"] >= self.num_classes: raise ValueError( f"Class label {min_max['max']} greater than configured num_classes {self.num_classes}" ) elif isinstance(storage, pa.StringArray): storage = pa.array( [self._strval2int(label) if label is not None else None for label in storage.to_pylist()] ) return array_cast(storage, self.pa_type) @staticmethod def _load_names_from_file(names_filepath): with open(names_filepath, encoding="utf-8") as f: return [name.strip() for name in f.read().split("\n") if name.strip()] # Filter empty names @dataclass class Sequence: """Construct a list of feature from a single type or a dict of types. Mostly here for compatiblity with tfds. Args: feature: A list of features of a single type or a dictionary of types. length (`int`): Length of the sequence. Example: ```py >>> from datasets import Features, Sequence, Value, ClassLabel >>> features = Features({'post': Sequence(feature={'text': Value(dtype='string'), 'upvotes': Value(dtype='int32'), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'])})}) >>> features {'post': Sequence(feature={'text': Value(dtype='string', id=None), 'upvotes': Value(dtype='int32', id=None), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'], id=None)}, length=-1, id=None)} ``` """ feature: Any length: int = -1 id: Optional[str] = None # Automatically constructed dtype: ClassVar[str] = "list" pa_type: ClassVar[Any] = None _type: str = field(default="Sequence", init=False, repr=False) FeatureType = Union[ dict, list, tuple, Value, ClassLabel, Translation, TranslationVariableLanguages, Sequence, Array2D, Array3D, Array4D, Array5D, Audio, Image, ] def _check_non_null_non_empty_recursive(obj, schema: Optional[FeatureType] = None) -> bool: """ Check if the object is not None. If the object is a list or a tuple, recursively check the first element of the sequence and stop if at any point the first element is not a sequence or is an empty sequence. """ if obj is None: return False elif isinstance(obj, (list, tuple)) and (schema is None or isinstance(schema, (list, tuple, Sequence))): if len(obj) > 0: if schema is None: pass elif isinstance(schema, (list, tuple)): schema = schema[0] else: schema = schema.feature return _check_non_null_non_empty_recursive(obj[0], schema) else: return False else: return True def get_nested_type(schema: FeatureType) -> pa.DataType: """ get_nested_type() converts a datasets.FeatureType into a pyarrow.DataType, and acts as the inverse of generate_from_arrow_type(). 
It performs double-duty as the implementation of Features.type and handles the conversion of datasets.Feature->pa.struct """ # Nested structures: we allow dict, list/tuples, sequences if isinstance(schema, Features): return pa.struct( {key: get_nested_type(schema[key]) for key in schema} ) # Features is subclass of dict, and dict order is deterministic since Python 3.6 elif isinstance(schema, dict): return pa.struct( {key: get_nested_type(schema[key]) for key in schema} ) # however don't sort on struct types since the order matters elif isinstance(schema, (list, tuple)): if len(schema) != 1: raise ValueError("When defining list feature, you should just provide one example of the inner type") value_type = get_nested_type(schema[0]) return pa.list_(value_type) elif isinstance(schema, Sequence): value_type = get_nested_type(schema.feature) # We allow to reverse list of dict => dict of list for compatibility with tfds if isinstance(schema.feature, dict): return pa.struct({f.name: pa.list_(f.type, schema.length) for f in value_type}) return pa.list_(value_type, schema.length) # Other objects are callable which returns their data type (ClassLabel, Array2D, Translation, Arrow datatype creation methods) return schema() def encode_nested_example(schema, obj, level=0): """Encode a nested example. This is used since some features (in particular ClassLabel) have some logic during encoding. To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be encoded. If the first element needs to be encoded, then all the elements of the list will be encoded, otherwise they'll stay the same. """ # Nested structures: we allow dict, list/tuples, sequences if isinstance(schema, dict): if level == 0 and obj is None: raise ValueError("Got None but expected a dictionary instead") return ( { k: encode_nested_example(sub_schema, sub_obj, level=level + 1) for k, (sub_schema, sub_obj) in zip_dict(schema, obj) } if obj is not None else None ) elif isinstance(schema, (list, tuple)): sub_schema = schema[0] if obj is None: return None else: if len(obj) > 0: for first_elmt in obj: if _check_non_null_non_empty_recursive(first_elmt, sub_schema): break if encode_nested_example(sub_schema, first_elmt, level=level + 1) != first_elmt: return [encode_nested_example(sub_schema, o, level=level + 1) for o in obj] return list(obj) elif isinstance(schema, Sequence): if obj is None: return None # We allow to reverse list of dict => dict of list for compatiblity with tfds if isinstance(schema.feature, dict): # dict of list to fill list_dict = {} if isinstance(obj, (list, tuple)): # obj is a list of dict for k, dict_tuples in zip_dict(schema.feature, *obj): list_dict[k] = [encode_nested_example(dict_tuples[0], o, level=level + 1) for o in dict_tuples[1:]] return list_dict else: # obj is a single dict for k, (sub_schema, sub_objs) in zip_dict(schema.feature, obj): list_dict[k] = [encode_nested_example(sub_schema, o, level=level + 1) for o in sub_objs] return list_dict # schema.feature is not a dict if isinstance(obj, str): # don't interpret a string as a list raise ValueError(f"Got a string but expected a list instead: '{obj}'") else: if len(obj) > 0: for first_elmt in obj: if _check_non_null_non_empty_recursive(first_elmt, schema.feature): break # be careful when comparing tensors here if ( not isinstance(first_elmt, list) or encode_nested_example(schema.feature, first_elmt, level=level + 1) != first_elmt ): return 
[encode_nested_example(schema.feature, o, level=level + 1) for o in obj] return list(obj) # Object with special encoding: # ClassLabel will convert from string to int, TranslationVariableLanguages does some checks elif isinstance(schema, (Audio, Image, ClassLabel, TranslationVariableLanguages, Value, _ArrayXD)): return schema.encode_example(obj) if obj is not None else None # Other object should be directly convertible to a native Arrow type (like Translation and Translation) return obj def decode_nested_example(schema, obj, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None): """Decode a nested example. This is used since some features (in particular Audio and Image) have some logic during decoding. To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be decoded. If the first element needs to be decoded, then all the elements of the list will be decoded, otherwise they'll stay the same. """ # Nested structures: we allow dict, list/tuples, sequences if isinstance(schema, dict): return ( {k: decode_nested_example(sub_schema, sub_obj) for k, (sub_schema, sub_obj) in zip_dict(schema, obj)} if obj is not None else None ) elif isinstance(schema, (list, tuple)): sub_schema = schema[0] if obj is None: return None else: if len(obj) > 0: for first_elmt in obj: if _check_non_null_non_empty_recursive(first_elmt, sub_schema): break if decode_nested_example(sub_schema, first_elmt) != first_elmt: return [decode_nested_example(sub_schema, o) for o in obj] return list(obj) elif isinstance(schema, Sequence): # We allow to reverse list of dict => dict of list for compatiblity with tfds if isinstance(schema.feature, dict): return {k: decode_nested_example([schema.feature[k]], obj[k]) for k in schema.feature} else: return decode_nested_example([schema.feature], obj) # Object with special decoding: elif isinstance(schema, (Audio, Image)): # we pass the token to read and decode files from private repositories in streaming mode if obj is not None and schema.decode: return schema.decode_example(obj, token_per_repo_id=token_per_repo_id) return obj def generate_from_dict(obj: Any): """Regenerate the nested feature object from a deserialized dict. We use the '_type' fields to get the dataclass name to load. generate_from_dict is the recursive helper for Features.from_dict, and allows for a convenient constructor syntax to define features from deserialized JSON dictionaries. This function is used in particular when deserializing a :class:`DatasetInfo` that was dumped to a JSON object. This acts as an analogue to :meth:`Features.from_arrow_schema` and handles the recursive field-by-field instantiation, but doesn't require any mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive dtypes that :class:`Value` automatically performs. 
""" # Nested structures: we allow dict, list/tuples, sequences if isinstance(obj, list): return [generate_from_dict(value) for value in obj] # Otherwise we have a dict or a dataclass if "_type" not in obj or isinstance(obj["_type"], dict): return {key: generate_from_dict(value) for key, value in obj.items()} class_type = globals()[obj.pop("_type")] if class_type == Sequence: return Sequence(feature=generate_from_dict(obj["feature"]), length=obj.get("length", -1)) field_names = {f.name for f in fields(class_type)} return class_type(**{k: v for k, v in obj.items() if k in field_names}) def generate_from_arrow_type(pa_type: pa.DataType) -> FeatureType: """ generate_from_arrow_type accepts an arrow DataType and returns a datasets FeatureType to be used as the type for a single field. This is the high-level arrow->datasets type conversion and is inverted by get_nested_type(). This operates at the individual *field* level, whereas Features.from_arrow_schema() operates at the full schema level and holds the methods that represent the bijection from Features<->pyarrow.Schema """ if isinstance(pa_type, pa.StructType): return {field.name: generate_from_arrow_type(field.type) for field in pa_type} elif isinstance(pa_type, pa.FixedSizeListType): return Sequence(feature=generate_from_arrow_type(pa_type.value_type), length=pa_type.list_size) elif isinstance(pa_type, pa.ListType): feature = generate_from_arrow_type(pa_type.value_type) if isinstance(feature, (dict, tuple, list)): return [feature] return Sequence(feature=feature) elif isinstance(pa_type, _ArrayXDExtensionType): array_feature = [None, None, Array2D, Array3D, Array4D, Array5D][pa_type.ndims] return array_feature(shape=pa_type.shape, dtype=pa_type.value_type) elif isinstance(pa_type, pa.DictionaryType): raise NotImplementedError # TODO(thom) this will need access to the dictionary as well (for labels). I.e. 
to the py_table elif isinstance(pa_type, pa.DataType): return Value(dtype=_arrow_to_datasets_dtype(pa_type)) else: raise ValueError(f"Cannot convert {pa_type} to a Feature type.") def numpy_to_pyarrow_listarray(arr: np.ndarray, type: pa.DataType = None) -> pa.ListArray: """Build a PyArrow ListArray from a multidimensional NumPy array""" arr = np.array(arr) values = pa.array(arr.flatten(), type=type) for i in range(arr.ndim - 1): n_offsets = reduce(mul, arr.shape[: arr.ndim - i - 1], 1) step_offsets = arr.shape[arr.ndim - i - 1] offsets = pa.array(np.arange(n_offsets + 1) * step_offsets, type=pa.int32()) values = pa.ListArray.from_arrays(offsets, values) return values def list_of_pa_arrays_to_pyarrow_listarray(l_arr: List[Optional[pa.Array]]) -> pa.ListArray: null_mask = np.array([arr is None for arr in l_arr]) null_indices = np.arange(len(null_mask))[null_mask] - np.arange(np.sum(null_mask)) l_arr = [arr for arr in l_arr if arr is not None] offsets = np.cumsum( [0] + [len(arr) for arr in l_arr], dtype=object ) # convert to dtype object to allow None insertion offsets = np.insert(offsets, null_indices, None) offsets = pa.array(offsets, type=pa.int32()) values = pa.concat_arrays(l_arr) return pa.ListArray.from_arrays(offsets, values) def list_of_np_array_to_pyarrow_listarray(l_arr: List[np.ndarray], type: pa.DataType = None) -> pa.ListArray: """Build a PyArrow ListArray from a possibly nested list of NumPy arrays""" if len(l_arr) > 0: return list_of_pa_arrays_to_pyarrow_listarray( [numpy_to_pyarrow_listarray(arr, type=type) if arr is not None else None for arr in l_arr] ) else: return pa.array([], type=type) def contains_any_np_array(data: Any): """Return `True` if data is a NumPy ndarray or (recursively) if first non-null value in list is a NumPy ndarray. Args: data (Any): Data. Returns: bool """ if isinstance(data, np.ndarray): return True elif isinstance(data, list): return contains_any_np_array(first_non_null_value(data)[1]) else: return False def any_np_array_to_pyarrow_listarray(data: Union[np.ndarray, List], type: pa.DataType = None) -> pa.ListArray: """Convert to PyArrow ListArray either a NumPy ndarray or (recursively) a list that may contain any NumPy ndarray. Args: data (Union[np.ndarray, List]): Data. type (pa.DataType): Explicit PyArrow DataType passed to coerce the ListArray data type. Returns: pa.ListArray """ if isinstance(data, np.ndarray): return numpy_to_pyarrow_listarray(data, type=type) elif isinstance(data, list): return list_of_pa_arrays_to_pyarrow_listarray([any_np_array_to_pyarrow_listarray(i, type=type) for i in data]) def to_pyarrow_listarray(data: Any, pa_type: _ArrayXDExtensionType) -> pa.Array: """Convert to PyArrow ListArray. Args: data (Any): Sequence, iterable, np.ndarray or pd.Series. pa_type (_ArrayXDExtensionType): Any of the ArrayNDExtensionType. Returns: pyarrow.Array """ if contains_any_np_array(data): return any_np_array_to_pyarrow_listarray(data, type=pa_type.value_type) else: return pa.array(data, pa_type.storage_dtype) def _visit(feature: FeatureType, func: Callable[[FeatureType], Optional[FeatureType]]) -> FeatureType: """Visit a (possibly nested) feature. 
Args: feature (FeatureType): the feature type to be checked Returns: visited feature (FeatureType) """ if isinstance(feature, dict): out = func({k: _visit(f, func) for k, f in feature.items()}) elif isinstance(feature, (list, tuple)): out = func([_visit(feature[0], func)]) elif isinstance(feature, Sequence): out = func(Sequence(_visit(feature.feature, func), length=feature.length)) else: out = func(feature) return feature if out is None else out def require_decoding(feature: FeatureType, ignore_decode_attribute: bool = False) -> bool: """Check if a (possibly nested) feature requires decoding. Args: feature (FeatureType): the feature type to be checked ignore_decode_attribute (:obj:`bool`, default ``False``): Whether to ignore the current value of the `decode` attribute of the decodable feature types. Returns: :obj:`bool` """ if isinstance(feature, dict): return any(require_decoding(f) for f in feature.values()) elif isinstance(feature, (list, tuple)): return require_decoding(feature[0]) elif isinstance(feature, Sequence): return require_decoding(feature.feature) else: return hasattr(feature, "decode_example") and (feature.decode if not ignore_decode_attribute else True) def require_storage_cast(feature: FeatureType) -> bool: """Check if a (possibly nested) feature requires storage casting. Args: feature (FeatureType): the feature type to be checked Returns: :obj:`bool` """ if isinstance(feature, dict): return any(require_storage_cast(f) for f in feature.values()) elif isinstance(feature, (list, tuple)): return require_storage_cast(feature[0]) elif isinstance(feature, Sequence): return require_storage_cast(feature.feature) else: return hasattr(feature, "cast_storage") def require_storage_embed(feature: FeatureType) -> bool: """Check if a (possibly nested) feature requires embedding data into storage. Args: feature (FeatureType): the feature type to be checked Returns: :obj:`bool` """ if isinstance(feature, dict): return any(require_storage_cast(f) for f in feature.values()) elif isinstance(feature, (list, tuple)): return require_storage_cast(feature[0]) elif isinstance(feature, Sequence): return require_storage_cast(feature.feature) else: return hasattr(feature, "embed_storage") def keep_features_dicts_synced(func): """ Wrapper to keep the secondary dictionary, which tracks whether keys are decodable, of the :class:`datasets.Features` object in sync with the main dictionary. """ @wraps(func) def wrapper(*args, **kwargs): if args: self: "Features" = args[0] args = args[1:] else: self: "Features" = kwargs.pop("self") out = func(self, *args, **kwargs) assert hasattr(self, "_column_requires_decoding") self._column_requires_decoding = {col: require_decoding(feature) for col, feature in self.items()} return out wrapper._decorator_name_ = "_keep_dicts_synced" return wrapper class Features(dict): """A special dictionary that defines the internal structure of a dataset. Instantiated with a dictionary of type `dict[str, FieldType]`, where keys are the desired column names, and values are the type of that column. `FieldType` can be one of the following: - a [`~datasets.Value`] feature specifies a single typed value, e.g. `int64` or `string`. - a [`~datasets.ClassLabel`] feature specifies a field with a predefined set of classes which can have labels associated to them and will be stored as integers in the dataset. - a python `dict` which specifies that the field is a nested field containing a mapping of sub-fields to sub-fields features. 
It's possible to have nested fields of nested fields in an arbitrary manner. - a python `list` or a [`~datasets.Sequence`] specifies that the field contains a list of objects. The python `list` or [`~datasets.Sequence`] should be provided with a single sub-feature as an example of the feature type hosted in this list. <Tip> A [`~datasets.Sequence`] with a internal dictionary feature will be automatically converted into a dictionary of lists. This behavior is implemented to have a compatilbity layer with the TensorFlow Datasets library but may be un-wanted in some cases. If you don't want this behavior, you can use a python `list` instead of the [`~datasets.Sequence`]. </Tip> - a [`Array2D`], [`Array3D`], [`Array4D`] or [`Array5D`] feature for multidimensional arrays. - an [`Audio`] feature to store the absolute path to an audio file or a dictionary with the relative path to an audio file ("path" key) and its bytes content ("bytes" key). This feature extracts the audio data. - an [`Image`] feature to store the absolute path to an image file, an `np.ndarray` object, a `PIL.Image.Image` object or a dictionary with the relative path to an image file ("path" key) and its bytes content ("bytes" key). This feature extracts the image data. - [`~datasets.Translation`] and [`~datasets.TranslationVariableLanguages`], the two features specific to Machine Translation. """ def __init__(*args, **kwargs): # self not in the signature to allow passing self as a kwarg if not args: raise TypeError("descriptor '__init__' of 'Features' object needs an argument") self, *args = args super(Features, self).__init__(*args, **kwargs) self._column_requires_decoding: Dict[str, bool] = { col: require_decoding(feature) for col, feature in self.items() } __setitem__ = keep_features_dicts_synced(dict.__setitem__) __delitem__ = keep_features_dicts_synced(dict.__delitem__) update = keep_features_dicts_synced(dict.update) setdefault = keep_features_dicts_synced(dict.setdefault) pop = keep_features_dicts_synced(dict.pop) popitem = keep_features_dicts_synced(dict.popitem) clear = keep_features_dicts_synced(dict.clear) def __reduce__(self): return Features, (dict(self),) @property def type(self): """ Features field types. Returns: :obj:`pyarrow.DataType` """ return get_nested_type(self) @property def arrow_schema(self): """ Features schema. Returns: :obj:`pyarrow.Schema` """ hf_metadata = {"info": {"features": self.to_dict()}} return pa.schema(self.type).with_metadata({"huggingface": json.dumps(hf_metadata)}) @classmethod def from_arrow_schema(cls, pa_schema: pa.Schema) -> "Features": """ Construct [`Features`] from Arrow Schema. It also checks the schema metadata for Hugging Face Datasets features. Non-nullable fields are not supported and set to nullable. Args: pa_schema (`pyarrow.Schema`): Arrow Schema. Returns: [`Features`] """ # try to load features from the arrow schema metadata if pa_schema.metadata is not None and "huggingface".encode("utf-8") in pa_schema.metadata: metadata = json.loads(pa_schema.metadata["huggingface".encode("utf-8")].decode()) if "info" in metadata and "features" in metadata["info"] and metadata["info"]["features"] is not None: return Features.from_dict(metadata["info"]["features"]) obj = {field.name: generate_from_arrow_type(field.type) for field in pa_schema} return cls(**obj) @classmethod def from_dict(cls, dic) -> "Features": """ Construct [`Features`] from dict. Regenerate the nested feature object from a deserialized dict. 
We use the `_type` key to infer the dataclass name of the feature `FieldType`. It allows for a convenient constructor syntax to define features from deserialized JSON dictionaries. This function is used in particular when deserializing a [`DatasetInfo`] that was dumped to a JSON object. This acts as an analogue to [`Features.from_arrow_schema`] and handles the recursive field-by-field instantiation, but doesn't require any mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive dtypes that [`Value`] automatically performs. Args: dic (`dict[str, Any]`): Python dictionary. Returns: `Features` Example:: >>> Features.from_dict({'_type': {'dtype': 'string', 'id': None, '_type': 'Value'}}) {'_type': Value(dtype='string', id=None)} """ obj = generate_from_dict(dic) return cls(**obj) def to_dict(self): return asdict(self) def _to_yaml_list(self) -> list: # we compute the YAML list from the dict representation that is used for JSON dump yaml_data = self.to_dict() def simplify(feature: dict) -> dict: if not isinstance(feature, dict): raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}") # # sequence: -> sequence: int32 # dtype: int32 -> # if isinstance(feature.get("sequence"), dict) and list(feature["sequence"]) == ["dtype"]: feature["sequence"] = feature["sequence"]["dtype"] # # sequence: -> sequence: # struct: -> - name: foo # - name: foo -> dtype: int32 # dtype: int32 -> # if isinstance(feature.get("sequence"), dict) and list(feature["sequence"]) == ["struct"]: feature["sequence"] = feature["sequence"]["struct"] # # list: -> list: int32 # dtype: int32 -> # if isinstance(feature.get("list"), dict) and list(feature["list"]) == ["dtype"]: feature["list"] = feature["list"]["dtype"] # # list: -> list: # struct: -> - name: foo # - name: foo -> dtype: int32 # dtype: int32 -> # if isinstance(feature.get("list"), dict) and list(feature["list"]) == ["struct"]: feature["list"] = feature["list"]["struct"] # # class_label: -> class_label: # names: -> names: # - negative -> '0': negative # - positive -> '1': positive # if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), list): # server-side requirement: keys must be strings feature["class_label"]["names"] = { str(label_id): label_name for label_id, label_name in enumerate(feature["class_label"]["names"]) } return feature def to_yaml_inner(obj: Union[dict, list]) -> dict: if isinstance(obj, dict): _type = obj.pop("_type", None) if _type == "Sequence": _feature = obj.pop("feature") return simplify({"sequence": to_yaml_inner(_feature), **obj}) elif _type == "Value": return obj elif _type and not obj: return {"dtype": camelcase_to_snakecase(_type)} elif _type: return {"dtype": simplify({camelcase_to_snakecase(_type): obj})} else: return {"struct": [{"name": name, **to_yaml_inner(_feature)} for name, _feature in obj.items()]} elif isinstance(obj, list): return simplify({"list": simplify(to_yaml_inner(obj[0]))}) else: raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}") return to_yaml_inner(yaml_data)["struct"] @classmethod def _from_yaml_list(cls, yaml_data: list) -> "Features": yaml_data = copy.deepcopy(yaml_data) # we convert the list obtained from YAML data into the dict representation that is used for JSON dump def unsimplify(feature: dict) -> dict: if not isinstance(feature, dict): raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}") # # sequence: int32 -> sequence: # -> dtype: int32 # if 
isinstance(feature.get("sequence"), str): feature["sequence"] = {"dtype": feature["sequence"]} # # list: int32 -> list: # -> dtype: int32 # if isinstance(feature.get("list"), str): feature["list"] = {"dtype": feature["list"]} # # class_label: -> class_label: # names: -> names: # '0': negative -> - negative # '1': positive -> - positive # if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), dict): label_ids = sorted(feature["class_label"]["names"], key=int) if label_ids and [int(label_id) for label_id in label_ids] != list(range(int(label_ids[-1]) + 1)): raise ValueError( f"ClassLabel expected a value for all label ids [0:{int(label_ids[-1]) + 1}] but some ids are missing." ) feature["class_label"]["names"] = [feature["class_label"]["names"][label_id] for label_id in label_ids] return feature def from_yaml_inner(obj: Union[dict, list]) -> Union[dict, list]: if isinstance(obj, dict): if not obj: return {} _type = next(iter(obj)) if _type == "sequence": _feature = unsimplify(obj).pop(_type) return {"feature": from_yaml_inner(_feature), **obj, "_type": "Sequence"} if _type == "list": return [from_yaml_inner(unsimplify(obj)[_type])] if _type == "struct": return from_yaml_inner(obj["struct"]) elif _type == "dtype": if isinstance(obj["dtype"], str): # e.g. int32, float64, string, audio, image try: Value(obj["dtype"]) return {**obj, "_type": "Value"} except ValueError: # for audio and image that are Audio and Image types, not Value return {"_type": snakecase_to_camelcase(obj["dtype"])} else: return from_yaml_inner(obj["dtype"]) else: return {"_type": snakecase_to_camelcase(_type), **unsimplify(obj)[_type]} elif isinstance(obj, list): names = [_feature.pop("name") for _feature in obj] return {name: from_yaml_inner(_feature) for name, _feature in zip(names, obj)} else: raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}") return cls.from_dict(from_yaml_inner(yaml_data)) def encode_example(self, example): """ Encode example into a format for Arrow. Args: example (`dict[str, Any]`): Data in a Dataset row. Returns: `dict[str, Any]` """ example = cast_to_python_objects(example) return encode_nested_example(self, example) def encode_column(self, column, column_name: str): """ Encode column into a format for Arrow. Args: column (`list[Any]`): Data in a Dataset column. column_name (`str`): Dataset column name. Returns: `list[Any]` """ column = cast_to_python_objects(column) return [encode_nested_example(self[column_name], obj) for obj in column] def encode_batch(self, batch): """ Encode batch into a format for Arrow. Args: batch (`dict[str, list[Any]]`): Data in a Dataset batch. Returns: `dict[str, list[Any]]` """ encoded_batch = {} if set(batch) != set(self): raise ValueError(f"Column mismatch between batch {set(batch)} and features {set(self)}") for key, column in batch.items(): column = cast_to_python_objects(column) encoded_batch[key] = [encode_nested_example(self[key], obj) for obj in column] return encoded_batch def decode_example(self, example: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None): """Decode example with custom feature decoding. Args: example (`dict[str, Any]`): Dataset row data. token_per_repo_id (`dict`, *optional*): To access and decode audio or image files from private repositories on the Hub, you can pass a dictionary `repo_id (str) -> token (bool or str)`. 
Returns: `dict[str, Any]` """ return { column_name: decode_nested_example(feature, value, token_per_repo_id=token_per_repo_id) if self._column_requires_decoding[column_name] else value for column_name, (feature, value) in zip_dict( {key: value for key, value in self.items() if key in example}, example ) } def decode_column(self, column: list, column_name: str): """Decode column with custom feature decoding. Args: column (`list[Any]`): Dataset column data. column_name (`str`): Dataset column name. Returns: `list[Any]` """ return ( [decode_nested_example(self[column_name], value) if value is not None else None for value in column] if self._column_requires_decoding[column_name] else column ) def decode_batch(self, batch: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None): """Decode batch with custom feature decoding. Args: batch (`dict[str, list[Any]]`): Dataset batch data. token_per_repo_id (`dict`, *optional*): To access and decode audio or image files from private repositories on the Hub, you can pass a dictionary repo_id (str) -> token (bool or str) Returns: `dict[str, list[Any]]` """ decoded_batch = {} for column_name, column in batch.items(): decoded_batch[column_name] = ( [ decode_nested_example(self[column_name], value, token_per_repo_id=token_per_repo_id) if value is not None else None for value in column ] if self._column_requires_decoding[column_name] else column ) return decoded_batch def copy(self) -> "Features": """ Make a deep copy of [`Features`]. Returns: [`Features`] Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train") >>> copy_of_features = ds.features.copy() >>> copy_of_features {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} ``` """ return copy.deepcopy(self) def reorder_fields_as(self, other: "Features") -> "Features": """ Reorder Features fields to match the field order of other [`Features`]. The order of the fields is important since it matters for the underlying arrow data. Re-ordering the fields allows to make the underlying arrow data type match. Args: other ([`Features`]): The other [`Features`] to align with. 
Returns: [`Features`] Example:: >>> from datasets import Features, Sequence, Value >>> # let's say we have to features with a different order of nested fields (for a and b for example) >>> f1 = Features({"root": Sequence({"a": Value("string"), "b": Value("string")})}) >>> f2 = Features({"root": {"b": Sequence(Value("string")), "a": Sequence(Value("string"))}}) >>> assert f1.type != f2.type >>> # re-ordering keeps the base structure (here Sequence is defined at the root level), but make the fields order match >>> f1.reorder_fields_as(f2) {'root': Sequence(feature={'b': Value(dtype='string', id=None), 'a': Value(dtype='string', id=None)}, length=-1, id=None)} >>> assert f1.reorder_fields_as(f2).type == f2.type """ def recursive_reorder(source, target, stack=""): stack_position = " at " + stack[1:] if stack else "" if isinstance(target, Sequence): target = target.feature if isinstance(target, dict): target = {k: [v] for k, v in target.items()} else: target = [target] if isinstance(source, Sequence): source, id_, length = source.feature, source.id, source.length if isinstance(source, dict): source = {k: [v] for k, v in source.items()} reordered = recursive_reorder(source, target, stack) return Sequence({k: v[0] for k, v in reordered.items()}, id=id_, length=length) else: source = [source] reordered = recursive_reorder(source, target, stack) return Sequence(reordered[0], id=id_, length=length) elif isinstance(source, dict): if not isinstance(target, dict): raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position) if sorted(source) != sorted(target): message = ( f"Keys mismatch: between {source} (source) and {target} (target).\n" f"{source.keys()-target.keys()} are missing from target " f"and {target.keys()-source.keys()} are missing from source" + stack_position ) raise ValueError(message) return {key: recursive_reorder(source[key], target[key], stack + f".{key}") for key in target} elif isinstance(source, list): if not isinstance(target, list): raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position) if len(source) != len(target): raise ValueError(f"Length mismatch: between {source} and {target}" + stack_position) return [recursive_reorder(source[i], target[i], stack + ".<list>") for i in range(len(target))] else: return source return Features(recursive_reorder(self, other)) def flatten(self, max_depth=16) -> "Features": """Flatten the features. Every dictionary column is removed and is replaced by all the subfields it contains. The new fields are named by concatenating the name of the original column and the subfield name like this: `<original>.<subfield>`. If a column contains nested dictionaries, then all the lower-level subfields names are also concatenated to form new columns: `<original>.<subfield>.<subsubfield>`, etc. Returns: [`Features`]: The flattened features. 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("squad", split="train") >>> ds.features.flatten() {'answers.answer_start': Sequence(feature=Value(dtype='int32', id=None), length=-1, id=None), 'answers.text': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None), 'context': Value(dtype='string', id=None), 'id': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None)} ``` """ for depth in range(1, max_depth): no_change = True flattened = self.copy() for column_name, subfeature in self.items(): if isinstance(subfeature, dict): no_change = False flattened.update({f"{column_name}.{k}": v for k, v in subfeature.items()}) del flattened[column_name] elif isinstance(subfeature, Sequence) and isinstance(subfeature.feature, dict): no_change = False flattened.update( { f"{column_name}.{k}": Sequence(v) if not isinstance(v, dict) else [v] for k, v in subfeature.feature.items() } ) del flattened[column_name] elif hasattr(subfeature, "flatten") and subfeature.flatten() != subfeature: no_change = False flattened.update({f"{column_name}.{k}": v for k, v in subfeature.flatten().items()}) del flattened[column_name] self = flattened if no_change: break return self def _align_features(features_list: List[Features]) -> List[Features]: """Align dictionaries of features so that the keys that are found in multiple dictionaries share the same feature.""" name2feature = {} for features in features_list: for k, v in features.items(): if k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"): name2feature[k] = v return [Features({k: name2feature[k] for k in features.keys()}) for features in features_list] def _check_if_features_can_be_aligned(features_list: List[Features]): """Check if the dictionaries of features can be aligned. Two dictonaries of features can be aligned if the keys they share have the same type or some of them is of type `Value("null")`. """ name2feature = {} for features in features_list: for k, v in features.items(): if k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"): name2feature[k] = v for features in features_list: for k, v in features.items(): if not (isinstance(v, Value) and v.dtype == "null") and name2feature[k] != v: raise ValueError( f'The features can\'t be aligned because the key {k} of features {features} has unexpected type - {v} (expected either {name2feature[k]} or Value("null").' )
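# --------------------------------------------------------------------------- #
# Illustrative usage sketch (not part of the original module). It shows how a
# `Features` mapping built from the classes above encodes a raw example and
# round-trips through its dict and Arrow-schema representations. The column
# names ("text", "label") and the label names are placeholders invented for
# this sketch; run it in package context, e.g. `python -m datasets.features.features`,
# so the relative imports at the top of the module resolve.
if __name__ == "__main__":
    features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})
    # string labels are encoded to their integer ids for Arrow storage
    print(features.encode_example({"text": "hello", "label": "pos"}))  # {'text': 'hello', 'label': 1}
    # dict round trip, as used when (de)serializing a DatasetInfo to JSON
    assert Features.from_dict(features.to_dict()) == features
    # Arrow schema round trip: the features are embedded in the schema metadata
    assert Features.from_arrow_schema(features.arrow_schema) == features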
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/features/translation.py
from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class Translation: """`FeatureConnector` for translations with fixed languages per example. Here for compatiblity with tfds. Args: languages (`dict`): A dictionary for each example mapping string language codes to string translations. Example: ```python >>> # At construction time: >>> datasets.features.Translation(languages=['en', 'fr', 'de']) >>> # During data generation: >>> yield { ... 'en': 'the cat', ... 'fr': 'le chat', ... 'de': 'die katze' ... } ``` """ languages: List[str] id: Optional[str] = None # Automatically constructed dtype: ClassVar[str] = "dict" pa_type: ClassVar[Any] = None _type: str = field(default="Translation", init=False, repr=False) def __call__(self): return pa.struct({lang: pa.string() for lang in sorted(self.languages)}) def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]: """Flatten the Translation feature into a dictionary.""" from .features import Value return {k: Value("string") for k in sorted(self.languages)} @dataclass class TranslationVariableLanguages: """`FeatureConnector` for translations with variable languages per example. Here for compatiblity with tfds. Args: languages (`dict`): A dictionary for each example mapping string language codes to one or more string translations. The languages present may vary from example to example. Returns: - `language` or `translation` (variable-length 1D `tf.Tensor` of `tf.string`): Language codes sorted in ascending order or plain text translations, sorted to align with language codes. Example: ```python >>> # At construction time: >>> datasets.features.TranslationVariableLanguages(languages=['en', 'fr', 'de']) >>> # During data generation: >>> yield { ... 'en': 'the cat', ... 'fr': ['le chat', 'la chatte,'] ... 'de': 'die katze' ... } >>> # Tensor returned : >>> { ... 'language': ['en', 'de', 'fr', 'fr'], ... 'translation': ['the cat', 'die katze', 'la chatte', 'le chat'], ... } ``` """ languages: Optional[List] = None num_languages: Optional[int] = None id: Optional[str] = None # Automatically constructed dtype: ClassVar[str] = "dict" pa_type: ClassVar[Any] = None _type: str = field(default="TranslationVariableLanguages", init=False, repr=False) def __post_init__(self): self.languages = sorted(set(self.languages)) if self.languages else None self.num_languages = len(self.languages) if self.languages else None def __call__(self): return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())}) def encode_example(self, translation_dict): lang_set = set(self.languages) if self.languages and set(translation_dict) - lang_set: raise ValueError( f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).' ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. translation_tuples = [] for lang, text in translation_dict.items(): if isinstance(text, str): translation_tuples.append((lang, text)) else: translation_tuples.extend([(lang, el) for el in text]) # Ensure translations are in ascending order by language code. 
languages, translations = zip(*sorted(translation_tuples)) return {"language": languages, "translation": translations} def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]: """Flatten the TranslationVariableLanguages feature into a dictionary.""" from .features import Sequence, Value return { "language": Sequence(Value("string")), "translation": Sequence(Value("string")), }
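# --------------------------------------------------------------------------- #
# Illustrative usage sketch (not part of the original module). It shows how
# `TranslationVariableLanguages` turns a per-language mapping into the aligned
# `language`/`translation` columns; the sentences and language codes are
# placeholders. Run in package context (`python -m datasets.features.translation`)
# so the relative import inside `flatten` resolves.
if __name__ == "__main__":
    feature = TranslationVariableLanguages(languages=["en", "fr", "de"])
    encoded = feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
    # one (language, translation) pair per string, sorted by language code (then text)
    print(encoded)  # {'language': ('en', 'fr', 'fr'), 'translation': ('the cat', 'la chatte', 'le chat')}
    # flattening exposes the two underlying string sequences
    print(feature.flatten())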
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/download/__init__.py
__all__ = [ "DownloadConfig", "DownloadManager", "DownloadMode", "StreamingDownloadManager", ] from .download_config import DownloadConfig from .download_manager import DownloadManager, DownloadMode from .streaming_download_manager import StreamingDownloadManager
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/download/mock_download_manager.py
# Copyright 2020 The TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Mock download manager interface.""" import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version logger = get_logger(__name__) class MockDownloadManager: dummy_file_name = "dummy_data" datasets_scripts_dir = "datasets" is_streaming = False def __init__( self, dataset_name: str, config: str, version: Union[Version, str], cache_dir: Optional[str] = None, use_local_dummy_data: bool = False, load_existing_dummy_data: bool = True, download_callbacks: Optional[List[Callable]] = None, ): self.downloaded_size = 0 self.dataset_name = dataset_name self.cache_dir = cache_dir self.use_local_dummy_data = use_local_dummy_data self.config = config # download_callbacks take a single url as input self.download_callbacks: List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root self.load_existing_dummy_data = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general self.version_name = str(version) # to be downloaded self._dummy_file = None self._bucket_url = None @property def dummy_file(self): if self._dummy_file is None: self._dummy_file = self.download_dummy_data() return self._dummy_file @property def dummy_data_folder(self): if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy", self.config.name, self.version_name) # structure is dummy / version_name return os.path.join("dummy", self.version_name) @property def dummy_zip_file(self): return os.path.join(self.dummy_data_folder, "dummy_data.zip") def download_dummy_data(self): path_to_dummy_data_dir = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) local_path = cached_path( path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True ) return os.path.join(local_path, self.dummy_file_name) @property def local_path_to_dummy_data(self): return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file) @property def github_path_to_dummy_data(self): if self._bucket_url is None: self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/")) return self._bucket_url @property def manual_dir(self): # return full path if its a dir if os.path.isdir(self.dummy_file): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1]) # this function has to be in the manager under this name so that testing works def download_and_extract(self, data_url, *args): if self.load_existing_dummy_data: # dummy data is downloaded and tested dummy_file = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned dummy_file = self.dummy_file_name # special case when data_url is a dict if isinstance(data_url, dict): return self.create_dummy_data_dict(dummy_file, data_url) elif isinstance(data_url, (list, tuple)): return self.create_dummy_data_list(dummy_file, data_url) else: return self.create_dummy_data_single(dummy_file, data_url) # this function has to be in the manager under this name so that testing works def download(self, data_url, *args): return self.download_and_extract(data_url) # this function has to be in the manager under this name so that testing works def download_custom(self, data_url, custom_download): return self.download_and_extract(data_url) # this function has to be in the manager under this name so that testing works def extract(self, path, *args, **kwargs): return path # this function has to be in the manager under this name so that testing works def get_recorded_sizes_checksums(self): return {} def create_dummy_data_dict(self, path_to_dummy_data, data_url): dummy_data_dict = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(single_urls, list): for single_url in single_urls: download_callback(single_url) else: single_url = single_urls download_callback(single_url) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(single_urls, list): value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls] else: single_url = single_urls value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name)) dummy_data_dict[key] = value # make sure that values are unique if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len( dummy_data_dict.values() ): # append key to value to make its name unique dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def create_dummy_data_list(self, path_to_dummy_data, data_url): dummy_data_list = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url) is_pubmed_records = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): data_url = [data_url[0]] * len(data_url) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(single_url) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1])) dummy_data_list.append(value) return dummy_data_list def create_dummy_data_single(self, path_to_dummy_data, data_url): for download_callback in self.download_callbacks: download_callback(data_url) # we force the name of each key to be the last file / folder name of the url path # if the url has 
arguments, we need to encode them with urllib.parse.quote_plus value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1])) if os.path.exists(value) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def delete_extracted_files(self): pass def manage_extracted_files(self): pass def iter_archive(self, path): def _iter_archive_members(path): # this preserves the order of the members inside the ZIP archive dummy_parent_path = Path(self.dummy_file).parent relative_path = path.relative_to(dummy_parent_path) with ZipFile(self.local_path_to_dummy_data) as zip_file: members = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix()): yield dummy_parent_path.joinpath(member) path = Path(path) file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*") for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__")): yield file_path.relative_to(path).as_posix(), file_path.open("rb") def iter_files(self, paths): if not isinstance(paths, list): paths = [paths] for path in paths: if os.path.isfile(path): if os.path.basename(path).startswith((".", "__")): return yield path else: for dirpath, dirnames, filenames in os.walk(path): if os.path.basename(dirpath).startswith((".", "__")): continue dirnames.sort() for filename in sorted(filenames): if filename.startswith((".", "__")): continue yield os.path.join(dirpath, filename)
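# --------------------------------------------------------------------------- #
# Illustrative usage sketch (not part of the original module). With
# `load_existing_dummy_data=False`, `download_and_extract` does not fetch any
# dummy archive and simply rewrites each URL into a path under the dummy_data
# directory, which is how dataset scripts are exercised against local dummy
# files in tests. The dataset name and URLs are placeholders; run in package
# context, e.g. `python -m datasets.download.mock_download_manager`.
if __name__ == "__main__":
    dl_manager = MockDownloadManager(
        dataset_name="my_dataset", config=None, version="1.0.0", load_existing_dummy_data=False
    )
    urls = {"train": "https://host.com/train.csv", "test": "https://host.com/test.csv"}
    print(dl_manager.download_and_extract(urls))
    # -> {'train': 'dummy_data/train.csv', 'test': 'dummy_data/test.csv'} (POSIX path separator)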
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/download/download_config.py
import copy import warnings from dataclasses import dataclass, field from pathlib import Path from typing import Any, Dict, Optional, Union from .. import config @dataclass class DownloadConfig: """Configuration for our cached path manager. Attributes: cache_dir (`str` or `Path`, *optional*): Specify a cache directory to save the file to (overwrite the default cache dir). force_download (`bool`, defaults to `False`): If `True`, re-download the file even if it's already cached in the cache dir. resume_download (`bool`, defaults to `False`): If `True`, resume the download if an incompletely received file is found. proxies (`dict`, *optional*): user_agent (`str`, *optional*): Optional string or dict that will be appended to the user-agent on remote requests. extract_compressed_file (`bool`, defaults to `False`): If `True` and the path points to a zip or tar file, extract the compressed file in a folder alongside the archive. force_extract (`bool`, defaults to `False`): If `True` when `extract_compressed_file` is `True` and the archive was already extracted, re-extract the archive and override the folder where it was extracted. delete_extracted (`bool`, defaults to `False`): Whether to delete (or keep) the extracted files. use_etag (`bool`, defaults to `True`): Whether to use the ETag HTTP response header to validate the cached files. num_proc (`int`, *optional*): The number of processes to launch to download the files in parallel. max_retries (`int`, defaults to `1`): The number of times to retry an HTTP request if it fails. token (`str` or `bool`, *optional*): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`. use_auth_token (`str` or `bool`, *optional*): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`. <Deprecated version="2.14.0"> `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0. </Deprecated> ignore_url_params (`bool`, defaults to `False`): Whether to strip all query parameters and fragments from the download URL before using it for caching the file. storage_options (`dict`, *optional*): Key/value pairs to be passed on to the dataset file-system backend, if any. download_desc (`str`, *optional*): A description to be displayed alongside the progress bar while downloading the files.
""" cache_dir: Optional[Union[str, Path]] = None force_download: bool = False resume_download: bool = False local_files_only: bool = False proxies: Optional[Dict] = None user_agent: Optional[str] = None extract_compressed_file: bool = False force_extract: bool = False delete_extracted: bool = False use_etag: bool = True num_proc: Optional[int] = None max_retries: int = 1 token: Optional[Union[str, bool]] = None use_auth_token = "deprecated" ignore_url_params: bool = False storage_options: Dict[str, Dict[str, Any]] = field(default_factory=dict) download_desc: Optional[str] = None def __post_init__(self): if self.use_auth_token != "deprecated": warnings.warn( "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n" f"You can remove this warning by passing 'token={self.use_auth_token}' instead.", FutureWarning, ) self.token = self.use_auth_token if "hf" not in self.storage_options: self.storage_options["hf"] = {"token": self.token, "endpoint": config.HF_ENDPOINT} def copy(self) -> "DownloadConfig": return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/download/streaming_download_manager.py
import glob import io import os import posixpath import re import tarfile import time import xml.dom.minidom import zipfile from asyncio import TimeoutError from io import BytesIO from itertools import chain from pathlib import Path, PurePosixPath from typing import Any, Callable, Dict, Generator, Iterable, List, Optional, Tuple, Union from xml.etree import ElementTree as ET import fsspec from aiohttp.client_exceptions import ClientError from .. import config from ..filesystems import COMPRESSION_FILESYSTEMS from ..utils.file_utils import ( get_authentication_headers_for_url, get_datasets_user_agent, http_head, is_local_path, is_relative_path, url_or_path_join, ) from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .download_config import DownloadConfig logger = get_logger(__name__) BASE_KNOWN_EXTENSIONS = [ "txt", "csv", "json", "jsonl", "tsv", "conll", "conllu", "orig", "parquet", "pkl", "pickle", "rel", "xml", ] COMPRESSION_EXTENSION_TO_PROTOCOL = { # single file compression **{fs_class.extension.lstrip("."): fs_class.protocol for fs_class in COMPRESSION_FILESYSTEMS}, # archive compression "zip": "zip", } SINGLE_FILE_COMPRESSION_PROTOCOLS = {fs_class.protocol for fs_class in COMPRESSION_FILESYSTEMS} SINGLE_SLASH_AFTER_PROTOCOL_PATTERN = re.compile(r"(?<!:):/") MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL = { bytes.fromhex("504B0304"): "zip", bytes.fromhex("504B0506"): "zip", # empty archive bytes.fromhex("504B0708"): "zip", # spanned archive bytes.fromhex("425A68"): "bz2", bytes.fromhex("1F8B"): "gzip", bytes.fromhex("FD377A585A00"): "xz", bytes.fromhex("04224D18"): "lz4", bytes.fromhex("28B52FFD"): "zstd", } MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL = { b"Rar!": "rar", } MAGIC_NUMBER_MAX_LENGTH = max( len(magic_number) for magic_number in chain(MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL, MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL) ) class NonStreamableDatasetError(Exception): pass def xjoin(a, *p): """ This function extends os.path.join to support the "::" hop separator. It supports both paths and urls. A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". This is used to access files inside a zip file over http for example. Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. Then you can just chain the url this way: zip://folder1/file.txt::https://host.com/archive.zip The xjoin function allows you to apply the join on the first path of the chain. Example:: >>> xjoin("zip://folder1::https://host.com/archive.zip", "file.txt") zip://folder1/file.txt::https://host.com/archive.zip """ a, *b = str(a).split("::") if is_local_path(a): return os.path.join(a, *p) else: a = posixpath.join(a, *p) return "::".join([a] + b) def xdirname(a): """ This function extends os.path.dirname to support the "::" hop separator. It supports both paths and urls. A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". This is used to access files inside a zip file over http for example. Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. Then you can just chain the url this way: zip://folder1/file.txt::https://host.com/archive.zip The xdirname function allows you to apply the dirname on the first path of the chain. 
Example:: >>> xdirname("zip://folder1/file.txt::https://host.com/archive.zip") zip://folder1::https://host.com/archive.zip """ a, *b = str(a).split("::") if is_local_path(a): a = os.path.dirname(Path(a).as_posix()) else: a = posixpath.dirname(a) # if we end up at the root of the protocol, we get for example a = 'http:' # so we have to fix it by adding the '//' that was removed: if a.endswith(":"): a += "//" return "::".join([a] + b) def xexists(urlpath: str, download_config: Optional[DownloadConfig] = None): """Extend `os.path.exists` function to support both local and remote files. Args: urlpath (`str`): URL path. download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `bool` """ main_hop, *rest_hops = _as_str(urlpath).split("::") if is_local_path(main_hop): return os.path.exists(main_hop) else: urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) fs, *_ = fsspec.get_fs_token_paths(urlpath, storage_options=storage_options) return fs.exists(main_hop) def xbasename(a): """ This function extends os.path.basename to support the "::" hop separator. It supports both paths and urls. A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". This is used to access files inside a zip file over http for example. Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. Then you can just chain the url this way: zip://folder1/file.txt::https://host.com/archive.zip The xbasename function allows you to apply the basename on the first path of the chain. Example:: >>> xbasename("zip://folder1/file.txt::https://host.com/archive.zip") file.txt """ a, *b = str(a).split("::") if is_local_path(a): return os.path.basename(Path(a).as_posix()) else: return posixpath.basename(a) def xsplit(a): """ This function extends os.path.split to support the "::" hop separator. It supports both paths and urls. A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". This is used to access files inside a zip file over http for example. Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. Then you can just chain the url this way: zip://folder1/file.txt::https://host.com/archive.zip The xsplit function allows you to apply the xsplit on the first path of the chain. Example:: >>> xsplit("zip://folder1/file.txt::https://host.com/archive.zip") ('zip://folder1::https://host.com/archive.zip', 'file.txt') """ a, *b = str(a).split("::") if is_local_path(a): return os.path.split(Path(a).as_posix()) else: a, tail = posixpath.split(a) return "::".join([a + "//" if a.endswith(":") else a] + b), tail def xsplitext(a): """ This function extends os.path.splitext to support the "::" hop separator. It supports both paths and urls. A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". This is used to access files inside a zip file over http for example. Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. Then you can just chain the url this way: zip://folder1/file.txt::https://host.com/archive.zip The xsplitext function allows you to apply the splitext on the first path of the chain. 
Example:: >>> xsplitext("zip://folder1/file.txt::https://host.com/archive.zip") ('zip://folder1/file::https://host.com/archive.zip', '.txt') """ a, *b = str(a).split("::") if is_local_path(a): return os.path.splitext(Path(a).as_posix()) else: a, ext = posixpath.splitext(a) return "::".join([a] + b), ext def xisfile(path, download_config: Optional[DownloadConfig] = None) -> bool: """Extend `os.path.isfile` function to support remote files. Args: path (`str`): URL path. download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `bool` """ main_hop, *rest_hops = str(path).split("::") if is_local_path(main_hop): return os.path.isfile(path) else: path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config) fs, *_ = fsspec.get_fs_token_paths(path, storage_options=storage_options) return fs.isfile(main_hop) def xgetsize(path, download_config: Optional[DownloadConfig] = None) -> int: """Extend `os.path.getsize` function to support remote files. Args: path (`str`): URL path. download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `int`: optional """ main_hop, *rest_hops = str(path).split("::") if is_local_path(main_hop): return os.path.getsize(path) else: path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config) fs, *_ = fsspec.get_fs_token_paths(path, storage_options=storage_options) size = fs.size(main_hop) if size is None: # use xopen instead of fs.open to make data fetching more robust with xopen(path, download_config=download_config) as f: size = len(f.read()) return size def xisdir(path, download_config: Optional[DownloadConfig] = None) -> bool: """Extend `os.path.isdir` function to support remote files. Args: path (`str`): URL path. download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `bool` """ main_hop, *rest_hops = str(path).split("::") if is_local_path(main_hop): return os.path.isdir(path) else: path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config) fs, *_ = fsspec.get_fs_token_paths(path, storage_options=storage_options) inner_path = main_hop.split("://")[1] if not inner_path.strip("/"): return True return fs.isdir(inner_path) def xrelpath(path, start=None): """Extend `os.path.relpath` function to support remote files. Args: path (`str`): URL path. start (`str`): Start URL directory path. Returns: `str` """ main_hop, *rest_hops = str(path).split("::") if is_local_path(main_hop): return os.path.relpath(main_hop, start=start) if start else os.path.relpath(main_hop) else: return posixpath.relpath(main_hop, start=str(start).split("::")[0]) if start else os.path.relpath(main_hop) def _add_retries_to_file_obj_read_method(file_obj): read = file_obj.read max_retries = config.STREAMING_READ_MAX_RETRIES def read_with_retries(*args, **kwargs): disconnect_err = None for retry in range(1, max_retries + 1): try: out = read(*args, **kwargs) break except (ClientError, TimeoutError) as err: disconnect_err = err logger.warning( f"Got disconnected from remote data host. 
Retrying in {config.STREAMING_READ_RETRY_INTERVAL}sec [{retry}/{max_retries}]" ) time.sleep(config.STREAMING_READ_RETRY_INTERVAL) else: raise ConnectionError("Server Disconnected") from disconnect_err return out file_obj.read = read_with_retries def _get_path_extension(path: str) -> str: # Get extension: https://foo.bar/train.json.gz -> gz extension = path.split(".")[-1] # Remove query params ("dl=1", "raw=true"): gz?dl=1 -> gz # Remove shards infos (".txt_1", ".txt-00000-of-00100"): txt_1 -> txt for symb in "?-_": extension = extension.split(symb)[0] return extension def _get_extraction_protocol_with_magic_number(f) -> Optional[str]: """read the magic number from a file-like object and return the compression protocol""" # Check if the file object is seekable even before reading the magic number (to avoid https://bugs.python.org/issue26440) try: f.seek(0) except (AttributeError, io.UnsupportedOperation): return None magic_number = f.read(MAGIC_NUMBER_MAX_LENGTH) f.seek(0) for i in range(MAGIC_NUMBER_MAX_LENGTH): compression = MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i]) if compression is not None: return compression compression = MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i]) if compression is not None: raise NotImplementedError(f"Compression protocol '{compression}' not implemented.") def _get_extraction_protocol(urlpath: str, download_config: Optional[DownloadConfig] = None) -> Optional[str]: # get inner file: zip://train-00000.json.gz::https://foo.bar/data.zip -> zip://train-00000.json.gz urlpath = str(urlpath) path = urlpath.split("::")[0] extension = _get_path_extension(path) if ( extension in BASE_KNOWN_EXTENSIONS or extension in ["tgz", "tar"] or path.endswith((".tar.gz", ".tar.bz2", ".tar.xz")) ): return None elif extension in COMPRESSION_EXTENSION_TO_PROTOCOL: return COMPRESSION_EXTENSION_TO_PROTOCOL[extension] urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) try: with fsspec.open(urlpath, **(storage_options or {})) as f: return _get_extraction_protocol_with_magic_number(f) except FileNotFoundError: if urlpath.startswith(config.HF_ENDPOINT): raise FileNotFoundError( urlpath + "\nIf the repo is private or gated, make sure to log in with `huggingface-cli login`." ) from None else: raise def _prepare_path_and_storage_options( urlpath: str, download_config: Optional[DownloadConfig] = None ) -> Tuple[str, dict]: prepared_urlpath = [] prepared_storage_options = {} for hop in urlpath.split("::"): hop, storage_options = _prepare_single_hop_path_and_storage_options(hop, download_config=download_config) prepared_urlpath.append(hop) prepared_storage_options.update(storage_options) return "::".join(prepared_urlpath), storage_options def _prepare_single_hop_path_and_storage_options( urlpath: str, download_config: Optional[DownloadConfig] = None ) -> Tuple[str, Dict[str, Dict[str, Any]]]: """ Prepare the URL and the kwargs that must be passed to the HttpFileSystem or to requests.get/head In particular it resolves google drive URLs It also adds the authentication headers for the Hugging Face Hub, for both https:// and hf:// paths. 
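    Example (illustrative sketch; the URL and token below are placeholders)::

        >>> download_config = DownloadConfig(token="hf_xxx")
        >>> urlpath, storage_options = _prepare_single_hop_path_and_storage_options(
        ...     "https://host.com/file.txt", download_config=download_config
        ... )
        >>> sorted(storage_options["https"])  # auth/user-agent headers plus client kwargs
        ['client_kwargs', 'headers']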
""" token = None if download_config is None else download_config.token protocol = urlpath.split("://")[0] if "://" in urlpath else "file" if download_config is not None and protocol in download_config.storage_options: storage_options = {protocol: download_config.storage_options[protocol]} else: storage_options = {} if protocol in ["http", "https"]: storage_options[protocol] = { "headers": { **get_authentication_headers_for_url(urlpath, token=token), "user-agent": get_datasets_user_agent(), }, "client_kwargs": {"trust_env": True}, # Enable reading proxy env variables. **(storage_options.get(protocol, {})), } if "drive.google.com" in urlpath: response = http_head(urlpath) cookies = None for k, v in response.cookies.items(): if k.startswith("download_warning"): urlpath += "&confirm=" + v cookies = response.cookies storage_options[protocol] = {"cookies": cookies, **storage_options.get(protocol, {})} # Fix Google Drive URL to avoid Virus scan warning if "drive.google.com" in urlpath and "confirm=" not in urlpath: urlpath += "&confirm=t" if urlpath.startswith("https://raw.githubusercontent.com/"): # Workaround for served data with gzip content-encoding: https://github.com/fsspec/filesystem_spec/issues/389 storage_options[protocol] = {"block_size": 0, **storage_options.get(protocol, {})} elif protocol == "hf": storage_options = {"token": token, "endpoint": config.HF_ENDPOINT, **(storage_options or {})} return urlpath, storage_options def xopen(file: str, mode="r", *args, download_config: Optional[DownloadConfig] = None, **kwargs): """Extend `open` function to support remote files using `fsspec`. It also has a retry mechanism in case connection fails. The `args` and `kwargs` are passed to `fsspec.open`, except `token` which is used for queries to private repos on huggingface.co Args: file (`str`): Path name of the file to be opened. mode (`str`, *optional*, default "r"): Mode in which the file is opened. *args: Arguments to be passed to `fsspec.open`. download_config : mainly use token or storage_options to support different platforms and auth types. **kwargs: Keyword arguments to be passed to `fsspec.open`. Returns: file object """ # This works as well for `xopen(str(Path(...)))` file_str = _as_str(file) main_hop, *rest_hops = file_str.split("::") if is_local_path(main_hop): return open(main_hop, mode, *args, **kwargs) # add headers and cookies for authentication on the HF Hub and for Google Drive file, storage_options = _prepare_path_and_storage_options(file_str, download_config=download_config) kwargs = {**kwargs, **(storage_options or {})} try: file_obj = fsspec.open(file, mode=mode, *args, **kwargs).open() except ValueError as e: if str(e) == "Cannot seek streaming HTTP file": raise NonStreamableDatasetError( "Streaming is not possible for this dataset because data host server doesn't support HTTP range " "requests. You can still load this dataset in non-streaming mode by passing `streaming=False` (default)" ) from e else: raise except FileNotFoundError: if file.startswith(config.HF_ENDPOINT): raise FileNotFoundError( file + "\nIf the repo is private or gated, make sure to log in with `huggingface-cli login`." ) from None else: raise _add_retries_to_file_obj_read_method(file_obj) return file_obj def xlistdir(path: str, download_config: Optional[DownloadConfig] = None) -> List[str]: """Extend `os.listdir` function to support remote files. Args: path (`str`): URL path. download_config : mainly use token or storage_options to support different platforms and auth types. 
Returns: `list` of `str` """ main_hop, *rest_hops = _as_str(path).split("::") if is_local_path(main_hop): return os.listdir(path) else: # globbing inside a zip in a private repo requires authentication path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config) fs, *_ = fsspec.get_fs_token_paths(path, storage_options=storage_options) inner_path = main_hop.split("://")[1] if inner_path.strip("/") and not fs.isdir(inner_path): raise FileNotFoundError(f"Directory doesn't exist: {path}") objects = fs.listdir(inner_path) return [os.path.basename(obj["name"]) for obj in objects] def xglob(urlpath, *, recursive=False, download_config: Optional[DownloadConfig] = None): """Extend `glob.glob` function to support remote files. Args: urlpath (`str`): URL path with shell-style wildcard patterns. recursive (`bool`, default `False`): Whether to match the "**" pattern recursively to zero or more directories or subdirectories. download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `list` of `str` """ main_hop, *rest_hops = _as_str(urlpath).split("::") if is_local_path(main_hop): return glob.glob(main_hop, recursive=recursive) else: # globbing inside a zip in a private repo requires authentication urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) fs, *_ = fsspec.get_fs_token_paths(urlpath, storage_options=storage_options) # - If there's no "*" in the pattern, get_fs_token_paths() doesn't do any pattern matching # so to be able to glob patterns like "[0-9]", we have to call `fs.glob`. # - Also "*" in get_fs_token_paths() only matches files: we have to call `fs.glob` to match directories. # - If there is "**" in the pattern, `fs.glob` must be called anyway. inner_path = main_hop.split("://")[1] globbed_paths = fs.glob(inner_path) protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[-1] return ["::".join([f"{protocol}://{globbed_path}"] + rest_hops) for globbed_path in globbed_paths] def xwalk(urlpath, download_config: Optional[DownloadConfig] = None, **kwargs): """Extend `os.walk` function to support remote files. Args: urlpath (`str`): URL root path. download_config : mainly use token or storage_options to support different platforms and auth types. **kwargs: Additional keyword arguments forwarded to the underlying filesystem. Yields: `tuple`: 3-tuple (dirpath, dirnames, filenames). 
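    Example (illustrative sketch; the archive URL is a placeholder)::

        >>> for dirpath, dirnames, filenames in xwalk("zip://::https://host.com/archive.zip"):
        ...     ...  # each dirpath keeps the chained form, e.g. "zip://folder1::https://host.com/archive.zip"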
""" main_hop, *rest_hops = _as_str(urlpath).split("::") if is_local_path(main_hop): yield from os.walk(main_hop, **kwargs) else: # walking inside a zip in a private repo requires authentication urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) fs, *_ = fsspec.get_fs_token_paths(urlpath, storage_options=storage_options) inner_path = main_hop.split("://")[1] if inner_path.strip("/") and not fs.isdir(inner_path): return [] protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[-1] for dirpath, dirnames, filenames in fs.walk(inner_path, **kwargs): yield "::".join([f"{protocol}://{dirpath}"] + rest_hops), dirnames, filenames class xPath(type(Path())): """Extension of `pathlib.Path` to support both local paths and remote URLs.""" def __str__(self): path_str = super().__str__() main_hop, *rest_hops = path_str.split("::") if is_local_path(main_hop): return main_hop path_as_posix = path_str.replace("\\", "/") path_as_posix = SINGLE_SLASH_AFTER_PROTOCOL_PATTERN.sub("://", path_as_posix) path_as_posix += "//" if path_as_posix.endswith(":") else "" # Add slashes to root of the protocol return path_as_posix def exists(self, download_config: Optional[DownloadConfig] = None): """Extend `pathlib.Path.exists` method to support both local and remote files. Args: download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `bool` """ return xexists(str(self), download_config=download_config) def glob(self, pattern, download_config: Optional[DownloadConfig] = None): """Glob function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Args: pattern (`str`): Pattern that resulting paths must match. download_config : mainly use token or storage_options to support different platforms and auth types. Yields: [`xPath`] """ posix_path = self.as_posix() main_hop, *rest_hops = posix_path.split("::") if is_local_path(main_hop): yield from Path(main_hop).glob(pattern) else: # globbing inside a zip in a private repo requires authentication if rest_hops: urlpath = rest_hops[0] urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) storage_options = {urlpath.split("://")[0]: storage_options} posix_path = "::".join([main_hop, urlpath, *rest_hops[1:]]) else: storage_options = None fs, *_ = fsspec.get_fs_token_paths(xjoin(posix_path, pattern), storage_options=storage_options) # - If there's no "*" in the pattern, get_fs_token_paths() doesn't do any pattern matching # so to be able to glob patterns like "[0-9]", we have to call `fs.glob`. # - Also "*" in get_fs_token_paths() only matches files: we have to call `fs.glob` to match directories. # - If there is "**" in the pattern, `fs.glob` must be called anyway. globbed_paths = fs.glob(xjoin(main_hop, pattern)) for globbed_path in globbed_paths: yield type(self)("::".join([f"{fs.protocol}://{globbed_path}"] + rest_hops)) def rglob(self, pattern, **kwargs): """Rglob function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Args: pattern (`str`): Pattern that resulting paths must match. Yields: [`xPath`] """ return self.glob("**/" + pattern, **kwargs) @property def parent(self) -> "xPath": """Name function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. 
Returns: [`xPath`] """ return type(self)(xdirname(self.as_posix())) @property def name(self) -> str: """Name function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Returns: `str` """ return PurePosixPath(self.as_posix().split("::")[0]).name @property def stem(self) -> str: """Stem function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Returns: `str` """ return PurePosixPath(self.as_posix().split("::")[0]).stem @property def suffix(self) -> str: """Suffix function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Returns: `str` """ return PurePosixPath(self.as_posix().split("::")[0]).suffix def open(self, *args, **kwargs): """Extend :func:`xopen` to support argument of type :obj:`~pathlib.Path`. Args: **args: Arguments passed to :func:`fsspec.open`. **kwargs: Keyword arguments passed to :func:`fsspec.open`. Returns: `io.FileIO`: File-like object. """ return xopen(str(self), *args, **kwargs) def joinpath(self, *p: Tuple[str, ...]) -> "xPath": """Extend :func:`xjoin` to support argument of type :obj:`~pathlib.Path`. Args: *p (`tuple` of `str`): Other path components. Returns: [`xPath`] """ return type(self)(xjoin(self.as_posix(), *p)) def __truediv__(self, p: str) -> "xPath": return self.joinpath(p) def with_suffix(self, suffix): main_hop, *rest_hops = str(self).split("::") if is_local_path(main_hop): return type(self)(str(super().with_suffix(suffix))) return type(self)("::".join([type(self)(PurePosixPath(main_hop).with_suffix(suffix)).as_posix()] + rest_hops)) def _as_str(path: Union[str, Path, xPath]): return str(path) if isinstance(path, xPath) else str(xPath(str(path))) def xgzip_open(filepath_or_buffer, *args, download_config: Optional[DownloadConfig] = None, **kwargs): import gzip if hasattr(filepath_or_buffer, "read"): return gzip.open(filepath_or_buffer, *args, **kwargs) else: filepath_or_buffer = str(filepath_or_buffer) return gzip.open(xopen(filepath_or_buffer, "rb", download_config=download_config), *args, **kwargs) def xnumpy_load(filepath_or_buffer, *args, download_config: Optional[DownloadConfig] = None, **kwargs): import numpy as np if hasattr(filepath_or_buffer, "read"): return np.load(filepath_or_buffer, *args, **kwargs) else: filepath_or_buffer = str(filepath_or_buffer) return np.load(xopen(filepath_or_buffer, "rb", download_config=download_config), *args, **kwargs) def xpandas_read_csv(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs): import pandas as pd if hasattr(filepath_or_buffer, "read"): return pd.read_csv(filepath_or_buffer, **kwargs) else: filepath_or_buffer = str(filepath_or_buffer) if kwargs.get("compression", "infer") == "infer": kwargs["compression"] = _get_extraction_protocol(filepath_or_buffer, download_config=download_config) return pd.read_csv(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs) def xpandas_read_excel(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs): import pandas as pd if hasattr(filepath_or_buffer, "read"): try: return pd.read_excel(filepath_or_buffer, **kwargs) except ValueError: # Cannot seek streaming HTTP file return pd.read_excel(BytesIO(filepath_or_buffer.read()), **kwargs) else: filepath_or_buffer = str(filepath_or_buffer) try: return pd.read_excel(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs) except ValueError: # Cannot seek streaming HTTP file return pd.read_excel( 
BytesIO(xopen(filepath_or_buffer, "rb", download_config=download_config).read()), **kwargs ) def xsio_loadmat(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs): import scipy.io as sio if hasattr(filepath_or_buffer, "read"): return sio.loadmat(filepath_or_buffer, **kwargs) else: return sio.loadmat(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs) def xet_parse(source, parser=None, download_config: Optional[DownloadConfig] = None): """Extend `xml.etree.ElementTree.parse` function to support remote files. Args: source: File path or file object. parser (`XMLParser`, *optional*, default `XMLParser`): Parser instance. download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `xml.etree.ElementTree.Element`: Root element of the given source document. """ if hasattr(source, "read"): return ET.parse(source, parser=parser) else: with xopen(source, "rb", download_config=download_config) as f: return ET.parse(f, parser=parser) def xxml_dom_minidom_parse(filename_or_file, download_config: Optional[DownloadConfig] = None, **kwargs): """Extend `xml.dom.minidom.parse` function to support remote files. Args: filename_or_file (`str` or file): File path or file object. download_config : mainly use token or storage_options to support different platforms and auth types. **kwargs (optional): Additional keyword arguments passed to `xml.dom.minidom.parse`. Returns: :obj:`xml.dom.minidom.Document`: Parsed document. """ if hasattr(filename_or_file, "read"): return xml.dom.minidom.parse(filename_or_file, **kwargs) else: with xopen(filename_or_file, "rb", download_config=download_config) as f: return xml.dom.minidom.parse(f, **kwargs) class _IterableFromGenerator(Iterable): """Utility class to create an iterable from a generator function, in order to reset the generator when needed.""" def __init__(self, generator: Callable, *args, **kwargs): self.generator = generator self.args = args self.kwargs = kwargs def __iter__(self): yield from self.generator(*self.args, **self.kwargs) class ArchiveIterable(_IterableFromGenerator): """An iterable of (path, fileobj) from a TAR archive, used by `iter_archive`""" @staticmethod def _iter_tar(f): stream = tarfile.open(fileobj=f, mode="r|*") for tarinfo in stream: file_path = tarinfo.name if not tarinfo.isreg(): continue if file_path is None: continue if os.path.basename(file_path).startswith((".", "__")): # skipping hidden files continue file_obj = stream.extractfile(tarinfo) yield file_path, file_obj stream.members = [] del stream @staticmethod def _iter_zip(f): zipf = zipfile.ZipFile(f) for member in zipf.infolist(): file_path = member.filename if member.is_dir(): continue if file_path is None: continue if os.path.basename(file_path).startswith((".", "__")): # skipping hidden files continue file_obj = zipf.open(member) yield file_path, file_obj @classmethod def _iter_from_fileobj(cls, f) -> Generator[Tuple, None, None]: compression = _get_extraction_protocol_with_magic_number(f) if compression == "zip": yield from cls._iter_zip(f) else: yield from cls._iter_tar(f) @classmethod def _iter_from_urlpath( cls, urlpath: str, download_config: Optional[DownloadConfig] = None ) -> Generator[Tuple, None, None]: compression = _get_extraction_protocol(urlpath, download_config=download_config) with xopen(urlpath, "rb", download_config=download_config) as f: if compression == "zip": yield from cls._iter_zip(f) else: yield from cls._iter_tar(f) @classmethod def from_buf(cls, fileobj) -> 
"ArchiveIterable": return cls(cls._iter_from_fileobj, fileobj) @classmethod def from_urlpath(cls, urlpath_or_buf, download_config: Optional[DownloadConfig] = None) -> "ArchiveIterable": return cls(cls._iter_from_urlpath, urlpath_or_buf, download_config) class FilesIterable(_IterableFromGenerator): """An iterable of paths from a list of directories or files""" @classmethod def _iter_from_urlpaths( cls, urlpaths: Union[str, List[str]], download_config: Optional[DownloadConfig] = None ) -> Generator[str, None, None]: if not isinstance(urlpaths, list): urlpaths = [urlpaths] for urlpath in urlpaths: if xisfile(urlpath, download_config=download_config): if xbasename(urlpath).startswith((".", "__")): # skipping hidden files return yield urlpath elif xisdir(urlpath, download_config=download_config): for dirpath, dirnames, filenames in xwalk(urlpath, download_config=download_config): # skipping hidden directories; prune the search # [:] for the in-place list modification required by os.walk # (only works for local paths as fsspec's walk doesn't support the in-place modification) dirnames[:] = sorted([dirname for dirname in dirnames if not dirname.startswith((".", "__"))]) if xbasename(dirpath).startswith((".", "__")): # skipping hidden directories continue for filename in sorted(filenames): if filename.startswith((".", "__")): # skipping hidden files continue yield xjoin(dirpath, filename) else: raise FileNotFoundError(urlpath) @classmethod def from_urlpaths(cls, urlpaths, download_config: Optional[DownloadConfig] = None) -> "FilesIterable": return cls(cls._iter_from_urlpaths, urlpaths, download_config) class StreamingDownloadManager: """ Download manager that uses the "::" separator to navigate through (possibly remote) compressed archives. Contrary to the regular `DownloadManager`, the `download` and `extract` methods don't actually download nor extract data, but they rather return the path or url that could be opened using the `xopen` function which extends the built-in `open` function to stream data from remote files. """ is_streaming = True def __init__( self, dataset_name: Optional[str] = None, data_dir: Optional[str] = None, download_config: Optional[DownloadConfig] = None, base_path: Optional[str] = None, ): self._dataset_name = dataset_name self._data_dir = data_dir self._base_path = base_path or os.path.abspath(".") self.download_config = download_config or DownloadConfig() @property def manual_dir(self): return self._data_dir def download(self, url_or_urls): """Normalize URL(s) of files to stream data from. This is the lazy version of `DownloadManager.download` for streaming. Args: url_or_urls (`str` or `list` or `dict`): URL(s) of files to stream data from. Each url is a `str`. Returns: url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input url_or_urls. Example: ```py >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') ``` """ url_or_urls = map_nested(self._download, url_or_urls, map_tuple=True) return url_or_urls def _download(self, urlpath: str) -> str: urlpath = str(urlpath) if is_relative_path(urlpath): # append the relative path to the base_path urlpath = url_or_path_join(self._base_path, urlpath) return urlpath def extract(self, url_or_urls): """Add extraction protocol for given url(s) for streaming. This is the lazy version of `DownloadManager.extract` for streaming. Args: url_or_urls (`str` or `list` or `dict`): URL(s) of files to stream data from. 
Each url is a `str`. Returns: url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input `url_or_urls`. Example: ```py >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') >>> extracted_files = dl_manager.extract(downloaded_files) ``` """ urlpaths = map_nested(self._extract, url_or_urls, map_tuple=True) return urlpaths def _extract(self, urlpath: str) -> str: urlpath = str(urlpath) protocol = _get_extraction_protocol(urlpath, download_config=self.download_config) # get inner file: zip://train-00000.json.gz::https://foo.bar/data.zip -> zip://train-00000.json.gz path = urlpath.split("::")[0] extension = _get_path_extension(path) if extension in ["tgz", "tar"] or path.endswith((".tar.gz", ".tar.bz2", ".tar.xz")): raise NotImplementedError( f"Extraction protocol for TAR archives like '{urlpath}' is not implemented in streaming mode. " f"Please use `dl_manager.iter_archive` instead.\n\n" f"Example usage:\n\n" f"\turl = dl_manager.download(url)\n" f"\ttar_archive_iterator = dl_manager.iter_archive(url)\n\n" f"\tfor filename, file in tar_archive_iterator:\n" f"\t\t..." ) if protocol is None: # no extraction return urlpath elif protocol in SINGLE_FILE_COMPRESSION_PROTOCOLS: # there is one single file which is the uncompressed file inner_file = os.path.basename(urlpath.split("::")[0]) inner_file = inner_file[: inner_file.rindex(".")] if "." in inner_file else inner_file return f"{protocol}://{inner_file}::{urlpath}" else: return f"{protocol}://::{urlpath}" def download_and_extract(self, url_or_urls): """Prepare given `url_or_urls` for streaming (add extraction protocol). This is the lazy version of `DownloadManager.download_and_extract` for streaming. Is equivalent to: ``` urls = dl_manager.extract(dl_manager.download(url_or_urls)) ``` Args: url_or_urls (`str` or `list` or `dict`): URL(s) to stream from data from. Each url is a `str`. Returns: url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input `url_or_urls`. """ return self.extract(self.download(url_or_urls)) def iter_archive(self, urlpath_or_buf: Union[str, io.BufferedReader]) -> Iterable[Tuple]: """Iterate over files within an archive. Args: urlpath_or_buf (`str` or `io.BufferedReader`): Archive path or archive binary file object. Yields: `tuple[str, io.BufferedReader]`: 2-tuple (path_within_archive, file_object). File object is opened in binary mode. Example: ```py >>> archive = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') >>> files = dl_manager.iter_archive(archive) ``` """ if hasattr(urlpath_or_buf, "read"): return ArchiveIterable.from_buf(urlpath_or_buf) else: return ArchiveIterable.from_urlpath(urlpath_or_buf, download_config=self.download_config) def iter_files(self, urlpaths: Union[str, List[str]]) -> Iterable[str]: """Iterate over files. Args: urlpaths (`str` or `list` of `str`): Root paths. Yields: str: File URL path. Example: ```py >>> files = dl_manager.download_and_extract('https://huggingface.co/datasets/beans/resolve/main/data/train.zip') >>> files = dl_manager.iter_files(files) ``` """ return FilesIterable.from_urlpaths(urlpaths, download_config=self.download_config)
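# Example usage (illustrative sketch): how the lazy `download` -> `extract` -> `iter_files`
# chain composes in streaming mode. The URL below is a placeholder; nothing is fetched until
# a file is actually opened with `xopen`.
# ```
# dl_manager = StreamingDownloadManager()
# url = dl_manager.download("https://example.com/data.zip")  # returns the URL unchanged
# extracted = dl_manager.extract(url)  # -> "zip://::https://example.com/data.zip"
# for file_path in dl_manager.iter_files(extracted):
#     with xopen(file_path, "rb", download_config=dl_manager.download_config) as f:
#         header = f.read(16)
# ```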
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/download/download_manager.py
# Copyright 2020 The TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Download manager interface.""" import enum import io import os import posixpath import tarfile import warnings import zipfile from datetime import datetime from functools import partial from itertools import chain from typing import Callable, Dict, Generator, Iterable, List, Optional, Tuple, Union from .. import config from ..utils.deprecation_utils import DeprecatedEnum from ..utils.file_utils import cached_path, get_from_cache, hash_url_to_filename, is_relative_path, url_or_path_join from ..utils.info_utils import get_size_checksum_dict from ..utils.logging import get_logger, is_progress_bar_enabled, tqdm from ..utils.py_utils import NestedDataStructure, map_nested, size_str from .download_config import DownloadConfig logger = get_logger(__name__) BASE_KNOWN_EXTENSIONS = [ "txt", "csv", "json", "jsonl", "tsv", "conll", "conllu", "orig", "parquet", "pkl", "pickle", "rel", "xml", ] MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL = { bytes.fromhex("504B0304"): "zip", bytes.fromhex("504B0506"): "zip", # empty archive bytes.fromhex("504B0708"): "zip", # spanned archive bytes.fromhex("425A68"): "bz2", bytes.fromhex("1F8B"): "gzip", bytes.fromhex("FD377A585A00"): "xz", bytes.fromhex("04224D18"): "lz4", bytes.fromhex("28B52FFD"): "zstd", } MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL = { b"Rar!": "rar", } MAGIC_NUMBER_MAX_LENGTH = max( len(magic_number) for magic_number in chain(MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL, MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL) ) class DownloadMode(enum.Enum): """`Enum` for how to treat pre-existing downloads and data. The default mode is `REUSE_DATASET_IF_EXISTS`, which will reuse both raw downloads and the prepared dataset if they exist. The generations modes: | | Downloads | Dataset | |-------------------------------------|-----------|---------| | `REUSE_DATASET_IF_EXISTS` (default) | Reuse | Reuse | | `REUSE_CACHE_IF_EXISTS` | Reuse | Fresh | | `FORCE_REDOWNLOAD` | Fresh | Fresh | """ REUSE_DATASET_IF_EXISTS = "reuse_dataset_if_exists" REUSE_CACHE_IF_EXISTS = "reuse_cache_if_exists" FORCE_REDOWNLOAD = "force_redownload" class GenerateMode(DeprecatedEnum): REUSE_DATASET_IF_EXISTS = "reuse_dataset_if_exists" REUSE_CACHE_IF_EXISTS = "reuse_cache_if_exists" FORCE_REDOWNLOAD = "force_redownload" @property def help_message(self): return "Use 'DownloadMode' instead." 
def _get_path_extension(path: str) -> str: # Get extension: train.json.gz -> gz extension = path.split(".")[-1] # Remove query params ("dl=1", "raw=true"): gz?dl=1 -> gz # Remove shards infos (".txt_1", ".txt-00000-of-00100"): txt_1 -> txt for symb in "?-_": extension = extension.split(symb)[0] return extension def _get_extraction_protocol_with_magic_number(f) -> Optional[str]: """read the magic number from a file-like object and return the compression protocol""" # Check if the file object is seekable even before reading the magic number (to avoid https://bugs.python.org/issue26440) try: f.seek(0) except (AttributeError, io.UnsupportedOperation): return None magic_number = f.read(MAGIC_NUMBER_MAX_LENGTH) f.seek(0) for i in range(MAGIC_NUMBER_MAX_LENGTH): compression = MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i]) if compression is not None: return compression compression = MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i]) if compression is not None: raise NotImplementedError(f"Compression protocol '{compression}' not implemented.") def _get_extraction_protocol(path: str) -> Optional[str]: path = str(path) extension = _get_path_extension(path) # TODO(mariosasko): The below check will be useful once we can preserve the original extension in the new cache layout (use the `filename` parameter of `hf_hub_download`) if ( extension in BASE_KNOWN_EXTENSIONS or extension in ["tgz", "tar"] or path.endswith((".tar.gz", ".tar.bz2", ".tar.xz")) ): return None with open(path, "rb") as f: return _get_extraction_protocol_with_magic_number(f) class _IterableFromGenerator(Iterable): """Utility class to create an iterable from a generator function, in order to reset the generator when needed.""" def __init__(self, generator: Callable, *args, **kwargs): self.generator = generator self.args = args self.kwargs = kwargs def __iter__(self): yield from self.generator(*self.args, **self.kwargs) class ArchiveIterable(_IterableFromGenerator): """An iterable of (path, fileobj) from a TAR archive, used by `iter_archive`""" @staticmethod def _iter_tar(f): stream = tarfile.open(fileobj=f, mode="r|*") for tarinfo in stream: file_path = tarinfo.name if not tarinfo.isreg(): continue if file_path is None: continue if os.path.basename(file_path).startswith((".", "__")): # skipping hidden files continue file_obj = stream.extractfile(tarinfo) yield file_path, file_obj stream.members = [] del stream @staticmethod def _iter_zip(f): zipf = zipfile.ZipFile(f) for member in zipf.infolist(): file_path = member.filename if member.is_dir(): continue if file_path is None: continue if os.path.basename(file_path).startswith((".", "__")): # skipping hidden files continue file_obj = zipf.open(member) yield file_path, file_obj @classmethod def _iter_from_fileobj(cls, f) -> Generator[Tuple, None, None]: compression = _get_extraction_protocol_with_magic_number(f) if compression == "zip": yield from cls._iter_zip(f) else: yield from cls._iter_tar(f) @classmethod def _iter_from_path(cls, urlpath: str) -> Generator[Tuple, None, None]: compression = _get_extraction_protocol(urlpath) with open(urlpath, "rb") as f: if compression == "zip": yield from cls._iter_zip(f) else: yield from cls._iter_tar(f) @classmethod def from_buf(cls, fileobj) -> "ArchiveIterable": return cls(cls._iter_from_fileobj, fileobj) @classmethod def from_path(cls, urlpath_or_buf) -> "ArchiveIterable": return cls(cls._iter_from_path, urlpath_or_buf) class 
FilesIterable(_IterableFromGenerator): """An iterable of paths from a list of directories or files""" @classmethod def _iter_from_paths(cls, urlpaths: Union[str, List[str]]) -> Generator[str, None, None]: if not isinstance(urlpaths, list): urlpaths = [urlpaths] for urlpath in urlpaths: if os.path.isfile(urlpath): if os.path.basename(urlpath).startswith((".", "__")): # skipping hidden files return yield urlpath else: for dirpath, dirnames, filenames in os.walk(urlpath): # skipping hidden directories; prune the search # [:] for the in-place list modification required by os.walk dirnames[:] = sorted([dirname for dirname in dirnames if not dirname.startswith((".", "__"))]) if os.path.basename(dirpath).startswith((".", "__")): # skipping hidden directories continue for filename in sorted(filenames): if filename.startswith((".", "__")): # skipping hidden files continue yield os.path.join(dirpath, filename) @classmethod def from_paths(cls, urlpaths) -> "FilesIterable": return cls(cls._iter_from_paths, urlpaths) class DownloadManager: is_streaming = False def __init__( self, dataset_name: Optional[str] = None, data_dir: Optional[str] = None, download_config: Optional[DownloadConfig] = None, base_path: Optional[str] = None, record_checksums=True, ): """Download manager constructor. Args: data_dir: can be used to specify a manual directory to get the files from. dataset_name (`str`): name of dataset this instance will be used for. If provided, downloads will contain which datasets they were used for. download_config (`DownloadConfig`): to specify the cache directory and other download options base_path (`str`): base path that is used when relative paths are used to download files. This can be a remote url. record_checksums (`bool`, defaults to `True`): Whether to record the checksums of the downloaded files. If None, the value is inferred from the builder. """ self._dataset_name = dataset_name self._data_dir = data_dir self._base_path = base_path or os.path.abspath(".") # To record what is being used: {url: {num_bytes: int, checksum: str}} self._recorded_sizes_checksums: Dict[str, Dict[str, Optional[Union[int, str]]]] = {} self.record_checksums = record_checksums self.download_config = download_config or DownloadConfig() self.downloaded_paths = {} self.extracted_paths = {} @property def manual_dir(self): return self._data_dir @property def downloaded_size(self): """Returns the total size of downloaded files.""" return sum(checksums_dict["num_bytes"] for checksums_dict in self._recorded_sizes_checksums.values()) @staticmethod def ship_files_with_pipeline(downloaded_path_or_paths, pipeline): """Ship the files using Beam FileSystems to the pipeline temp dir. Args: downloaded_path_or_paths (`str` or `list[str]` or `dict[str, str]`): Nested structure containing the downloaded path(s). pipeline ([`utils.beam_utils.BeamPipeline`]): Apache Beam Pipeline. Returns: `str` or `list[str]` or `dict[str, str]` """ from ..utils.beam_utils import upload_local_to_remote remote_dir = pipeline._options.get_all_options().get("temp_location") if remote_dir is None: raise ValueError("You need to specify 'temp_location' in PipelineOptions to upload files") def upload(local_file_path): remote_file_path = posixpath.join( remote_dir, config.DOWNLOADED_DATASETS_DIR, os.path.basename(local_file_path) ) logger.info( f"Uploading {local_file_path} ({size_str(os.path.getsize(local_file_path))}) to {remote_file_path}." 
) upload_local_to_remote(local_file_path, remote_file_path) return remote_file_path uploaded_path_or_paths = map_nested( lambda local_file_path: upload(local_file_path), downloaded_path_or_paths, disable_tqdm=not is_progress_bar_enabled(), ) return uploaded_path_or_paths def _record_sizes_checksums(self, url_or_urls: NestedDataStructure, downloaded_path_or_paths: NestedDataStructure): """Record size/checksum of downloaded files.""" delay = 5 for url, path in tqdm( list(zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten())), delay=delay, desc="Computing checksums", disable=not is_progress_bar_enabled(), ): # call str to support PathLike objects self._recorded_sizes_checksums[str(url)] = get_size_checksum_dict( path, record_checksum=self.record_checksums ) def download_custom(self, url_or_urls, custom_download): """ Download given urls(s) by calling `custom_download`. Args: url_or_urls (`str` or `list` or `dict`): URL or `list` or `dict` of URLs to download and extract. Each URL is a `str`. custom_download (`Callable[src_url, dst_path]`): The source URL and destination path. For example `tf.io.gfile.copy`, that lets you download from Google storage. Returns: downloaded_path(s): `str`, The downloaded paths matching the given input `url_or_urls`. Example: ```py >>> downloaded_files = dl_manager.download_custom('s3://my-bucket/data.zip', custom_download_for_my_private_bucket) ``` """ cache_dir = self.download_config.cache_dir or config.DOWNLOADED_DATASETS_PATH max_retries = self.download_config.max_retries def url_to_downloaded_path(url): return os.path.join(cache_dir, hash_url_to_filename(url)) downloaded_path_or_paths = map_nested( url_to_downloaded_path, url_or_urls, disable_tqdm=not is_progress_bar_enabled() ) url_or_urls = NestedDataStructure(url_or_urls) downloaded_path_or_paths = NestedDataStructure(downloaded_path_or_paths) for url, path in zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten()): try: get_from_cache( url, cache_dir=cache_dir, local_files_only=True, use_etag=False, max_retries=max_retries ) cached = True except FileNotFoundError: cached = False if not cached or self.download_config.force_download: custom_download(url, path) get_from_cache( url, cache_dir=cache_dir, local_files_only=True, use_etag=False, max_retries=max_retries ) self._record_sizes_checksums(url_or_urls, downloaded_path_or_paths) return downloaded_path_or_paths.data def download(self, url_or_urls): """Download given URL(s). By default, only one process is used for download. Pass customized `download_config.num_proc` to change this behavior. Args: url_or_urls (`str` or `list` or `dict`): URL or `list` or `dict` of URLs to download. Each URL is a `str`. Returns: `str` or `list` or `dict`: The downloaded paths matching the given input `url_or_urls`. 
Example: ```py >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') ``` """ download_config = self.download_config.copy() download_config.extract_compressed_file = False if download_config.download_desc is None: download_config.download_desc = "Downloading data" download_func = partial(self._download, download_config=download_config) start_time = datetime.now() downloaded_path_or_paths = map_nested( download_func, url_or_urls, map_tuple=True, num_proc=download_config.num_proc, disable_tqdm=not is_progress_bar_enabled(), desc="Downloading data files", ) duration = datetime.now() - start_time logger.info(f"Downloading took {duration.total_seconds() // 60} min") url_or_urls = NestedDataStructure(url_or_urls) downloaded_path_or_paths = NestedDataStructure(downloaded_path_or_paths) self.downloaded_paths.update(dict(zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten()))) start_time = datetime.now() self._record_sizes_checksums(url_or_urls, downloaded_path_or_paths) duration = datetime.now() - start_time logger.info(f"Checksum Computation took {duration.total_seconds() // 60} min") return downloaded_path_or_paths.data def _download(self, url_or_filename: str, download_config: DownloadConfig) -> str: url_or_filename = str(url_or_filename) if is_relative_path(url_or_filename): # append the relative path to the base_path url_or_filename = url_or_path_join(self._base_path, url_or_filename) return cached_path(url_or_filename, download_config=download_config) def iter_archive(self, path_or_buf: Union[str, io.BufferedReader]): """Iterate over files within an archive. Args: path_or_buf (`str` or `io.BufferedReader`): Archive path or archive binary file object. Yields: `tuple[str, io.BufferedReader]`: 2-tuple (path_within_archive, file_object). File object is opened in binary mode. Example: ```py >>> archive = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') >>> files = dl_manager.iter_archive(archive) ``` """ if hasattr(path_or_buf, "read"): return ArchiveIterable.from_buf(path_or_buf) else: return ArchiveIterable.from_path(path_or_buf) def iter_files(self, paths: Union[str, List[str]]): """Iterate over file paths. Args: paths (`str` or `list` of `str`): Root paths. Yields: `str`: File path. Example: ```py >>> files = dl_manager.download_and_extract('https://huggingface.co/datasets/beans/resolve/main/data/train.zip') >>> files = dl_manager.iter_files(files) ``` """ return FilesIterable.from_paths(paths) def extract(self, path_or_paths, num_proc="deprecated"): """Extract given path(s). Args: path_or_paths (path or `list` or `dict`): Path of file to extract. Each path is a `str`. num_proc (`int`): Use multi-processing if `num_proc` > 1 and the length of `path_or_paths` is larger than `num_proc`. <Deprecated version="2.6.2"> Pass `DownloadConfig(num_proc=<num_proc>)` to the initializer instead. </Deprecated> Returns: extracted_path(s): `str`, The extracted paths matching the given input path_or_paths. Example: ```py >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') >>> extracted_files = dl_manager.extract(downloaded_files) ``` """ if num_proc != "deprecated": warnings.warn( "'num_proc' was deprecated in version 2.6.2 and will be removed in 3.0.0. 
Pass `DownloadConfig(num_proc=<num_proc>)` to the initializer instead.", FutureWarning, ) download_config = self.download_config.copy() download_config.extract_compressed_file = True # Extract downloads the file first if it is not already downloaded if download_config.download_desc is None: download_config.download_desc = "Downloading data" extracted_paths = map_nested( partial(cached_path, download_config=download_config), path_or_paths, num_proc=download_config.num_proc, disable_tqdm=not is_progress_bar_enabled(), desc="Extracting data files", ) path_or_paths = NestedDataStructure(path_or_paths) extracted_paths = NestedDataStructure(extracted_paths) self.extracted_paths.update(dict(zip(path_or_paths.flatten(), extracted_paths.flatten()))) return extracted_paths.data def download_and_extract(self, url_or_urls): """Download and extract given `url_or_urls`. Is roughly equivalent to: ``` extracted_paths = dl_manager.extract(dl_manager.download(url_or_urls)) ``` Args: url_or_urls (`str` or `list` or `dict`): URL or `list` or `dict` of URLs to download and extract. Each URL is a `str`. Returns: extracted_path(s): `str`, extracted paths of given URL(s). """ return self.extract(self.download(url_or_urls)) def get_recorded_sizes_checksums(self): return self._recorded_sizes_checksums.copy() def delete_extracted_files(self): paths_to_delete = set(self.extracted_paths.values()) - set(self.downloaded_paths.values()) for key, path in list(self.extracted_paths.items()): if path in paths_to_delete and os.path.isfile(path): os.remove(path) del self.extracted_paths[key] def manage_extracted_files(self): if self.download_config.delete_extracted: self.delete_extracted_files()
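# Example usage (illustrative sketch): the eager, caching counterpart to the streaming manager.
# The URLs are placeholders; `download` returns local cache paths, and `iter_archive` yields
# (path_within_archive, file_object) pairs without extracting the archive to disk.
# ```
# dl_manager = DownloadManager(dataset_name="demo", download_config=DownloadConfig())
# archive_path = dl_manager.download("https://example.com/data.tar.gz")
# for path_in_archive, file_obj in dl_manager.iter_archive(archive_path):
#     first_bytes = file_obj.read(16)
# extracted_dir = dl_manager.download_and_extract("https://example.com/data.zip")
# ```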
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/packaged_modules/__init__.py
import inspect import re from hashlib import sha256 from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text import text def _hash_python_lines(lines: List[str]) -> str: filtered_lines = [] for line in lines: line = re.sub(r"#.*", "", line) # remove comments if line: filtered_lines.append(line) full_str = "\n".join(filtered_lines) # Make a hash from all this code full_bytes = full_str.encode("utf-8") return sha256(full_bytes).hexdigest() # get importable module names and hash for caching _PACKAGED_DATASETS_MODULES = { "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), } # Used to infer the module to use based on the data files extensions _EXTENSION_TO_MODULE = { ".csv": ("csv", {}), ".tsv": ("csv", {"sep": "\t"}), ".json": ("json", {}), ".jsonl": ("json", {}), ".parquet": ("parquet", {}), ".arrow": ("arrow", {}), ".txt": ("text", {}), } _EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"} # Used to filter data files based on extensions given a module name _MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) _MODULE_TO_EXTENSIONS["imagefolder"].append(".zip") _MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
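# Example lookups (illustrative): how the mappings above are typically queried when routing
# data files to a packaged builder.
# ```
# _EXTENSION_TO_MODULE[".tsv"]  # -> ("csv", {"sep": "\t"})
# _EXTENSION_TO_MODULE[".jpg"]  # -> ("imagefolder", {})
# _MODULE_TO_EXTENSIONS["json"]  # -> [".json", ".jsonl"]
# _PACKAGED_DATASETS_MODULES["csv"]  # -> (importable module name, hash of its source used for caching)
# ```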
0
hf_public_repos/datasets/src/datasets/packaged_modules
hf_public_repos/datasets/src/datasets/packaged_modules/imagefolder/imagefolder.py
from typing import List import datasets from datasets.tasks import ImageClassification from ..folder_based_builder import folder_based_builder logger = datasets.utils.logging.get_logger(__name__) class ImageFolderConfig(folder_based_builder.FolderBasedBuilderConfig): """BuilderConfig for ImageFolder.""" drop_labels: bool = None drop_metadata: bool = None class ImageFolder(folder_based_builder.FolderBasedBuilder): BASE_FEATURE = datasets.Image BASE_COLUMN_NAME = "image" BUILDER_CONFIG_CLASS = ImageFolderConfig EXTENSIONS: List[str] # definition at the bottom of the script CLASSIFICATION_TASK = ImageClassification(image_column="image", label_column="label") # Obtained with: # ``` # import PIL.Image # IMAGE_EXTENSIONS = [] # PIL.Image.init() # for ext, format in PIL.Image.EXTENSION.items(): # if format in PIL.Image.OPEN: # IMAGE_EXTENSIONS.append(ext[1:]) # ``` # We intentionally do not run this code on launch because: # (1) Pillow is an optional dependency, so importing Pillow in global namespace is not allowed # (2) To ensure the list of supported extensions is deterministic IMAGE_EXTENSIONS = [ ".blp", ".bmp", ".dib", ".bufr", ".cur", ".pcx", ".dcx", ".dds", ".ps", ".eps", ".fit", ".fits", ".fli", ".flc", ".ftc", ".ftu", ".gbr", ".gif", ".grib", ".h5", ".hdf", ".png", ".apng", ".jp2", ".j2k", ".jpc", ".jpf", ".jpx", ".j2c", ".icns", ".ico", ".im", ".iim", ".tif", ".tiff", ".jfif", ".jpe", ".jpg", ".jpeg", ".mpg", ".mpeg", ".msp", ".pcd", ".pxr", ".pbm", ".pgm", ".ppm", ".pnm", ".psd", ".bw", ".rgb", ".rgba", ".sgi", ".ras", ".tga", ".icb", ".vda", ".vst", ".webp", ".wmf", ".emf", ".xbm", ".xpm", ] ImageFolder.EXTENSIONS = IMAGE_EXTENSIONS
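# Example usage (illustrative sketch): labels are inferred from subdirectory names; the folder
# layout below is a placeholder.
# ```
# from datasets import load_dataset
#
# # folder/train/cat/1.png, folder/train/dog/2.png, ...
# ds = load_dataset("imagefolder", data_dir="folder", split="train")
# ds[0]["image"], ds[0]["label"]
# ```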
0
hf_public_repos/datasets/src/datasets/packaged_modules
hf_public_repos/datasets/src/datasets/packaged_modules/csv/csv.py
import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal logger = datasets.utils.logging.get_logger(__name__) _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"] _PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"] _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"] _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"] @dataclass class CsvConfig(datasets.BuilderConfig): """BuilderConfig for CSV.""" sep: str = "," delimiter: Optional[str] = None header: Optional[Union[int, List[int], str]] = "infer" names: Optional[List[str]] = None column_names: Optional[List[str]] = None index_col: Optional[Union[int, str, List[int], List[str]]] = None usecols: Optional[Union[List[int], List[str]]] = None prefix: Optional[str] = None mangle_dupe_cols: bool = True engine: Optional[Literal["c", "python", "pyarrow"]] = None converters: Dict[Union[int, str], Callable[[Any], Any]] = None true_values: Optional[list] = None false_values: Optional[list] = None skipinitialspace: bool = False skiprows: Optional[Union[int, List[int]]] = None nrows: Optional[int] = None na_values: Optional[Union[str, List[str]]] = None keep_default_na: bool = True na_filter: bool = True verbose: bool = False skip_blank_lines: bool = True thousands: Optional[str] = None decimal: str = "." lineterminator: Optional[str] = None quotechar: str = '"' quoting: int = 0 escapechar: Optional[str] = None comment: Optional[str] = None encoding: Optional[str] = None dialect: Optional[str] = None error_bad_lines: bool = True warn_bad_lines: bool = True skipfooter: int = 0 doublequote: bool = True memory_map: bool = False float_precision: Optional[str] = None chunksize: int = 10_000 features: Optional[datasets.Features] = None encoding_errors: Optional[str] = "strict" on_bad_lines: Literal["error", "warn", "skip"] = "error" date_format: Optional[str] = None def __post_init__(self): if self.delimiter is not None: self.sep = self.delimiter if self.column_names is not None: self.names = self.column_names @property def pd_read_csv_kwargs(self): pd_read_csv_kwargs = { "sep": self.sep, "header": self.header, "names": self.names, "index_col": self.index_col, "usecols": self.usecols, "prefix": self.prefix, "mangle_dupe_cols": self.mangle_dupe_cols, "engine": self.engine, "converters": self.converters, "true_values": self.true_values, "false_values": self.false_values, "skipinitialspace": self.skipinitialspace, "skiprows": self.skiprows, "nrows": self.nrows, "na_values": self.na_values, "keep_default_na": self.keep_default_na, "na_filter": self.na_filter, "verbose": self.verbose, "skip_blank_lines": self.skip_blank_lines, "thousands": self.thousands, "decimal": self.decimal, "lineterminator": self.lineterminator, "quotechar": self.quotechar, "quoting": self.quoting, "escapechar": self.escapechar, "comment": self.comment, "encoding": self.encoding, "dialect": self.dialect, "error_bad_lines": self.error_bad_lines, "warn_bad_lines": self.warn_bad_lines, "skipfooter": self.skipfooter, "doublequote": self.doublequote, "memory_map": self.memory_map, "float_precision": self.float_precision, "chunksize": self.chunksize, "encoding_errors": self.encoding_errors, "on_bad_lines": self.on_bad_lines, "date_format": 
self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class Csv(datasets.ArrowBasedBuilder): BUILDER_CONFIG_CLASS = CsvConfig def _info(self): return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager): """We handle string, list and dicts in datafiles""" if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") data_files = dl_manager.download_and_extract(self.config.data_files) if isinstance(data_files, (str, list, tuple)): files = data_files if isinstance(files, str): files = [files] files = [dl_manager.iter_files(file) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})] splits = [] for split_name, files in data_files.items(): if isinstance(files, str): files = [files] files = [dl_manager.iter_files(file) for file in files] splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files})) return splits def _cast_table(self, pa_table: pa.Table) -> pa.Table: if self.config.features is not None: schema = self.config.features.arrow_schema if all(not require_storage_cast(feature) for feature in self.config.features.values()): # cheaper cast pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema) else: # more expensive cast; allows str <-> int/float or str to Audio for example pa_table = table_cast(pa_table, schema) return pa_table def _generate_tables(self, files): schema = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str dtype = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values()) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(files)): csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs) try: for batch_idx, df in enumerate(csv_file_reader): pa_table = pa.Table.from_pandas(df) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(pa_table) except ValueError as e: logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}") raise
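# Example usage (illustrative sketch): any `CsvConfig` field can be forwarded as a keyword
# argument of `load_dataset`; the file names below are placeholders.
# ```
# from datasets import load_dataset
#
# ds = load_dataset("csv", data_files={"train": "train.csv", "test": "test.csv"}, sep=";")
# ```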
0
hf_public_repos/datasets/src/datasets/packaged_modules
hf_public_repos/datasets/src/datasets/packaged_modules/json/json.py
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline logger = datasets.utils.logging.get_logger(__name__) @dataclass class JsonConfig(datasets.BuilderConfig): """BuilderConfig for JSON.""" features: Optional[datasets.Features] = None encoding: str = "utf-8" encoding_errors: Optional[str] = None field: Optional[str] = None use_threads: bool = True # deprecated block_size: Optional[int] = None # deprecated chunksize: int = 10 << 20 # 10MB newlines_in_values: Optional[bool] = None class Json(datasets.ArrowBasedBuilder): BUILDER_CONFIG_CLASS = JsonConfig def _info(self): if self.config.block_size is not None: logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead") self.config.chunksize = self.config.block_size if self.config.use_threads is not True: logger.warning( "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore." ) if self.config.newlines_in_values is not None: raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported") return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager): """We handle string, list and dicts in datafiles""" if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") data_files = dl_manager.download_and_extract(self.config.data_files) if isinstance(data_files, (str, list, tuple)): files = data_files if isinstance(files, str): files = [files] files = [dl_manager.iter_files(file) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})] splits = [] for split_name, files in data_files.items(): if isinstance(files, str): files = [files] files = [dl_manager.iter_files(file) for file in files] splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files})) return splits def _cast_table(self, pa_table: pa.Table) -> pa.Table: if self.config.features is not None: # adding missing columns for column_name in set(self.config.features) - set(pa_table.column_names): type = self.config.features.arrow_schema.field(column_name).type pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type)) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example pa_table = table_cast(pa_table, self.config.features.arrow_schema) return pa_table def _generate_tables(self, files): for file_idx, file in enumerate(itertools.chain.from_iterable(files)): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f: dataset = json.load(f) # We keep only the field we are interested in dataset = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(dataset, (list, tuple)): keys = set().union(*[row.keys() for row in dataset]) mapping = {col: [row.get(col) for row in dataset] for col in keys} else: mapping = dataset pa_table = pa.Table.from_pydict(mapping) yield file_idx, self._cast_table(pa_table) # If the file has one json object per line else: with open(file, "rb") as f: batch_idx = 0 # Use block_size 
equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small block_size = max(self.config.chunksize // 32, 16 << 10) encoding_errors = ( self.config.encoding_errors if self.config.encoding_errors is not None else "strict" ) while True: batch = f.read(self.config.chunksize) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(f) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8") try: while True: try: pa_table = paj.read_json( io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(e, pa.ArrowInvalid) and "straddling" not in str(e) or block_size > len(batch) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}." ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( file, encoding=self.config.encoding, errors=self.config.encoding_errors ) as f: dataset = json.load(f) except json.JSONDecodeError: logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}") raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(dataset, list): # list is the only sequence type supported in JSON try: keys = set().union(*[row.keys() for row in dataset]) mapping = {col: [row.get(col) for row in dataset] for col in keys} pa_table = pa.Table.from_pydict(mapping) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}") raise ValueError(f"Not able to read records in the JSON file at {file}.") from None yield file_idx, self._cast_table(pa_table) break else: logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}") raise ValueError( f"Not able to read records in the JSON file at {file}. " f"You should probably indicate the field of the JSON file containing your records. " f"This JSON file contain the following fields: {str(list(dataset.keys()))}. " f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. " ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(pa_table) batch_idx += 1
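# Example usage (illustrative sketch): loading newline-delimited JSON, or a nested JSON file
# whose records live under a specific field. The file names below are placeholders.
# ```
# from datasets import load_dataset
#
# ds = load_dataset("json", data_files="data.jsonl", split="train")
# ds = load_dataset("json", data_files="data.json", field="data", split="train")
# ```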
0
hf_public_repos/datasets/src/datasets/packaged_modules
hf_public_repos/datasets/src/datasets/packaged_modules/parquet/parquet.py
import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast logger = datasets.utils.logging.get_logger(__name__) @dataclass class ParquetConfig(datasets.BuilderConfig): """BuilderConfig for Parquet.""" batch_size: int = 10_000 columns: Optional[List[str]] = None features: Optional[datasets.Features] = None class Parquet(datasets.ArrowBasedBuilder): BUILDER_CONFIG_CLASS = ParquetConfig def _info(self): return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager): """We handle string, list and dicts in datafiles""" if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") data_files = dl_manager.download_and_extract(self.config.data_files) if isinstance(data_files, (str, list, tuple)): files = data_files if isinstance(files, str): files = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive files = [dl_manager.iter_files(file) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})] splits = [] for split_name, files in data_files.items(): if isinstance(files, str): files = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive files = [dl_manager.iter_files(file) for file in files] # Infer features if they are stored in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(files): with open(file, "rb") as f: features = datasets.Features.from_arrow_schema(pq.read_schema(f)) if self.config.columns is not None: self.info.features = datasets.Features( {col: feat for col, feat in features.items() if col in self.config.columns} ) self.info.features = features break splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files})) return splits def _cast_table(self, pa_table: pa.Table) -> pa.Table: if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example pa_table = table_cast(pa_table, self.info.features.arrow_schema) return pa_table def _generate_tables(self, files): if self.config.features is not None and self.config.columns is not None: if sorted(field.name for field in self.info.features.arrow_schema) != sorted(self.config.columns): raise ValueError( f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'" ) for file_idx, file in enumerate(itertools.chain.from_iterable(files)): with open(file, "rb") as f: parquet_file = pq.ParquetFile(f) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns) ): pa_table = pa.Table.from_batches([record_batch]) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table) except ValueError as e: logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}") raise
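# Example usage (illustrative sketch): reading a subset of columns by forwarding the `columns`
# config field through `load_dataset`; the file name and column names below are placeholders.
# ```
# from datasets import load_dataset
#
# ds = load_dataset("parquet", data_files="data.parquet", columns=["id", "text"], split="train")
# ```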
0
hf_public_repos/datasets/src/datasets/packaged_modules
hf_public_repos/datasets/src/datasets/packaged_modules/audiofolder/audiofolder.py
from typing import List import datasets from datasets.tasks import AudioClassification from ..folder_based_builder import folder_based_builder logger = datasets.utils.logging.get_logger(__name__) class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig): """Builder Config for AudioFolder.""" drop_labels: bool = None drop_metadata: bool = None class AudioFolder(folder_based_builder.FolderBasedBuilder): BASE_FEATURE = datasets.Audio BASE_COLUMN_NAME = "audio" BUILDER_CONFIG_CLASS = AudioFolderConfig EXTENSIONS: List[str] # definition at the bottom of the script CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label") # Obtained with: # ``` # import soundfile as sf # # AUDIO_EXTENSIONS = [f".{format.lower()}" for format in sf.available_formats().keys()] # # # .mp3 is currently decoded via `torchaudio`, .opus decoding is supported if version of `libsndfile` >= 1.0.30: # AUDIO_EXTENSIONS.extend([".mp3", ".opus"]) # ``` # We intentionally do not run this code on launch because: # (1) Soundfile is an optional dependency, so importing it in global namespace is not allowed # (2) To ensure the list of supported extensions is deterministic AUDIO_EXTENSIONS = [ ".aiff", ".au", ".avr", ".caf", ".flac", ".htk", ".svx", ".mat4", ".mat5", ".mpc2k", ".ogg", ".paf", ".pvf", ".raw", ".rf64", ".sd2", ".sds", ".ircam", ".voc", ".w64", ".wav", ".nist", ".wavex", ".wve", ".xi", ".mp3", ".opus", ] AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
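# Example usage (illustrative sketch): loading an audio folder, optionally with a metadata.csv
# file (with a "file_name" column) mapping audio files to extra columns such as transcriptions.
# The folder layout below is a placeholder.
# ```
# from datasets import load_dataset
#
# # folder/train/first.wav, folder/train/second.wav, folder/train/metadata.csv
# ds = load_dataset("audiofolder", data_dir="folder", split="train")
# ```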
0
hf_public_repos/datasets/src/datasets/packaged_modules
hf_public_repos/datasets/src/datasets/packaged_modules/generator/generator.py
from dataclasses import dataclass
from typing import Callable, Optional

import datasets


@dataclass
class GeneratorConfig(datasets.BuilderConfig):
    generator: Optional[Callable] = None
    gen_kwargs: Optional[dict] = None
    features: Optional[datasets.Features] = None

    def __post_init__(self):
        assert self.generator is not None, "generator must be specified"

        if self.gen_kwargs is None:
            self.gen_kwargs = {}


class Generator(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIG_CLASS = GeneratorConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=self.config.gen_kwargs)]

    def _generate_examples(self, **gen_kwargs):
        for idx, ex in enumerate(self.config.generator(**gen_kwargs)):
            yield idx, ex
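This builder backs `Dataset.from_generator`; a minimal sketch of how it is typically used (the generator and its kwargs are illustrative):

```python
from datasets import Dataset


def squares(upper):
    # any callable yielding dict examples works; gen_kwargs are passed through GeneratorConfig
    for i in range(upper):
        yield {"n": i, "square": i * i}


ds = Dataset.from_generator(squares, gen_kwargs={"upper": 5})
print(ds[0])  # {"n": 0, "square": 0}
```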
0
hf_public_repos/datasets/src/datasets/packaged_modules
hf_public_repos/datasets/src/datasets/packaged_modules/folder_based_builder/folder_based_builder.py
import collections import itertools import os from dataclasses import dataclass from typing import List, Optional, Tuple, Type import pandas as pd import pyarrow as pa import pyarrow.json as paj import datasets from datasets.features.features import FeatureType from datasets.tasks.base import TaskTemplate logger = datasets.utils.logging.get_logger(__name__) def count_path_segments(path): return path.replace("\\", "/").count("/") @dataclass class FolderBasedBuilderConfig(datasets.BuilderConfig): """BuilderConfig for AutoFolder.""" features: Optional[datasets.Features] = None drop_labels: bool = None drop_metadata: bool = None class FolderBasedBuilder(datasets.GeneratorBasedBuilder): """ Base class for generic data loaders for vision and image data. Abstract class attributes to be overridden by a child class: BASE_FEATURE: feature object to decode data (i.e. datasets.Image, datasets.Audio, ...) BASE_COLUMN_NAME: string key name of a base feature (i.e. "image", "audio", ...) BUILDER_CONFIG_CLASS: builder config inherited from `folder_based_builder.FolderBasedBuilderConfig` EXTENSIONS: list of allowed extensions (only files with these extensions and METADATA_FILENAME files will be included in a dataset) CLASSIFICATION_TASK: classification task to use if labels are obtained from the folder structure """ BASE_FEATURE: Type[FeatureType] BASE_COLUMN_NAME: str BUILDER_CONFIG_CLASS: FolderBasedBuilderConfig EXTENSIONS: List[str] CLASSIFICATION_TASK: TaskTemplate METADATA_FILENAMES: List[str] = ["metadata.csv", "metadata.jsonl"] def _info(self): return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager): if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") # Do an early pass if: # * `drop_labels` is None (default) or False, to infer the class labels # * `drop_metadata` is None (default) or False, to find the metadata files do_analyze = not self.config.drop_labels or not self.config.drop_metadata labels, path_depths = set(), set() metadata_files = collections.defaultdict(set) def analyze(files_or_archives, downloaded_files_or_dirs, split): if len(downloaded_files_or_dirs) == 0: return # The files are separated from the archives at this point, so check the first sample # to see if it's a file or a directory and iterate accordingly if os.path.isfile(downloaded_files_or_dirs[0]): original_files, downloaded_files = files_or_archives, downloaded_files_or_dirs for original_file, downloaded_file in zip(original_files, downloaded_files): original_file, downloaded_file = str(original_file), str(downloaded_file) _, original_file_ext = os.path.splitext(original_file) if original_file_ext.lower() in self.EXTENSIONS: if not self.config.drop_labels: labels.add(os.path.basename(os.path.dirname(original_file))) path_depths.add(count_path_segments(original_file)) elif os.path.basename(original_file) in self.METADATA_FILENAMES: metadata_files[split].add((original_file, downloaded_file)) else: original_file_name = os.path.basename(original_file) logger.debug( f"The file '{original_file_name}' was ignored: it is not an image, and is not {self.METADATA_FILENAMES} either." 
) else: archives, downloaded_dirs = files_or_archives, downloaded_files_or_dirs for archive, downloaded_dir in zip(archives, downloaded_dirs): archive, downloaded_dir = str(archive), str(downloaded_dir) for downloaded_dir_file in dl_manager.iter_files(downloaded_dir): _, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file) if downloaded_dir_file_ext in self.EXTENSIONS: if not self.config.drop_labels: labels.add(os.path.basename(os.path.dirname(downloaded_dir_file))) path_depths.add(count_path_segments(downloaded_dir_file)) elif os.path.basename(downloaded_dir_file) in self.METADATA_FILENAMES: metadata_files[split].add((None, downloaded_dir_file)) else: archive_file_name = os.path.basename(archive) original_file_name = os.path.basename(downloaded_dir_file) logger.debug( f"The file '{original_file_name}' from the archive '{archive_file_name}' was ignored: it is not an {self.BASE_COLUMN_NAME}, and is not {self.METADATA_FILENAMES} either." ) data_files = self.config.data_files splits = [] for split_name, files in data_files.items(): if isinstance(files, str): files = [files] files, archives = self._split_files_and_archives(files) downloaded_files = dl_manager.download(files) downloaded_dirs = dl_manager.download_and_extract(archives) if do_analyze: # drop_metadata is None or False, drop_labels is None or False logger.info(f"Searching for labels and/or metadata files in {split_name} data files...") analyze(files, downloaded_files, split_name) analyze(archives, downloaded_dirs, split_name) if metadata_files: # add metadata if `metadata_files` are found and `drop_metadata` is None (default) or False add_metadata = not self.config.drop_metadata # if `metadata_files` are found, add labels only if # `drop_labels` is set up to False explicitly (not-default behavior) add_labels = self.config.drop_labels is False else: # if `metadata_files` are not found, don't add metadata add_metadata = False # if `metadata_files` are not found and `drop_labels` is None (default) - # add labels if files are on the same level in directory hierarchy and there is more than one label add_labels = ( (len(labels) > 1 and len(path_depths) == 1) if self.config.drop_labels is None else not self.config.drop_labels ) if add_labels: logger.info("Adding the labels inferred from data directories to the dataset's features...") if add_metadata: logger.info("Adding metadata to the dataset...") else: add_labels, add_metadata, metadata_files = False, False, {} splits.append( datasets.SplitGenerator( name=split_name, gen_kwargs={ "files": [(file, downloaded_file) for file, downloaded_file in zip(files, downloaded_files)] + [(None, dl_manager.iter_files(downloaded_dir)) for downloaded_dir in downloaded_dirs], "metadata_files": metadata_files, "split_name": split_name, "add_labels": add_labels, "add_metadata": add_metadata, }, ) ) if add_metadata: # Verify that: # * all metadata files have the same set of features # * the `file_name` key is one of the metadata keys and is of type string features_per_metadata_file: List[Tuple[str, datasets.Features]] = [] # Check that all metadata files share the same format metadata_ext = { os.path.splitext(downloaded_metadata_file)[1][1:] for _, downloaded_metadata_file in itertools.chain.from_iterable(metadata_files.values()) } if len(metadata_ext) > 1: raise ValueError(f"Found metadata files with different extensions: {list(metadata_ext)}") metadata_ext = metadata_ext.pop() for _, downloaded_metadata_file in itertools.chain.from_iterable(metadata_files.values()): pa_metadata_table = 
self._read_metadata(downloaded_metadata_file) features_per_metadata_file.append( (downloaded_metadata_file, datasets.Features.from_arrow_schema(pa_metadata_table.schema)) ) for downloaded_metadata_file, metadata_features in features_per_metadata_file: if metadata_features != features_per_metadata_file[0][1]: raise ValueError( f"Metadata files {downloaded_metadata_file} and {features_per_metadata_file[0][0]} have different features: {features_per_metadata_file[0]} != {metadata_features}" ) metadata_features = features_per_metadata_file[0][1] if "file_name" not in metadata_features: raise ValueError("`file_name` must be present as dictionary key in metadata files") if metadata_features["file_name"] != datasets.Value("string"): raise ValueError("`file_name` key must be a string") del metadata_features["file_name"] else: metadata_features = None # Normally, we would do this in _info, but we need to know the labels and/or metadata # before building the features if self.config.features is None: if add_labels: self.info.features = datasets.Features( { self.BASE_COLUMN_NAME: self.BASE_FEATURE(), "label": datasets.ClassLabel(names=sorted(labels)), } ) self.info.task_templates = [self.CLASSIFICATION_TASK.align_with_features(self.info.features)] else: self.info.features = datasets.Features({self.BASE_COLUMN_NAME: self.BASE_FEATURE()}) if add_metadata: # Warn if there are duplicated keys in metadata compared to the existing features # (`BASE_COLUMN_NAME`, optionally "label") duplicated_keys = set(self.info.features) & set(metadata_features) if duplicated_keys: logger.warning( f"Ignoring metadata columns {list(duplicated_keys)} as they are already present in " f"the features dictionary." ) # skip metadata duplicated keys self.info.features.update( { feature: metadata_features[feature] for feature in metadata_features if feature not in duplicated_keys } ) return splits def _split_files_and_archives(self, data_files): files, archives = [], [] for data_file in data_files: _, data_file_ext = os.path.splitext(data_file) if data_file_ext.lower() in self.EXTENSIONS: files.append(data_file) elif os.path.basename(data_file) in self.METADATA_FILENAMES: files.append(data_file) else: archives.append(data_file) return files, archives def _read_metadata(self, metadata_file): metadata_file_ext = os.path.splitext(metadata_file)[1][1:] if metadata_file_ext == "csv": # Use `pd.read_csv` (although slower) instead of `pyarrow.csv.read_csv` for reading CSV files for consistency with the CSV packaged module return pa.Table.from_pandas(pd.read_csv(metadata_file)) else: with open(metadata_file, "rb") as f: return paj.read_json(f) def _generate_examples(self, files, metadata_files, split_name, add_metadata, add_labels): split_metadata_files = metadata_files.get(split_name, []) sample_empty_metadata = ( {k: None for k in self.info.features if k != self.BASE_COLUMN_NAME} if self.info.features else {} ) last_checked_dir = None metadata_dir = None metadata_dict = None downloaded_metadata_file = None if split_metadata_files: metadata_ext = { os.path.splitext(downloaded_metadata_file)[1][1:] for _, downloaded_metadata_file in split_metadata_files } metadata_ext = metadata_ext.pop() file_idx = 0 for original_file, downloaded_file_or_dir in files: if original_file is not None: _, original_file_ext = os.path.splitext(original_file) if original_file_ext.lower() in self.EXTENSIONS: if add_metadata: # If the file is a file of a needed type, and we've just entered a new directory, # find the nereast metadata file (by counting path 
segments) for the directory current_dir = os.path.dirname(original_file) if last_checked_dir is None or last_checked_dir != current_dir: last_checked_dir = current_dir metadata_file_candidates = [ ( os.path.relpath(original_file, os.path.dirname(metadata_file_candidate)), metadata_file_candidate, downloaded_metadata_file, ) for metadata_file_candidate, downloaded_metadata_file in split_metadata_files if metadata_file_candidate is not None # ignore metadata_files that are inside archives and not os.path.relpath( original_file, os.path.dirname(metadata_file_candidate) ).startswith("..") ] if metadata_file_candidates: _, metadata_file, downloaded_metadata_file = min( metadata_file_candidates, key=lambda x: count_path_segments(x[0]) ) pa_metadata_table = self._read_metadata(downloaded_metadata_file) pa_file_name_array = pa_metadata_table["file_name"] pa_metadata_table = pa_metadata_table.drop(["file_name"]) metadata_dir = os.path.dirname(metadata_file) metadata_dict = { os.path.normpath(file_name).replace("\\", "/"): sample_metadata for file_name, sample_metadata in zip( pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist() ) } else: raise ValueError( f"One or several metadata.{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}." ) if metadata_dir is not None and downloaded_metadata_file is not None: file_relpath = os.path.relpath(original_file, metadata_dir) file_relpath = file_relpath.replace("\\", "/") if file_relpath not in metadata_dict: raise ValueError( f"{self.BASE_COLUMN_NAME} at {file_relpath} doesn't have metadata in {downloaded_metadata_file}." ) sample_metadata = metadata_dict[file_relpath] else: raise ValueError( f"One or several metadata.{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}." 
) else: sample_metadata = {} if add_labels: sample_label = {"label": os.path.basename(os.path.dirname(original_file))} else: sample_label = {} yield file_idx, { **sample_empty_metadata, self.BASE_COLUMN_NAME: downloaded_file_or_dir, **sample_metadata, **sample_label, } file_idx += 1 else: for downloaded_dir_file in downloaded_file_or_dir: _, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file) if downloaded_dir_file_ext.lower() in self.EXTENSIONS: if add_metadata: current_dir = os.path.dirname(downloaded_dir_file) if last_checked_dir is None or last_checked_dir != current_dir: last_checked_dir = current_dir metadata_file_candidates = [ ( os.path.relpath( downloaded_dir_file, os.path.dirname(downloaded_metadata_file) ), metadata_file_candidate, downloaded_metadata_file, ) for metadata_file_candidate, downloaded_metadata_file in split_metadata_files if metadata_file_candidate is None # ignore metadata_files that are not inside archives and not os.path.relpath( downloaded_dir_file, os.path.dirname(downloaded_metadata_file) ).startswith("..") ] if metadata_file_candidates: _, metadata_file, downloaded_metadata_file = min( metadata_file_candidates, key=lambda x: count_path_segments(x[0]) ) pa_metadata_table = self._read_metadata(downloaded_metadata_file) pa_file_name_array = pa_metadata_table["file_name"] pa_metadata_table = pa_metadata_table.drop(["file_name"]) metadata_dir = os.path.dirname(downloaded_metadata_file) metadata_dict = { os.path.normpath(file_name).replace("\\", "/"): sample_metadata for file_name, sample_metadata in zip( pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist() ) } else: raise ValueError( f"One or several metadata.{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}." ) if metadata_dir is not None and downloaded_metadata_file is not None: downloaded_dir_file_relpath = os.path.relpath(downloaded_dir_file, metadata_dir) downloaded_dir_file_relpath = downloaded_dir_file_relpath.replace("\\", "/") if downloaded_dir_file_relpath not in metadata_dict: raise ValueError( f"{self.BASE_COLUMN_NAME} at {downloaded_dir_file_relpath} doesn't have metadata in {downloaded_metadata_file}." ) sample_metadata = metadata_dict[downloaded_dir_file_relpath] else: raise ValueError( f"One or several metadata.{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}." ) else: sample_metadata = {} if add_labels: sample_label = {"label": os.path.basename(os.path.dirname(downloaded_dir_file))} else: sample_label = {} yield file_idx, { **sample_empty_metadata, self.BASE_COLUMN_NAME: downloaded_dir_file, **sample_metadata, **sample_label, } file_idx += 1
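The metadata checks above expect a string `file_name` column whose paths are relative to the metadata file; a hedged sketch of a compatible layout (paths and column names are hypothetical):

```python
# Hypothetical on-disk layout consumed by a folder-based builder such as audiofolder:
#
#   data/train/metadata.csv
#   data/train/cat/1.wav
#   data/train/dog/2.wav
#
# metadata.csv (the "file_name" column is mandatory; every other column becomes a feature):
#
#   file_name,transcription
#   cat/1.wav,"a cat meowing"
#   dog/2.wav,"a dog barking"
#
from datasets import load_dataset

ds = load_dataset("audiofolder", data_dir="data/train", split="train")
print(ds.column_names)  # ["audio", "transcription"] (labels are only added if drop_labels=False)
```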
0
hf_public_repos/datasets/src/datasets/packaged_modules
hf_public_repos/datasets/src/datasets/packaged_modules/arrow/arrow.py
import itertools
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa

import datasets
from datasets.table import table_cast


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ArrowConfig(datasets.BuilderConfig):
    """BuilderConfig for Arrow."""

    features: Optional[datasets.Features] = None


class Arrow(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ArrowConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pa.ipc.open_stream(f).schema)
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                try:
                    for batch_idx, record_batch in enumerate(pa.ipc.open_stream(f)):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
0
hf_public_repos/datasets/src/datasets/packaged_modules
hf_public_repos/datasets/src/datasets/packaged_modules/spark/spark.py
import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int logger = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class SparkConfig(datasets.BuilderConfig): """BuilderConfig for Spark.""" features: Optional[datasets.Features] = None def _reorder_dataframe_by_partition(df: "pyspark.sql.DataFrame", new_partition_order: List[int]): df_combined = df.select("*").where(f"part_id = {new_partition_order[0]}") for partition_id in new_partition_order[1:]: partition_df = df.select("*").where(f"part_id = {partition_id}") df_combined = df_combined.union(partition_df) return df_combined def _generate_iterable_examples( df: "pyspark.sql.DataFrame", partition_order: List[int], ): import pyspark def generate_fn(): df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id")) partition_df = _reorder_dataframe_by_partition(df_with_partition_id, partition_order) row_id = 0 # pipeline next partition in parallel to hide latency rows = partition_df.toLocalIterator(prefetchPartitions=True) curr_partition = -1 for row in rows: row_as_dict = row.asDict() part_id = row_as_dict["part_id"] row_as_dict.pop("part_id") if curr_partition != part_id: curr_partition = part_id row_id = 0 yield f"{part_id}_{row_id}", row_as_dict row_id += 1 return generate_fn class SparkExamplesIterable(_BaseExamplesIterable): def __init__( self, df: "pyspark.sql.DataFrame", partition_order=None, ): self.df = df self.partition_order = partition_order or range(self.df.rdd.getNumPartitions()) self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order) def __iter__(self): yield from self.generate_examples_fn() def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable": partition_order = list(range(self.df.rdd.getNumPartitions())) generator.shuffle(partition_order) return SparkExamplesIterable(self.df, partition_order=partition_order) def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable": partition_order = self.split_shard_indices_by_worker(worker_id, num_workers) return SparkExamplesIterable(self.df, partition_order=partition_order) @property def n_shards(self) -> int: return len(self.partition_order) class Spark(datasets.DatasetBuilder): BUILDER_CONFIG_CLASS = SparkConfig def __init__( self, df: "pyspark.sql.DataFrame", cache_dir: str = None, working_dir: str = None, **config_kwargs, ): import pyspark self._spark = pyspark.sql.SparkSession.builder.getOrCreate() self.df = df self._working_dir = working_dir super().__init__( cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs, ) def _validate_cache_dir(self): # Define this so that we don't reference self in create_cache_and_write_probe, which will result in a pickling # error due to pickling the SparkContext. cache_dir = self._cache_dir # Returns the path of the created file. def create_cache_and_write_probe(context): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. 
os.makedirs(cache_dir, exist_ok=True) probe_file = os.path.join(cache_dir, "fs_test" + uuid.uuid4().hex) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(probe_file, "a") return [probe_file] if self._spark.conf.get("spark.master", "").startswith("local"): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. if self._cache_dir: probe = ( self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect() ) if os.path.isfile(probe[0]): return raise ValueError( "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" ) def _info(self): return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager): return [datasets.SplitGenerator(name=datasets.Split.TRAIN)] def _repartition_df_if_needed(self, max_shard_size): import pyspark def get_arrow_batch_size(it): for batch in it: yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]}) df_num_rows = self.df.count() sample_num_rows = df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. approx_bytes_per_row = ( self.df.limit(sample_num_rows) .repartition(1) .mapInArrow(get_arrow_batch_size, "batch_bytes: long") .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes")) .collect()[0] .sample_bytes / sample_num_rows ) approx_total_size = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size)) self.df = self.df.repartition(new_num_partitions) def _prepare_split_single( self, fpath: str, file_format: str, max_shard_size: int, ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]: import pyspark writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath embed_local_files = file_format == "parquet" # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. features = self.config.features writer_batch_size = self._writer_batch_size storage_options = self._fs.storage_options def write_arrow(it): # Within the same SparkContext, no two task attempts will share the same attempt ID. task_id = pyspark.TaskContext().taskAttemptId() first_batch = next(it, None) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]], names=["task_id", "num_examples", "num_bytes"], ) shard_id = 0 writer = writer_class( features=features, path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files, ) table = pa.Table.from_batches([first_batch]) writer.write_table(table) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: num_examples, num_bytes = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"], ) shard_id += 1 writer = writer_class( features=writer._features, path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files, ) table = pa.Table.from_batches([batch]) writer.write_table(table) if writer._num_bytes > 0: num_examples, num_bytes = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"], ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(working_fpath)): dest = os.path.join(os.path.dirname(fpath), os.path.basename(file)) shutil.move(file, dest) stats = ( self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long") .groupBy("task_id") .agg( pyspark.sql.functions.sum("num_examples").alias("total_num_examples"), pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"), pyspark.sql.functions.count("num_bytes").alias("num_shards"), pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"), ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def _prepare_split( self, split_generator: "datasets.SplitGenerator", file_format: str = "arrow", max_shard_size: Optional[Union[str, int]] = None, num_proc: Optional[int] = None, **kwargs, ): self._validate_cache_dir() max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE) self._repartition_df_if_needed(max_shard_size) is_local = not is_remote_filesystem(self._fs) path_join = os.path.join if is_local else posixpath.join SUFFIX = "-TTTTT-SSSSS-of-NNNNN" fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}" fpath = path_join(self._output_dir, fname) total_num_examples = 0 total_num_bytes = 0 total_shards = 0 task_id_and_num_shards = [] all_shard_lengths = [] for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size): ( num_examples, num_bytes, num_shards, shard_lengths, ) = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards)) all_shard_lengths.extend(shard_lengths) split_generator.split_info.num_examples = total_num_examples split_generator.split_info.num_bytes = total_num_bytes # should rename everything at the end logger.debug(f"Renaming {total_shards} shards.") if total_shards > 1: split_generator.split_info.shard_lengths = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. 
fs = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( task_id: int, shard_id: int, global_shard_id: int, ): rename( fs, fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"), ) args = [] global_shard_id = 0 for i in range(len(task_id_and_num_shards)): task_id, num_shards = task_id_and_num_shards[i] for shard_id in range(num_shards): args.append([task_id, shard_id, global_shard_id]) global_shard_id += 1 self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect() else: # don't use any pattern shard_id = 0 task_id = task_id_and_num_shards[0][0] self._rename( fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), fpath.replace(SUFFIX, ""), ) def _get_examples_iterable_for_split( self, split_generator: "datasets.SplitGenerator", ) -> SparkExamplesIterable: return SparkExamplesIterable(self.df)
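The Spark builder above is driven by `Dataset.from_spark`; a hedged sketch on a local Spark session (the DataFrame contents are illustrative, and on a real multi-node cluster `cache_dir` must point to shared storage, as checked in `_validate_cache_dir`):

```python
from pyspark.sql import SparkSession

from datasets import Dataset

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([("hello", 0), ("world", 1)], schema="text string, label long")

# Materializes the DataFrame through the Spark builder above (one Arrow shard per Spark task).
ds = Dataset.from_spark(df)
print(len(ds))  # 2
```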
0
hf_public_repos/datasets/src/datasets/packaged_modules
hf_public_repos/datasets/src/datasets/packaged_modules/pandas/pandas.py
import itertools from dataclasses import dataclass from typing import Optional import pandas as pd import pyarrow as pa import datasets from datasets.table import table_cast @dataclass class PandasConfig(datasets.BuilderConfig): """BuilderConfig for Pandas.""" features: Optional[datasets.Features] = None class Pandas(datasets.ArrowBasedBuilder): BUILDER_CONFIG_CLASS = PandasConfig def _info(self): return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager): """We handle string, list and dicts in datafiles""" if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") data_files = dl_manager.download_and_extract(self.config.data_files) if isinstance(data_files, (str, list, tuple)): files = data_files if isinstance(files, str): files = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive files = [dl_manager.iter_files(file) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})] splits = [] for split_name, files in data_files.items(): if isinstance(files, str): files = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive files = [dl_manager.iter_files(file) for file in files] splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files})) return splits def _cast_table(self, pa_table: pa.Table) -> pa.Table: if self.config.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example pa_table = table_cast(pa_table, self.config.features.arrow_schema) return pa_table def _generate_tables(self, files): for i, file in enumerate(itertools.chain.from_iterable(files)): with open(file, "rb") as f: pa_table = pa.Table.from_pandas(pd.read_pickle(f)) yield i, self._cast_table(pa_table)
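Since `_generate_tables` reads each data file with `pd.read_pickle`, the builder expects pickled DataFrames; a hedged sketch with a hypothetical file name:

```python
import pandas as pd

from datasets import load_dataset

# A DataFrame serialized with DataFrame.to_pickle is what the builder above unpickles.
pd.DataFrame({"text": ["hello", "world"], "label": [0, 1]}).to_pickle("train.pkl")

ds = load_dataset("pandas", data_files={"train": "train.pkl"}, split="train")
```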
0
hf_public_repos/datasets/src/datasets/packaged_modules
hf_public_repos/datasets/src/datasets/packaged_modules/sql/sql.py
import sys from dataclasses import dataclass from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast if TYPE_CHECKING: import sqlite3 import sqlalchemy logger = datasets.utils.logging.get_logger(__name__) @dataclass class SqlConfig(datasets.BuilderConfig): """BuilderConfig for SQL.""" sql: Union[str, "sqlalchemy.sql.Selectable"] = None con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] = None index_col: Optional[Union[str, List[str]]] = None coerce_float: bool = True params: Optional[Union[List, Tuple, Dict]] = None parse_dates: Optional[Union[List, Dict]] = None columns: Optional[List[str]] = None chunksize: Optional[int] = 10_000 features: Optional[datasets.Features] = None def __post_init__(self): if self.sql is None: raise ValueError("sql must be specified") if self.con is None: raise ValueError("con must be specified") def create_config_id( self, config_kwargs: dict, custom_features: Optional[datasets.Features] = None, ) -> str: config_kwargs = config_kwargs.copy() # We need to stringify the Selectable object to make its hash deterministic # The process of stringifying is explained here: http://docs.sqlalchemy.org/en/latest/faq/sqlexpressions.html sql = config_kwargs["sql"] if not isinstance(sql, str): if datasets.config.SQLALCHEMY_AVAILABLE and "sqlalchemy" in sys.modules: import sqlalchemy if isinstance(sql, sqlalchemy.sql.Selectable): engine = sqlalchemy.create_engine(config_kwargs["con"].split("://")[0] + "://") sql_str = str(sql.compile(dialect=engine.dialect)) config_kwargs["sql"] = sql_str else: raise TypeError( f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}" ) else: raise TypeError( f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}" ) con = config_kwargs["con"] if not isinstance(con, str): config_kwargs["con"] = id(con) logger.info( f"SQL connection 'con' of type {type(con)} couldn't be hashed properly. To enable hashing, specify 'con' as URI string instead." 
) return super().create_config_id(config_kwargs, custom_features=custom_features) @property def pd_read_sql_kwargs(self): pd_read_sql_kwargs = { "index_col": self.index_col, "columns": self.columns, "params": self.params, "coerce_float": self.coerce_float, "parse_dates": self.parse_dates, } return pd_read_sql_kwargs class Sql(datasets.ArrowBasedBuilder): BUILDER_CONFIG_CLASS = SqlConfig def _info(self): return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager): return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={})] def _cast_table(self, pa_table: pa.Table) -> pa.Table: if self.config.features is not None: schema = self.config.features.arrow_schema if all(not require_storage_cast(feature) for feature in self.config.features.values()): # cheaper cast pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema) else: # more expensive cast; allows str <-> int/float or str to Audio for example pa_table = table_cast(pa_table, schema) return pa_table def _generate_tables(self): chunksize = self.config.chunksize sql_reader = pd.read_sql( self.config.sql, self.config.con, chunksize=chunksize, **self.config.pd_read_sql_kwargs ) sql_reader = [sql_reader] if chunksize is None else sql_reader for chunk_idx, df in enumerate(sql_reader): pa_table = pa.Table.from_pandas(df) yield chunk_idx, self._cast_table(pa_table)
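This builder sits behind `Dataset.from_sql`; a hedged sketch using a SQLite URI so the connection can be hashed deterministically (table and database names are hypothetical):

```python
from datasets import Dataset

# Passing the connection as a URI string avoids the "couldn't be hashed properly" warning above.
ds = Dataset.from_sql("SELECT text, label FROM train", con="sqlite:///my_database.db")
```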
0
hf_public_repos/datasets/src/datasets/packaged_modules
hf_public_repos/datasets/src/datasets/packaged_modules/text/text.py
import itertools import warnings from dataclasses import dataclass from io import StringIO from typing import Optional import pyarrow as pa import datasets from datasets.features.features import require_storage_cast from datasets.table import table_cast logger = datasets.utils.logging.get_logger(__name__) @dataclass class TextConfig(datasets.BuilderConfig): """BuilderConfig for text files.""" features: Optional[datasets.Features] = None encoding: str = "utf-8" errors = "deprecated" encoding_errors: Optional[str] = None chunksize: int = 10 << 20 # 10MB keep_linebreaks: bool = False sample_by: str = "line" def __post_init__(self): if self.errors != "deprecated": warnings.warn( "'errors' was deprecated in favor of 'encoding_erros' in version 2.14.0 and will be removed in 3.0.0.\n" f"You can remove this warning by passing 'encoding_errors={self.errors}' instead.", FutureWarning, ) self.encoding_errors = self.errors class Text(datasets.ArrowBasedBuilder): BUILDER_CONFIG_CLASS = TextConfig def _info(self): return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager): """The `data_files` kwarg in load_dataset() can be a str, List[str], Dict[str,str], or Dict[str,List[str]]. If str or List[str], then the dataset returns only the 'train' split. If dict, then keys should be from the `datasets.Split` enum. """ if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") data_files = dl_manager.download_and_extract(self.config.data_files) if isinstance(data_files, (str, list, tuple)): files = data_files if isinstance(files, str): files = [files] files = [dl_manager.iter_files(file) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})] splits = [] for split_name, files in data_files.items(): if isinstance(files, str): files = [files] files = [dl_manager.iter_files(file) for file in files] splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files})) return splits def _cast_table(self, pa_table: pa.Table) -> pa.Table: if self.config.features is not None: schema = self.config.features.arrow_schema if all(not require_storage_cast(feature) for feature in self.config.features.values()): # cheaper cast pa_table = pa_table.cast(schema) else: # more expensive cast; allows str <-> int/float or str to Audio for example pa_table = table_cast(pa_table, schema) return pa_table else: return pa_table.cast(pa.schema({"text": pa.string()})) def _generate_tables(self, files): pa_table_names = list(self.config.features) if self.config.features is not None else ["text"] for file_idx, file in enumerate(itertools.chain.from_iterable(files)): # open in text mode, by default translates universal newlines ("\n", "\r\n" and "\r") into "\n" with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f: if self.config.sample_by == "line": batch_idx = 0 while True: batch = f.read(self.config.chunksize) if not batch: break batch += f.readline() # finish current line # StringIO.readlines, by default splits only on "\n" (and keeps line breaks) batch = StringIO(batch).readlines() if not self.config.keep_linebreaks: batch = [line.rstrip("\n") for line in batch] pa_table = pa.Table.from_arrays([pa.array(batch)], names=pa_table_names) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # 
logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(pa_table) batch_idx += 1 elif self.config.sample_by == "paragraph": batch_idx = 0 batch = "" while True: new_batch = f.read(self.config.chunksize) if not new_batch: break batch += new_batch batch += f.readline() # finish current line batch = batch.split("\n\n") pa_table = pa.Table.from_arrays( [pa.array([example for example in batch[:-1] if example])], names=pa_table_names ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(pa_table) batch_idx += 1 batch = batch[-1] if batch: pa_table = pa.Table.from_arrays([pa.array([batch])], names=pa_table_names) yield (file_idx, batch_idx), self._cast_table(pa_table) elif self.config.sample_by == "document": text = f.read() pa_table = pa.Table.from_arrays([pa.array([text])], names=pa_table_names) yield file_idx, self._cast_table(pa_table)
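A short usage sketch for the text builder showing the `sample_by` and `keep_linebreaks` options (file names are hypothetical):

```python
from datasets import load_dataset

# one example per line (default), trailing line breaks stripped
ds_lines = load_dataset("text", data_files={"train": "train.txt"}, split="train")

# one example per blank-line-separated paragraph
ds_paragraphs = load_dataset("text", data_files={"train": "train.txt"}, split="train", sample_by="paragraph")

# keep the trailing "\n" characters on each line
ds_raw = load_dataset("text", data_files={"train": "train.txt"}, split="train", keep_linebreaks=True)
```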
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/filesystems/__init__.py
import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression _has_s3fs = importlib.util.find_spec("s3fs") is not None if _has_s3fs: from .s3filesystem import S3FileSystem # noqa: F401 COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [ compression.Bz2FileSystem, compression.GzipFileSystem, compression.Lz4FileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.") fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def extract_path_from_uri(dataset_path: str) -> str: """ Preprocesses `dataset_path` and removes remote filesystem (e.g. removing `s3://`). Args: dataset_path (`str`): Path (e.g. `dataset/train`) or remote uri (e.g. `s3://my-bucket/dataset/train`) of the dataset directory. """ if "://" in dataset_path: dataset_path = dataset_path.split("://")[1] return dataset_path def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool: """ Validates if filesystem has remote protocol. Args: fs (`fsspec.spec.AbstractFileSystem`): An abstract super-class for pythonic file-systems, e.g. `fsspec.filesystem(\'file\')` or [`datasets.filesystems.S3FileSystem`]. """ if fs is not None and fs.protocol != "file": return True else: return False def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str): """ Renames the file `src` in `fs` to `dst`. """ is_local = not is_remote_filesystem(fs) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst)) else: fs.mv(src, dst, recursive=True) def _reset_fsspec_lock() -> None: """ Clear reference to the loop and thread. This is necessary otherwise HTTPFileSystem hangs in the ML training loop. Only required for fsspec >= 0.9.0 See https://github.com/fsspec/gcsfs/issues/379 """ if hasattr(fsspec.asyn, "reset_lock"): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: fsspec.asyn.iothread[0] = None fsspec.asyn.loop[0] = None fsspec.asyn.lock = threading.Lock()
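A small sketch of the two helpers defined above:

```python
import fsspec

from datasets.filesystems import extract_path_from_uri, is_remote_filesystem

# Strips the protocol prefix from a remote URI.
path = extract_path_from_uri("s3://my-bucket/dataset/train")  # "my-bucket/dataset/train"

# Filesystems whose protocol is not "file" are treated as remote.
fs = fsspec.filesystem("file")
print(is_remote_filesystem(fs))
```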
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/filesystems/compression.py
import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class BaseCompressedFileFileSystem(AbstractArchiveFileSystem): """Read contents of compressed file as a filesystem with one file inside.""" root_marker = "" protocol: str = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) compression: str = None # compression type in fsspec. ex: "gzip" extension: str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs ): """ The compressed file system can be instantiated from any compressed file. It reads the contents of compressed file as a filesystem with one file inside, as if it was an archive. The single file inside the filesystem is named after the compresssed file, without the compression extension at the end of the filename. Args: fo (:obj:``str``): Path to compressed file. Will fetch file using ``fsspec.open()`` mode (:obj:``str``): Currently, only 'rb' accepted target_protocol(:obj:``str``, optional): To override the FS protocol inferred from a URL. target_options (:obj:``dict``, optional): Kwargs passed when instantiating the target FS. """ super().__init__(self, **kwargs) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode self.file = fsspec.open( fo, mode="rb", protocol=target_protocol, compression=self.compression, client_kwargs={ "requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459 "trust_env": True, # Enable reading proxy env variables. **(target_options or {}).pop("client_kwargs", {}), # To avoid issues if it was already passed. }, **(target_options or {}), ) self.compressed_name = os.path.basename(self.file.path.split("::")[0]) self.uncompressed_name = ( self.compressed_name[: self.compressed_name.rindex(".")] if "." 
in self.compressed_name else self.compressed_name ) self.dir_cache = None @classmethod def _strip_protocol(cls, path): # compressed file paths are always relative to the archive root return super()._strip_protocol(path).lstrip("/") def _get_dirs(self): if self.dir_cache is None: f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name} self.dir_cache = {f["name"]: f} def cat(self, path: str): return self.file.open().read() def _open( self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs, ): path = self._strip_protocol(path) if mode != "rb": raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'") return self.file.open() class Bz2FileSystem(BaseCompressedFileFileSystem): """Read contents of BZ2 file as a filesystem with one file inside.""" protocol = "bz2" compression = "bz2" extension = ".bz2" class GzipFileSystem(BaseCompressedFileFileSystem): """Read contents of GZIP file as a filesystem with one file inside.""" protocol = "gzip" compression = "gzip" extension = ".gz" class Lz4FileSystem(BaseCompressedFileFileSystem): """Read contents of LZ4 file as a filesystem with one file inside.""" protocol = "lz4" compression = "lz4" extension = ".lz4" class XzFileSystem(BaseCompressedFileFileSystem): """Read contents of .xz (LZMA) file as a filesystem with one file inside.""" protocol = "xz" compression = "xz" extension = ".xz" class ZstdFileSystem(BaseCompressedFileFileSystem): """ Read contents of zstd file as a filesystem with one file inside. Note that reading in binary mode with fsspec isn't supported yet: https://github.com/indygreg/python-zstandard/issues/136 """ protocol = "zstd" compression = "zstd" extension = ".zst" def __init__( self, fo: str, mode: str = "rb", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, block_size: int = DEFAULT_BLOCK_SIZE, **kwargs, ): super().__init__( fo=fo, mode=mode, target_protocol=target_protocol, target_options=target_options, block_size=block_size, **kwargs, ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 _enter = self.file.__enter__ class WrappedFile: def __init__(self, file_): self._file = file_ def __enter__(self): self._file.__enter__() return self def __exit__(self, *args, **kwargs): self._file.__exit__(*args, **kwargs) def __iter__(self): return iter(self._file) def __next__(self): return next(self._file) def __getattr__(self, attr): return getattr(self._file, attr) def fixed_enter(*args, **kwargs): return WrappedFile(_enter(*args, **kwargs)) self.file.__enter__ = fixed_enter
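A hedged sketch of reading through one of these one-file filesystems directly (the archive path is hypothetical; `_open` resolves any inner path to the single decompressed file):

```python
from datasets.filesystems.compression import GzipFileSystem

fs = GzipFileSystem(fo="data/train.jsonl.gz")  # hypothetical local archive
with fs.open("train.jsonl", mode="rb") as f:   # inner name = archive name minus the ".gz" extension
    first_line = f.readline()
```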
0
hf_public_repos/datasets/src/datasets
hf_public_repos/datasets/src/datasets/filesystems/s3filesystem.py
import s3fs from ..utils.deprecation_utils import deprecated @deprecated("Use s3fs.S3FileSystem instead.") class S3FileSystem(s3fs.S3FileSystem): """ `datasets.filesystems.S3FileSystem` is a subclass of [`s3fs.S3FileSystem`](https://s3fs.readthedocs.io/en/latest/api.html). Users can use this class to access S3 as if it were a file system. It exposes a filesystem-like API (ls, cp, open, etc.) on top of S3 storage. Provide credentials either explicitly (`key=`, `secret=`) or with boto's credential methods. See botocore documentation for more information. If no credentials are available, use `anon=True`. Args: anon (`bool`, default to `False`): Whether to use anonymous connection (public buckets only). If `False`, uses the key/secret given, or boto's credential resolver (client_kwargs, environment, variables, config files, EC2 IAM server, in that order). key (`str`): If not anonymous, use this access key ID, if specified. secret (`str`): If not anonymous, use this secret access key, if specified. token (`str`): If not anonymous, use this security token, if specified. use_ssl (`bool`, defaults to `True`): Whether to use SSL in connections to S3; may be faster without, but insecure. If `use_ssl` is also set in `client_kwargs`, the value set in `client_kwargs` will take priority. s3_additional_kwargs (`dict`): Parameters that are used when calling S3 API methods. Typically used for things like ServerSideEncryption. client_kwargs (`dict`): Parameters for the botocore client. requester_pays (`bool`, defaults to `False`): Whether `RequesterPays` buckets are supported. default_block_size (`int`): If given, the default block size value used for `open()`, if no specific value is given at all time. The built-in default is 5MB. default_fill_cache (`bool`, defaults to `True`): Whether to use cache filling with open by default. Refer to `S3File.open`. default_cache_type (`str`, defaults to `bytes`): If given, the default `cache_type` value used for `open()`. Set to `none` if no caching is desired. See fsspec's documentation for other available `cache_type` values. version_aware (`bool`, defaults to `False`): Whether to support bucket versioning. If enable this will require the user to have the necessary IAM permissions for dealing with versioned objects. cache_regions (`bool`, defaults to `False`): Whether to cache bucket regions. Whenever a new bucket is used, it will first find out which region it belongs to and then use the client for that region. asynchronous (`bool`, defaults to `False`): Whether this instance is to be used from inside coroutines. config_kwargs (`dict`): Parameters passed to `botocore.client.Config`. **kwargs: Other parameters for core session. session (`aiobotocore.session.AioSession`): Session to be used for all connections. This session will be used inplace of creating a new session inside S3FileSystem. For example: `aiobotocore.session.AioSession(profile='test_user')`. skip_instance_cache (`bool`): Control reuse of instances. Passed on to `fsspec`. use_listings_cache (`bool`): Control reuse of directory listings. Passed on to `fsspec`. listings_expiry_time (`int` or `float`): Control reuse of directory listings. Passed on to `fsspec`. max_paths (`int`): Control reuse of directory listings. Passed on to `fsspec`. Examples: Listing files from public S3 bucket. 
```py >>> import datasets >>> s3 = datasets.filesystems.S3FileSystem(anon=True) # doctest: +SKIP >>> s3.ls('public-datasets/imdb/train') # doctest: +SKIP ['dataset_info.json.json','dataset.arrow','state.json'] ``` Listing files from private S3 bucket using `aws_access_key_id` and `aws_secret_access_key`. ```py >>> import datasets >>> s3 = datasets.filesystems.S3FileSystem(key=aws_access_key_id, secret=aws_secret_access_key) # doctest: +SKIP >>> s3.ls('my-private-datasets/imdb/train') # doctest: +SKIP ['dataset_info.json.json','dataset.arrow','state.json'] ``` Using `S3Filesystem` with `botocore.session.Session` and custom `aws_profile`. ```py >>> import botocore >>> from datasets.filesystems import S3Filesystem >>> s3_session = botocore.session.Session(profile_name='my_profile_name') >>> s3 = S3FileSystem(session=s3_session) # doctest: +SKIP ``` Loading dataset from S3 using `S3Filesystem` and [`load_from_disk`]. ```py >>> from datasets import load_from_disk >>> from datasets.filesystems import S3Filesystem >>> s3 = S3FileSystem(key=aws_access_key_id, secret=aws_secret_access_key) # doctest: +SKIP >>> dataset = load_from_disk('s3://my-private-datasets/imdb/train', storage_options=s3.storage_options) # doctest: +SKIP >>> print(len(dataset)) 25000 ``` Saving dataset to S3 using `S3Filesystem` and [`Dataset.save_to_disk`]. ```py >>> from datasets import load_dataset >>> from datasets.filesystems import S3Filesystem >>> dataset = load_dataset("imdb") >>> s3 = S3FileSystem(key=aws_access_key_id, secret=aws_secret_access_key) # doctest: +SKIP >>> dataset.save_to_disk('s3://my-private-datasets/imdb/train', storage_options=s3.storage_options) # doctest: +SKIP ``` """
0
hf_public_repos
hf_public_repos/text-generation-inference/README.md
<div align="center"> ![image](https://github.com/huggingface/text-generation-inference/assets/3841370/38ba1531-ea0d-4851-b31a-a6d4ddc944b0) # Text Generation Inference <a href="https://github.com/huggingface/text-generation-inference"> <img alt="GitHub Repo stars" src="https://img.shields.io/github/stars/huggingface/text-generation-inference?style=social"> </a> <a href="https://github.com/huggingface/text-generation-inference/blob/main/LICENSE"> <img alt="License" src="https://img.shields.io/github/license/huggingface/text-generation-inference"> </a> <a href="https://huggingface.github.io/text-generation-inference"> <img alt="Swagger API documentation" src="https://img.shields.io/badge/API-Swagger-informational"> </a> </div> A Rust, Python and gRPC server for text generation inference. Used in production at [HuggingFace](https://huggingface.co) to power LLMs api-inference widgets. ## Table of contents - [Features](#features) - [Optimized Architectures](#optimized-architectures) - [Get Started](#get-started) - [Docker](#docker) - [API Documentation](#api-documentation) - [Using a private or gated model](#using-a-private-or-gated-model) - [A note on Shared Memory](#a-note-on-shared-memory-shm) - [Distributed Tracing](#distributed-tracing) - [Local Install](#local-install) - [CUDA Kernels](#cuda-kernels) - [Run Falcon](#run-falcon) - [Run](#run) - [Quantization](#quantization) - [Develop](#develop) - [Testing](#testing) ## Features - Serve the most popular Large Language Models with a simple launcher - Tensor Parallelism for faster inference on multiple GPUs - Token streaming using Server-Sent Events (SSE) - [Continuous batching of incoming requests](https://github.com/huggingface/text-generation-inference/tree/main/router) for increased total throughput - Optimized transformers code for inference using [flash-attention](https://github.com/HazyResearch/flash-attention) and [Paged Attention](https://github.com/vllm-project/vllm) on the most popular architectures - Quantization with [bitsandbytes](https://github.com/TimDettmers/bitsandbytes) and [GPT-Q](https://arxiv.org/abs/2210.17323) - [Safetensors](https://github.com/huggingface/safetensors) weight loading - Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226) - Logits warper (temperature scaling, top-p, top-k, repetition penalty, more details see [transformers.LogitsProcessor](https://huggingface.co/docs/transformers/internal/generation_utils#transformers.LogitsProcessor)) - Stop sequences - Log probabilities - Production ready (distributed tracing with Open Telemetry, Prometheus metrics) ## Optimized architectures - [BLOOM](https://huggingface.co/bigscience/bloom) - [FLAN-T5](https://huggingface.co/google/flan-t5-xxl) - [Galactica](https://huggingface.co/facebook/galactica-120b) - [GPT-Neox](https://huggingface.co/EleutherAI/gpt-neox-20b) - [Llama](https://github.com/facebookresearch/llama) - [OPT](https://huggingface.co/facebook/opt-66b) - [SantaCoder](https://huggingface.co/bigcode/santacoder) - [Starcoder](https://huggingface.co/bigcode/starcoder) - [Falcon 7B](https://huggingface.co/tiiuae/falcon-7b) - [Falcon 40B](https://huggingface.co/tiiuae/falcon-40b) - [MPT](https://huggingface.co/mosaicml/mpt-30b) - [Llama V2](https://huggingface.co/meta-llama) Other architectures are supported on a best effort basis using: `AutoModelForCausalLM.from_pretrained(<model>, device_map="auto")` or `AutoModelForSeq2SeqLM.from_pretrained(<model>, device_map="auto")` ## Get started ### Docker The easiest way of 
getting started is using the official Docker container: ```shell model=tiiuae/falcon-7b-instruct volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:0.9.3 --model-id $model ``` **Note:** To use GPUs, you need to install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html). We also recommend using NVIDIA drivers with CUDA version 11.8 or higher. To see all options to serve your models (in the [code](https://github.com/huggingface/text-generation-inference/blob/main/launcher/src/main.rs) or in the cli: ``` text-generation-launcher --help ``` You can then query the model using either the `/generate` or `/generate_stream` routes: ```shell curl 127.0.0.1:8080/generate \ -X POST \ -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' \ -H 'Content-Type: application/json' ``` ```shell curl 127.0.0.1:8080/generate_stream \ -X POST \ -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' \ -H 'Content-Type: application/json' ``` or from Python: ```shell pip install text-generation ``` ```python from text_generation import Client client = Client("http://127.0.0.1:8080") print(client.generate("What is Deep Learning?", max_new_tokens=20).generated_text) text = "" for response in client.generate_stream("What is Deep Learning?", max_new_tokens=20): if not response.token.special: text += response.token.text print(text) ``` ### API documentation You can consult the OpenAPI documentation of the `text-generation-inference` REST API using the `/docs` route. The Swagger UI is also available at: [https://huggingface.github.io/text-generation-inference](https://huggingface.github.io/text-generation-inference). ### Using a private or gated model You have the option to utilize the `HUGGING_FACE_HUB_TOKEN` environment variable for configuring the token employed by `text-generation-inference`. This allows you to gain access to protected resources. For example, if you want to serve the gated Llama V2 model variants: 1. Go to https://huggingface.co/settings/tokens 2. Copy your cli READ token 3. Export `HUGGING_FACE_HUB_TOKEN=<your cli READ token>` or with Docker: ```shell model=meta-llama/Llama-2-7b-chat-hf volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run token=<your cli READ token> docker run --gpus all --shm-size 1g -e HUGGING_FACE_HUB_TOKEN=$token -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:0.9.3 --model-id $model ``` ### A note on Shared Memory (shm) [`NCCL`](https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/index.html) is a communication framework used by `PyTorch` to do distributed training/inference. `text-generation-inference` make use of `NCCL` to enable Tensor Parallelism to dramatically speed up inference for large language models. In order to share data between the different devices of a `NCCL` group, `NCCL` might fall back to using the host memory if peer-to-peer using NVLink or PCI is not possible. To allow the container to use 1G of Shared Memory and support SHM sharing, we add `--shm-size 1g` on the above command. If you are running `text-generation-inference` inside `Kubernetes`. You can also add Shared Memory to the container by creating a volume with: ```yaml - name: shm emptyDir: medium: Memory sizeLimit: 1Gi ``` and mounting it to `/dev/shm`. 
Finally, you can also disable SHM sharing by using the `NCCL_SHM_DISABLE=1` environment variable. However, note that this will impact performance.

### Distributed Tracing

`text-generation-inference` is instrumented with distributed tracing using OpenTelemetry. You can use this feature by setting the address of an OTLP collector with the `--otlp-endpoint` argument (a short example is sketched at the end of this README).

### Local install

You can also opt to install `text-generation-inference` locally.

First [install Rust](https://rustup.rs/) and create a Python virtual environment with at least Python 3.9, e.g. using `conda`:

```shell
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh

conda create -n text-generation-inference python=3.9
conda activate text-generation-inference
```

You may also need to install Protoc.

On Linux:

```shell
PROTOC_ZIP=protoc-21.12-linux-x86_64.zip
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v21.12/$PROTOC_ZIP
sudo unzip -o $PROTOC_ZIP -d /usr/local bin/protoc
sudo unzip -o $PROTOC_ZIP -d /usr/local 'include/*'
rm -f $PROTOC_ZIP
```

On macOS, using Homebrew:

```shell
brew install protobuf
```

Then run:

```shell
BUILD_EXTENSIONS=True make install # Install repository and HF/transformer fork with CUDA kernels
make run-falcon-7b-instruct
```

**Note:** on some machines, you may also need the OpenSSL libraries and gcc. On Linux machines, run:

```shell
sudo apt-get install libssl-dev gcc -y
```

### CUDA Kernels

The custom CUDA kernels are only tested on NVIDIA A100s. If you have any installation or runtime issues, you can remove the kernels by using the `DISABLE_CUSTOM_KERNELS=True` environment variable.

Be aware that the official Docker image has them enabled by default.

## Run Falcon

### Run

```shell
make run-falcon-7b-instruct
```

### Quantization

You can also quantize the weights with bitsandbytes to reduce the VRAM requirement:

```shell
make run-falcon-7b-instruct-quantize
```

## Develop

```shell
make server-dev
make router-dev
```

## Testing

```shell
# python
make python-server-tests
make python-client-tests
# or both server and client tests
make python-tests
# rust cargo tests
make rust-tests
# integration tests
make integration-tests
```
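As a minimal sketch of the `--otlp-endpoint` argument and the `DISABLE_CUSTOM_KERNELS` environment variable mentioned above: the collector address and model id below are illustrative placeholders (the standard OTLP gRPC port is assumed), not values prescribed by this project.

```shell
# Hypothetical invocation: export traces to a local OTLP collector and fall back
# to the non-custom kernels if the custom CUDA kernels cause issues on your hardware.
DISABLE_CUSTOM_KERNELS=True text-generation-launcher \
    --model-id tiiuae/falcon-7b-instruct \
    --otlp-endpoint http://localhost:4317
```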
0
hf_public_repos
hf_public_repos/text-generation-inference/sagemaker-entrypoint.sh
#!/bin/bash

# The model id is required; everything else below is optional.
if [[ -z "${HF_MODEL_ID}" ]]; then
  echo "HF_MODEL_ID must be set"
  exit 1
fi
export MODEL_ID="${HF_MODEL_ID}"

# Map the SageMaker / Hub environment variables to the launcher's variables.
if [[ -n "${HF_MODEL_REVISION}" ]]; then
  export REVISION="${HF_MODEL_REVISION}"
fi

if [[ -n "${SM_NUM_GPUS}" ]]; then
  export NUM_SHARD="${SM_NUM_GPUS}"
fi

if [[ -n "${HF_MODEL_QUANTIZE}" ]]; then
  export QUANTIZE="${HF_MODEL_QUANTIZE}"
fi

if [[ -n "${HF_MODEL_TRUST_REMOTE_CODE}" ]]; then
  export TRUST_REMOTE_CODE="${HF_MODEL_TRUST_REMOTE_CODE}"
fi

text-generation-launcher --port 8080
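For context, a rough sketch of how this entrypoint might be exercised outside SageMaker; the image tag `tgi-sagemaker` and the quantization value are illustrative assumptions, while the environment variable names come from the script above.

```shell
# Hypothetical image tag built from the "sagemaker" stage of the Dockerfile.
# The entrypoint maps HF_MODEL_ID / SM_NUM_GPUS / HF_MODEL_QUANTIZE to
# MODEL_ID / NUM_SHARD / QUANTIZE before starting the launcher on port 8080.
docker run --gpus all --shm-size 1g -p 8080:8080 \
    -e HF_MODEL_ID=tiiuae/falcon-7b-instruct \
    -e SM_NUM_GPUS=1 \
    -e HF_MODEL_QUANTIZE=bitsandbytes \
    tgi-sagemaker
```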
0
hf_public_repos
hf_public_repos/text-generation-inference/Dockerfile
# Rust builder
FROM lukemathwalker/cargo-chef:latest-rust-1.70 AS chef
WORKDIR /usr/src

ARG CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse

FROM chef as planner
COPY Cargo.toml Cargo.toml
COPY rust-toolchain.toml rust-toolchain.toml
COPY proto proto
COPY benchmark benchmark
COPY router router
COPY launcher launcher
RUN cargo chef prepare --recipe-path recipe.json

FROM chef AS builder

ARG GIT_SHA
ARG DOCKER_LABEL

RUN PROTOC_ZIP=protoc-21.12-linux-x86_64.zip && \
    curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v21.12/$PROTOC_ZIP && \
    unzip -o $PROTOC_ZIP -d /usr/local bin/protoc && \
    unzip -o $PROTOC_ZIP -d /usr/local 'include/*' && \
    rm -f $PROTOC_ZIP

COPY --from=planner /usr/src/recipe.json recipe.json
RUN cargo chef cook --release --recipe-path recipe.json

COPY Cargo.toml Cargo.toml
COPY rust-toolchain.toml rust-toolchain.toml
COPY proto proto
COPY benchmark benchmark
COPY router router
COPY launcher launcher
RUN cargo build --release

# Python builder
# Adapted from: https://github.com/pytorch/pytorch/blob/master/Dockerfile
FROM debian:bullseye-slim as pytorch-install

ARG PYTORCH_VERSION=2.0.0
ARG PYTHON_VERSION=3.9
ARG CUDA_VERSION=11.8
ARG MAMBA_VERSION=23.1.0-1
ARG CUDA_CHANNEL=nvidia
ARG INSTALL_CHANNEL=pytorch
# Automatically set by buildx
ARG TARGETPLATFORM

ENV PATH /opt/conda/bin:$PATH

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
    build-essential \
    ca-certificates \
    ccache \
    curl \
    git && \
    rm -rf /var/lib/apt/lists/*

# Install conda
# translating Docker's TARGETPLATFORM into mamba arches
RUN case ${TARGETPLATFORM} in \
    "linux/arm64") MAMBA_ARCH=aarch64 ;; \
    *) MAMBA_ARCH=x86_64 ;; \
    esac && \
    curl -fsSL -v -o ~/mambaforge.sh -O "https://github.com/conda-forge/miniforge/releases/download/${MAMBA_VERSION}/Mambaforge-${MAMBA_VERSION}-Linux-${MAMBA_ARCH}.sh"
RUN chmod +x ~/mambaforge.sh && \
    bash ~/mambaforge.sh -b -p /opt/conda && \
    rm ~/mambaforge.sh

# Install pytorch
# On arm64 we exit with an error code
RUN case ${TARGETPLATFORM} in \
    "linux/arm64") exit 1 ;; \
    *) /opt/conda/bin/conda update -y conda && \
    /opt/conda/bin/conda install -c "${INSTALL_CHANNEL}" -c "${CUDA_CHANNEL}" -y "python=${PYTHON_VERSION}" pytorch==$PYTORCH_VERSION "pytorch-cuda=$(echo $CUDA_VERSION | cut -d'.' -f 1-2)" ;; \
    esac && \
    /opt/conda/bin/conda clean -ya

# CUDA kernels builder image
FROM pytorch-install as kernel-builder

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
    ninja-build \
    && rm -rf /var/lib/apt/lists/*

RUN /opt/conda/bin/conda install -c "nvidia/label/cuda-11.8.0" cuda==11.8 && \
    /opt/conda/bin/conda clean -ya

# Build Flash Attention CUDA kernels
FROM kernel-builder as flash-att-builder

WORKDIR /usr/src

COPY server/Makefile-flash-att Makefile

# Build specific version of flash attention
RUN make build-flash-attention

# Build Flash Attention v2 CUDA kernels
FROM kernel-builder as flash-att-v2-builder

WORKDIR /usr/src

COPY server/Makefile-flash-att-v2 Makefile

# Build specific version of flash attention v2
RUN make build-flash-attention-v2

# Build Transformers exllama kernels
FROM kernel-builder as exllama-kernels-builder

WORKDIR /usr/src

COPY server/exllama_kernels/ .

# Build specific version of transformers
RUN TORCH_CUDA_ARCH_LIST="8.0;8.6+PTX" python setup.py build

# Build Transformers CUDA kernels
FROM kernel-builder as custom-kernels-builder

WORKDIR /usr/src

COPY server/custom_kernels/ .
# Build specific version of transformers
RUN python setup.py build

# Build vllm CUDA kernels
FROM kernel-builder as vllm-builder

WORKDIR /usr/src

COPY server/Makefile-vllm Makefile

# Build specific version of vllm
RUN make build-vllm

# Text Generation Inference base image
FROM nvidia/cuda:11.8.0-base-ubuntu20.04 as base

# Conda env
ENV PATH=/opt/conda/bin:$PATH \
    CONDA_PREFIX=/opt/conda

# Text Generation Inference base env
ENV HUGGINGFACE_HUB_CACHE=/data \
    HF_HUB_ENABLE_HF_TRANSFER=1 \
    PORT=80

WORKDIR /usr/src

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
    libssl-dev \
    ca-certificates \
    make \
    && rm -rf /var/lib/apt/lists/*

# Copy conda with PyTorch installed
COPY --from=pytorch-install /opt/conda /opt/conda

# Copy build artifacts from flash attention builder
COPY --from=flash-att-builder /usr/src/flash-attention/build/lib.linux-x86_64-cpython-39 /opt/conda/lib/python3.9/site-packages
COPY --from=flash-att-builder /usr/src/flash-attention/csrc/layer_norm/build/lib.linux-x86_64-cpython-39 /opt/conda/lib/python3.9/site-packages
COPY --from=flash-att-builder /usr/src/flash-attention/csrc/rotary/build/lib.linux-x86_64-cpython-39 /opt/conda/lib/python3.9/site-packages

# Copy build artifacts from flash attention v2 builder
COPY --from=flash-att-v2-builder /usr/src/flash-attention-v2/build/lib.linux-x86_64-cpython-39 /opt/conda/lib/python3.9/site-packages

# Copy build artifacts from custom kernels builder
COPY --from=custom-kernels-builder /usr/src/build/lib.linux-x86_64-cpython-39 /opt/conda/lib/python3.9/site-packages
# Copy build artifacts from exllama kernels builder
COPY --from=exllama-kernels-builder /usr/src/build/lib.linux-x86_64-cpython-39 /opt/conda/lib/python3.9/site-packages

# Copy build artifacts from vllm builder
COPY --from=vllm-builder /usr/src/vllm/build/lib.linux-x86_64-cpython-39 /opt/conda/lib/python3.9/site-packages

# Install flash-attention dependencies
RUN pip install einops --no-cache-dir

# Install server
COPY proto proto
COPY server server
COPY server/Makefile server/Makefile
RUN cd server && \
    make gen-server && \
    pip install -r requirements.txt && \
    pip install ".[bnb, accelerate]" --no-cache-dir

# Install benchmarker
COPY --from=builder /usr/src/target/release/text-generation-benchmark /usr/local/bin/text-generation-benchmark
# Install router
COPY --from=builder /usr/src/target/release/text-generation-router /usr/local/bin/text-generation-router
# Install launcher
COPY --from=builder /usr/src/target/release/text-generation-launcher /usr/local/bin/text-generation-launcher

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
    build-essential \
    g++ \
    && rm -rf /var/lib/apt/lists/*

# AWS Sagemaker compatible image
FROM base as sagemaker

COPY sagemaker-entrypoint.sh entrypoint.sh
RUN chmod +x entrypoint.sh

ENTRYPOINT ["./entrypoint.sh"]

# Final image
FROM base

ENTRYPOINT ["text-generation-launcher"]
CMD ["--json-output"]
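A brief sketch of how this multi-stage Dockerfile might be built, using the `GIT_SHA` and `DOCKER_LABEL` build args and the `sagemaker` stage it declares; the image tags are illustrative assumptions, not names used by the project.

```shell
# Default (final) image: entrypoint is text-generation-launcher.
docker build -t text-generation-inference:dev \
    --build-arg GIT_SHA=$(git rev-parse HEAD) \
    --build-arg DOCKER_LABEL=dev .

# SageMaker-compatible image: stops at the "sagemaker" stage,
# which wraps the launcher with sagemaker-entrypoint.sh.
docker build --target sagemaker -t text-generation-inference:dev-sagemaker .
```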
0
hf_public_repos
hf_public_repos/text-generation-inference/LICENSE
Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 2022 Hugging Face Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
0
hf_public_repos
hf_public_repos/text-generation-inference/.dockerignore
aml
target
server/transformers
server/flash-attention
0
hf_public_repos
hf_public_repos/text-generation-inference/Cargo.toml
[workspace]
members = [
    "benchmark",
    "router",
    "router/client",
    "router/grpc-metadata",
    "launcher"
]

[workspace.package]
version = "0.9.3"
edition = "2021"
authors = ["Olivier Dehaene"]
homepage = "https://github.com/huggingface/text-generation-inference"

[profile.release]
debug = 1
incremental = true
lto = "off"
panic = "abort"
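A small illustration of how this workspace layout can be used with Cargo; the package name passed to `-p` is inferred from the `text-generation-launcher` binary installed in the Dockerfile and should be treated as an assumption.

```shell
# Build only the launcher member of the workspace (package name assumed).
cargo build --release -p text-generation-launcher

# Or build every workspace member with the release profile defined above.
cargo build --release --workspace
```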
0
hf_public_repos
hf_public_repos/text-generation-inference/Cargo.lock
# This file is automatically @generated by Cargo. # It is not intended for manual editing. version = 3 [[package]] name = "addr2line" version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3" dependencies = [ "gimli", ] [[package]] name = "adler" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "aes" version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" dependencies = [ "cfg-if", "cipher", "cpufeatures", ] [[package]] name = "ahash" version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" dependencies = [ "cfg-if", "once_cell", "version_check", ] [[package]] name = "aho-corasick" version = "0.7.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" dependencies = [ "memchr", ] [[package]] name = "aho-corasick" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" dependencies = [ "memchr", ] [[package]] name = "anstream" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", "is-terminal", "utf8parse", ] [[package]] name = "anstyle" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" [[package]] name = "anstyle-parse" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" dependencies = [ "windows-sys 0.48.0", ] [[package]] name = "anstyle-wincon" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "180abfa45703aebe0093f79badacc01b8fd4ea2e35118747e5811127f926e188" dependencies = [ "anstyle", "windows-sys 0.48.0", ] [[package]] name = "anyhow" version = "1.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" [[package]] name = "arc-swap" version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" [[package]] name = "async-rustls" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93b21a03b7c21702a0110f9f8d228763a533570deb376119042dabf33c37a01a" dependencies = [ "futures-io", "rustls", "webpki", ] [[package]] name = "async-stream" version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" dependencies = [ "async-stream-impl", "futures-core", 
"pin-project-lite", ] [[package]] name = "async-stream-impl" version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", "syn 2.0.25", ] [[package]] name = "async-trait" version = "0.1.71" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a564d521dd56509c4c47480d00b80ee55f7e385ae48db5744c67ad50c92d2ebf" dependencies = [ "proc-macro2", "quote", "syn 2.0.25", ] [[package]] name = "autocfg" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "average" version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "843ec791d3f24503bbf72bbd5e49a3ab4dbb4bcd0a8ef6b0c908efa73caa27b1" dependencies = [ "easy-cast", "float-ord", "num-traits", ] [[package]] name = "awaitdrop" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "771051cdc7eec2dc1b23fbf870bb7fbb89136fe374227c875e377f1eed99a429" dependencies = [ "futures", "generational-arena", "parking_lot", "slotmap", ] [[package]] name = "axum" version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8175979259124331c1d7bf6586ee7e0da434155e4b2d48ec2c8386281d8df39" dependencies = [ "async-trait", "axum-core", "bitflags 1.3.2", "bytes", "futures-util", "http", "http-body", "hyper", "itoa", "matchit", "memchr", "mime", "percent-encoding", "pin-project-lite", "rustversion", "serde", "serde_json", "serde_path_to_error", "serde_urlencoded", "sync_wrapper", "tokio", "tower", "tower-layer", "tower-service", ] [[package]] name = "axum-core" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" dependencies = [ "async-trait", "bytes", "futures-util", "http", "http-body", "mime", "rustversion", "tower-layer", "tower-service", ] [[package]] name = "axum-tracing-opentelemetry" version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "164b95427e83b79583c7699a72b4a6b485a12bbdef5b5c054ee5ff2296d82f52" dependencies = [ "axum", "futures", "http", "opentelemetry 0.18.0", "tower", "tower-http 0.3.5", "tracing", "tracing-opentelemetry 0.18.0", ] [[package]] name = "backtrace" version = "0.3.68" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12" dependencies = [ "addr2line", "cc", "cfg-if", "libc", "miniz_oxide", "object", "rustc-demangle", ] [[package]] name = "base64" version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" [[package]] name = "base64ct" version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "bitflags" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" version = "2.3.3" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42" [[package]] name = "block-buffer" version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ "generic-array", ] [[package]] name = "bumpalo" version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" [[package]] name = "bytecount" version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c676a478f63e9fa2dd5368a42f28bba0d6c560b775f38583c8bbaa7fcd67c9c" [[package]] name = "byteorder" version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" [[package]] name = "bzip2" version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bdb116a6ef3f6c3698828873ad02c3014b3c85cadb88496095628e3ef1e347f8" dependencies = [ "bzip2-sys", "libc", ] [[package]] name = "bzip2-sys" version = "0.1.11+1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" dependencies = [ "cc", "libc", "pkg-config", ] [[package]] name = "cached-path" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "097968e38f1319207f057d0f4d76452e4f4f847a5de61c5215379f297fa034f3" dependencies = [ "flate2", "fs2", "glob", "indicatif 0.16.2", "log", "rand", "reqwest", "serde", "serde_json", "sha2", "tar", "tempfile", "thiserror", "zip", ] [[package]] name = "cassowary" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df8670b8c7b9dae1793364eafadf7239c40d669904660c5960d74cfd80b46a53" [[package]] name = "cc" version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" dependencies = [ "jobserver", ] [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "cipher" version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ "crypto-common", "inout", ] [[package]] name = "clap" version = "4.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1640e5cc7fb47dbb8338fd471b105e7ed6c3cb2aeb00c2e067127ffd3764a05d" dependencies = [ "clap_builder", "clap_derive", "once_cell", ] [[package]] name = "clap_builder" version = "4.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "98c59138d527eeaf9b53f35a77fcc1fad9d883116070c63d5de1c7dc7b00c72b" dependencies = [ "anstream", "anstyle", "clap_lex", "strsim", ] [[package]] name = "clap_derive" version = "4.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8cd2b2a819ad6eec39e8f1d6b53001af1e5469f8c177579cdaeb313115b825f" dependencies = [ "heck", "proc-macro2", "quote", 
"syn 2.0.25", ] [[package]] name = "clap_lex" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b" [[package]] name = "colorchoice" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "console" version = "0.15.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c926e00cc70edefdc64d3a5ff31cc65bb97a3460097762bd23afb4d8145fccf8" dependencies = [ "encode_unicode", "lazy_static", "libc", "unicode-width", "windows-sys 0.45.0", ] [[package]] name = "constant_time_eq" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] name = "core-foundation" version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" dependencies = [ "core-foundation-sys", "libc", ] [[package]] name = "core-foundation-sys" version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "cpufeatures" version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" dependencies = [ "libc", ] [[package]] name = "crc32fast" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ "cfg-if", ] [[package]] name = "crossbeam-channel" version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" dependencies = [ "cfg-if", "crossbeam-utils", ] [[package]] name = "crossbeam-deque" version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" dependencies = [ "cfg-if", "crossbeam-epoch", "crossbeam-utils", ] [[package]] name = "crossbeam-epoch" version = "0.9.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" dependencies = [ "autocfg", "cfg-if", "crossbeam-utils", "memoffset 0.9.0", "scopeguard", ] [[package]] name = "crossbeam-utils" version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" dependencies = [ "cfg-if", ] [[package]] name = "crossterm" version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a84cda67535339806297f1b331d6dd6320470d2a0fe65381e79ee9e156dd3d13" dependencies = [ "bitflags 1.3.2", "crossterm_winapi", "libc", "mio", "parking_lot", "signal-hook", "signal-hook-mio", "winapi", ] [[package]] name = "crossterm_winapi" version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "acdd7c62a3665c7f6830a51635d9ac9b23ed385797f70a83bb8bafe9c572ab2b" dependencies = [ "winapi", ] [[package]] name = "crypto-common" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", "typenum", ] [[package]] name = "ctrlc" version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a011bbe2c35ce9c1f143b7af6f94f29a167beb4cd1d29e6740ce836f723120e" dependencies = [ "nix", "windows-sys 0.48.0", ] [[package]] name = "darling" version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" dependencies = [ "darling_core", "darling_macro", ] [[package]] name = "darling_core" version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", "strsim", "syn 1.0.109", ] [[package]] name = "darling_macro" version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" dependencies = [ "darling_core", "quote", "syn 1.0.109", ] [[package]] name = "dashmap" version = "5.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6943ae99c34386c84a470c499d3414f66502a41340aa895406e0d2e4a207b91d" dependencies = [ "cfg-if", "hashbrown 0.14.0", "lock_api", "once_cell", "parking_lot_core", ] [[package]] name = "derive_builder" version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d67778784b508018359cbc8696edb3db78160bab2c2a28ba7f56ef6932997f8" dependencies = [ "derive_builder_macro", ] [[package]] name = "derive_builder_core" version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c11bdc11a0c47bc7d37d582b5285da6849c96681023680b906673c5707af7b0f" dependencies = [ "darling", "proc-macro2", "quote", "syn 1.0.109", ] [[package]] name = "derive_builder_macro" version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebcda35c7a396850a55ffeac740804b40ffec779b98fffbb1738f4033f0ee79e" dependencies = [ "derive_builder_core", "syn 1.0.109", ] [[package]] name = "digest" version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer", "crypto-common", "subtle", ] [[package]] name = "dirs" version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca3aa72a6f96ea37bbc5aa912f6788242832f75369bdfdadcb0e38423f100059" dependencies = [ "dirs-sys", ] [[package]] name = "dirs-sys" version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" dependencies = [ "libc", "redox_users", "winapi", ] [[package]] name = "easy-cast" version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4bd102ee8c418348759919b83b81cdbdc933ffe29740b903df448b4bafaa348e" dependencies = [ "libm", ] [[package]] name = "either" version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" [[package]] name = "encode_unicode" version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" [[package]] name = "encoding_rs" version = 
"0.8.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" dependencies = [ "cfg-if", ] [[package]] name = "equivalent" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" dependencies = [ "errno-dragonfly", "libc", "windows-sys 0.48.0", ] [[package]] name = "errno-dragonfly" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" dependencies = [ "cc", "libc", ] [[package]] name = "esaxx-rs" version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f748b253ceca9fed5f42f8b5ceb3851e93102199bc25b64b65369f76e5c0a35" dependencies = [ "cc", ] [[package]] name = "fastrand" version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" dependencies = [ "instant", ] [[package]] name = "filetime" version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5cbc844cecaee9d4443931972e1289c8ff485cb4cc2767cb03ca139ed6885153" dependencies = [ "cfg-if", "libc", "redox_syscall 0.2.16", "windows-sys 0.48.0", ] [[package]] name = "fixedbitset" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" dependencies = [ "crc32fast", "miniz_oxide", ] [[package]] name = "float-ord" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ce81f49ae8a0482e4c55ea62ebbd7e5a686af544c00b9d090bba3ff9be97b3d" [[package]] name = "float_eq" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28a80e3145d8ad11ba0995949bbcf48b9df2be62772b3d351ef017dff6ecb853" [[package]] name = "flume" version = "0.10.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1657b4441c3403d9f7b3409e47575237dac27b1b5726df654a6ecbf92f0f7577" dependencies = [ "futures-core", "futures-sink", "nanorand", "pin-project", "spin 0.9.8", ] [[package]] name = "fnv" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "foreign-types" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" dependencies = [ "foreign-types-shared", ] [[package]] name = "foreign-types-shared" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" dependencies = [ "percent-encoding", ] [[package]] name = 
"fs2" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" dependencies = [ "libc", "winapi", ] [[package]] name = "futures" version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" dependencies = [ "futures-channel", "futures-core", "futures-executor", "futures-io", "futures-sink", "futures-task", "futures-util", ] [[package]] name = "futures-channel" version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" dependencies = [ "futures-core", "futures-sink", ] [[package]] name = "futures-core" version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" [[package]] name = "futures-executor" version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" dependencies = [ "futures-core", "futures-task", "futures-util", ] [[package]] name = "futures-io" version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" [[package]] name = "futures-macro" version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", "syn 2.0.25", ] [[package]] name = "futures-sink" version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" [[package]] name = "futures-task" version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" [[package]] name = "futures-util" version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" dependencies = [ "futures-channel", "futures-core", "futures-io", "futures-macro", "futures-sink", "futures-task", "memchr", "pin-project-lite", "pin-utils", "slab", ] [[package]] name = "generational-arena" version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "877e94aff08e743b651baaea359664321055749b398adff8740a7399af7796e7" dependencies = [ "cfg-if", ] [[package]] name = "generic-array" version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", ] [[package]] name = "getrandom" version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if", "js-sys", "libc", "wasi", "wasm-bindgen", ] [[package]] name = "gimli" version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" [[package]] name = "glob" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "grpc-metadata" version = "0.1.0" dependencies = [ "opentelemetry 0.19.0", "tonic 0.9.2", "tracing", "tracing-opentelemetry 0.19.0", ] [[package]] name = "h2" version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97ec8491ebaf99c8eaa73058b045fe58073cd6be7f596ac993ced0b0a0c01049" dependencies = [ "bytes", "fnv", "futures-core", "futures-sink", "futures-util", "http", "indexmap 1.9.3", "slab", "tokio", "tokio-util", "tracing", ] [[package]] name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "hashbrown" version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ff8ae62cd3a9102e5637afc8452c55acf3844001bd5374e0b0bd7b6616c038" dependencies = [ "ahash", ] [[package]] name = "hashbrown" version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" [[package]] name = "heck" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" [[package]] name = "hmac" version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ "digest", ] [[package]] name = "hostname" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" dependencies = [ "libc", "match_cfg", "winapi", ] [[package]] name = "http" version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" dependencies = [ "bytes", "fnv", "itoa", ] [[package]] name = "http-body" version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", "http", "pin-project-lite", ] [[package]] name = "http-range-header" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" [[package]] name = "httparse" version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" version = "0.14.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" dependencies = [ "bytes", "futures-channel", "futures-core", "futures-util", "h2", "http", "http-body", "httparse", "httpdate", "itoa", "pin-project-lite", "socket2", "tokio", "tower-service", "tracing", "want", ] [[package]] name = "hyper-timeout" version = "0.4.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" dependencies = [ "hyper", "pin-project-lite", "tokio", "tokio-io-timeout", ] [[package]] name = "hyper-tls" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes", "hyper", "native-tls", "tokio", "tokio-native-tls", ] [[package]] name = "ident_case" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" dependencies = [ "unicode-bidi", "unicode-normalization", ] [[package]] name = "indexmap" version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", "hashbrown 0.12.3", ] [[package]] name = "indexmap" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" dependencies = [ "equivalent", "hashbrown 0.14.0", "serde", ] [[package]] name = "indicatif" version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7baab56125e25686df467fe470785512329883aab42696d661247aca2a2896e4" dependencies = [ "console", "lazy_static", "number_prefix 0.3.0", "regex", ] [[package]] name = "indicatif" version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d207dc617c7a380ab07ff572a6e52fa202a2a8f355860ac9c38e23f8196be1b" dependencies = [ "console", "lazy_static", "number_prefix 0.4.0", "regex", ] [[package]] name = "inout" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" dependencies = [ "generic-array", ] [[package]] name = "instant" version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ "cfg-if", ] [[package]] name = "io-lifetimes" version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ "hermit-abi", "libc", "windows-sys 0.48.0", ] [[package]] name = "ipnet" version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" [[package]] name = "is-terminal" version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi", "rustix 0.38.4", "windows-sys 0.48.0", ] [[package]] name = "itertools" version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f56a2d0bc861f9165be4eb3442afd3c236d8a98afd426f65d92324ae1091a484" dependencies = [ "either", ] [[package]] name = "itertools" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" dependencies = [ "either", 
] [[package]] name = "itertools" version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" dependencies = [ "either", ] [[package]] name = "itoa" version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62b02a5381cc465bd3041d84623d0fa3b66738b52b8e2fc3bab8ad63ab032f4a" [[package]] name = "jobserver" version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" dependencies = [ "libc", ] [[package]] name = "js-sys" version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" dependencies = [ "wasm-bindgen", ] [[package]] name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" version = "0.2.147" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" [[package]] name = "libm" version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" [[package]] name = "linux-raw-sys" version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09fc20d2ca12cb9f044c93e3bd6d32d523e6e2ec3db4f7b2939cd99026ecd3f0" [[package]] name = "lock_api" version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" dependencies = [ "autocfg", "scopeguard", ] [[package]] name = "log" version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" [[package]] name = "mach2" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d0d1830bcd151a6fc4aea1369af235b36c1528fe976b8ff678683c9995eade8" dependencies = [ "libc", ] [[package]] name = "macro_rules_attribute" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf0c9b980bf4f3a37fd7b1c066941dd1b1d0152ce6ee6e8fe8c49b9f6810d862" dependencies = [ "macro_rules_attribute-proc_macro", "paste", ] [[package]] name = "macro_rules_attribute-proc_macro" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "58093314a45e00c77d5c508f76e77c3396afbbc0d01506e7fae47b018bac2b1d" [[package]] name = "match_cfg" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" [[package]] name = "matchers" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" dependencies = [ "regex-automata 0.1.10", ] [[package]] name = "matchit" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40" [[package]] name = "memchr" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memoffset" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" dependencies = [ "autocfg", ] [[package]] name = "memoffset" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" dependencies = [ "autocfg", ] [[package]] name = "metrics" version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fde3af1a009ed76a778cb84fdef9e7dbbdf5775ae3e4cc1f434a6a307f6f76c5" dependencies = [ "ahash", "metrics-macros", "portable-atomic", ] [[package]] name = "metrics-exporter-prometheus" version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a4964177ddfdab1e3a2b37aec7cf320e14169abb0ed73999f558136409178d5" dependencies = [ "base64 0.21.2", "hyper", "indexmap 1.9.3", "ipnet", "metrics", "metrics-util", "quanta", "thiserror", "tokio", "tracing", ] [[package]] name = "metrics-macros" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" dependencies = [ "proc-macro2", "quote", "syn 2.0.25", ] [[package]] name = "metrics-util" version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4de2ed6e491ed114b40b732e4d1659a9d53992ebd87490c44a6ffe23739d973e" dependencies = [ "crossbeam-epoch", "crossbeam-utils", "hashbrown 0.13.1", "metrics", "num_cpus", "quanta", "sketches-ddsketch", ] [[package]] name = "mime" version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "mime_guess" version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" dependencies = [ "mime", "unicase", ] [[package]] name = "minimal-lexical" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" dependencies = [ "adler", ] [[package]] name = "mio" version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" dependencies = [ "libc", "log", "wasi", "windows-sys 0.48.0", ] [[package]] name = "monostate" version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f3f57a8802842f648026a33c3d2e3bb41bb309a35b1609bd7ef2b060b8b6b1b" dependencies = [ "monostate-impl", "serde", ] [[package]] name = "monostate-impl" version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e72f4d2e10fde62a0f2fcb4b44ccbf4f9899dcc30c9193449f8dfb9123d71377" dependencies = [ "proc-macro2", "quote", "syn 2.0.25", ] [[package]] name = "multimap" version = "0.8.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "muxado" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e92b89ac3127251efde6f5a9586e5aae99468d06fcf9f133b377f58d5ed66446" dependencies = [ "async-trait", "awaitdrop", "bitflags 1.3.2", "bytes", "futures", "pin-project", "rand", "thiserror", "tokio", "tokio-util", "tracing", ] [[package]] name = "nanorand" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" dependencies = [ "getrandom", ] [[package]] name = "native-tls" version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" dependencies = [ "lazy_static", "libc", "log", "openssl", "openssl-probe", "openssl-sys", "schannel", "security-framework", "security-framework-sys", "tempfile", ] [[package]] name = "ngrok" version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87e211f407b0a084f720823a00c956aeab2c15dfe7a61760d93227bbaf048026" dependencies = [ "arc-swap", "async-rustls", "async-trait", "awaitdrop", "axum", "base64 0.13.1", "bytes", "futures", "hostname", "hyper", "muxado", "once_cell", "parking_lot", "regex", "rustls-pemfile", "serde", "serde_json", "thiserror", "tokio", "tokio-retry", "tokio-util", "tracing", "windows-sys 0.45.0", ] [[package]] name = "nix" version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a" dependencies = [ "bitflags 1.3.2", "cfg-if", "libc", "memoffset 0.7.1", "pin-utils", "static_assertions", ] [[package]] name = "nohash-hasher" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" [[package]] name = "nom" version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" dependencies = [ "memchr", "minimal-lexical", ] [[package]] name = "ntapi" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8a3895c6391c39d7fe7ebc444a87eb2991b2a0bc718fdabd071eec617fc68e4" dependencies = [ "winapi", ] [[package]] name = "nu-ansi-term" version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" dependencies = [ "overload", "winapi", ] [[package]] name = "num-traits" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg", "libm", ] [[package]] name = "num_cpus" version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ "hermit-abi", "libc", ] [[package]] name = "num_threads" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" dependencies = [ "libc", ] [[package]] name = "number_prefix" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"17b02fc0ff9a9e4b35b3342880f48e896ebf69f2967921fe8646bf5b7125956a" [[package]] name = "number_prefix" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "object" version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1" dependencies = [ "memchr", ] [[package]] name = "once_cell" version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "onig" version = "6.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c4b31c8722ad9171c6d77d3557db078cab2bd50afcc9d09c8b315c59df8ca4f" dependencies = [ "bitflags 1.3.2", "libc", "once_cell", "onig_sys", ] [[package]] name = "onig_sys" version = "69.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b829e3d7e9cc74c7e315ee8edb185bf4190da5acde74afd7fc59c35b1f086e7" dependencies = [ "cc", "pkg-config", ] [[package]] name = "openssl" version = "0.10.55" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "345df152bc43501c5eb9e4654ff05f794effb78d4efe3d53abc158baddc0703d" dependencies = [ "bitflags 1.3.2", "cfg-if", "foreign-types", "libc", "once_cell", "openssl-macros", "openssl-sys", ] [[package]] name = "openssl-macros" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", "syn 2.0.25", ] [[package]] name = "openssl-probe" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" version = "0.9.90" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "374533b0e45f3a7ced10fcaeccca020e66656bc03dac384f852e4e5a7a8104a6" dependencies = [ "cc", "libc", "pkg-config", "vcpkg", ] [[package]] name = "opentelemetry" version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69d6c3d7288a106c0a363e4b0e8d308058d56902adefb16f4936f417ffef086e" dependencies = [ "opentelemetry_api 0.18.0", "opentelemetry_sdk 0.18.0", ] [[package]] name = "opentelemetry" version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f4b8347cc26099d3aeee044065ecc3ae11469796b4d65d065a23a584ed92a6f" dependencies = [ "opentelemetry_api 0.19.0", "opentelemetry_sdk 0.19.0", ] [[package]] name = "opentelemetry-otlp" version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8af72d59a4484654ea8eb183fea5ae4eb6a41d7ac3e3bae5f4d2a282a3a7d3ca" dependencies = [ "async-trait", "futures", "futures-util", "http", "opentelemetry 0.19.0", "opentelemetry-proto", "prost", "thiserror", "tokio", "tonic 0.8.3", ] [[package]] name = "opentelemetry-proto" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "045f8eea8c0fa19f7d48e7bc3128a39c2e5c533d5c61298c548dfefc1064474c" dependencies = [ "futures", "futures-util", "opentelemetry 0.19.0", "prost", "tonic 0.8.3", ] [[package]] name = "opentelemetry_api" version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"c24f96e21e7acc813c7a8394ee94978929db2bcc46cf6b5014fc612bf7760c22" dependencies = [ "fnv", "futures-channel", "futures-util", "indexmap 1.9.3", "js-sys", "once_cell", "pin-project-lite", "thiserror", ] [[package]] name = "opentelemetry_api" version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed41783a5bf567688eb38372f2b7a8530f5a607a4b49d38dd7573236c23ca7e2" dependencies = [ "fnv", "futures-channel", "futures-util", "indexmap 1.9.3", "once_cell", "pin-project-lite", "thiserror", "urlencoding", ] [[package]] name = "opentelemetry_sdk" version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ca41c4933371b61c2a2f214bf16931499af4ec90543604ec828f7a625c09113" dependencies = [ "async-trait", "crossbeam-channel", "dashmap", "fnv", "futures-channel", "futures-executor", "futures-util", "once_cell", "opentelemetry_api 0.18.0", "percent-encoding", "rand", "thiserror", "tokio", "tokio-stream", ] [[package]] name = "opentelemetry_sdk" version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b3a2a91fdbfdd4d212c0dcc2ab540de2c2bcbbd90be17de7a7daf8822d010c1" dependencies = [ "async-trait", "crossbeam-channel", "dashmap", "fnv", "futures-channel", "futures-executor", "futures-util", "once_cell", "opentelemetry_api 0.19.0", "percent-encoding", "rand", "thiserror", "tokio", "tokio-stream", ] [[package]] name = "overload" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = "papergrid" version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae7891b22598926e4398790c8fe6447930c72a67d36d983a49d6ce682ce83290" dependencies = [ "bytecount", "fnv", "unicode-width", ] [[package]] name = "parking_lot" version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", "parking_lot_core", ] [[package]] name = "parking_lot_core" version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" dependencies = [ "cfg-if", "libc", "redox_syscall 0.3.5", "smallvec", "windows-targets 0.48.1", ] [[package]] name = "password-hash" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7676374caaee8a325c9e7a2ae557f216c5563a171d6997b0ef8a65af35147700" dependencies = [ "base64ct", "rand_core", "subtle", ] [[package]] name = "paste" version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4b27ab7be369122c218afc2079489cdcb4b517c0a3fc386ff11e1fedfcc2b35" [[package]] name = "pbkdf2" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" dependencies = [ "digest", "hmac", "password-hash", "sha2", ] [[package]] name = "percent-encoding" version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "petgraph" version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4dd7d28ee937e54fe3080c91faa1c3a46c06de6252988a7f4592ba2310ef22a4" dependencies = [ "fixedbitset", "indexmap 1.9.3", ] [[package]] name = 
"pin-project" version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "030ad2bc4db10a8944cb0d837f158bdfec4d4a4873ab701a95046770d11f8842" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec2e072ecce94ec471b13398d5402c188e76ac03cf74dd1a975161b23a3f6d9c" dependencies = [ "proc-macro2", "quote", "syn 2.0.25", ] [[package]] name = "pin-project-lite" version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c40d25201921e5ff0c862a505c6557ea88568a4e3ace775ab55e93f2f4f9d57" [[package]] name = "pin-utils" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] name = "portable-atomic" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d220334a184db82b31b83f5ff093e3315280fb2b6bbc032022b2304a509aab7a" [[package]] name = "ppv-lite86" version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "prettyplease" version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" dependencies = [ "proc-macro2", "syn 1.0.109", ] [[package]] name = "proc-macro-error" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", "proc-macro2", "quote", "syn 1.0.109", "version_check", ] [[package]] name = "proc-macro-error-attr" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ "proc-macro2", "quote", "version_check", ] [[package]] name = "proc-macro2" version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78803b62cbf1f46fde80d7c0e803111524b9877184cfe7c3033659490ac7a7da" dependencies = [ "unicode-ident", ] [[package]] name = "prost" version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" dependencies = [ "bytes", "prost-derive", ] [[package]] name = "prost-build" version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" dependencies = [ "bytes", "heck", "itertools 0.10.5", "lazy_static", "log", "multimap", "petgraph", "prettyplease", "prost", "prost-types", "regex", "syn 1.0.109", "tempfile", "which", ] [[package]] name = "prost-derive" version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools 0.10.5", "proc-macro2", "quote", "syn 1.0.109", ] [[package]] name = "prost-types" version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" dependencies = [ "prost", ] [[package]] name = "quanta" version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a17e662a7a8291a865152364c20c7abc5e60486ab2001e8ec10b24862de0b9ab" dependencies = [ "crossbeam-utils", "libc", "mach2", "once_cell", "raw-cpuid", "wasi", "web-sys", "winapi", ] [[package]] name = "quote" version = "1.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105" dependencies = [ "proc-macro2", ] [[package]] name = "rand" version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha", "rand_core", ] [[package]] name = "rand_chacha" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", "rand_core", ] [[package]] name = "rand_core" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ "getrandom", ] [[package]] name = "ratatui" version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcc0d032bccba900ee32151ec0265667535c230169f5a011154cdcd984e16829" dependencies = [ "bitflags 1.3.2", "cassowary", "crossterm", "unicode-segmentation", "unicode-width", ] [[package]] name = "raw-cpuid" version = "10.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" dependencies = [ "bitflags 1.3.2", ] [[package]] name = "rayon" version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" dependencies = [ "either", "rayon-core", ] [[package]] name = "rayon-cond" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd1259362c9065e5ea39a789ef40b1e3fd934c94beb7b5ab3ac6629d3b5e7cb7" dependencies = [ "either", "itertools 0.8.2", "rayon", ] [[package]] name = "rayon-core" version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" dependencies = [ "crossbeam-channel", "crossbeam-deque", "crossbeam-utils", "num_cpus", ] [[package]] name = "redox_syscall" version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ "bitflags 1.3.2", ] [[package]] name = "redox_syscall" version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" dependencies = [ "bitflags 1.3.2", ] [[package]] name = "redox_users" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ "getrandom", "redox_syscall 0.2.16", "thiserror", ] [[package]] name = "regex" version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575" dependencies = [ 
"aho-corasick 1.0.2", "memchr", "regex-automata 0.3.3", "regex-syntax 0.7.4", ] [[package]] name = "regex-automata" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" dependencies = [ "regex-syntax 0.6.29", ] [[package]] name = "regex-automata" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39354c10dd07468c2e73926b23bb9c2caca74c5501e38a35da70406f1d923310" dependencies = [ "aho-corasick 1.0.2", "memchr", "regex-syntax 0.7.4", ] [[package]] name = "regex-syntax" version = "0.6.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" [[package]] name = "reqwest" version = "0.11.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55" dependencies = [ "base64 0.21.2", "bytes", "encoding_rs", "futures-core", "futures-util", "h2", "http", "http-body", "hyper", "hyper-tls", "ipnet", "js-sys", "log", "mime", "native-tls", "once_cell", "percent-encoding", "pin-project-lite", "serde", "serde_json", "serde_urlencoded", "tokio", "tokio-native-tls", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", "winreg", ] [[package]] name = "ring" version = "0.16.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" dependencies = [ "cc", "libc", "once_cell", "spin 0.5.2", "untrusted", "web-sys", "winapi", ] [[package]] name = "rust-embed" version = "6.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a36224c3276f8c4ebc8c20f158eca7ca4359c8db89991c4925132aaaf6702661" dependencies = [ "rust-embed-impl", "rust-embed-utils", "walkdir", ] [[package]] name = "rust-embed-impl" version = "6.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49b94b81e5b2c284684141a2fb9e2a31be90638caf040bf9afbc5a0416afe1ac" dependencies = [ "proc-macro2", "quote", "rust-embed-utils", "shellexpand", "syn 2.0.25", "walkdir", ] [[package]] name = "rust-embed-utils" version = "7.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d38ff6bf570dc3bb7100fce9f7b60c33fa71d80e88da3f2580df4ff2bdded74" dependencies = [ "sha2", "walkdir", ] [[package]] name = "rustc-demangle" version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustc_version" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ "semver", ] [[package]] name = "rustix" version = "0.37.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d69718bf81c6127a49dc64e44a742e8bb9213c0ff8869a22c308f84c1d4ab06" dependencies = [ "bitflags 1.3.2", "errno", "io-lifetimes", "libc", "linux-raw-sys 0.3.8", "windows-sys 0.48.0", ] [[package]] name = "rustix" version = "0.38.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0a962918ea88d644592894bc6dc55acc6c0956488adcebbfb6e273506b7fd6e5" dependencies = [ "bitflags 2.3.3", "errno", "libc", "linux-raw-sys 0.4.3", "windows-sys 0.48.0", ] [[package]] name = "rustls" version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" dependencies = [ "log", "ring", "sct", "webpki", ] [[package]] name = "rustls-pemfile" version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" dependencies = [ "base64 0.21.2", ] [[package]] name = "rustversion" version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc31bd9b61a32c31f9650d18add92aa83a49ba979c143eefd27fe7177b05bd5f" [[package]] name = "ryu" version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fe232bdf6be8c8de797b22184ee71118d63780ea42ac85b61d1baa6d3b782ae9" [[package]] name = "same-file" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" dependencies = [ "winapi-util", ] [[package]] name = "schannel" version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" dependencies = [ "windows-sys 0.48.0", ] [[package]] name = "scopeguard" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "sct" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ "ring", "untrusted", ] [[package]] name = "security-framework" version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" dependencies = [ "bitflags 1.3.2", "core-foundation", "core-foundation-sys", "libc", "security-framework-sys", ] [[package]] name = "security-framework-sys" version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7" dependencies = [ "core-foundation-sys", "libc", ] [[package]] name = "semver" version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" [[package]] name = "serde" version = "1.0.171" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30e27d1e4fd7659406c492fd6cfaf2066ba8773de45ca75e855590f856dc34a9" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" version = "1.0.171" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "389894603bd18c46fa56231694f8d827779c0951a667087194cf9de94ed24682" dependencies = [ "proc-macro2", "quote", "syn 2.0.25", ] [[package]] name = "serde_json" version = "1.0.102" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5062a995d481b2308b6064e9af76011f2921c35f97b0468811ed9f6cd91dfed" dependencies = [ "itoa", "ryu", "serde", ] [[package]] name = "serde_path_to_error" version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"8acc4422959dd87a76cb117c191dcbffc20467f06c9100b76721dab370f24d3a" dependencies = [ "itoa", "serde", ] [[package]] name = "serde_urlencoded" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", "itoa", "ryu", "serde", ] [[package]] name = "sha1" version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ "cfg-if", "cpufeatures", "digest", ] [[package]] name = "sha2" version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" dependencies = [ "cfg-if", "cpufeatures", "digest", ] [[package]] name = "sharded-slab" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" dependencies = [ "lazy_static", ] [[package]] name = "shellexpand" version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ccc8076840c4da029af4f87e4e8daeb0fca6b87bbb02e10cb60b791450e11e4" dependencies = [ "dirs", ] [[package]] name = "signal-hook" version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "732768f1176d21d09e076c23a93123d40bba92d50c4058da34d45c8de8e682b9" dependencies = [ "libc", "signal-hook-registry", ] [[package]] name = "signal-hook-mio" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29ad2e15f37ec9a6cc544097b78a1ec90001e9f71b81338ca39f430adaca99af" dependencies = [ "libc", "mio", "signal-hook", ] [[package]] name = "signal-hook-registry" version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" dependencies = [ "libc", ] [[package]] name = "sketches-ddsketch" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68a406c1882ed7f29cd5e248c9848a80e7cb6ae0fea82346d2746f2f941c07e1" [[package]] name = "slab" version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" dependencies = [ "autocfg", ] [[package]] name = "slotmap" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1e08e261d0e8f5c43123b7adf3e4ca1690d655377ac93a03b2c9d3e98de1342" dependencies = [ "version_check", ] [[package]] name = "smallvec" version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" [[package]] name = "socket2" version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" dependencies = [ "libc", "winapi", ] [[package]] name = "spin" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "spin" version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" dependencies = [ "lock_api", ] [[package]] name = "spm_precompiled" version = "0.1.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "5851699c4033c63636f7ea4cf7b7c1f1bf06d0cc03cfb42e711de5a5c46cf326" dependencies = [ "base64 0.13.1", "nom", "serde", "unicode-segmentation", ] [[package]] name = "static_assertions" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" [[package]] name = "strsim" version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "subtle" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" [[package]] name = "syn" version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "syn" version = "2.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "15e3fc8c0c74267e2df136e5e5fb656a464158aa57624053375eb9c8c6e25ae2" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "sync_wrapper" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] name = "sysinfo" version = "0.29.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "751e810399bba86e9326f5762b7f32ac5a085542df78da6a78d94e07d14d7c11" dependencies = [ "cfg-if", "core-foundation-sys", "libc", "ntapi", "once_cell", "winapi", ] [[package]] name = "tabled" version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ce69a5028cd9576063ec1f48edb2c75339fd835e6094ef3e05b3a079bf594a6" dependencies = [ "papergrid", "tabled_derive", "unicode-width", ] [[package]] name = "tabled_derive" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99f688a08b54f4f02f0a3c382aefdb7884d3d69609f785bd253dc033243e3fe4" dependencies = [ "heck", "proc-macro-error", "proc-macro2", "quote", "syn 1.0.109", ] [[package]] name = "tar" version = "0.4.39" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec96d2ffad078296368d46ff1cb309be1c23c513b4ab0e22a45de0185275ac96" dependencies = [ "filetime", "libc", "xattr", ] [[package]] name = "tempfile" version = "3.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "31c0432476357e58790aaa47a8efb0c5138f137343f3b5f23bd36a27e3b0a6d6" dependencies = [ "autocfg", "cfg-if", "fastrand", "redox_syscall 0.3.5", "rustix 0.37.23", "windows-sys 0.48.0", ] [[package]] name = "text-generation-benchmark" version = "0.9.3" dependencies = [ "average", "clap", "crossterm", "float-ord", "ratatui", "serde", "serde_json", "tabled", "text-generation-client", "thiserror", "tokenizers", "tokio", "tracing", "tracing-subscriber", ] [[package]] name = "text-generation-client" version = "0.9.3" dependencies = [ "futures", "grpc-metadata", "prost", "prost-build", "thiserror", "tokio", "tonic 0.9.2", "tonic-build", "tower", "tracing", ] [[package]] name = "text-generation-launcher" version = "0.9.3" dependencies = [ "clap", "ctrlc", "float_eq", "nix", "reqwest", "serde", "serde_json", "tracing", "tracing-subscriber", "vergen", ] [[package]] name = "text-generation-router" 
version = "0.9.3" dependencies = [ "async-stream", "axum", "axum-tracing-opentelemetry", "clap", "flume", "futures", "metrics", "metrics-exporter-prometheus", "ngrok", "nohash-hasher", "opentelemetry 0.19.0", "opentelemetry-otlp", "rand", "reqwest", "serde", "serde_json", "text-generation-client", "thiserror", "tokenizers", "tokio", "tower-http 0.4.1", "tracing", "tracing-opentelemetry 0.19.0", "tracing-subscriber", "utoipa", "utoipa-swagger-ui", "vergen", ] [[package]] name = "thiserror" version = "1.0.43" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a35fc5b8971143ca348fa6df4f024d4d55264f3468c71ad1c2f365b0a4d58c42" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" version = "1.0.43" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "463fe12d7993d3b327787537ce8dd4dfa058de32fc2b195ef3cde03dc4771e8f" dependencies = [ "proc-macro2", "quote", "syn 2.0.25", ] [[package]] name = "thread_local" version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ "cfg-if", "once_cell", ] [[package]] name = "time" version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59e399c068f43a5d116fedaf73b203fa4f9c519f17e2b34f63221d3792f81446" dependencies = [ "itoa", "libc", "num_threads", "serde", "time-core", "time-macros", ] [[package]] name = "time-core" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96ba15a897f3c86766b757e5ac7221554c6750054d74d5b28844fce5fb36a6c4" dependencies = [ "time-core", ] [[package]] name = "tinyvec" version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" dependencies = [ "tinyvec_macros", ] [[package]] name = "tinyvec_macros" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokenizers" version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5cf49017523bf0bc01c9966f172c5f120bbb7b96cccd1708772dd42e767fb9f5" dependencies = [ "aho-corasick 0.7.20", "cached-path", "clap", "derive_builder", "dirs", "esaxx-rs", "getrandom", "indicatif 0.15.0", "itertools 0.9.0", "lazy_static", "log", "macro_rules_attribute", "monostate", "onig", "paste", "rand", "rayon", "rayon-cond", "regex", "regex-syntax 0.6.29", "reqwest", "serde", "serde_json", "spm_precompiled", "thiserror", "unicode-normalization-alignments", "unicode-segmentation", "unicode_categories", ] [[package]] name = "tokio" version = "1.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "532826ff75199d5833b9d2c5fe410f29235e25704ee5f0ef599fb51c21f4a4da" dependencies = [ "autocfg", "backtrace", "bytes", "libc", "mio", "num_cpus", "parking_lot", "pin-project-lite", "signal-hook-registry", "socket2", "tokio-macros", "windows-sys 0.48.0", ] [[package]] name = "tokio-io-timeout" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" dependencies = [ "pin-project-lite", 
"tokio", ] [[package]] name = "tokio-macros" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", "syn 2.0.25", ] [[package]] name = "tokio-native-tls" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" dependencies = [ "native-tls", "tokio", ] [[package]] name = "tokio-retry" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f57eb36ecbe0fc510036adff84824dd3c24bb781e21bfa67b69d556aa85214f" dependencies = [ "pin-project", "rand", "tokio", ] [[package]] name = "tokio-stream" version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ "futures-core", "pin-project-lite", "tokio", ] [[package]] name = "tokio-util" version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" dependencies = [ "bytes", "futures-core", "futures-io", "futures-sink", "pin-project-lite", "tokio", "tracing", ] [[package]] name = "tonic" version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f219fad3b929bef19b1f86fbc0358d35daed8f2cac972037ac0dc10bbb8d5fb" dependencies = [ "async-stream", "async-trait", "axum", "base64 0.13.1", "bytes", "futures-core", "futures-util", "h2", "http", "http-body", "hyper", "hyper-timeout", "percent-encoding", "pin-project", "prost", "prost-derive", "tokio", "tokio-stream", "tokio-util", "tower", "tower-layer", "tower-service", "tracing", "tracing-futures", ] [[package]] name = "tonic" version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3082666a3a6433f7f511c7192923fa1fe07c69332d3c6a2e6bb040b569199d5a" dependencies = [ "async-trait", "axum", "base64 0.21.2", "bytes", "futures-core", "futures-util", "h2", "http", "http-body", "hyper", "hyper-timeout", "percent-encoding", "pin-project", "prost", "tokio", "tokio-stream", "tower", "tower-layer", "tower-service", "tracing", ] [[package]] name = "tonic-build" version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6fdaae4c2c638bb70fe42803a26fbd6fc6ac8c72f5c59f67ecc2a2dcabf4b07" dependencies = [ "prettyplease", "proc-macro2", "prost-build", "quote", "syn 1.0.109", ] [[package]] name = "tower" version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ "futures-core", "futures-util", "indexmap 1.9.3", "pin-project", "pin-project-lite", "rand", "slab", "tokio", "tokio-util", "tower-layer", "tower-service", "tracing", ] [[package]] name = "tower-http" version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858" dependencies = [ "bitflags 1.3.2", "bytes", "futures-core", "futures-util", "http", "http-body", "http-range-header", "pin-project-lite", "tower-layer", "tower-service", "tracing", ] [[package]] name = "tower-http" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8bd22a874a2d0b70452d5597b12c537331d49060824a95f49f108994f94aa4c" dependencies = [ 
"bitflags 2.3.3", "bytes", "futures-core", "futures-util", "http", "http-body", "http-range-header", "pin-project-lite", "tower-layer", "tower-service", ] [[package]] name = "tower-layer" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" [[package]] name = "tower-service" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" version = "0.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ "cfg-if", "log", "pin-project-lite", "tracing-attributes", "tracing-core", ] [[package]] name = "tracing-attributes" version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", "syn 2.0.25", ] [[package]] name = "tracing-core" version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" dependencies = [ "once_cell", "valuable", ] [[package]] name = "tracing-futures" version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" dependencies = [ "pin-project", "tracing", ] [[package]] name = "tracing-log" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" dependencies = [ "lazy_static", "log", "tracing-core", ] [[package]] name = "tracing-opentelemetry" version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21ebb87a95ea13271332df069020513ab70bdb5637ca42d6e492dc3bbbad48de" dependencies = [ "once_cell", "opentelemetry 0.18.0", "tracing", "tracing-core", "tracing-log", "tracing-subscriber", ] [[package]] name = "tracing-opentelemetry" version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00a39dcf9bfc1742fa4d6215253b33a6e474be78275884c216fc2a06267b3600" dependencies = [ "once_cell", "opentelemetry 0.19.0", "tracing", "tracing-core", "tracing-log", "tracing-subscriber", ] [[package]] name = "tracing-serde" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" dependencies = [ "serde", "tracing-core", ] [[package]] name = "tracing-subscriber" version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" dependencies = [ "matchers", "nu-ansi-term", "once_cell", "regex", "serde", "serde_json", "sharded-slab", "smallvec", "thread_local", "tracing", "tracing-core", "tracing-log", "tracing-serde", ] [[package]] name = "try-lock" version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "typenum" version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" [[package]] name = "unicase" version = "2.6.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" dependencies = [ "version_check", ] [[package]] name = "unicode-bidi" version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22049a19f4a68748a168c0fc439f9516686aa045927ff767eca0a85101fb6e73" [[package]] name = "unicode-normalization" version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" dependencies = [ "tinyvec", ] [[package]] name = "unicode-normalization-alignments" version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43f613e4fa046e69818dd287fdc4bc78175ff20331479dab6e1b0f98d57062de" dependencies = [ "smallvec", ] [[package]] name = "unicode-segmentation" version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" [[package]] name = "unicode-width" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" [[package]] name = "unicode_categories" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" [[package]] name = "untrusted" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" dependencies = [ "form_urlencoded", "idna", "percent-encoding", ] [[package]] name = "urlencoding" version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8db7427f936968176eaa7cdf81b7f98b980b18495ec28f1b5791ac3bfe3eea9" [[package]] name = "utf8parse" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "utoipa" version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "520434cac5c98120177d5cc15be032703f6dca7d5ef82e725c798113b375000a" dependencies = [ "indexmap 2.0.0", "serde", "serde_json", "utoipa-gen", ] [[package]] name = "utoipa-gen" version = "3.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e22e88a487b6e0374533871b79b1f5ded05671bd0936bd547eb42f82fb9060d" dependencies = [ "proc-macro-error", "proc-macro2", "quote", "regex", "syn 2.0.25", ] [[package]] name = "utoipa-swagger-ui" version = "3.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4602d7100d3cfd8a086f30494e68532402ab662fa366c9d201d677e33cee138d" dependencies = [ "axum", "mime_guess", "regex", "rust-embed", "serde", "serde_json", "utoipa", "zip", ] [[package]] name = "valuable" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" [[package]] name = "vcpkg" version = 
"0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "vergen" version = "8.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbc5ad0d9d26b2c49a5ab7da76c3e79d3ee37e7821799f8223fcb8f2f391a2e7" dependencies = [ "anyhow", "rustc_version", "rustversion", "sysinfo", "time", ] [[package]] name = "version_check" version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "walkdir" version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" dependencies = [ "same-file", "winapi-util", ] [[package]] name = "want" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" dependencies = [ "try-lock", ] [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" dependencies = [ "cfg-if", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", "syn 2.0.25", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" dependencies = [ "cfg-if", "js-sys", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ "quote", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", "syn 2.0.25", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" [[package]] name = "web-sys" version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" dependencies = [ "js-sys", "wasm-bindgen", ] [[package]] name = "webpki" version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" dependencies = [ "ring", "untrusted", ] [[package]] name = "which" version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" 
dependencies = [ "either", "libc", "once_cell", ] [[package]] name = "winapi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" dependencies = [ "winapi", ] [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-sys" version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" dependencies = [ "windows-targets 0.42.2", ] [[package]] name = "windows-sys" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ "windows-targets 0.48.1", ] [[package]] name = "windows-targets" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" dependencies = [ "windows_aarch64_gnullvm 0.42.2", "windows_aarch64_msvc 0.42.2", "windows_i686_gnu 0.42.2", "windows_i686_msvc 0.42.2", "windows_x86_64_gnu 0.42.2", "windows_x86_64_gnullvm 0.42.2", "windows_x86_64_msvc 0.42.2", ] [[package]] name = "windows-targets" version = "0.48.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f" dependencies = [ "windows_aarch64_gnullvm 0.48.0", "windows_aarch64_msvc 0.48.0", "windows_i686_gnu 0.48.0", "windows_i686_msvc 0.48.0", "windows_x86_64_gnu 0.48.0", "windows_x86_64_gnullvm 0.48.0", "windows_x86_64_msvc 0.48.0", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_gnullvm" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" [[package]] name = "windows_aarch64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_aarch64_msvc" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" [[package]] name = "windows_i686_gnu" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_gnu" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" [[package]] name = 
"windows_i686_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_i686_msvc" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" [[package]] name = "windows_x86_64_gnu" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnu" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_gnullvm" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" [[package]] name = "windows_x86_64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "windows_x86_64_msvc" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] name = "winreg" version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" dependencies = [ "winapi", ] [[package]] name = "xattr" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d1526bbe5aaeb5eb06885f4d987bcdfa5e23187055de9b83fe00156a821fabc" dependencies = [ "libc", ] [[package]] name = "zip" version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "760394e246e4c28189f19d488c058bf16f564016aefac5d32bb1f3b51d5e9261" dependencies = [ "aes", "byteorder", "bzip2", "constant_time_eq", "crc32fast", "crossbeam-utils", "flate2", "hmac", "pbkdf2", "sha1", "time", "zstd", ] [[package]] name = "zstd" version = "0.11.2+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" version = "5.0.2+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d2a5585e04f9eea4b2a3d1eca508c4dee9592a89ef6f450c11719da0726f4db" dependencies = [ "libc", "zstd-sys", ] [[package]] name = "zstd-sys" version = "2.0.8+zstd.1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5556e6ee25d32df2586c098bbfa278803692a20d0ab9565e049480d52707ec8c" dependencies = [ "cc", "libc", "pkg-config", ]
0
hf_public_repos
hf_public_repos/text-generation-inference/rust-toolchain.toml
[toolchain]
channel = "1.70.0"
components = ["rustfmt", "clippy"]
0
hf_public_repos
hf_public_repos/text-generation-inference/Makefile
install-server:
	cd server && make install

install-custom-kernels:
	if [ "$$BUILD_EXTENSIONS" = "True" ]; then cd server/custom_kernels && python setup.py install; else echo "Custom kernels are disabled, you need to set the BUILD_EXTENSIONS environment variable to 'True' in order to build them. (Please read the docs, kernels might not work on all hardware)"; fi

install-integration-tests:
	cd integration-tests && pip install -r requirements.txt
	cd clients/python && pip install .

install-router:
	cd router && cargo install --path .

install-launcher:
	cd launcher && cargo install --path .

install-benchmark:
	cd benchmark && cargo install --path .

install: install-server install-router install-launcher install-custom-kernels

server-dev:
	cd server && make run-dev

router-dev:
	cd router && cargo run -- --port 8080

rust-tests: install-router install-launcher
	cargo test

integration-tests: install-integration-tests
	pytest -s -vv -m "not private" integration-tests

update-integration-tests: install-integration-tests
	pytest -s -vv --snapshot-update integration-tests

python-server-tests:
	HF_HUB_ENABLE_HF_TRANSFER=1 pytest -s -vv -m "not private" server/tests

python-client-tests:
	pytest clients/python/tests

python-tests: python-server-tests python-client-tests

run-falcon-7b-instruct:
	text-generation-launcher --model-id tiiuae/falcon-7b-instruct --port 8080

run-falcon-7b-instruct-quantize:
	text-generation-launcher --model-id tiiuae/falcon-7b-instruct --quantize bitsandbytes --port 8080

clean:
	rm -rf target aml
0
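The Makefile above also provides convenience targets for running a server locally (for example `run-falcon-7b-instruct`, which listens on port 8080). As a rough illustration, such a server can then be queried with the Python client shipped in clients/python. This is a minimal sketch, assuming the server is reachable on localhost:8080 and the `text-generation` package is installed:

# Minimal sketch: query a locally running text-generation-inference server.
# Assumes the server was started on port 8080, e.g. via `make run-falcon-7b-instruct`,
# and that the `text-generation` client from clients/python is installed.
from text_generation import Client

client = Client("http://127.0.0.1:8080")

# Single, non-streaming generation request.
response = client.generate("What is Deep Learning?", max_new_tokens=20)
print(response.generated_text)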
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/launcher/Cargo.toml
[package]
name = "text-generation-launcher"
description = "Text Generation Launcher"
version.workspace = true
edition.workspace = true
authors.workspace = true
homepage.workspace = true

[dependencies]
clap = { version = "4.1.4", features = ["derive", "env"] }
ctrlc = { version = "3.2.5", features = ["termination"] }
nix = "0.26.2"
serde = { version = "1.0.152", features = ["derive"] }
serde_json = "1.0.93"
tracing = "0.1.37"
tracing-subscriber = { version = "0.3.16", features = ["json", "env-filter"] }

[dev-dependencies]
float_eq = "1.0.1"
reqwest = { version = "0.11.14", features = ["blocking", "json"] }

[build-dependencies]
vergen = { version = "8.0.0", features = ["build", "cargo", "git", "gitcl", "rustc", "si"] }
0
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/launcher/build.rs
use std::error::Error;
use vergen::EmitBuilder;

fn main() -> Result<(), Box<dyn Error>> {
    // Emit cargo and rustc compile time values
    EmitBuilder::builder().all_cargo().all_rustc().emit()?;

    // Try to get the git sha from the local git repository
    if EmitBuilder::builder()
        .fail_on_error()
        .git_sha(false)
        .emit()
        .is_err()
    {
        // Unable to get the git sha
        if let Ok(sha) = std::env::var("GIT_SHA") {
            // Set it from an env var
            println!("cargo:rustc-env=VERGEN_GIT_SHA={sha}");
        }
    }

    // Set docker label if present
    if let Ok(label) = std::env::var("DOCKER_LABEL") {
        // Set it from an env var
        println!("cargo:rustc-env=DOCKER_LABEL={label}");
    }

    Ok(())
}
0
hf_public_repos/text-generation-inference/launcher
hf_public_repos/text-generation-inference/launcher/src/main.rs
use clap::{Parser, ValueEnum}; use nix::sys::signal::{self, Signal}; use nix::unistd::Pid; use serde::Deserialize; use std::env; use std::ffi::OsString; use std::io::{BufRead, BufReader, Lines, Read}; use std::os::unix::process::{CommandExt, ExitStatusExt}; use std::path::Path; use std::process::{Child, Command, ExitStatus, Stdio}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::TryRecvError; use std::sync::{mpsc, Arc}; use std::thread; use std::thread::sleep; use std::time::{Duration, Instant}; use std::{fs, io}; use tracing_subscriber::EnvFilter; mod env_runtime; #[derive(Clone, Copy, Debug, ValueEnum)] enum Quantization { Bitsandbytes, Gptq, } impl std::fmt::Display for Quantization { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // To keep in track with `server`. match self { Quantization::Bitsandbytes => { write!(f, "bitsandbytes") } Quantization::Gptq => { write!(f, "gptq") } } } } #[derive(Clone, Copy, Debug, ValueEnum)] enum Dtype { Float16, #[clap(name = "bfloat16")] BFloat16, } impl std::fmt::Display for Dtype { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // To keep in track with `server`. match self { Dtype::Float16 => { write!(f, "float16") } Dtype::BFloat16 => { write!(f, "bfloat16") } } } } /// App Configuration #[derive(Parser, Debug)] #[clap(author, version, about, long_about = None)] struct Args { /// The name of the model to load. /// Can be a MODEL_ID as listed on <https://hf.co/models> like /// `gpt2` or `OpenAssistant/oasst-sft-1-pythia-12b`. /// Or it can be a local directory containing the necessary files /// as saved by `save_pretrained(...)` methods of transformers #[clap(default_value = "bigscience/bloom-560m", long, env)] model_id: String, /// The actual revision of the model if you're referring to a model /// on the hub. You can use a specific commit id or a branch like `refs/pr/2`. #[clap(long, env)] revision: Option<String>, /// The number of tokenizer workers used for payload validation and truncation inside the /// router. #[clap(default_value = "2", long, env)] validation_workers: usize, /// Whether to shard the model across multiple GPUs /// By default text-generation-inference will use all available GPUs to run /// the model. Setting it to `false` deactivates `num_shard`. #[clap(long, env)] sharded: Option<bool>, /// The number of shards to use if you don't want to use all GPUs on a given machine. /// You can use `CUDA_VISIBLE_DEVICES=0,1 text-generation-launcher... --num_shard 2` /// and `CUDA_VISIBLE_DEVICES=2,3 text-generation-launcher... --num_shard 2` to /// launch 2 copies with 2 shard each on a given machine with 4 GPUs for instance. #[clap(long, env)] num_shard: Option<usize>, /// Whether you want the model to be quantized. This will use `bitsandbytes` for /// quantization on the fly, or `gptq`. #[clap(long, env, value_enum)] quantize: Option<Quantization>, /// The dtype to be forced upon the model. This option cannot be used with `--quantize`. #[clap(long, env, value_enum)] dtype: Option<Dtype>, /// Whether you want to execute hub modelling code. Explicitly passing a `revision` is /// encouraged when loading a model with custom code to ensure no malicious code has been /// contributed in a newer revision. #[clap(long, env, value_enum)] trust_remote_code: bool, /// The maximum amount of concurrent requests for this particular deployment. 
/// Having a low limit will refuse clients requests instead of having them /// wait for too long and is usually good to handle backpressure correctly. #[clap(default_value = "128", long, env)] max_concurrent_requests: usize, /// This is the maximum allowed value for clients to set `best_of`. /// Best of makes `n` generations at the same time, and return the best /// in terms of overall log probability over the entire generated sequence #[clap(default_value = "2", long, env)] max_best_of: usize, /// This is the maximum allowed value for clients to set `stop_sequences`. /// Stop sequences are used to allow the model to stop on more than just /// the EOS token, and enable more complex "prompting" where users can preprompt /// the model in a specific way and define their "own" stop token aligned with /// their prompt. #[clap(default_value = "4", long, env)] max_stop_sequences: usize, /// This is the maximum allowed input length (expressed in number of tokens) /// for users. The larger this value, the longer prompt users can send which /// can impact the overall memory required to handle the load. /// Please note that some models have a finite range of sequence they can handle. #[clap(default_value = "1024", long, env)] max_input_length: usize, /// This is the most important value to set as it defines the "memory budget" /// of running clients requests. /// Clients will send input sequences and ask to generate `max_new_tokens` /// on top. with a value of `1512` users can send either a prompt of /// `1000` and ask for `512` new tokens, or send a prompt of `1` and ask for /// `1511` max_new_tokens. /// The larger this value, the larger amount each request will be in your RAM /// and the less effective batching can be. #[clap(default_value = "2048", long, env)] max_total_tokens: usize, /// This represents the ratio of waiting queries vs running queries where /// you want to start considering pausing the running queries to include the waiting /// ones into the same batch. /// `waiting_served_ratio=1.2` Means when 12 queries are waiting and there's /// only 10 queries left in the current batch we check if we can fit those 12 /// waiting queries into the batching strategy, and if yes, then batching happens /// delaying the 10 running queries by a `prefill` run. /// /// This setting is only applied if there is room in the batch /// as defined by `max_batch_total_tokens`. #[clap(default_value = "1.2", long, env)] waiting_served_ratio: f32, /// Limits the number of tokens for the prefill operation. /// Since this operation take the most memory and is compute bound, it is interesting /// to limit the number of requests that can be sent. #[clap(default_value = "4096", long, env)] max_batch_prefill_tokens: u32, /// **IMPORTANT** This is one critical control to allow maximum usage /// of the available hardware. /// /// This represents the total amount of potential tokens within a batch. /// When using padding (not recommended) this would be equivalent of /// `batch_size` * `max_total_tokens`. /// /// However in the non-padded (flash attention) version this can be much finer. /// /// For `max_batch_total_tokens=1000`, you could fit `10` queries of `total_tokens=100` /// or a single query of `1000` tokens. /// /// Overall this number should be the largest possible amount that fits the /// remaining memory (after the model is loaded). 
Since the actual memory overhead /// depends on other parameters like if you're using quantization, flash attention /// or the model implementation, text-generation-inference cannot infer this number /// automatically. #[clap(long, env)] max_batch_total_tokens: Option<u32>, /// This setting defines how many tokens can be passed before forcing the waiting /// queries to be put on the batch (if the size of the batch allows for it). /// New queries require 1 `prefill` forward, which is different from `decode` /// and therefore you need to pause the running batch in order to run `prefill` /// to create the correct values for the waiting queries to be able to join the batch. /// /// With a value too small, queries will always "steal" the compute to run `prefill` /// and running queries will be delayed by a lot. /// /// With a value too big, waiting queries could wait for a very long time /// before being allowed a slot in the running batch. If your server is busy /// that means that requests that could run in ~2s on an empty server could /// end up running in ~20s because the query had to wait for 18s. /// /// This number is expressed in number of tokens to make it a bit more /// "model" agnostic, but what should really matter is the overall latency /// for end users. #[clap(default_value = "20", long, env)] max_waiting_tokens: usize, /// The IP address to listen on #[clap(default_value = "0.0.0.0", long, env)] hostname: String, /// The port to listen on. #[clap(default_value = "3000", long, short, env)] port: u16, /// The name of the socket for gRPC communication between the webserver /// and the shards. #[clap(default_value = "/tmp/text-generation-server", long, env)] shard_uds_path: String, /// The address the master shard will listen on. (setting used by torch distributed) #[clap(default_value = "localhost", long, env)] master_addr: String, /// The address the master port will listen on. (setting used by torch distributed) #[clap(default_value = "29500", long, env)] master_port: usize, /// The location of the huggingface hub cache. /// Used to override the location if you want to provide a mounted disk for instance #[clap(long, env)] huggingface_hub_cache: Option<String>, /// The location of the huggingface hub cache. /// Used to override the location if you want to provide a mounted disk for instance #[clap(long, env)] weights_cache_override: Option<String>, /// For some models (like bloom), text-generation-inference implemented custom /// cuda kernels to speed up inference. Those kernels were only tested on A100. /// Use this flag to disable them if you're running on different hardware and /// encounter issues. #[clap(long, env)] disable_custom_kernels: bool, /// Limit the CUDA available memory. /// The allowed value equals the total visible memory multiplied by cuda-memory-fraction. 
#[clap(default_value = "1.0", long, env)] cuda_memory_fraction: f32, /// Outputs the logs in JSON format (useful for telemetry) #[clap(long, env)] json_output: bool, #[clap(long, env)] otlp_endpoint: Option<String>, #[clap(long, env)] cors_allow_origin: Vec<String>, #[clap(long, env)] watermark_gamma: Option<f32>, #[clap(long, env)] watermark_delta: Option<f32>, /// Enable ngrok tunneling #[clap(long, env)] ngrok: bool, /// ngrok authentication token #[clap(long, env)] ngrok_authtoken: Option<String>, /// ngrok edge #[clap(long, env)] ngrok_edge: Option<String>, /// Display a lot of information about your runtime environment #[clap(long, short, action)] env: bool, } #[derive(Debug)] enum ShardStatus { Ready, Failed(usize), } #[allow(clippy::too_many_arguments)] fn shard_manager( model_id: String, revision: Option<String>, quantize: Option<Quantization>, dtype: Option<Dtype>, trust_remote_code: bool, uds_path: String, rank: usize, world_size: usize, master_addr: String, master_port: usize, huggingface_hub_cache: Option<String>, weights_cache_override: Option<String>, disable_custom_kernels: bool, watermark_gamma: Option<f32>, watermark_delta: Option<f32>, cuda_memory_fraction: f32, otlp_endpoint: Option<String>, status_sender: mpsc::Sender<ShardStatus>, shutdown: Arc<AtomicBool>, _shutdown_sender: mpsc::Sender<()>, ) { // Enter shard-manager tracing span let _span = tracing::span!(tracing::Level::INFO, "shard-manager", rank = rank).entered(); // Get UDS path let uds_string = format!("{uds_path}-{rank}"); let uds = Path::new(&uds_string); // Clean previous runs if uds.exists() { fs::remove_file(uds).unwrap(); } // Process args let mut shard_args = vec![ "serve".to_string(), model_id, "--uds-path".to_string(), uds_path, "--logger-level".to_string(), "INFO".to_string(), "--json-output".to_string(), ]; // Activate trust remote code if trust_remote_code { shard_args.push("--trust-remote-code".to_string()); } // Activate tensor parallelism if world_size > 1 { shard_args.push("--sharded".to_string()); } if let Some(quantize) = quantize { shard_args.push("--quantize".to_string()); shard_args.push(quantize.to_string()) } if let Some(dtype) = dtype { shard_args.push("--dtype".to_string()); shard_args.push(dtype.to_string()) } // Model optional revision if let Some(revision) = revision { shard_args.push("--revision".to_string()); shard_args.push(revision) } // OpenTelemetry if let Some(otlp_endpoint) = otlp_endpoint { shard_args.push("--otlp-endpoint".to_string()); shard_args.push(otlp_endpoint); } // Copy current process env let mut envs: Vec<(OsString, OsString)> = env::vars_os().collect(); // Torch Distributed Env vars envs.push(("RANK".into(), rank.to_string().into())); envs.push(("WORLD_SIZE".into(), world_size.to_string().into())); envs.push(("MASTER_ADDR".into(), master_addr.into())); envs.push(("MASTER_PORT".into(), master_port.to_string().into())); envs.push(("NCCL_ASYNC_ERROR_HANDLING".into(), "1".into())); // CUDA memory fraction envs.push(( "CUDA_MEMORY_FRACTION".into(), cuda_memory_fraction.to_string().into(), )); // Safetensors load fast envs.push(("SAFETENSORS_FAST_GPU".into(), "1".into())); // Enable hf transfer for insane download speeds let enable_hf_transfer = env::var("HF_HUB_ENABLE_HF_TRANSFER").unwrap_or("1".to_string()); envs.push(( "HF_HUB_ENABLE_HF_TRANSFER".into(), enable_hf_transfer.into(), )); // Parse Inference API token if let Ok(api_token) = env::var("HF_API_TOKEN") { envs.push(("HUGGING_FACE_HUB_TOKEN".into(), api_token.into())) }; // If huggingface_hub_cache is some, 
pass it to the shard // Useful when running inside a docker container if let Some(huggingface_hub_cache) = huggingface_hub_cache { envs.push(("HUGGINGFACE_HUB_CACHE".into(), huggingface_hub_cache.into())); }; // If weights_cache_override is some, pass it to the shard // Useful when running inside a HuggingFace Inference Endpoint if let Some(weights_cache_override) = weights_cache_override { envs.push(( "WEIGHTS_CACHE_OVERRIDE".into(), weights_cache_override.into(), )); }; // If disable_custom_kernels is true, pass it to the shard as an env var if disable_custom_kernels { envs.push(("DISABLE_CUSTOM_KERNELS".into(), "True".into())) } // Watermark Gamma if let Some(watermark_gamma) = watermark_gamma { envs.push(("WATERMARK_GAMMA".into(), watermark_gamma.to_string().into())) } // Watermark Delta if let Some(watermark_delta) = watermark_delta { envs.push(("WATERMARK_DELTA".into(), watermark_delta.to_string().into())) } // Start process tracing::info!("Starting shard"); let mut p = match Command::new("text-generation-server") .args(shard_args) .envs(envs) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .process_group(0) .spawn() { Ok(p) => p, Err(err) => { if err.kind() == io::ErrorKind::NotFound { tracing::error!("text-generation-server not found in PATH"); tracing::error!("Please install it with `make install-server`") } { tracing::error!("{}", err); } status_sender.send(ShardStatus::Failed(rank)).unwrap(); return; } }; // Redirect STDOUT to the console let shard_stdout_reader = BufReader::new(p.stdout.take().unwrap()); let shard_stderr_reader = BufReader::new(p.stderr.take().unwrap()); //stdout tracing thread thread::spawn(move || { log_lines(shard_stdout_reader.lines()); }); let mut ready = false; let start_time = Instant::now(); let mut wait_time = Instant::now(); loop { // Process exited if let Some(exit_status) = p.try_wait().unwrap() { // We read stderr in another thread as it seems that lines() can block in some cases let (err_sender, err_receiver) = mpsc::channel(); thread::spawn(move || { for line in shard_stderr_reader.lines().flatten() { err_sender.send(line).unwrap_or(()); } }); let mut err = String::new(); while let Ok(line) = err_receiver.recv_timeout(Duration::from_millis(10)) { err = err + "\n" + &line; } tracing::error!("Shard complete standard error output:\n{err}"); if let Some(signal) = exit_status.signal() { tracing::error!("Shard process was signaled to shutdown with signal {signal}"); } status_sender.send(ShardStatus::Failed(rank)).unwrap(); return; } // We received a shutdown signal if shutdown.load(Ordering::SeqCst) { p.kill().unwrap(); let _ = p.wait(); tracing::info!("Shard terminated"); return; } // Shard is ready if uds.exists() && !ready { tracing::info!("Shard ready in {:?}", start_time.elapsed()); status_sender.send(ShardStatus::Ready).unwrap(); ready = true; } else if !ready && wait_time.elapsed() > Duration::from_secs(10) { tracing::info!("Waiting for shard to be ready..."); wait_time = Instant::now(); } sleep(Duration::from_millis(100)); } } fn shutdown_shards(shutdown: Arc<AtomicBool>, shutdown_receiver: &mpsc::Receiver<()>) { tracing::info!("Shutting down shards"); // Update shutdown value to true // This will be picked up by the shard manager shutdown.store(true, Ordering::SeqCst); // Wait for shards to shutdown // This will block till all shutdown_sender are dropped let _ = shutdown_receiver.recv(); } fn num_cuda_devices() -> Option<usize> { let devices = match env::var("CUDA_VISIBLE_DEVICES") { Ok(devices) => devices, Err(_) => 
env::var("NVIDIA_VISIBLE_DEVICES").ok()?, }; let n_devices = devices.split(',').count(); Some(n_devices) } #[derive(Deserialize)] #[serde(rename_all = "UPPERCASE")] enum PythonLogLevelEnum { Trace, Debug, Info, Success, Warning, Error, Critical, } #[derive(Deserialize)] struct PythonLogLevel { name: PythonLogLevelEnum, } #[derive(Deserialize)] struct PythonLogRecord { level: PythonLogLevel, } #[derive(Deserialize)] struct PythonLogMessage { text: String, record: PythonLogRecord, } impl PythonLogMessage { fn trace(&self) { match self.record.level.name { PythonLogLevelEnum::Trace => tracing::trace!("{}", self.text), PythonLogLevelEnum::Debug => tracing::debug!("{}", self.text), PythonLogLevelEnum::Info => tracing::info!("{}", self.text), PythonLogLevelEnum::Success => tracing::info!("{}", self.text), PythonLogLevelEnum::Warning => tracing::warn!("{}", self.text), PythonLogLevelEnum::Error => tracing::error!("{}", self.text), PythonLogLevelEnum::Critical => tracing::error!("{}", self.text), } } } impl TryFrom<&String> for PythonLogMessage { type Error = serde_json::Error; fn try_from(value: &String) -> Result<Self, Self::Error> { serde_json::from_str::<Self>(value) } } fn log_lines<S: Sized + BufRead>(lines: Lines<S>) { for line in lines.flatten() { match PythonLogMessage::try_from(&line) { Ok(log) => log.trace(), Err(_) => tracing::debug!("{line}"), } } } fn find_num_shards( sharded: Option<bool>, num_shard: Option<usize>, ) -> Result<usize, LauncherError> { // get the number of shards given `sharded` and `num_shard` let num_shard = match (sharded, num_shard) { (Some(true), None) => { // try to default to the number of available GPUs tracing::info!("Parsing num_shard from CUDA_VISIBLE_DEVICES/NVIDIA_VISIBLE_DEVICES"); let n_devices = num_cuda_devices() .expect("--num-shard and CUDA_VISIBLE_DEVICES/NVIDIA_VISIBLE_DEVICES are not set"); if n_devices <= 1 { return Err(LauncherError::NotEnoughCUDADevices(format!( "`sharded` is true but only found {n_devices} CUDA devices" ))); } n_devices } (Some(true), Some(num_shard)) => { // we can't have only one shard while sharded if num_shard <= 1 { return Err(LauncherError::ArgumentValidation( "`sharded` is true but `num_shard` <= 1".to_string(), )); } num_shard } (Some(false), Some(num_shard)) => num_shard, (Some(false), None) => 1, (None, None) => num_cuda_devices().unwrap_or(1), (None, Some(num_shard)) => num_shard, }; if num_shard < 1 { return Err(LauncherError::ArgumentValidation( "`num_shard` cannot be < 1".to_string(), )); } Ok(num_shard) } #[derive(Debug)] enum LauncherError { ArgumentValidation(String), NotEnoughCUDADevices(String), DownloadError, ShardCannotStart, ShardDisconnected, ShardFailed, WebserverFailed, WebserverCannotStart, } fn download_convert_model(args: &Args, running: Arc<AtomicBool>) -> Result<(), LauncherError> { // Enter download tracing span let _span = tracing::span!(tracing::Level::INFO, "download").entered(); let mut download_args = vec![ "download-weights".to_string(), args.model_id.to_string(), "--extension".to_string(), ".safetensors".to_string(), "--logger-level".to_string(), "INFO".to_string(), "--json-output".to_string(), ]; // Model optional revision if let Some(revision) = &args.revision { download_args.push("--revision".to_string()); download_args.push(revision.to_string()) } // Copy current process env let mut envs: Vec<(OsString, OsString)> = env::vars_os().collect(); // If huggingface_hub_cache is set, pass it to the download process // Useful when running inside a docker container if let Some(ref 
huggingface_hub_cache) = args.huggingface_hub_cache { envs.push(("HUGGINGFACE_HUB_CACHE".into(), huggingface_hub_cache.into())); }; // Enable hf transfer for insane download speeds let enable_hf_transfer = env::var("HF_HUB_ENABLE_HF_TRANSFER").unwrap_or("1".to_string()); envs.push(( "HF_HUB_ENABLE_HF_TRANSFER".into(), enable_hf_transfer.into(), )); // Parse Inference API token if let Ok(api_token) = env::var("HF_API_TOKEN") { envs.push(("HUGGING_FACE_HUB_TOKEN".into(), api_token.into())) }; // If args.weights_cache_override is some, pass it to the download process // Useful when running inside a HuggingFace Inference Endpoint if let Some(weights_cache_override) = &args.weights_cache_override { envs.push(( "WEIGHTS_CACHE_OVERRIDE".into(), weights_cache_override.into(), )); }; // Start process tracing::info!("Starting download process."); let mut download_process = match Command::new("text-generation-server") .args(download_args) .envs(envs) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .process_group(0) .spawn() { Ok(p) => p, Err(err) => { if err.kind() == io::ErrorKind::NotFound { tracing::error!("text-generation-server not found in PATH"); tracing::error!("Please install it with `make install-server`") } else { tracing::error!("{}", err); } return Err(LauncherError::DownloadError); } }; // Redirect STDOUT to the console let download_stdout = download_process.stdout.take().unwrap(); let stdout = BufReader::new(download_stdout); thread::spawn(move || { log_lines(stdout.lines()); }); loop { if let Some(status) = download_process.try_wait().unwrap() { if status.success() { tracing::info!("Successfully downloaded weights."); break; } let mut err = String::new(); download_process .stderr .take() .unwrap() .read_to_string(&mut err) .unwrap(); if let Some(signal) = status.signal() { tracing::error!( "Download process was signaled to shutdown with signal {signal}: {err}" ); } else { tracing::error!("Download encountered an error: {err}"); } return Err(LauncherError::DownloadError); } if !running.load(Ordering::SeqCst) { terminate("download", download_process, Duration::from_secs(10)).unwrap(); return Ok(()); } sleep(Duration::from_millis(100)); } Ok(()) } #[allow(clippy::too_many_arguments)] fn spawn_shards( num_shard: usize, args: &Args, shutdown: Arc<AtomicBool>, shutdown_receiver: &mpsc::Receiver<()>, shutdown_sender: mpsc::Sender<()>, status_receiver: &mpsc::Receiver<ShardStatus>, status_sender: mpsc::Sender<ShardStatus>, running: Arc<AtomicBool>, ) -> Result<(), LauncherError> { // Start shard processes for rank in 0..num_shard { let model_id = args.model_id.clone(); let revision = args.revision.clone(); let uds_path = args.shard_uds_path.clone(); let master_addr = args.master_addr.clone(); let huggingface_hub_cache = args.huggingface_hub_cache.clone(); let weights_cache_override = args.weights_cache_override.clone(); let status_sender = status_sender.clone(); let shutdown = shutdown.clone(); let shutdown_sender = shutdown_sender.clone(); let otlp_endpoint = args.otlp_endpoint.clone(); let quantize = args.quantize; let dtype = args.dtype; let trust_remote_code = args.trust_remote_code; let master_port = args.master_port; let disable_custom_kernels = args.disable_custom_kernels; let watermark_gamma = args.watermark_gamma; let watermark_delta = args.watermark_delta; let cuda_memory_fraction = args.cuda_memory_fraction; thread::spawn(move || { shard_manager( model_id, revision, quantize, dtype, trust_remote_code, uds_path, rank, num_shard, master_addr, master_port, huggingface_hub_cache, 
weights_cache_override, disable_custom_kernels, watermark_gamma, watermark_delta, cuda_memory_fraction, otlp_endpoint, status_sender, shutdown, shutdown_sender, ) }); } drop(shutdown_sender); // Wait for shard to start let mut shard_ready = 0; while running.load(Ordering::SeqCst) { match status_receiver.try_recv() { Ok(ShardStatus::Ready) => { shard_ready += 1; if shard_ready == num_shard { break; } } Err(TryRecvError::Empty) => { sleep(Duration::from_millis(100)); } Ok(ShardStatus::Failed(rank)) => { tracing::error!("Shard {rank} failed to start"); shutdown_shards(shutdown, shutdown_receiver); return Err(LauncherError::ShardCannotStart); } Err(TryRecvError::Disconnected) => { tracing::error!("Shard status channel disconnected"); shutdown_shards(shutdown, shutdown_receiver); return Err(LauncherError::ShardDisconnected); } } } Ok(()) } fn spawn_webserver( args: Args, shutdown: Arc<AtomicBool>, shutdown_receiver: &mpsc::Receiver<()>, ) -> Result<Child, LauncherError> { // All shard started // Start webserver tracing::info!("Starting Webserver"); let mut router_args = vec![ "--max-concurrent-requests".to_string(), args.max_concurrent_requests.to_string(), "--max-best-of".to_string(), args.max_best_of.to_string(), "--max-stop-sequences".to_string(), args.max_stop_sequences.to_string(), "--max-input-length".to_string(), args.max_input_length.to_string(), "--max-total-tokens".to_string(), args.max_total_tokens.to_string(), "--max-batch-prefill-tokens".to_string(), args.max_batch_prefill_tokens.to_string(), "--waiting-served-ratio".to_string(), args.waiting_served_ratio.to_string(), "--max-waiting-tokens".to_string(), args.max_waiting_tokens.to_string(), "--validation-workers".to_string(), args.validation_workers.to_string(), "--hostname".to_string(), args.hostname.to_string(), "--port".to_string(), args.port.to_string(), "--master-shard-uds-path".to_string(), format!("{}-0", args.shard_uds_path), "--tokenizer-name".to_string(), args.model_id, ]; // Model optional max batch total tokens if let Some(max_batch_total_tokens) = args.max_batch_total_tokens { router_args.push("--max-batch-total-tokens".to_string()); router_args.push(max_batch_total_tokens.to_string()); } // Model optional revision if let Some(ref revision) = args.revision { router_args.push("--revision".to_string()); router_args.push(revision.to_string()) } if args.json_output { router_args.push("--json-output".to_string()); } // OpenTelemetry if let Some(otlp_endpoint) = args.otlp_endpoint { router_args.push("--otlp-endpoint".to_string()); router_args.push(otlp_endpoint); } // CORS origins for origin in args.cors_allow_origin.into_iter() { router_args.push("--cors-allow-origin".to_string()); router_args.push(origin); } // Ngrok if args.ngrok { router_args.push("--ngrok".to_string()); router_args.push("--ngrok-authtoken".to_string()); router_args.push(args.ngrok_authtoken.unwrap()); router_args.push("--ngrok-edge".to_string()); router_args.push(args.ngrok_edge.unwrap()); } // Copy current process env let mut envs: Vec<(OsString, OsString)> = env::vars_os().collect(); // Parse Inference API token if let Ok(api_token) = env::var("HF_API_TOKEN") { envs.push(("HUGGING_FACE_HUB_TOKEN".into(), api_token.into())) }; let mut webserver = match Command::new("text-generation-router") .args(router_args) .envs(envs) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .process_group(0) .spawn() { Ok(p) => p, Err(err) => { tracing::error!("Failed to start webserver: {}", err); if err.kind() == io::ErrorKind::NotFound { 
tracing::error!("text-generation-router not found in PATH"); tracing::error!("Please install it with `make install-router`") } else { tracing::error!("{}", err); } shutdown_shards(shutdown, shutdown_receiver); return Err(LauncherError::WebserverCannotStart); } }; // Redirect STDOUT and STDERR to the console let webserver_stdout = webserver.stdout.take().unwrap(); let webserver_stderr = webserver.stderr.take().unwrap(); thread::spawn(move || { let stdout = BufReader::new(webserver_stdout); let stderr = BufReader::new(webserver_stderr); for line in stdout.lines() { println!("{}", line.unwrap()); } for line in stderr.lines() { println!("{}", line.unwrap()); } }); Ok(webserver) } fn terminate(process_name: &str, mut process: Child, timeout: Duration) -> io::Result<ExitStatus> { tracing::info!("Terminating {process_name}"); let terminate_time = Instant::now(); signal::kill(Pid::from_raw(process.id() as i32), Signal::SIGTERM).unwrap(); tracing::info!("Waiting for {process_name} to gracefully shutdown"); while terminate_time.elapsed() < timeout { if let Some(status) = process.try_wait()? { tracing::info!("{process_name} terminated"); return Ok(status); } sleep(Duration::from_millis(100)); } tracing::info!("Killing {process_name}"); process.kill()?; let exit_status = process.wait()?; tracing::info!("{process_name} killed"); Ok(exit_status) } fn main() -> Result<(), LauncherError> { // Pattern match configuration let args: Args = Args::parse(); // Filter events with LOG_LEVEL let env_filter = EnvFilter::try_from_env("LOG_LEVEL").unwrap_or_else(|_| EnvFilter::new("info")); if args.json_output { tracing_subscriber::fmt() .with_env_filter(env_filter) .json() .init(); } else { tracing_subscriber::fmt() .with_env_filter(env_filter) .compact() .init(); } if args.env { let env_runtime = env_runtime::Env::new(); tracing::info!("{}", env_runtime); } tracing::info!("{:?}", args); // Validate args if args.max_input_length >= args.max_total_tokens { return Err(LauncherError::ArgumentValidation( "`max_input_length` must be < `max_total_tokens`".to_string(), )); } if args.max_input_length as u32 > args.max_batch_prefill_tokens { return Err(LauncherError::ArgumentValidation(format!( "`max_batch_prefill_tokens` must be >= `max_input_length`. Given: {} and {}", args.max_batch_prefill_tokens, args.max_input_length ))); } if args.validation_workers == 0 { return Err(LauncherError::ArgumentValidation( "`validation_workers` must be > 0".to_string(), )); } if args.trust_remote_code { tracing::warn!( "`trust_remote_code` is set. Trusting that model `{}` do not contain malicious code.", args.model_id ); } let num_shard = find_num_shards(args.sharded, args.num_shard)?; if num_shard > 1 { tracing::info!("Sharding model on {num_shard} processes"); } if let Some(ref max_batch_total_tokens) = args.max_batch_total_tokens { if args.max_batch_prefill_tokens > *max_batch_total_tokens { return Err(LauncherError::ArgumentValidation(format!( "`max_batch_prefill_tokens` must be <= `max_batch_total_tokens`. Given: {} and {}", args.max_batch_prefill_tokens, max_batch_total_tokens ))); } if args.max_total_tokens as u32 > *max_batch_total_tokens { return Err(LauncherError::ArgumentValidation(format!( "`max_total_tokens` must be <= `max_batch_total_tokens`. 
Given: {} and {}", args.max_total_tokens, max_batch_total_tokens ))); } } if args.ngrok { if args.ngrok_authtoken.is_none() { return Err(LauncherError::ArgumentValidation( "`ngrok-authtoken` must be set when using ngrok tunneling".to_string(), )); } if args.ngrok_edge.is_none() { return Err(LauncherError::ArgumentValidation( "`ngrok-edge` must be set when using ngrok tunneling".to_string(), )); } } // Signal handler let running = Arc::new(AtomicBool::new(true)); let r = running.clone(); ctrlc::set_handler(move || { r.store(false, Ordering::SeqCst); }) .expect("Error setting Ctrl-C handler"); // Download and convert model weights download_convert_model(&args, running.clone())?; if !running.load(Ordering::SeqCst) { // Launcher was asked to stop return Ok(()); } // Shared shutdown bool let shutdown = Arc::new(AtomicBool::new(false)); // Shared shutdown channel // When shutting down, the main thread will wait for all senders to be dropped let (shutdown_sender, shutdown_receiver) = mpsc::channel(); // Shared channel to track shard status let (status_sender, status_receiver) = mpsc::channel(); spawn_shards( num_shard, &args, shutdown.clone(), &shutdown_receiver, shutdown_sender, &status_receiver, status_sender, running.clone(), )?; // We might have received a termination signal if !running.load(Ordering::SeqCst) { shutdown_shards(shutdown, &shutdown_receiver); return Ok(()); } let mut webserver = spawn_webserver(args, shutdown.clone(), &shutdown_receiver).map_err(|err| { shutdown_shards(shutdown.clone(), &shutdown_receiver); err })?; // Default exit code let mut exit_code = Ok(()); while running.load(Ordering::SeqCst) { if let Ok(ShardStatus::Failed(rank)) = status_receiver.try_recv() { tracing::error!("Shard {rank} crashed"); exit_code = Err(LauncherError::ShardFailed); break; }; match webserver.try_wait().unwrap() { Some(_) => { tracing::error!("Webserver Crashed"); shutdown_shards(shutdown, &shutdown_receiver); return Err(LauncherError::WebserverFailed); } None => { sleep(Duration::from_millis(100)); } }; } // Graceful termination terminate("webserver", webserver, Duration::from_secs(90)).unwrap(); shutdown_shards(shutdown, &shutdown_receiver); exit_code }
0
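The launcher's `main` function above validates several relationships between the token-budget arguments it documents (`max_input_length`, `max_total_tokens`, `max_batch_prefill_tokens`, `max_batch_total_tokens`). The following is a minimal Python sketch of those same checks, using a hypothetical helper that is not part of the repository, just to make the constraints explicit:

from typing import Optional

# Hypothetical helper mirroring the argument validation in launcher/src/main.rs.
def validate_token_budget(
    max_input_length: int,
    max_total_tokens: int,
    max_batch_prefill_tokens: int,
    max_batch_total_tokens: Optional[int] = None,
) -> None:
    # A prompt plus its generated tokens must fit within max_total_tokens.
    if max_input_length >= max_total_tokens:
        raise ValueError("`max_input_length` must be < `max_total_tokens`")
    # A single prompt must fit within one prefill run.
    if max_input_length > max_batch_prefill_tokens:
        raise ValueError("`max_batch_prefill_tokens` must be >= `max_input_length`")
    if max_batch_total_tokens is not None:
        # The batch-wide token budget must cover both prefill and a full request.
        if max_batch_prefill_tokens > max_batch_total_tokens:
            raise ValueError("`max_batch_prefill_tokens` must be <= `max_batch_total_tokens`")
        if max_total_tokens > max_batch_total_tokens:
            raise ValueError("`max_total_tokens` must be <= `max_batch_total_tokens`")

# Example with the launcher defaults (max_batch_total_tokens left unset).
validate_token_budget(max_input_length=1024, max_total_tokens=2048, max_batch_prefill_tokens=4096)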
hf_public_repos/text-generation-inference/launcher
hf_public_repos/text-generation-inference/launcher/src/env_runtime.rs
use std::fmt;
use std::process::Command;

pub(crate) struct Env {
    cargo_target: &'static str,
    cargo_version: &'static str,
    git_sha: &'static str,
    docker_label: &'static str,
    nvidia_env: String,
}

impl Env {
    pub fn new() -> Self {
        let nvidia_env = nvidia_smi();

        Self {
            nvidia_env: nvidia_env.unwrap_or("N/A".to_string()),
            cargo_target: env!("VERGEN_CARGO_TARGET_TRIPLE"),
            cargo_version: env!("VERGEN_RUSTC_SEMVER"),
            git_sha: option_env!("VERGEN_GIT_SHA").unwrap_or("N/A"),
            docker_label: option_env!("DOCKER_LABEL").unwrap_or("N/A"),
        }
    }
}

impl fmt::Display for Env {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        writeln!(f, "Runtime environment:")?;
        writeln!(f, "Target: {}", self.cargo_target)?;
        writeln!(f, "Cargo version: {}", self.cargo_version)?;
        writeln!(f, "Commit sha: {}", self.git_sha)?;
        writeln!(f, "Docker label: {}", self.docker_label)?;
        write!(f, "nvidia-smi:\n{}", self.nvidia_env)?;

        Ok(())
    }
}

fn nvidia_smi() -> Option<String> {
    let output = Command::new("nvidia-smi").output().ok()?;
    let nvidia_smi = String::from_utf8(output.stdout).ok()?;
    let output = nvidia_smi.replace('\n', "\n ");
    Some(output.trim().to_string())
}
0
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/docs/index.html
<html>
<head>
    <!-- Load the latest Swagger UI code and style from npm using unpkg.com -->
    <script src="https://unpkg.com/swagger-ui-dist@3/swagger-ui-bundle.js"></script>
    <link rel="stylesheet" type="text/css" href="https://unpkg.com/swagger-ui-dist@3/swagger-ui.css"/>
    <title>Text Generation Inference API</title>
</head>
<body>
<div id="swagger-ui"></div> <!-- Div to hold the UI component -->
<script>
    window.onload = function () {
        // Begin Swagger UI call region
        const ui = SwaggerUIBundle({
            url: "openapi.json", //Location of Open API spec in the repo
            dom_id: '#swagger-ui',
            deepLinking: true,
            supportedSubmitMethods: [],
            presets: [
                SwaggerUIBundle.presets.apis,
                SwaggerUIBundle.SwaggerUIStandalonePreset
            ],
            plugins: [
                SwaggerUIBundle.plugins.DownloadUrl
            ],
        })
        window.ui = ui
    }
</script>
</body>
</html>
0
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/docs/openapi.json
{ "openapi": "3.0.3", "info": { "title": "Text Generation Inference", "description": "Text Generation Webserver", "contact": { "name": "Olivier Dehaene" }, "license": { "name": "Apache 2.0", "url": "https://www.apache.org/licenses/LICENSE-2.0" }, "version": "0.9.3" }, "paths": { "/": { "post": { "tags": [ "Text Generation Inference" ], "summary": "Generate tokens if `stream == false` or a stream of token if `stream == true`", "description": "Generate tokens if `stream == false` or a stream of token if `stream == true`", "operationId": "compat_generate", "requestBody": { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/CompatGenerateRequest" } } }, "required": true }, "responses": { "200": { "description": "Generated Text", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/GenerateResponse" } }, "text/event-stream": { "schema": { "$ref": "#/components/schemas/StreamResponse" } } } }, "422": { "description": "Input validation error", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Input validation error" } } } }, "424": { "description": "Generation Error", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Request failed during generation" } } } }, "429": { "description": "Model is overloaded", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Model is overloaded" } } } }, "500": { "description": "Incomplete generation", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Incomplete generation" } } } } } } }, "/generate": { "post": { "tags": [ "Text Generation Inference" ], "summary": "Generate tokens", "description": "Generate tokens", "operationId": "generate", "requestBody": { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/GenerateRequest" } } }, "required": true }, "responses": { "200": { "description": "Generated Text", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/GenerateResponse" } } } }, "422": { "description": "Input validation error", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Input validation error" } } } }, "424": { "description": "Generation Error", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Request failed during generation" } } } }, "429": { "description": "Model is overloaded", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Model is overloaded" } } } }, "500": { "description": "Incomplete generation", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Incomplete generation" } } } } } } }, "/generate_stream": { "post": { "tags": [ "Text Generation Inference" ], "summary": "Generate a stream of token using Server-Sent Events", "description": "Generate a stream of token using Server-Sent Events", "operationId": "generate_stream", "requestBody": { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/GenerateRequest" } } }, "required": true }, "responses": { "200": { "description": "Generated Text", "content": { "text/event-stream": { "schema": { "$ref": "#/components/schemas/StreamResponse" } } 
} }, "422": { "description": "Input validation error", "content": { "text/event-stream": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Input validation error" } } } }, "424": { "description": "Generation Error", "content": { "text/event-stream": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Request failed during generation" } } } }, "429": { "description": "Model is overloaded", "content": { "text/event-stream": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Model is overloaded" } } } }, "500": { "description": "Incomplete generation", "content": { "text/event-stream": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Incomplete generation" } } } } } } }, "/health": { "get": { "tags": [ "Text Generation Inference" ], "summary": "Health check method", "description": "Health check method", "operationId": "health", "responses": { "200": { "description": "Everything is working fine" }, "503": { "description": "Text generation inference is down", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "unhealthy", "error_type": "healthcheck" } } } } } } }, "/info": { "get": { "tags": [ "Text Generation Inference" ], "summary": "Text Generation Inference endpoint info", "description": "Text Generation Inference endpoint info", "operationId": "get_model_info", "responses": { "200": { "description": "Served model info", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/Info" } } } } } } }, "/metrics": { "get": { "tags": [ "Text Generation Inference" ], "summary": "Prometheus metrics scrape endpoint", "description": "Prometheus metrics scrape endpoint", "operationId": "metrics", "responses": { "200": { "description": "Prometheus Metrics", "content": { "text/plain": { "schema": { "type": "string" } } } } } } } }, "components": { "schemas": { "BestOfSequence": { "type": "object", "required": [ "generated_text", "finish_reason", "generated_tokens", "prefill", "tokens" ], "properties": { "finish_reason": { "$ref": "#/components/schemas/FinishReason" }, "generated_text": { "type": "string", "example": "test" }, "generated_tokens": { "type": "integer", "format": "int32", "example": 1, "minimum": 0.0 }, "prefill": { "type": "array", "items": { "$ref": "#/components/schemas/PrefillToken" } }, "seed": { "type": "integer", "format": "int64", "example": 42, "nullable": true, "minimum": 0.0 }, "tokens": { "type": "array", "items": { "$ref": "#/components/schemas/Token" } } } }, "CompatGenerateRequest": { "type": "object", "required": [ "inputs" ], "properties": { "inputs": { "type": "string", "example": "My name is Olivier and I" }, "parameters": { "$ref": "#/components/schemas/GenerateParameters" }, "stream": { "type": "boolean", "default": "false" } } }, "Details": { "type": "object", "required": [ "finish_reason", "generated_tokens", "prefill", "tokens" ], "properties": { "best_of_sequences": { "type": "array", "items": { "$ref": "#/components/schemas/BestOfSequence" }, "nullable": true }, "finish_reason": { "$ref": "#/components/schemas/FinishReason" }, "generated_tokens": { "type": "integer", "format": "int32", "example": 1, "minimum": 0.0 }, "prefill": { "type": "array", "items": { "$ref": "#/components/schemas/PrefillToken" } }, "seed": { "type": "integer", "format": "int64", "example": 42, "nullable": true, "minimum": 0.0 }, "tokens": { "type": "array", "items": { 
"$ref": "#/components/schemas/Token" } } } }, "ErrorResponse": { "type": "object", "required": [ "error", "error_type" ], "properties": { "error": { "type": "string" }, "error_type": { "type": "string" } } }, "FinishReason": { "type": "string", "enum": [ "length", "eos_token", "stop_sequence" ] }, "GenerateParameters": { "type": "object", "properties": { "best_of": { "type": "integer", "default": "null", "example": 1, "nullable": true, "minimum": 0.0, "exclusiveMinimum": 0.0 }, "decoder_input_details": { "type": "boolean", "default": "true" }, "details": { "type": "boolean", "default": "true" }, "do_sample": { "type": "boolean", "default": "false", "example": true }, "max_new_tokens": { "type": "integer", "format": "int32", "default": "20", "minimum": 0.0, "exclusiveMaximum": 512.0, "exclusiveMinimum": 0.0 }, "repetition_penalty": { "type": "number", "format": "float", "default": "null", "example": 1.03, "nullable": true, "exclusiveMinimum": 0.0 }, "return_full_text": { "type": "boolean", "default": "null", "example": false, "nullable": true }, "seed": { "type": "integer", "format": "int64", "default": "null", "example": "null", "nullable": true, "minimum": 0.0, "exclusiveMinimum": 0.0 }, "stop": { "type": "array", "items": { "type": "string" }, "example": [ "photographer" ], "maxItems": 4 }, "temperature": { "type": "number", "format": "float", "default": "null", "example": 0.5, "nullable": true, "exclusiveMinimum": 0.0 }, "top_k": { "type": "integer", "format": "int32", "default": "null", "example": 10, "nullable": true, "exclusiveMinimum": 0.0 }, "top_p": { "type": "number", "format": "float", "default": "null", "example": 0.95, "nullable": true, "maximum": 1.0, "exclusiveMinimum": 0.0 }, "truncate": { "type": "integer", "default": "null", "example": "null", "nullable": true, "minimum": 0.0 }, "typical_p": { "type": "number", "format": "float", "default": "null", "example": 0.95, "nullable": true, "maximum": 1.0, "exclusiveMinimum": 0.0 }, "watermark": { "type": "boolean", "default": "false", "example": true } } }, "GenerateRequest": { "type": "object", "required": [ "inputs" ], "properties": { "inputs": { "type": "string", "example": "My name is Olivier and I" }, "parameters": { "$ref": "#/components/schemas/GenerateParameters" } } }, "GenerateResponse": { "type": "object", "required": [ "generated_text" ], "properties": { "details": { "allOf": [ { "$ref": "#/components/schemas/Details" } ], "nullable": true }, "generated_text": { "type": "string", "example": "test" } } }, "Info": { "type": "object", "required": [ "model_id", "model_dtype", "model_device_type", "max_concurrent_requests", "max_best_of", "max_stop_sequences", "max_input_length", "max_total_tokens", "waiting_served_ratio", "max_batch_total_tokens", "max_waiting_tokens", "validation_workers", "version" ], "properties": { "docker_label": { "type": "string", "example": "null", "nullable": true }, "max_batch_total_tokens": { "type": "integer", "format": "int32", "example": "32000", "minimum": 0.0 }, "max_best_of": { "type": "integer", "example": "2", "minimum": 0.0 }, "max_concurrent_requests": { "type": "integer", "description": "Router Parameters", "example": "128", "minimum": 0.0 }, "max_input_length": { "type": "integer", "example": "1024", "minimum": 0.0 }, "max_stop_sequences": { "type": "integer", "example": "4", "minimum": 0.0 }, "max_total_tokens": { "type": "integer", "example": "2048", "minimum": 0.0 }, "max_waiting_tokens": { "type": "integer", "example": "20", "minimum": 0.0 }, "model_device_type": { "type": 
"string", "example": "cuda" }, "model_dtype": { "type": "string", "example": "torch.float16" }, "model_id": { "type": "string", "description": "Model info", "example": "bigscience/blomm-560m" }, "model_pipeline_tag": { "type": "string", "example": "text-generation", "nullable": true }, "model_sha": { "type": "string", "example": "e985a63cdc139290c5f700ff1929f0b5942cced2", "nullable": true }, "sha": { "type": "string", "example": "null", "nullable": true }, "validation_workers": { "type": "integer", "example": "2", "minimum": 0.0 }, "version": { "type": "string", "description": "Router Info", "example": "0.5.0" }, "waiting_served_ratio": { "type": "number", "format": "float", "example": "1.2" } } }, "PrefillToken": { "type": "object", "required": [ "id", "text", "logprob" ], "properties": { "id": { "type": "integer", "format": "int32", "example": 0, "minimum": 0.0 }, "logprob": { "type": "number", "format": "float", "example": -0.34, "nullable": true }, "text": { "type": "string", "example": "test" } } }, "StreamDetails": { "type": "object", "required": [ "finish_reason", "generated_tokens" ], "properties": { "finish_reason": { "$ref": "#/components/schemas/FinishReason" }, "generated_tokens": { "type": "integer", "format": "int32", "example": 1, "minimum": 0.0 }, "seed": { "type": "integer", "format": "int64", "example": 42, "nullable": true, "minimum": 0.0 } } }, "StreamResponse": { "type": "object", "required": [ "token" ], "properties": { "details": { "allOf": [ { "$ref": "#/components/schemas/StreamDetails" } ], "nullable": true }, "generated_text": { "type": "string", "default": "null", "example": "test", "nullable": true }, "token": { "$ref": "#/components/schemas/Token" } } }, "Token": { "type": "object", "required": [ "id", "text", "logprob", "special" ], "properties": { "id": { "type": "integer", "format": "int32", "example": 0, "minimum": 0.0 }, "logprob": { "type": "number", "format": "float", "example": -0.34, "nullable": true }, "special": { "type": "boolean", "example": "false" }, "text": { "type": "string", "example": "test" } } } } }, "tags": [ { "name": "Text Generation Inference", "description": "Hugging Face Text Generation Inference API" } ] }
0
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/integration-tests/requirements.txt
syrupy
text-generation
pytest
pytest-asyncio==0.17.2
docker
0
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/integration-tests/conftest.py
import sys import subprocess import contextlib import pytest import asyncio import os import docker import json import math import time import random from docker.errors import NotFound from typing import Optional, List, Dict from syrupy.extensions.json import JSONSnapshotExtension from aiohttp import ClientConnectorError, ClientOSError, ServerDisconnectedError from text_generation import AsyncClient from text_generation.types import Response, Details, InputToken, Token, BestOfSequence DOCKER_IMAGE = os.getenv("DOCKER_IMAGE", None) HUGGING_FACE_HUB_TOKEN = os.getenv("HUGGING_FACE_HUB_TOKEN", None) DOCKER_VOLUME = os.getenv("DOCKER_VOLUME", "/data") class ResponseComparator(JSONSnapshotExtension): def serialize( self, data, *, exclude=None, matcher=None, ): if isinstance(data, List): data = [d.dict() for d in data] data = self._filter( data=data, depth=0, path=(), exclude=exclude, matcher=matcher ) return json.dumps(data, indent=2, ensure_ascii=False, sort_keys=False) + "\n" def matches( self, *, serialized_data, snapshot_data, ) -> bool: def convert_data(data): data = json.loads(data) if isinstance(data, Dict): return Response(**data) if isinstance(data, List): return [Response(**d) for d in data] raise NotImplementedError def eq_token(token: Token, other: Token) -> bool: return ( token.id == other.id and token.text == other.text and math.isclose(token.logprob, other.logprob, rel_tol=0.2) and token.special == other.special ) def eq_prefill_token(prefill_token: InputToken, other: InputToken) -> bool: try: return ( prefill_token.id == other.id and prefill_token.text == other.text and ( math.isclose(prefill_token.logprob, other.logprob, rel_tol=0.2) if prefill_token.logprob is not None else prefill_token.logprob == other.logprob ) ) except TypeError: return False def eq_best_of(details: BestOfSequence, other: BestOfSequence) -> bool: return ( details.finish_reason == other.finish_reason and details.generated_tokens == other.generated_tokens and details.seed == other.seed and len(details.prefill) == len(other.prefill) and all( [ eq_prefill_token(d, o) for d, o in zip(details.prefill, other.prefill) ] ) and len(details.tokens) == len(other.tokens) and all([eq_token(d, o) for d, o in zip(details.tokens, other.tokens)]) ) def eq_details(details: Details, other: Details) -> bool: return ( details.finish_reason == other.finish_reason and details.generated_tokens == other.generated_tokens and details.seed == other.seed and len(details.prefill) == len(other.prefill) and all( [ eq_prefill_token(d, o) for d, o in zip(details.prefill, other.prefill) ] ) and len(details.tokens) == len(other.tokens) and all([eq_token(d, o) for d, o in zip(details.tokens, other.tokens)]) and ( len(details.best_of_sequences) if details.best_of_sequences is not None else 0 ) == ( len(other.best_of_sequences) if other.best_of_sequences is not None else 0 ) and ( all( [ eq_best_of(d, o) for d, o in zip( details.best_of_sequences, other.best_of_sequences ) ] ) if details.best_of_sequences is not None else details.best_of_sequences == other.best_of_sequences ) ) def eq_response(response: Response, other: Response) -> bool: return response.generated_text == other.generated_text and eq_details( response.details, other.details ) serialized_data = convert_data(serialized_data) snapshot_data = convert_data(snapshot_data) if not isinstance(serialized_data, List): serialized_data = [serialized_data] if not isinstance(snapshot_data, List): snapshot_data = [snapshot_data] return len(snapshot_data) == len(serialized_data) and all( 
[eq_response(r, o) for r, o in zip(serialized_data, snapshot_data)] ) class LauncherHandle: def __init__(self, port: int): self.client = AsyncClient(f"http://localhost:{port}") def _inner_health(self): raise NotImplementedError async def health(self, timeout: int = 60): assert timeout > 0 for _ in range(timeout): if not self._inner_health(): raise RuntimeError("Launcher crashed") try: await self.client.generate("test") return except (ClientConnectorError, ClientOSError, ServerDisconnectedError) as e: time.sleep(1) raise RuntimeError("Health check failed") class ContainerLauncherHandle(LauncherHandle): def __init__(self, docker_client, container_name, port: int): super(ContainerLauncherHandle, self).__init__(port) self.docker_client = docker_client self.container_name = container_name def _inner_health(self) -> bool: container = self.docker_client.containers.get(self.container_name) return container.status in ["running", "created"] class ProcessLauncherHandle(LauncherHandle): def __init__(self, process, port: int): super(ProcessLauncherHandle, self).__init__(port) self.process = process def _inner_health(self) -> bool: return self.process.poll() is None @pytest.fixture def response_snapshot(snapshot): return snapshot.use_extension(ResponseComparator) @pytest.fixture(scope="module") def event_loop(): loop = asyncio.get_event_loop() yield loop loop.close() @pytest.fixture(scope="module") def launcher(event_loop): @contextlib.contextmanager def local_launcher( model_id: str, num_shard: Optional[int] = None, quantize: Optional[str] = None, trust_remote_code: bool = False, use_flash_attention: bool = True, ): port = random.randint(8000, 10_000) master_port = random.randint(10_000, 20_000) shard_uds_path = ( f"/tmp/tgi-tests-{model_id.split('/')[-1]}-{num_shard}-{quantize}-server" ) args = [ "text-generation-launcher", "--model-id", model_id, "--port", str(port), "--master-port", str(master_port), "--shard-uds-path", shard_uds_path, ] env = os.environ if num_shard is not None: args.extend(["--num-shard", str(num_shard)]) if quantize is not None: args.append("--quantize") args.append(quantize) if trust_remote_code: args.append("--trust-remote-code") env["LOG_LEVEL"] = "info,text_generation_router=debug" if not use_flash_attention: env["USE_FLASH_ATTENTION"] = "false" with subprocess.Popen( args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env ) as process: yield ProcessLauncherHandle(process, port) process.terminate() process.wait(60) launcher_output = process.stdout.read().decode("utf-8") print(launcher_output, file=sys.stderr) process.stdout.close() process.stderr.close() if not use_flash_attention: del env["USE_FLASH_ATTENTION"] @contextlib.contextmanager def docker_launcher( model_id: str, num_shard: Optional[int] = None, quantize: Optional[str] = None, trust_remote_code: bool = False, use_flash_attention: bool = True, ): port = random.randint(8000, 10_000) args = ["--model-id", model_id, "--env"] if num_shard is not None: args.extend(["--num-shard", str(num_shard)]) if quantize is not None: args.append("--quantize") args.append(quantize) if trust_remote_code: args.append("--trust-remote-code") client = docker.from_env() container_name = f"tgi-tests-{model_id.split('/')[-1]}-{num_shard}-{quantize}" try: container = client.containers.get(container_name) container.stop() container.wait() except NotFound: pass gpu_count = num_shard if num_shard is not None else 1 env = {"LOG_LEVEL": "info,text_generation_router=debug"} if not use_flash_attention: env["USE_FLASH_ATTENTION"] = "false" if 
HUGGING_FACE_HUB_TOKEN is not None: env["HUGGING_FACE_HUB_TOKEN"] = HUGGING_FACE_HUB_TOKEN volumes = [] if DOCKER_VOLUME: volumes = [f"{DOCKER_VOLUME}:/data"] container = client.containers.run( DOCKER_IMAGE, command=args, name=container_name, environment=env, auto_remove=False, detach=True, device_requests=[ docker.types.DeviceRequest(count=gpu_count, capabilities=[["gpu"]]) ], volumes=volumes, ports={"80/tcp": port}, ) yield ContainerLauncherHandle(client, container.name, port) if not use_flash_attention: del env["USE_FLASH_ATTENTION"] try: container.stop() container.wait() except NotFound: pass container_output = container.logs().decode("utf-8") print(container_output, file=sys.stderr) container.remove() if DOCKER_IMAGE is not None: return docker_launcher return local_launcher @pytest.fixture(scope="module") def generate_load(): async def generate_load_inner( client: AsyncClient, prompt: str, max_new_tokens: int, n: int ) -> List[Response]: futures = [ client.generate( prompt, max_new_tokens=max_new_tokens, decoder_input_details=True ) for _ in range(n) ] return await asyncio.gather(*futures) return generate_load_inner
0
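The fixture module above (the shared conftest for these integration tests) is the whole test API: launcher starts the server either as a local text-generation-launcher process or, when DOCKER_IMAGE is set, as a Docker container; LauncherHandle.health polls the freshly started server with a tiny generate call until it answers, and fails fast if the process or container died; generate_load fires n concurrent requests; and response_snapshot compares results against the JSON files under models/__snapshots__, requiring exact token ids and texts but only a 20% relative tolerance on logprobs. A minimal sketch of a new model test built on those fixtures could look like the following; the model id, fixture names, and expected token count are hypothetical placeholders, and the module would have to live next to the test files below so the conftest fixtures are in scope.

import pytest


@pytest.fixture(scope="module")
def my_model_handle(launcher):
    # Hypothetical model id, for illustration only.
    with launcher("my-org/my-model", num_shard=1) as handle:
        yield handle


@pytest.fixture(scope="module")
async def my_model(my_model_handle):
    # Wait up to 300 seconds for the launched server to answer a generate call.
    await my_model_handle.health(300)
    return my_model_handle.client


@pytest.mark.asyncio
async def test_my_model(my_model, response_snapshot):
    response = await my_model.generate(
        "Test request", max_new_tokens=10, decoder_input_details=True
    )
    # ResponseComparator matches ids, texts and the special flag exactly,
    # and logprobs within rel_tol=0.2.
    assert response.details.generated_tokens == 10
    assert response == response_snapshot

On the first run syrupy has no stored snapshot to compare against; regenerating with its --snapshot-update flag is the usual workflow, which is also why pytest.ini below passes --snapshot-warn-unused.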
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/integration-tests/pytest.ini
[pytest] addopts = --snapshot-warn-unused asyncio_mode = auto markers = private: marks tests as requiring an admin hf token (deselect with '-m "not private"')
0
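pytest.ini registers the private marker used by the tests below that need an admin Hugging Face token, and sets asyncio_mode to auto so the async fixtures and tests are collected by pytest-asyncio. As a small, hedged illustration (the target path is a placeholder for wherever the suite lives), the deselect expression it documents can also be driven from Python instead of the command line:

import pytest

# Run the integration suite without the tests that need an admin HF token,
# mirroring the '-m "not private"' hint registered in pytest.ini.
# DOCKER_IMAGE and HUGGING_FACE_HUB_TOKEN are read by the conftest fixtures.
raise SystemExit(pytest.main(["-m", "not private", "integration-tests"]))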
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_bloom_560m_sharded.py
import pytest @pytest.fixture(scope="module") def bloom_560m_sharded_handle(launcher): with launcher("bigscience/bloom-560m", num_shard=2) as handle: yield handle @pytest.fixture(scope="module") async def bloom_560m_sharded(bloom_560m_sharded_handle): await bloom_560m_sharded_handle.health(240) return bloom_560m_sharded_handle.client @pytest.mark.asyncio async def test_bloom_560m_sharded(bloom_560m_sharded, response_snapshot): response = await bloom_560m_sharded.generate( "Pour déguster un ortolan, il faut tout d'abord", max_new_tokens=10, top_p=0.9, decoder_input_details=True, seed=0, ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.asyncio async def test_bloom_560m_sharded_load( bloom_560m_sharded, generate_load, response_snapshot ): responses = await generate_load( bloom_560m_sharded, "Pour déguster un ortolan, il faut tout d'abord", max_new_tokens=10, n=4, ) assert len(responses) == 4 assert all([r.generated_text == responses[0].generated_text for r in responses]) assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_t5_sharded.py
import pytest @pytest.fixture(scope="module") def t5_sharded_handle(launcher): with launcher("google/flan-t5-xxl", num_shard=2) as handle: yield handle @pytest.fixture(scope="module") async def t5_sharded(t5_sharded_handle): await t5_sharded_handle.health(300) return t5_sharded_handle.client @pytest.mark.asyncio async def test_t5_sharded(t5_sharded, response_snapshot): response = await t5_sharded.generate( "Please answer the following question. What is the boiling point of Nitrogen?", max_new_tokens=10, decoder_input_details=True, ) assert response == response_snapshot @pytest.mark.asyncio async def test_t5_sharded_load(t5_sharded, generate_load, response_snapshot): responses = await generate_load( t5_sharded, "Please answer the following question. What is the boiling point of Nitrogen?", max_new_tokens=10, n=4, ) assert len(responses) == 4 assert all([r.generated_text == responses[0].generated_text for r in responses]) assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_neox.py
import pytest @pytest.fixture(scope="module") def neox_handle(launcher): with launcher( "stabilityai/stablelm-tuned-alpha-3b", num_shard=1, use_flash_attention=False ) as handle: yield handle @pytest.fixture(scope="module") async def neox(neox_handle): await neox_handle.health(300) return neox_handle.client @pytest.mark.skip @pytest.mark.asyncio async def test_neox(neox, response_snapshot): response = await neox.generate( "<|USER|>What's your mood today?<|ASSISTANT|>", max_new_tokens=10, decoder_input_details=True, ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.skip @pytest.mark.asyncio async def test_neox_load(neox, generate_load, response_snapshot): responses = await generate_load( neox, "<|USER|>What's your mood today?<|ASSISTANT|>", max_new_tokens=10, n=4, ) generated_texts = [r.generated_text for r in responses] assert len(generated_texts) == 4 assert all( [text == generated_texts[0] for text in generated_texts] ), generated_texts assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_mt0_base.py
import pytest @pytest.fixture(scope="module") def mt0_base_handle(launcher): with launcher("bigscience/mt0-base") as handle: yield handle @pytest.fixture(scope="module") async def mt0_base(mt0_base_handle): await mt0_base_handle.health(300) return mt0_base_handle.client @pytest.mark.asyncio async def test_mt0_base(mt0_base, response_snapshot): response = await mt0_base.generate( "Why is the sky blue?", max_new_tokens=10, top_p=0.9, decoder_input_details=True, seed=0, ) assert response.details.generated_tokens == 5 assert response == response_snapshot @pytest.mark.asyncio async def test_mt0_base_all_params(mt0_base, response_snapshot): response = await mt0_base.generate( "Why is the sky blue?", max_new_tokens=10, repetition_penalty=1.2, return_full_text=True, stop_sequences=["test"], temperature=0.5, top_p=0.9, top_k=10, truncate=5, typical_p=0.9, watermark=True, decoder_input_details=True, seed=0, ) assert response.details.generated_tokens == 9 assert response == response_snapshot @pytest.mark.asyncio async def test_mt0_base_load(mt0_base, generate_load, response_snapshot): responses = await generate_load( mt0_base, "Why is the sky blue?", max_new_tokens=10, n=4, ) assert len(responses) == 4 assert all([r.generated_text == responses[0].generated_text for r in responses]) assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_flash_starcoder_gptq.py
import pytest @pytest.fixture(scope="module") def flash_starcoder_gptq_handle(launcher): with launcher("Narsil/starcoder-gptq", num_shard=2, quantize="gptq") as handle: yield handle @pytest.fixture(scope="module") async def flash_starcoder_gptq(flash_starcoder_gptq_handle): await flash_starcoder_gptq_handle.health(300) return flash_starcoder_gptq_handle.client @pytest.mark.asyncio @pytest.mark.private async def test_flash_starcoder_gptq(flash_starcoder_gptq, response_snapshot): response = await flash_starcoder_gptq.generate( "def geometric_mean(L: List[float]):", max_new_tokens=20, decoder_input_details=True, ) assert response.details.generated_tokens == 20 assert response == response_snapshot @pytest.mark.asyncio @pytest.mark.private async def test_flash_starcoder_gptq_default_params( flash_starcoder_gptq, response_snapshot ): response = await flash_starcoder_gptq.generate( "def geometric_mean(L: List[float]):", max_new_tokens=20, temperature=0.2, top_p=0.95, decoder_input_details=True, seed=0, ) assert response.details.generated_tokens == 20 assert response == response_snapshot @pytest.mark.asyncio @pytest.mark.private async def test_flash_starcoder_gptq_load( flash_starcoder_gptq, generate_load, response_snapshot ): responses = await generate_load( flash_starcoder_gptq, "def geometric_mean(L: List[float]):", max_new_tokens=10, n=4, ) assert len(responses) == 4 assert all([r.generated_text == responses[0].generated_text for r in responses]) assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_mpt.py
import pytest @pytest.fixture(scope="module") def mpt_sharded_handle(launcher): with launcher("mosaicml/mpt-7b", num_shard=2) as handle: yield handle @pytest.fixture(scope="module") async def mpt_sharded(mpt_sharded_handle): await mpt_sharded_handle.health(300) return mpt_sharded_handle.client @pytest.mark.asyncio async def test_mpt(mpt_sharded, response_snapshot): response = await mpt_sharded.generate( "What is Deep Learning?", max_new_tokens=17, decoder_input_details=True, ) assert response.details.generated_tokens == 17 assert ( response.generated_text == " - Deep Learning\nDeep Learning is a subfield of machine learning that uses artificial neural" ) assert response == response_snapshot @pytest.mark.asyncio async def test_mpt_load(mpt_sharded, generate_load, response_snapshot): responses = await generate_load( mpt_sharded, "What is Deep Learning?", max_new_tokens=17, n=4, ) assert len(responses) == 4 assert all([r.generated_text == responses[0].generated_text for r in responses]) assert ( responses[0].generated_text == " - Deep Learning\nDeep Learning is a subfield of machine learning that uses artificial neural" ) assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_flash_santacoder.py
import pytest @pytest.fixture(scope="module") def flash_santacoder_handle(launcher): with launcher("bigcode/santacoder") as handle: yield handle @pytest.fixture(scope="module") async def flash_santacoder(flash_santacoder_handle): await flash_santacoder_handle.health(300) return flash_santacoder_handle.client @pytest.mark.asyncio async def test_flash_santacoder(flash_santacoder, response_snapshot): response = await flash_santacoder.generate( "def print_hello", max_new_tokens=10, decoder_input_details=True ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.asyncio async def test_flash_santacoder_load( flash_santacoder, generate_load, response_snapshot ): responses = await generate_load( flash_santacoder, "def print_hello", max_new_tokens=10, n=4 ) assert len(responses) == 4 assert all([r.generated_text == responses[0].generated_text for r in responses]) assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_flash_falcon.py
import pytest @pytest.fixture(scope="module") def flash_falcon_handle(launcher): with launcher("tiiuae/falcon-7b", trust_remote_code=True) as handle: yield handle @pytest.fixture(scope="module") async def flash_falcon(flash_falcon_handle): await flash_falcon_handle.health(300) return flash_falcon_handle.client @pytest.mark.asyncio @pytest.mark.private async def test_flash_falcon(flash_falcon, response_snapshot): response = await flash_falcon.generate( "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:", max_new_tokens=10, decoder_input_details=True, ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.asyncio @pytest.mark.private async def test_flash_falcon_all_params(flash_falcon, response_snapshot): response = await flash_falcon.generate( "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:", max_new_tokens=10, repetition_penalty=1.2, return_full_text=True, stop_sequences=["test"], temperature=0.5, top_p=0.9, top_k=10, truncate=5, typical_p=0.9, watermark=True, decoder_input_details=True, seed=0, ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.asyncio @pytest.mark.private async def test_flash_falcon_load(flash_falcon, generate_load, response_snapshot): responses = await generate_load( flash_falcon, "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:", max_new_tokens=10, n=4, ) assert len(responses) == 4 assert all([r.generated_text == responses[0].generated_text for r in responses]) assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_flash_neox_sharded.py
import pytest @pytest.fixture(scope="module") def flash_neox_sharded_handle(launcher): with launcher("OpenAssistant/oasst-sft-1-pythia-12b", num_shard=2) as handle: yield handle @pytest.fixture(scope="module") async def flash_neox_sharded(flash_neox_sharded_handle): await flash_neox_sharded_handle.health(300) return flash_neox_sharded_handle.client @pytest.mark.asyncio async def test_flash_neox(flash_neox_sharded, response_snapshot): response = await flash_neox_sharded.generate( "<|prompter|>What is a meme, and what's the history behind this word?<|endoftext|><|assistant|>", max_new_tokens=10, decoder_input_details=True, ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.asyncio async def test_flash_neox_load(flash_neox_sharded, generate_load, response_snapshot): responses = await generate_load( flash_neox_sharded, "<|prompter|>What is a meme, and what's the history behind this word?<|endoftext|><|assistant|>", max_new_tokens=10, n=4, ) assert len(responses) == 4 assert all([r.generated_text == responses[0].generated_text for r in responses]) assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_bloom_560m.py
import pytest @pytest.fixture(scope="module") def bloom_560_handle(launcher): with launcher("bigscience/bloom-560m") as handle: yield handle @pytest.fixture(scope="module") async def bloom_560(bloom_560_handle): await bloom_560_handle.health(240) return bloom_560_handle.client @pytest.mark.asyncio async def test_bloom_560m(bloom_560, response_snapshot): response = await bloom_560.generate( "Pour déguster un ortolan, il faut tout d'abord", max_new_tokens=10, top_p=0.9, decoder_input_details=True, seed=0, ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.asyncio async def test_bloom_560m_all_params(bloom_560, response_snapshot): response = await bloom_560.generate( "Pour déguster un ortolan, il faut tout d'abord", max_new_tokens=10, repetition_penalty=1.2, return_full_text=True, stop_sequences=["test"], temperature=0.5, top_p=0.9, top_k=10, truncate=5, typical_p=0.9, watermark=True, decoder_input_details=True, seed=0, ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.asyncio async def test_bloom_560m_load(bloom_560, generate_load, response_snapshot): responses = await generate_load( bloom_560, "Pour déguster un ortolan, il faut tout d'abord", max_new_tokens=10, n=4, ) assert len(responses) == 4 assert all([r.generated_text == responses[0].generated_text for r in responses]) assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_flash_neox.py
import pytest @pytest.fixture(scope="module") def flash_neox_handle(launcher): with launcher("stabilityai/stablelm-tuned-alpha-3b", num_shard=1) as handle: yield handle @pytest.fixture(scope="module") async def flash_neox(flash_neox_handle): await flash_neox_handle.health(300) return flash_neox_handle.client @pytest.mark.skip @pytest.mark.asyncio async def test_flash_neox(flash_neox, response_snapshot): response = await flash_neox.generate( "<|USER|>What's your mood today?<|ASSISTANT|>", max_new_tokens=10, decoder_input_details=True, ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.skip @pytest.mark.asyncio async def test_flash_neox_load(flash_neox, generate_load, response_snapshot): responses = await generate_load( flash_neox, "<|USER|>What's your mood today?<|ASSISTANT|>", max_new_tokens=10, n=4, ) generated_texts = [r.generated_text for r in responses] assert len(generated_texts) == 4 assert all( [text == generated_texts[0] for text in generated_texts] ), generated_texts assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_flash_llama.py
import pytest @pytest.fixture(scope="module") def flash_llama_handle(launcher): with launcher("huggingface/llama-7b", num_shard=2) as handle: yield handle @pytest.fixture(scope="module") async def flash_llama(flash_llama_handle): await flash_llama_handle.health(300) return flash_llama_handle.client @pytest.mark.asyncio @pytest.mark.private async def test_flash_llama(flash_llama, response_snapshot): response = await flash_llama.generate( "Test request", max_new_tokens=10, decoder_input_details=True ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.asyncio @pytest.mark.private async def test_flash_llama_all_params(flash_llama, response_snapshot): response = await flash_llama.generate( "Test request", max_new_tokens=10, repetition_penalty=1.2, return_full_text=True, stop_sequences=["test"], temperature=0.5, top_p=0.9, top_k=10, truncate=5, typical_p=0.9, watermark=True, decoder_input_details=True, seed=0, ) assert response.details.generated_tokens == 5 assert response == response_snapshot @pytest.mark.asyncio @pytest.mark.private async def test_flash_llama_load(flash_llama, generate_load, response_snapshot): responses = await generate_load(flash_llama, "Test request", max_new_tokens=10, n=4) assert len(responses) == 4 assert all([r.generated_text == responses[0].generated_text for r in responses]) assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_flash_starcoder.py
import pytest @pytest.fixture(scope="module") def flash_starcoder_handle(launcher): with launcher("bigcode/starcoder", num_shard=2) as handle: yield handle @pytest.fixture(scope="module") async def flash_starcoder(flash_starcoder_handle): await flash_starcoder_handle.health(300) return flash_starcoder_handle.client @pytest.mark.asyncio @pytest.mark.private async def test_flash_starcoder(flash_starcoder, response_snapshot): response = await flash_starcoder.generate( "def print_hello", max_new_tokens=10, decoder_input_details=True ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.asyncio @pytest.mark.private async def test_flash_starcoder_default_params(flash_starcoder, response_snapshot): response = await flash_starcoder.generate( "def print_hello", max_new_tokens=60, temperature=0.2, top_p=0.95, decoder_input_details=True, seed=0, ) assert response.details.generated_tokens == 60 assert response == response_snapshot @pytest.mark.asyncio @pytest.mark.private async def test_flash_starcoder_load(flash_starcoder, generate_load, response_snapshot): responses = await generate_load( flash_starcoder, "def print_hello", max_new_tokens=10, n=4 ) assert len(responses) == 4 assert all([r.generated_text == responses[0].generated_text for r in responses]) assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_neox_sharded.py
import pytest @pytest.fixture(scope="module") def neox_sharded_handle(launcher): with launcher( "OpenAssistant/oasst-sft-1-pythia-12b", num_shard=2, use_flash_attention=False ) as handle: yield handle @pytest.fixture(scope="module") async def neox_sharded(neox_sharded_handle): await neox_sharded_handle.health(300) return neox_sharded_handle.client @pytest.mark.skip @pytest.mark.asyncio async def test_neox(neox_sharded, response_snapshot): response = await neox_sharded.generate( "<|prompter|>What is a meme, and what's the history behind this word?<|endoftext|><|assistant|>", max_new_tokens=10, decoder_input_details=True, ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.skip @pytest.mark.asyncio async def test_neox_load(neox_sharded, generate_load, response_snapshot): responses = await generate_load( neox_sharded, "<|prompter|>What is a meme, and what's the history behind this word?<|endoftext|><|assistant|>", max_new_tokens=10, n=4, ) assert len(responses) == 4 assert all([r.generated_text == responses[0].generated_text for r in responses]) assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests
hf_public_repos/text-generation-inference/integration-tests/models/test_flash_llama_gptq.py
import pytest @pytest.fixture(scope="module") def flash_llama_gptq_handle(launcher): with launcher("huggingface/llama-7b-gptq", num_shard=2, quantize="gptq") as handle: yield handle @pytest.fixture(scope="module") async def flash_llama_gptq(flash_llama_gptq_handle): await flash_llama_gptq_handle.health(300) return flash_llama_gptq_handle.client @pytest.mark.asyncio @pytest.mark.private async def test_flash_llama_gptq(flash_llama_gptq, response_snapshot): response = await flash_llama_gptq.generate( "Test request", max_new_tokens=10, decoder_input_details=True ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.asyncio @pytest.mark.private async def test_flash_llama_gptq_all_params(flash_llama_gptq, response_snapshot): response = await flash_llama_gptq.generate( "Test request", max_new_tokens=10, repetition_penalty=1.2, return_full_text=True, temperature=0.5, top_p=0.9, top_k=10, truncate=5, typical_p=0.9, watermark=True, decoder_input_details=True, seed=0, ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.asyncio @pytest.mark.private async def test_flash_llama_gptq_load( flash_llama_gptq, generate_load, response_snapshot ): responses = await generate_load( flash_llama_gptq, "Test request", max_new_tokens=10, n=4 ) assert len(responses) == 4 assert all([r.generated_text == responses[0].generated_text for r in responses]) assert responses == response_snapshot
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq_default_params.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 20, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 3226, "logprob": -9.0234375, "text": " ge" }, { "id": 21017, "logprob": -9.09375, "text": "ometric" }, { "id": 81, "logprob": -0.25976562, "text": "_" }, { "id": 6009, "logprob": -2.2148438, "text": "mean" }, { "id": 26, "logprob": -0.3010254, "text": "(" }, { "id": 62, "logprob": -5.6757812, "text": "L" }, { "id": 44, "logprob": -3.0898438, "text": ":" }, { "id": 1682, "logprob": -0.6791992, "text": " List" }, { "id": 77, "logprob": -0.38891602, "text": "[" }, { "id": 1808, "logprob": -0.92041016, "text": "float" }, { "id": 10794, "logprob": -2.5390625, "text": "]):" } ], "seed": 0, "tokens": [ { "id": 284, "logprob": 0.0, "special": false, "text": "\n " }, { "id": 442, "logprob": 0.0, "special": false, "text": " return" }, { "id": 11665, "logprob": -1.6005859, "special": false, "text": " reduce" }, { "id": 26, "logprob": 0.0, "special": false, "text": "(" }, { "id": 5962, "logprob": 0.0, "special": false, "text": "lambda" }, { "id": 816, "logprob": 0.0, "special": false, "text": " x" }, { "id": 30, "logprob": 0.0, "special": false, "text": "," }, { "id": 533, "logprob": 0.0, "special": false, "text": " y" }, { "id": 44, "logprob": 0.0, "special": false, "text": ":" }, { "id": 816, "logprob": 0.0, "special": false, "text": " x" }, { "id": 319, "logprob": 0.0, "special": false, "text": " *" }, { "id": 533, "logprob": 0.0, "special": false, "text": " y" }, { "id": 30, "logprob": 0.0, "special": false, "text": "," }, { "id": 498, "logprob": 0.0, "special": false, "text": " L" }, { "id": 27, "logprob": 0.0, "special": false, "text": ")" }, { "id": 203, "logprob": -0.11968994, "special": false, "text": "\n" }, { "id": 203, "logprob": 0.0, "special": false, "text": "\n" }, { "id": 589, "logprob": 0.0, "special": false, "text": "def" }, { "id": 3226, "logprob": 0.0, "special": false, "text": " ge" }, { "id": 21017, "logprob": 0.0, "special": false, "text": "ometric" } ] }, "generated_text": "\n return reduce(lambda x, y: x * y, L)\n\ndef geometric" }
0
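This snapshot (and the ones that follow) is exactly what ResponseComparator re-hydrates through Response(**data) when the corresponding test re-runs: token ids, texts, and the special flag must match the stored values exactly, while logprobs only need to agree within the rel_tol=0.2 used by eq_token and eq_prefill_token in the fixture module earlier. A short sketch of what that tolerance admits, using logprob magnitudes like the prefill values above (the specific numbers are illustrative, not extra snapshot data):

import math

# Per-token comparison tolerance used by the snapshot matcher (rel_tol=0.2).
stored, fresh = -9.09375, -9.0859375
print(math.isclose(stored, fresh, rel_tol=0.2))   # True: ~0.008 apart, well inside 20% of ~9.09
print(math.isclose(stored, -7.0, rel_tol=0.2))    # False: a ~2.09 drift exceeds 0.2 * 9.09375
print(math.isclose(0.0, -0.1, rel_tol=0.2))       # False: a stored logprob of 0.0 leaves almost no slack

The last case matters for seeded, near-greedy runs like this one: many generated tokens are recorded with logprob 0.0, so a regression that makes them merely near-certain instead of certain would still fail the comparison.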
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq.json
{ "generated_text": "\n return sum(L) / len(L)\n\n\ndef geometric_mean(L", "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 20, "seed": null, "prefill": [ { "id": 589, "text": "def", "logprob": null }, { "id": 3226, "text": " ge", "logprob": -9.0234375 }, { "id": 21017, "text": "ometric", "logprob": -9.0859375 }, { "id": 81, "text": "_", "logprob": -0.25878906 }, { "id": 6009, "text": "mean", "logprob": -2.2109375 }, { "id": 26, "text": "(", "logprob": -0.30371094 }, { "id": 62, "text": "L", "logprob": -5.6054688 }, { "id": 44, "text": ":", "logprob": -3.0722656 }, { "id": 1682, "text": " List", "logprob": -0.6879883 }, { "id": 77, "text": "[", "logprob": -0.38500977 }, { "id": 1808, "text": "float", "logprob": -0.984375 }, { "id": 10794, "text": "]):", "logprob": -2.5351562 } ], "tokens": [ { "id": 284, "text": "\n ", "logprob": -1.1738281, "special": false }, { "id": 442, "text": " return", "logprob": -0.95947266, "special": false }, { "id": 3632, "text": " sum", "logprob": -1.4199219, "special": false }, { "id": 26, "text": "(", "logprob": -0.085876465, "special": false }, { "id": 62, "text": "L", "logprob": -0.09875488, "special": false }, { "id": 27, "text": ")", "logprob": -0.30517578, "special": false }, { "id": 517, "text": " /", "logprob": -0.42089844, "special": false }, { "id": 2069, "text": " len", "logprob": -0.042053223, "special": false }, { "id": 26, "text": "(", "logprob": -0.0011806488, "special": false }, { "id": 62, "text": "L", "logprob": -0.0005259514, "special": false }, { "id": 27, "text": ")", "logprob": -0.0017633438, "special": false }, { "id": 478, "text": "\n\n", "logprob": -0.69189453, "special": false }, { "id": 203, "text": "\n", "logprob": -0.041870117, "special": false }, { "id": 589, "text": "def", "logprob": -0.27856445, "special": false }, { "id": 3226, "text": " ge", "logprob": -1.7255859, "special": false }, { "id": 21017, "text": "ometric", "logprob": -0.011291504, "special": false }, { "id": 81, "text": "_", "logprob": -0.008430481, "special": false }, { "id": 6009, "text": "mean", "logprob": -0.025787354, "special": false }, { "id": 26, "text": "(", "logprob": -0.073913574, "special": false }, { "id": 62, "text": "L", "logprob": -0.09967041, "special": false } ] } }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 3226, "logprob": -9.0234375, "text": " ge" }, { "id": 21017, "logprob": -9.0859375, "text": "ometric" }, { "id": 81, "logprob": -0.25927734, "text": "_" }, { "id": 6009, "logprob": -2.25, "text": "mean" }, { "id": 26, "logprob": -0.30126953, "text": "(" }, { "id": 62, "logprob": -5.7539062, "text": "L" }, { "id": 44, "logprob": -3.0878906, "text": ":" }, { "id": 1682, "logprob": -0.6845703, "text": " List" }, { "id": 77, "logprob": -0.3918457, "text": "[" }, { "id": 1808, "logprob": -0.8798828, "text": "float" }, { "id": 10794, "logprob": -2.4980469, "text": "]):" } ], "seed": null, "tokens": [ { "id": 284, "logprob": -1.1533203, "special": false, "text": "\n " }, { "id": 442, "logprob": -0.91796875, "special": false, "text": " return" }, { "id": 3632, "logprob": -1.3291016, "special": false, "text": " sum" }, { "id": 26, "logprob": -0.08062744, "special": false, "text": "(" }, { "id": 62, "logprob": -0.097717285, "special": false, "text": "L" }, { "id": 27, "logprob": -0.29003906, "special": false, "text": ")" }, { "id": 517, "logprob": -0.34958984, "special": false, "text": " /" }, { "id": 2069, "logprob": -0.03829956, "special": false, "text": " len" }, { "id": 26, "logprob": -0.0011987686, "special": false, "text": "(" }, { "id": 62, "logprob": -0.00050878525, "special": false, "text": "L" } ] }, "generated_text": "\n return sum(L) / len(L" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 3226, "logprob": -9.0234375, "text": " ge" }, { "id": 21017, "logprob": -9.0859375, "text": "ometric" }, { "id": 81, "logprob": -0.25878906, "text": "_" }, { "id": 6009, "logprob": -2.2109375, "text": "mean" }, { "id": 26, "logprob": -0.30371094, "text": "(" }, { "id": 62, "logprob": -5.6054688, "text": "L" }, { "id": 44, "logprob": -3.0722656, "text": ":" }, { "id": 1682, "logprob": -0.6879883, "text": " List" }, { "id": 77, "logprob": -0.38500977, "text": "[" }, { "id": 1808, "logprob": -0.984375, "text": "float" }, { "id": 10794, "logprob": -2.5351562, "text": "]):" } ], "seed": null, "tokens": [ { "id": 284, "logprob": -1.1738281, "special": false, "text": "\n " }, { "id": 442, "logprob": -0.9584961, "special": false, "text": " return" }, { "id": 3632, "logprob": -1.4169922, "special": false, "text": " sum" }, { "id": 26, "logprob": -0.085876465, "special": false, "text": "(" }, { "id": 62, "logprob": -0.0982666, "special": false, "text": "L" }, { "id": 27, "logprob": -0.3022461, "special": false, "text": ")" }, { "id": 517, "logprob": -0.40504883, "special": false, "text": " /" }, { "id": 2069, "logprob": -0.041656494, "special": false, "text": " len" }, { "id": 26, "logprob": -0.0011844635, "special": false, "text": "(" }, { "id": 62, "logprob": -0.0005264282, "special": false, "text": "L" } ] }, "generated_text": "\n return sum(L) / len(L" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 3226, "logprob": -9.0234375, "text": " ge" }, { "id": 21017, "logprob": -9.0859375, "text": "ometric" }, { "id": 81, "logprob": -0.25927734, "text": "_" }, { "id": 6009, "logprob": -2.25, "text": "mean" }, { "id": 26, "logprob": -0.30126953, "text": "(" }, { "id": 62, "logprob": -5.7539062, "text": "L" }, { "id": 44, 
"logprob": -3.0878906, "text": ":" }, { "id": 1682, "logprob": -0.6845703, "text": " List" }, { "id": 77, "logprob": -0.3918457, "text": "[" }, { "id": 1808, "logprob": -0.8798828, "text": "float" }, { "id": 10794, "logprob": -2.4980469, "text": "]):" } ], "seed": null, "tokens": [ { "id": 284, "logprob": -1.1533203, "special": false, "text": "\n " }, { "id": 442, "logprob": -0.9165039, "special": false, "text": " return" }, { "id": 3632, "logprob": -1.328125, "special": false, "text": " sum" }, { "id": 26, "logprob": -0.07946777, "special": false, "text": "(" }, { "id": 62, "logprob": -0.09820557, "special": false, "text": "L" }, { "id": 27, "logprob": -0.28930664, "special": false, "text": ")" }, { "id": 517, "logprob": -0.34592773, "special": false, "text": " /" }, { "id": 2069, "logprob": -0.038330078, "special": false, "text": " len" }, { "id": 26, "logprob": -0.0011940002, "special": false, "text": "(" }, { "id": 62, "logprob": -0.00050878525, "special": false, "text": "L" } ] }, "generated_text": "\n return sum(L) / len(L" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 3226, "logprob": -9.0234375, "text": " ge" }, { "id": 21017, "logprob": -9.0859375, "text": "ometric" }, { "id": 81, "logprob": -0.25927734, "text": "_" }, { "id": 6009, "logprob": -2.25, "text": "mean" }, { "id": 26, "logprob": -0.30126953, "text": "(" }, { "id": 62, "logprob": -5.7539062, "text": "L" }, { "id": 44, "logprob": -3.0878906, "text": ":" }, { "id": 1682, "logprob": -0.6845703, "text": " List" }, { "id": 77, "logprob": -0.3918457, "text": "[" }, { "id": 1808, "logprob": -0.8798828, "text": "float" }, { "id": 10794, "logprob": -2.4980469, "text": "]):" } ], "seed": null, "tokens": [ { "id": 284, "logprob": -1.1533203, "special": false, "text": "\n " }, { "id": 442, "logprob": -0.91259766, "special": false, "text": " return" }, { "id": 3632, "logprob": -1.3251953, "special": false, "text": " sum" }, { "id": 26, "logprob": -0.08062744, "special": false, "text": "(" }, { "id": 62, "logprob": -0.09906006, "special": false, "text": "L" }, { "id": 27, "logprob": -0.28979492, "special": false, "text": ")" }, { "id": 517, "logprob": -0.35958984, "special": false, "text": " /" }, { "id": 2069, "logprob": -0.038604736, "special": false, "text": " len" }, { "id": 26, "logprob": -0.0011901855, "special": false, "text": "(" }, { "id": 62, "logprob": -0.0005078316, "special": false, "text": "L" } ] }, "generated_text": "\n return sum(L) / len(L" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama/test_flash_llama_all_params.json
{ "details": { "best_of_sequences": null, "finish_reason": "stop_sequence", "generated_tokens": 5, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -8.6875, "text": "Test" }, { "id": 2009, "logprob": -11.5546875, "text": "request" } ], "seed": 0, "tokens": [ { "id": 5229, "logprob": -2.5683594, "special": false, "text": " failed" }, { "id": 29901, "logprob": -0.45336914, "special": false, "text": ":" }, { "id": 4829, "logprob": -1.8408203, "special": false, "text": " Error" }, { "id": 297, "logprob": -1.0556641, "special": false, "text": " in" }, { "id": 1243, "logprob": 0.0, "special": false, "text": " test" } ] }, "generated_text": "Test requestfailed: Error in test" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama/test_flash_llama_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -8.6875, "text": "Test" }, { "id": 2009, "logprob": -11.5546875, "text": "request" } ], "seed": null, "tokens": [ { "id": 363, "logprob": -1.5380859, "special": false, "text": " for" }, { "id": 847, "logprob": -2.5859375, "special": false, "text": " /" }, { "id": 2754, "logprob": -2.2695312, "special": false, "text": "api" }, { "id": 29914, "logprob": -0.03439331, "special": false, "text": "/" }, { "id": 29894, "logprob": -0.96240234, "special": false, "text": "v" }, { "id": 29896, "logprob": -0.36694336, "special": false, "text": "1" }, { "id": 29914, "logprob": -0.013114929, "special": false, "text": "/" }, { "id": 16418, "logprob": -3.1542969, "special": false, "text": "projects" }, { "id": 29914, "logprob": -0.43847656, "special": false, "text": "/" }, { "id": 29896, "logprob": -1.9433594, "special": false, "text": "1" } ] }, "generated_text": "for /api/v1/projects/1" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -8.6875, "text": "Test" }, { "id": 2009, "logprob": -11.5546875, "text": "request" } ], "seed": null, "tokens": [ { "id": 363, "logprob": -1.5322266, "special": false, "text": " for" }, { "id": 847, "logprob": -2.5585938, "special": false, "text": " /" }, { "id": 2754, "logprob": -2.265625, "special": false, "text": "api" }, { "id": 29914, "logprob": -0.034088135, "special": false, "text": "/" }, { "id": 29894, "logprob": -0.96240234, "special": false, "text": "v" }, { "id": 29896, "logprob": -0.36816406, "special": false, "text": "1" }, { "id": 29914, "logprob": -0.013191223, "special": false, "text": "/" }, { "id": 16418, "logprob": -3.15625, "special": false, "text": "projects" }, { "id": 29914, "logprob": -0.43774414, "special": false, "text": "/" }, { "id": 29896, "logprob": -1.9443359, "special": false, "text": "1" } ] }, "generated_text": "for /api/v1/projects/1" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -8.6875, "text": "Test" }, { "id": 2009, "logprob": -11.5546875, "text": "request" } ], "seed": null, "tokens": [ { "id": 363, "logprob": -1.5322266, "special": false, "text": " for" }, { "id": 847, "logprob": -2.5585938, "special": false, "text": " /" }, { "id": 2754, "logprob": -2.265625, "special": false, "text": "api" }, { "id": 29914, "logprob": -0.034088135, "special": false, "text": "/" }, { "id": 29894, "logprob": -0.96240234, "special": false, "text": "v" }, { "id": 29896, "logprob": -0.36816406, "special": false, "text": "1" }, { "id": 29914, "logprob": -0.013191223, "special": false, "text": "/" }, { "id": 16418, "logprob": -3.15625, "special": false, "text": "projects" }, { "id": 29914, "logprob": -0.43774414, "special": false, "text": "/" }, { "id": 29896, "logprob": -1.9443359, "special": false, "text": "1" } ] }, "generated_text": "for /api/v1/projects/1" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -8.6875, "text": "Test" }, { "id": 2009, "logprob": -11.5546875, "text": "request" } ], "seed": null, "tokens": [ { "id": 363, "logprob": -1.5322266, "special": false, "text": " for" }, { 
"id": 847, "logprob": -2.5585938, "special": false, "text": " /" }, { "id": 2754, "logprob": -2.265625, "special": false, "text": "api" }, { "id": 29914, "logprob": -0.034088135, "special": false, "text": "/" }, { "id": 29894, "logprob": -0.96240234, "special": false, "text": "v" }, { "id": 29896, "logprob": -0.36816406, "special": false, "text": "1" }, { "id": 29914, "logprob": -0.013191223, "special": false, "text": "/" }, { "id": 16418, "logprob": -3.15625, "special": false, "text": "projects" }, { "id": 29914, "logprob": -0.43774414, "special": false, "text": "/" }, { "id": 29896, "logprob": -1.9443359, "special": false, "text": "1" } ] }, "generated_text": "for /api/v1/projects/1" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama/test_flash_llama.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -8.6875, "text": "Test" }, { "id": 2009, "logprob": -11.5546875, "text": "request" } ], "seed": null, "tokens": [ { "id": 363, "logprob": -1.5380859, "special": false, "text": " for" }, { "id": 847, "logprob": -2.5917969, "special": false, "text": " /" }, { "id": 2754, "logprob": -2.2773438, "special": false, "text": "api" }, { "id": 29914, "logprob": -0.034362793, "special": false, "text": "/" }, { "id": 29894, "logprob": -0.96533203, "special": false, "text": "v" }, { "id": 29896, "logprob": -0.36669922, "special": false, "text": "1" }, { "id": 29914, "logprob": -0.013122559, "special": false, "text": "/" }, { "id": 16418, "logprob": -3.1503906, "special": false, "text": "projects" }, { "id": 29914, "logprob": -0.43652344, "special": false, "text": "/" }, { "id": 29896, "logprob": -1.9404297, "special": false, "text": "1" } ] }, "generated_text": "for /api/v1/projects/1" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_santacoder/test_flash_santacoder.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 563, "logprob": null, "text": "def" }, { "id": 942, "logprob": -5.1367188, "text": " print" }, { "id": 62, "logprob": -0.24450684, "text": "_" }, { "id": 7196, "logprob": -6.9609375, "text": "hello" } ], "seed": null, "tokens": [ { "id": 1241, "logprob": -0.9863281, "special": false, "text": "():" }, { "id": 258, "logprob": -0.21447754, "special": false, "text": "\n " }, { "id": 942, "logprob": -0.43701172, "special": false, "text": " print" }, { "id": 372, "logprob": -0.5361328, "special": false, "text": "(\"" }, { "id": 7371, "logprob": -0.44555664, "special": false, "text": "Hello" }, { "id": 9956, "logprob": -1.2412109, "special": false, "text": " World" }, { "id": 8657, "logprob": -0.7583008, "special": false, "text": "!\")" }, { "id": 185, "logprob": -0.76171875, "special": false, "text": "\n" }, { "id": 185, "logprob": -0.20837402, "special": false, "text": "\n" }, { "id": 1018, "logprob": -1.2470703, "special": false, "text": "print" } ] }, "generated_text": "():\n print(\"Hello World!\")\n\nprint" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_santacoder/test_flash_santacoder_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 563, "logprob": null, "text": "def" }, { "id": 942, "logprob": -5.1367188, "text": " print" }, { "id": 62, "logprob": -0.24450684, "text": "_" }, { "id": 7196, "logprob": -6.9609375, "text": "hello" } ], "seed": null, "tokens": [ { "id": 1241, "logprob": -0.9863281, "special": false, "text": "():" }, { "id": 258, "logprob": -0.21362305, "special": false, "text": "\n " }, { "id": 942, "logprob": -0.44360352, "special": false, "text": " print" }, { "id": 372, "logprob": -0.54248047, "special": false, "text": "(\"" }, { "id": 7371, "logprob": -0.44555664, "special": false, "text": "Hello" }, { "id": 9956, "logprob": -1.2441406, "special": false, "text": " World" }, { "id": 8657, "logprob": -0.75878906, "special": false, "text": "!\")" }, { "id": 185, "logprob": -0.76171875, "special": false, "text": "\n" }, { "id": 185, "logprob": -0.2084961, "special": false, "text": "\n" }, { "id": 1018, "logprob": -1.2460938, "special": false, "text": "print" } ] }, "generated_text": "():\n print(\"Hello World!\")\n\nprint" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 563, "logprob": null, "text": "def" }, { "id": 942, "logprob": -5.1367188, "text": " print" }, { "id": 62, "logprob": -0.24450684, "text": "_" }, { "id": 7196, "logprob": -6.9609375, "text": "hello" } ], "seed": null, "tokens": [ { "id": 1241, "logprob": -0.9863281, "special": false, "text": "():" }, { "id": 258, "logprob": -0.21362305, "special": false, "text": "\n " }, { "id": 942, "logprob": -0.44360352, "special": false, "text": " print" }, { "id": 372, "logprob": -0.54248047, "special": false, "text": "(\"" }, { "id": 7371, "logprob": -0.44555664, "special": false, "text": "Hello" }, { "id": 9956, "logprob": -1.2441406, "special": false, "text": " World" }, { "id": 8657, "logprob": -0.75878906, "special": false, "text": "!\")" }, { "id": 185, "logprob": -0.76171875, "special": false, "text": "\n" }, { "id": 185, "logprob": -0.2084961, "special": false, "text": "\n" }, { "id": 1018, "logprob": -1.2460938, "special": false, "text": "print" } ] }, "generated_text": "():\n print(\"Hello World!\")\n\nprint" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 563, "logprob": null, "text": "def" }, { "id": 942, "logprob": -5.1367188, "text": " print" }, { "id": 62, "logprob": -0.24450684, "text": "_" }, { "id": 7196, "logprob": -6.9609375, "text": "hello" } ], "seed": null, "tokens": [ { "id": 1241, "logprob": -0.9863281, "special": false, "text": "():" }, { "id": 258, "logprob": -0.21362305, "special": false, "text": "\n " }, { "id": 942, "logprob": -0.44360352, "special": false, "text": " print" }, { "id": 372, "logprob": -0.54248047, "special": false, "text": "(\"" }, { "id": 7371, "logprob": -0.44555664, "special": false, "text": "Hello" }, { "id": 9956, "logprob": -1.2441406, "special": false, "text": " World" }, { "id": 8657, "logprob": -0.75878906, "special": false, "text": "!\")" }, { "id": 185, "logprob": -0.76171875, "special": false, "text": "\n" }, { "id": 185, "logprob": -0.2084961, "special": false, "text": "\n" }, { "id": 1018, "logprob": -1.2460938, "special": false, "text": "print" } ] }, "generated_text": "():\n print(\"Hello World!\")\n\nprint" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 563, "logprob": 
null, "text": "def" }, { "id": 942, "logprob": -5.1367188, "text": " print" }, { "id": 62, "logprob": -0.24450684, "text": "_" }, { "id": 7196, "logprob": -6.9609375, "text": "hello" } ], "seed": null, "tokens": [ { "id": 1241, "logprob": -0.9863281, "special": false, "text": "():" }, { "id": 258, "logprob": -0.21362305, "special": false, "text": "\n " }, { "id": 942, "logprob": -0.44360352, "special": false, "text": " print" }, { "id": 372, "logprob": -0.54248047, "special": false, "text": "(\"" }, { "id": 7371, "logprob": -0.44555664, "special": false, "text": "Hello" }, { "id": 9956, "logprob": -1.2441406, "special": false, "text": " World" }, { "id": 8657, "logprob": -0.75878906, "special": false, "text": "!\")" }, { "id": 185, "logprob": -0.76171875, "special": false, "text": "\n" }, { "id": 185, "logprob": -0.2084961, "special": false, "text": "\n" }, { "id": 1018, "logprob": -1.2460938, "special": false, "text": "print" } ] }, "generated_text": "():\n print(\"Hello World!\")\n\nprint" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m/test_bloom_560m_all_params.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 15, "logprob": null, "text": "," }, { "id": 1669, "logprob": -5.4414062, "text": " il" }, { "id": 11580, "logprob": -2.3378906, "text": " faut" }, { "id": 3913, "logprob": -4.3554688, "text": " tout" }, { "id": 39261, "logprob": -2.9238281, "text": " d'abord" } ], "seed": 0, "tokens": [ { "id": 408, "logprob": -0.07891846, "special": false, "text": " que" }, { "id": 366, "logprob": -1.2939453, "special": false, "text": " la" }, { "id": 8769, "logprob": -0.3708496, "special": false, "text": " personne" }, { "id": 1479, "logprob": -2.2871094, "special": false, "text": " qui" }, { "id": 2997, "logprob": -0.8671875, "special": false, "text": " vous" }, { "id": 35977, "logprob": -1.5097656, "special": false, "text": " suit" }, { "id": 21558, "logprob": -0.07891846, "special": false, "text": " ait" }, { "id": 447, "logprob": -0.12695312, "special": false, "text": " un" }, { "id": 78606, "logprob": -2.21875, "special": false, "text": " profil" }, { "id": 3899, "logprob": -1.3535156, "special": false, "text": " bien" } ] }, "generated_text": "Pour déguster un ortolan, il faut tout d'abord que la personne qui vous suit ait un profil bien" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m/test_bloom_560m.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.5625, "text": " dég" }, { "id": 21543, "logprob": -0.14770508, "text": "uster" }, { "id": 447, "logprob": -1.9287109, "text": " un" }, { "id": 46341, "logprob": -15.4609375, "text": " ort" }, { "id": 35567, "logprob": -7.5585938, "text": "olan" }, { "id": 15, "logprob": -1.4003906, "text": "," }, { "id": 1669, "logprob": -1.5673828, "text": " il" }, { "id": 11580, "logprob": -0.94628906, "text": " faut" }, { "id": 3913, "logprob": -3.703125, "text": " tout" }, { "id": 39261, "logprob": -1.5732422, "text": " d'abord" } ], "seed": 0, "tokens": [ { "id": 578, "logprob": -1.6591797, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.4492188, "special": false, "text": " faire" }, { "id": 159570, "logprob": -6.6835938, "special": false, "text": " réch" }, { "id": 810, "logprob": 0.0, "special": false, "text": "au" }, { "id": 12736, "logprob": 0.0, "special": false, "text": "ffer" }, { "id": 1742, "logprob": -2.5175781, "special": false, "text": " au" }, { "id": 6105, "logprob": -2.0078125, "special": false, "text": " bain" }, { "id": 88254, "logprob": -0.12695312, "special": false, "text": "-mar" }, { "id": 641, "logprob": 0.0, "special": false, "text": "ie" }, { "id": 2940, "logprob": -3.5175781, "special": false, "text": " avec" } ] }, "generated_text": " le faire réchauffer au bain-marie avec" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m/test_bloom_560m_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.5625, "text": " dég" }, { "id": 21543, "logprob": -0.14770508, "text": "uster" }, { "id": 447, "logprob": -1.9287109, "text": " un" }, { "id": 46341, "logprob": -15.4609375, "text": " ort" }, { "id": 35567, "logprob": -7.5585938, "text": "olan" }, { "id": 15, "logprob": -1.4003906, "text": "," }, { "id": 1669, "logprob": -1.5673828, "text": " il" }, { "id": 11580, "logprob": -0.94628906, "text": " faut" }, { "id": 3913, "logprob": -3.703125, "text": " tout" }, { "id": 39261, "logprob": -1.5732422, "text": " d'abord" } ], "seed": null, "tokens": [ { "id": 578, "logprob": -1.7646484, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.6113281, "special": false, "text": " faire" }, { "id": 1767, "logprob": -1.5263672, "special": false, "text": " cu" }, { "id": 1273, "logprob": -0.00010049343, "special": false, "text": "ire" }, { "id": 1486, "logprob": -1.4707031, "special": false, "text": " dans" }, { "id": 283, "logprob": -1.2119141, "special": false, "text": " de" }, { "id": 40410, "logprob": -0.11883545, "special": false, "text": " l'eau" }, { "id": 20226, "logprob": -0.40844727, "special": false, "text": " bou" }, { "id": 172483, "logprob": -0.0037841797, "special": false, "text": "illante" }, { "id": 2805, "logprob": -1.0195312, "special": false, "text": " sal" } ] }, "generated_text": " le faire cuire dans de l'eau bouillante sal" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.53125, "text": " dég" }, { "id": 21543, "logprob": -0.14770508, "text": "uster" }, { "id": 447, "logprob": -1.9287109, "text": " un" }, { "id": 46341, "logprob": -15.4140625, "text": " ort" }, { "id": 35567, "logprob": -7.5234375, "text": "olan" }, { "id": 15, "logprob": -1.3613281, "text": "," }, { "id": 1669, "logprob": -1.5458984, "text": " il" }, { "id": 11580, "logprob": -0.94189453, "text": " faut" }, { "id": 3913, "logprob": -3.7011719, "text": " tout" }, { "id": 39261, "logprob": -1.5732422, "text": " d'abord" } ], "seed": null, "tokens": [ { "id": 578, "logprob": -1.7548828, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.578125, "special": false, "text": " faire" }, { "id": 1767, "logprob": -1.5117188, "special": false, "text": " cu" }, { "id": 1273, "logprob": -0.00010049343, "special": false, "text": "ire" }, { "id": 1486, "logprob": -1.4707031, "special": false, "text": " dans" }, { "id": 283, "logprob": -1.1982422, "special": false, "text": " de" }, { "id": 40410, "logprob": -0.11004639, "special": false, "text": " l'eau" }, { "id": 20226, "logprob": -0.4506836, "special": false, "text": " bou" }, { "id": 172483, "logprob": -0.003047943, "special": false, "text": "illante" }, { "id": 2805, "logprob": -1.0185547, "special": false, "text": " sal" } ] }, "generated_text": " le faire cuire dans de l'eau bouillante sal" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.53125, "text": " dég" }, { "id": 21543, "logprob": -0.14770508, "text": "uster" }, { "id": 447, "logprob": -1.9287109, "text": " un" }, { "id": 46341, "logprob": -15.4140625, "text": " ort" }, { "id": 35567, "logprob": -7.5234375, "text": "olan" }, { 
"id": 15, "logprob": -1.3613281, "text": "," }, { "id": 1669, "logprob": -1.5458984, "text": " il" }, { "id": 11580, "logprob": -0.94189453, "text": " faut" }, { "id": 3913, "logprob": -3.7011719, "text": " tout" }, { "id": 39261, "logprob": -1.5732422, "text": " d'abord" } ], "seed": null, "tokens": [ { "id": 578, "logprob": -1.7548828, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.578125, "special": false, "text": " faire" }, { "id": 1767, "logprob": -1.5117188, "special": false, "text": " cu" }, { "id": 1273, "logprob": -0.00010049343, "special": false, "text": "ire" }, { "id": 1486, "logprob": -1.4707031, "special": false, "text": " dans" }, { "id": 283, "logprob": -1.1982422, "special": false, "text": " de" }, { "id": 40410, "logprob": -0.11004639, "special": false, "text": " l'eau" }, { "id": 20226, "logprob": -0.4506836, "special": false, "text": " bou" }, { "id": 172483, "logprob": -0.003047943, "special": false, "text": "illante" }, { "id": 2805, "logprob": -1.0185547, "special": false, "text": " sal" } ] }, "generated_text": " le faire cuire dans de l'eau bouillante sal" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.53125, "text": " dég" }, { "id": 21543, "logprob": -0.14770508, "text": "uster" }, { "id": 447, "logprob": -1.9287109, "text": " un" }, { "id": 46341, "logprob": -15.4140625, "text": " ort" }, { "id": 35567, "logprob": -7.5234375, "text": "olan" }, { "id": 15, "logprob": -1.3613281, "text": "," }, { "id": 1669, "logprob": -1.5458984, "text": " il" }, { "id": 11580, "logprob": -0.94189453, "text": " faut" }, { "id": 3913, "logprob": -3.7011719, "text": " tout" }, { "id": 39261, "logprob": -1.5732422, "text": " d'abord" } ], "seed": null, "tokens": [ { "id": 578, "logprob": -1.7548828, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.578125, "special": false, "text": " faire" }, { "id": 1767, "logprob": -1.5117188, "special": false, "text": " cu" }, { "id": 1273, "logprob": -0.00010049343, "special": false, "text": "ire" }, { "id": 1486, "logprob": -1.4707031, "special": false, "text": " dans" }, { "id": 283, "logprob": -1.1982422, "special": false, "text": " de" }, { "id": 40410, "logprob": -0.11004639, "special": false, "text": " l'eau" }, { "id": 20226, "logprob": -0.4506836, "special": false, "text": " bou" }, { "id": 172483, "logprob": -0.003047943, "special": false, "text": "illante" }, { "id": 2805, "logprob": -1.0185547, "special": false, "text": " sal" } ] }, "generated_text": " le faire cuire dans de l'eau bouillante sal" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_t5_sharded/test_t5_sharded_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 7, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 3, "logprob": -0.7001953, "special": false, "text": " " }, { "id": 18, "logprob": -1.1943359, "special": false, "text": "-" }, { "id": 26937, "logprob": -1.2119141, "special": false, "text": "196" }, { "id": 3, "logprob": -1.2480469, "special": false, "text": " " }, { "id": 1956, "logprob": -0.33203125, "special": false, "text": "°" }, { "id": 254, "logprob": -0.19250488, "special": false, "text": "C" }, { "id": 1, "logprob": -0.030166626, "special": true, "text": "</s>" } ] }, "generated_text": "-196 °C" }, { "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 7, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 3, "logprob": -0.7001953, "special": false, "text": " " }, { "id": 18, "logprob": -1.1943359, "special": false, "text": "-" }, { "id": 26937, "logprob": -1.2119141, "special": false, "text": "196" }, { "id": 3, "logprob": -1.2480469, "special": false, "text": " " }, { "id": 1956, "logprob": -0.33203125, "special": false, "text": "°" }, { "id": 254, "logprob": -0.19250488, "special": false, "text": "C" }, { "id": 1, "logprob": -0.030166626, "special": true, "text": "</s>" } ] }, "generated_text": "-196 °C" }, { "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 7, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 3, "logprob": -0.7001953, "special": false, "text": " " }, { "id": 18, "logprob": -1.1943359, "special": false, "text": "-" }, { "id": 26937, "logprob": -1.2119141, "special": false, "text": "196" }, { "id": 3, "logprob": -1.2480469, "special": false, "text": " " }, { "id": 1956, "logprob": -0.33203125, "special": false, "text": "°" }, { "id": 254, "logprob": -0.19250488, "special": false, "text": "C" }, { "id": 1, "logprob": -0.030166626, "special": true, "text": "</s>" } ] }, "generated_text": "-196 °C" }, { "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 7, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 3, "logprob": -0.7001953, "special": false, "text": " " }, { "id": 18, "logprob": -1.1943359, "special": false, "text": "-" }, { "id": 26937, "logprob": -1.2099609, "special": false, "text": "196" }, { "id": 3, "logprob": -1.2451172, "special": false, "text": " " }, { "id": 1956, "logprob": -0.3322754, "special": false, "text": "°" }, { "id": 254, "logprob": -0.19213867, "special": false, "text": "C" }, { "id": 1, "logprob": -0.030151367, "special": true, "text": "</s>" } ] }, "generated_text": "-196 °C" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_t5_sharded/test_t5_sharded.json
{ "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 7, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 3, "logprob": -0.7001953, "special": false, "text": " " }, { "id": 18, "logprob": -1.1943359, "special": false, "text": "-" }, { "id": 26937, "logprob": -1.2099609, "special": false, "text": "196" }, { "id": 3, "logprob": -1.2451172, "special": false, "text": " " }, { "id": 1956, "logprob": -0.3322754, "special": false, "text": "°" }, { "id": 254, "logprob": -0.19213867, "special": false, "text": "C" }, { "id": 1, "logprob": -0.030151367, "special": true, "text": "</s>" } ] }, "generated_text": "-196 °C" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m_sharded/test_bloom_560m_sharded.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.5390625, "text": " dég" }, { "id": 21543, "logprob": -0.14758301, "text": "uster" }, { "id": 447, "logprob": -1.9296875, "text": " un" }, { "id": 46341, "logprob": -15.4453125, "text": " ort" }, { "id": 35567, "logprob": -7.59375, "text": "olan" }, { "id": 15, "logprob": -1.3994141, "text": "," }, { "id": 1669, "logprob": -1.578125, "text": " il" }, { "id": 11580, "logprob": -0.9453125, "text": " faut" }, { "id": 3913, "logprob": -3.7011719, "text": " tout" }, { "id": 39261, "logprob": -1.5732422, "text": " d'abord" } ], "seed": 0, "tokens": [ { "id": 578, "logprob": -1.6474609, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.5097656, "special": false, "text": " faire" }, { "id": 159570, "logprob": -6.65625, "special": false, "text": " réch" }, { "id": 810, "logprob": 0.0, "special": false, "text": "au" }, { "id": 12736, "logprob": 0.0, "special": false, "text": "ffer" }, { "id": 1742, "logprob": -2.5859375, "special": false, "text": " au" }, { "id": 6105, "logprob": -2.03125, "special": false, "text": " bain" }, { "id": 88254, "logprob": -0.12695312, "special": false, "text": "-mar" }, { "id": 641, "logprob": 0.0, "special": false, "text": "ie" }, { "id": 2940, "logprob": -3.5175781, "special": false, "text": " avec" } ] }, "generated_text": " le faire réchauffer au bain-marie avec" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m_sharded/test_bloom_560m_sharded_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.5390625, "text": " dég" }, { "id": 21543, "logprob": -0.14758301, "text": "uster" }, { "id": 447, "logprob": -1.9296875, "text": " un" }, { "id": 46341, "logprob": -15.4453125, "text": " ort" }, { "id": 35567, "logprob": -7.59375, "text": "olan" }, { "id": 15, "logprob": -1.3994141, "text": "," }, { "id": 1669, "logprob": -1.578125, "text": " il" }, { "id": 11580, "logprob": -0.9453125, "text": " faut" }, { "id": 3913, "logprob": -3.7011719, "text": " tout" }, { "id": 39261, "logprob": -1.5732422, "text": " d'abord" } ], "seed": null, "tokens": [ { "id": 578, "logprob": -1.7529297, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.6054688, "special": false, "text": " faire" }, { "id": 1767, "logprob": -1.5283203, "special": false, "text": " cu" }, { "id": 1273, "logprob": -0.00010049343, "special": false, "text": "ire" }, { "id": 1486, "logprob": -1.4716797, "special": false, "text": " dans" }, { "id": 283, "logprob": -1.1982422, "special": false, "text": " de" }, { "id": 40410, "logprob": -0.11853027, "special": false, "text": " l'eau" }, { "id": 20226, "logprob": -0.41210938, "special": false, "text": " bou" }, { "id": 172483, "logprob": -0.0037765503, "special": false, "text": "illante" }, { "id": 2805, "logprob": -1.0166016, "special": false, "text": " sal" } ] }, "generated_text": " le faire cuire dans de l'eau bouillante sal" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.515625, "text": " dég" }, { "id": 21543, "logprob": -0.1484375, "text": "uster" }, { "id": 447, "logprob": -1.9287109, "text": " un" }, { "id": 46341, "logprob": -15.34375, "text": " ort" }, { "id": 35567, "logprob": -7.515625, "text": "olan" }, { "id": 15, "logprob": -1.4199219, "text": "," }, { "id": 1669, "logprob": -1.5664062, "text": " il" }, { "id": 11580, "logprob": -0.94091797, "text": " faut" }, { "id": 3913, "logprob": -3.6660156, "text": " tout" }, { "id": 39261, "logprob": -1.7753906, "text": " d'abord" } ], "seed": null, "tokens": [ { "id": 578, "logprob": -1.7626953, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.5820312, "special": false, "text": " faire" }, { "id": 1767, "logprob": -1.5097656, "special": false, "text": " cu" }, { "id": 1273, "logprob": -9.393692e-05, "special": false, "text": "ire" }, { "id": 1486, "logprob": -1.5175781, "special": false, "text": " dans" }, { "id": 283, "logprob": -1.1982422, "special": false, "text": " de" }, { "id": 40410, "logprob": -0.11883545, "special": false, "text": " l'eau" }, { "id": 20226, "logprob": -0.4909668, "special": false, "text": " bou" }, { "id": 172483, "logprob": -0.003047943, "special": false, "text": "illante" }, { "id": 2805, "logprob": -1.0185547, "special": false, "text": " sal" } ] }, "generated_text": " le faire cuire dans de l'eau bouillante sal" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.515625, "text": " dég" }, { "id": 21543, "logprob": -0.1484375, "text": "uster" }, { "id": 447, "logprob": -1.9287109, "text": " un" }, { "id": 46341, "logprob": -15.34375, "text": " ort" }, { "id": 35567, "logprob": -7.515625, "text": "olan" }, { "id": 15, 
"logprob": -1.4199219, "text": "," }, { "id": 1669, "logprob": -1.5664062, "text": " il" }, { "id": 11580, "logprob": -0.94091797, "text": " faut" }, { "id": 3913, "logprob": -3.6660156, "text": " tout" }, { "id": 39261, "logprob": -1.7753906, "text": " d'abord" } ], "seed": null, "tokens": [ { "id": 578, "logprob": -1.7626953, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.5820312, "special": false, "text": " faire" }, { "id": 1767, "logprob": -1.5097656, "special": false, "text": " cu" }, { "id": 1273, "logprob": -9.393692e-05, "special": false, "text": "ire" }, { "id": 1486, "logprob": -1.5175781, "special": false, "text": " dans" }, { "id": 283, "logprob": -1.1982422, "special": false, "text": " de" }, { "id": 40410, "logprob": -0.11883545, "special": false, "text": " l'eau" }, { "id": 20226, "logprob": -0.4909668, "special": false, "text": " bou" }, { "id": 172483, "logprob": -0.003047943, "special": false, "text": "illante" }, { "id": 2805, "logprob": -1.0185547, "special": false, "text": " sal" } ] }, "generated_text": " le faire cuire dans de l'eau bouillante sal" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.515625, "text": " dég" }, { "id": 21543, "logprob": -0.1484375, "text": "uster" }, { "id": 447, "logprob": -1.9287109, "text": " un" }, { "id": 46341, "logprob": -15.34375, "text": " ort" }, { "id": 35567, "logprob": -7.515625, "text": "olan" }, { "id": 15, "logprob": -1.4199219, "text": "," }, { "id": 1669, "logprob": -1.5664062, "text": " il" }, { "id": 11580, "logprob": -0.94091797, "text": " faut" }, { "id": 3913, "logprob": -3.6660156, "text": " tout" }, { "id": 39261, "logprob": -1.7753906, "text": " d'abord" } ], "seed": null, "tokens": [ { "id": 578, "logprob": -1.7626953, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.5820312, "special": false, "text": " faire" }, { "id": 1767, "logprob": -1.5097656, "special": false, "text": " cu" }, { "id": 1273, "logprob": -9.393692e-05, "special": false, "text": "ire" }, { "id": 1486, "logprob": -1.5175781, "special": false, "text": " dans" }, { "id": 283, "logprob": -1.1982422, "special": false, "text": " de" }, { "id": 40410, "logprob": -0.11883545, "special": false, "text": " l'eau" }, { "id": 20226, "logprob": -0.4909668, "special": false, "text": " bou" }, { "id": 172483, "logprob": -0.003047943, "special": false, "text": "illante" }, { "id": 2805, "logprob": -1.0185547, "special": false, "text": " sal" } ] }, "generated_text": " le faire cuire dans de l'eau bouillante sal" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_mpt/test_mpt_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 17, "prefill": [ { "id": 1276, "logprob": null, "text": "What" }, { "id": 310, "logprob": -1.5117188, "text": " is" }, { "id": 18147, "logprob": -8.96875, "text": " Deep" }, { "id": 20727, "logprob": -1.953125, "text": " Learning" }, { "id": 32, "logprob": -0.94189453, "text": "?" } ], "seed": null, "tokens": [ { "id": 428, "logprob": -1.5830078, "special": false, "text": " -" }, { "id": 18147, "logprob": -3.3183594, "special": false, "text": " Deep" }, { "id": 20727, "logprob": -0.32617188, "special": false, "text": " Learning" }, { "id": 187, "logprob": -2.5742188, "special": false, "text": "\n" }, { "id": 30763, "logprob": -1.6015625, "special": false, "text": "Deep" }, { "id": 20727, "logprob": -0.69628906, "special": false, "text": " Learning" }, { "id": 310, "logprob": -0.67822266, "special": false, "text": " is" }, { "id": 247, "logprob": -0.5395508, "special": false, "text": " a" }, { "id": 749, "logprob": -1.8623047, "special": false, "text": " sub" }, { "id": 3423, "logprob": -0.6020508, "special": false, "text": "field" }, { "id": 273, "logprob": -0.0552063, "special": false, "text": " of" }, { "id": 5145, "logprob": -1.0742188, "special": false, "text": " machine" }, { "id": 4715, "logprob": -0.011405945, "special": false, "text": " learning" }, { "id": 326, "logprob": -0.9165039, "special": false, "text": " that" }, { "id": 4648, "logprob": -1.4501953, "special": false, "text": " uses" }, { "id": 13345, "logprob": -1.4960938, "special": false, "text": " artificial" }, { "id": 11454, "logprob": -0.02116394, "special": false, "text": " neural" } ] }, "generated_text": " - Deep Learning\nDeep Learning is a subfield of machine learning that uses artificial neural" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 17, "prefill": [ { "id": 1276, "logprob": null, "text": "What" }, { "id": 310, "logprob": -1.5, "text": " is" }, { "id": 18147, "logprob": -8.984375, "text": " Deep" }, { "id": 20727, "logprob": -1.96875, "text": " Learning" }, { "id": 32, "logprob": -0.93359375, "text": "?" 
} ], "seed": null, "tokens": [ { "id": 428, "logprob": -1.5800781, "special": false, "text": " -" }, { "id": 18147, "logprob": -3.3242188, "special": false, "text": " Deep" }, { "id": 20727, "logprob": -0.31835938, "special": false, "text": " Learning" }, { "id": 187, "logprob": -2.5644531, "special": false, "text": "\n" }, { "id": 30763, "logprob": -1.5957031, "special": false, "text": "Deep" }, { "id": 20727, "logprob": -0.69628906, "special": false, "text": " Learning" }, { "id": 310, "logprob": -0.68603516, "special": false, "text": " is" }, { "id": 247, "logprob": -0.5258789, "special": false, "text": " a" }, { "id": 749, "logprob": -1.859375, "special": false, "text": " sub" }, { "id": 3423, "logprob": -0.6166992, "special": false, "text": "field" }, { "id": 273, "logprob": -0.056762695, "special": false, "text": " of" }, { "id": 5145, "logprob": -1.0703125, "special": false, "text": " machine" }, { "id": 4715, "logprob": -0.011428833, "special": false, "text": " learning" }, { "id": 326, "logprob": -0.9213867, "special": false, "text": " that" }, { "id": 4648, "logprob": -1.4726562, "special": false, "text": " uses" }, { "id": 13345, "logprob": -1.5039062, "special": false, "text": " artificial" }, { "id": 11454, "logprob": -0.021652222, "special": false, "text": " neural" } ] }, "generated_text": " - Deep Learning\nDeep Learning is a subfield of machine learning that uses artificial neural" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 17, "prefill": [ { "id": 1276, "logprob": null, "text": "What" }, { "id": 310, "logprob": -1.5, "text": " is" }, { "id": 18147, "logprob": -8.984375, "text": " Deep" }, { "id": 20727, "logprob": -1.96875, "text": " Learning" }, { "id": 32, "logprob": -0.93359375, "text": "?" 
} ], "seed": null, "tokens": [ { "id": 428, "logprob": -1.5800781, "special": false, "text": " -" }, { "id": 18147, "logprob": -3.3242188, "special": false, "text": " Deep" }, { "id": 20727, "logprob": -0.31835938, "special": false, "text": " Learning" }, { "id": 187, "logprob": -2.5644531, "special": false, "text": "\n" }, { "id": 30763, "logprob": -1.5957031, "special": false, "text": "Deep" }, { "id": 20727, "logprob": -0.69628906, "special": false, "text": " Learning" }, { "id": 310, "logprob": -0.68603516, "special": false, "text": " is" }, { "id": 247, "logprob": -0.5258789, "special": false, "text": " a" }, { "id": 749, "logprob": -1.859375, "special": false, "text": " sub" }, { "id": 3423, "logprob": -0.6166992, "special": false, "text": "field" }, { "id": 273, "logprob": -0.056762695, "special": false, "text": " of" }, { "id": 5145, "logprob": -1.0703125, "special": false, "text": " machine" }, { "id": 4715, "logprob": -0.011428833, "special": false, "text": " learning" }, { "id": 326, "logprob": -0.9213867, "special": false, "text": " that" }, { "id": 4648, "logprob": -1.4726562, "special": false, "text": " uses" }, { "id": 13345, "logprob": -1.5039062, "special": false, "text": " artificial" }, { "id": 11454, "logprob": -0.021652222, "special": false, "text": " neural" } ] }, "generated_text": " - Deep Learning\nDeep Learning is a subfield of machine learning that uses artificial neural" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 17, "prefill": [ { "id": 1276, "logprob": null, "text": "What" }, { "id": 310, "logprob": -1.5, "text": " is" }, { "id": 18147, "logprob": -8.984375, "text": " Deep" }, { "id": 20727, "logprob": -1.96875, "text": " Learning" }, { "id": 32, "logprob": -0.93359375, "text": "?" } ], "seed": null, "tokens": [ { "id": 428, "logprob": -1.5800781, "special": false, "text": " -" }, { "id": 18147, "logprob": -3.3242188, "special": false, "text": " Deep" }, { "id": 20727, "logprob": -0.31835938, "special": false, "text": " Learning" }, { "id": 187, "logprob": -2.5644531, "special": false, "text": "\n" }, { "id": 30763, "logprob": -1.5957031, "special": false, "text": "Deep" }, { "id": 20727, "logprob": -0.69628906, "special": false, "text": " Learning" }, { "id": 310, "logprob": -0.68603516, "special": false, "text": " is" }, { "id": 247, "logprob": -0.5258789, "special": false, "text": " a" }, { "id": 749, "logprob": -1.859375, "special": false, "text": " sub" }, { "id": 3423, "logprob": -0.6166992, "special": false, "text": "field" }, { "id": 273, "logprob": -0.056762695, "special": false, "text": " of" }, { "id": 5145, "logprob": -1.0703125, "special": false, "text": " machine" }, { "id": 4715, "logprob": -0.011428833, "special": false, "text": " learning" }, { "id": 326, "logprob": -0.9213867, "special": false, "text": " that" }, { "id": 4648, "logprob": -1.4726562, "special": false, "text": " uses" }, { "id": 13345, "logprob": -1.5039062, "special": false, "text": " artificial" }, { "id": 11454, "logprob": -0.021652222, "special": false, "text": " neural" } ] }, "generated_text": " - Deep Learning\nDeep Learning is a subfield of machine learning that uses artificial neural" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_mpt/test_mpt.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 17, "prefill": [ { "id": 1276, "logprob": null, "text": "What" }, { "id": 310, "logprob": -1.5117188, "text": " is" }, { "id": 18147, "logprob": -8.96875, "text": " Deep" }, { "id": 20727, "logprob": -1.953125, "text": " Learning" }, { "id": 32, "logprob": -0.94189453, "text": "?" } ], "seed": null, "tokens": [ { "id": 428, "logprob": -1.5830078, "special": false, "text": " -" }, { "id": 18147, "logprob": -3.3105469, "special": false, "text": " Deep" }, { "id": 20727, "logprob": -0.3215332, "special": false, "text": " Learning" }, { "id": 187, "logprob": -2.5566406, "special": false, "text": "\n" }, { "id": 30763, "logprob": -1.6074219, "special": false, "text": "Deep" }, { "id": 20727, "logprob": -0.69628906, "special": false, "text": " Learning" }, { "id": 310, "logprob": -0.6923828, "special": false, "text": " is" }, { "id": 247, "logprob": -0.5263672, "special": false, "text": " a" }, { "id": 749, "logprob": -1.8544922, "special": false, "text": " sub" }, { "id": 3423, "logprob": -0.6118164, "special": false, "text": "field" }, { "id": 273, "logprob": -0.055877686, "special": false, "text": " of" }, { "id": 5145, "logprob": -1.0537109, "special": false, "text": " machine" }, { "id": 4715, "logprob": -0.0115737915, "special": false, "text": " learning" }, { "id": 326, "logprob": -0.9111328, "special": false, "text": " that" }, { "id": 4648, "logprob": -1.4589844, "special": false, "text": " uses" }, { "id": 13345, "logprob": -1.4853516, "special": false, "text": " artificial" }, { "id": 11454, "logprob": -0.021636963, "special": false, "text": " neural" } ] }, "generated_text": " - Deep Learning\nDeep Learning is a subfield of machine learning that uses artificial neural" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq_all_params.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -9.6015625, "text": "Test" }, { "id": 2009, "logprob": -9.6640625, "text": "request" } ], "seed": 0, "tokens": [ { "id": 29899, "logprob": -1.1640625, "special": false, "text": "-" }, { "id": 1454, "logprob": -0.07543945, "special": false, "text": "for" }, { "id": 29899, "logprob": 0.0, "special": false, "text": "-" }, { "id": 9342, "logprob": 0.0, "special": false, "text": "comment" }, { "id": 29901, "logprob": 0.0, "special": false, "text": ":" }, { "id": 396, "logprob": -0.2956543, "special": false, "text": " #" }, { "id": 29906, "logprob": -0.52734375, "special": false, "text": "2" }, { "id": 29900, "logprob": -0.6899414, "special": false, "text": "0" }, { "id": 29896, "logprob": 0.0, "special": false, "text": "1" }, { "id": 29946, "logprob": -1.5068359, "special": false, "text": "4" } ] }, "generated_text": "Test request-for-comment: #2014" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -9.59375, "text": "Test" }, { "id": 2009, "logprob": -9.6640625, "text": "request" } ], "seed": null, "tokens": [ { "id": 29918, "logprob": -2.3867188, "special": false, "text": "_" }, { "id": 5338, "logprob": -2.8183594, "special": false, "text": "uri" }, { "id": 13, "logprob": -1.6367188, "special": false, "text": "\n" }, { "id": 3057, "logprob": -1.0527344, "special": false, "text": "Test" }, { "id": 2009, "logprob": -0.6542969, "special": false, "text": " request" }, { "id": 29918, "logprob": -0.056121826, "special": false, "text": "_" }, { "id": 5338, "logprob": -0.01600647, "special": false, "text": "uri" }, { "id": 13, "logprob": -0.87939453, "special": false, "text": "\n" }, { "id": 3057, "logprob": -0.7529297, "special": false, "text": "Test" }, { "id": 2009, "logprob": -0.2980957, "special": false, "text": " request" } ] }, "generated_text": "_uri\nTest request_uri\nTest request" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -9.6015625, "text": "Test" }, { "id": 2009, "logprob": -9.671875, "text": "request" } ], "seed": null, "tokens": [ { "id": 29918, "logprob": -2.3828125, "special": false, "text": "_" }, { "id": 5338, "logprob": -2.8105469, "special": false, "text": "uri" }, { "id": 13, "logprob": -1.6396484, "special": false, "text": "\n" }, { "id": 3057, "logprob": -1.0546875, "special": false, "text": "Test" }, { "id": 2009, "logprob": -0.6513672, "special": false, "text": " request" }, { "id": 29918, "logprob": -0.056365967, "special": false, "text": "_" }, { "id": 5338, "logprob": -0.016082764, "special": false, "text": "uri" }, { "id": 13, "logprob": -0.87841797, "special": false, "text": "\n" }, { "id": 3057, "logprob": -0.7548828, "special": false, "text": "Test" }, { "id": 2009, "logprob": -0.29711914, "special": false, "text": " request" } ] }, "generated_text": "_uri\nTest request_uri\nTest request" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -9.6015625, "text": "Test" }, { "id": 2009, "logprob": -9.6640625, "text": "request" } ], "seed": null, "tokens": [ { "id": 29918, "logprob": -2.3828125, "special": false, "text": "_" }, { "id": 5338, "logprob": -2.828125, "special": false, "text": "uri" }, { "id": 13, "logprob": -1.6386719, "special": false, "text": "\n" }, { "id": 3057, "logprob": -1.0527344, "special": false, "text": "Test" }, { "id": 2009, "logprob": -0.6542969, "special": false, "text": " request" }, { "id": 29918, "logprob": -0.055877686, "special": false, "text": "_" }, { "id": 5338, "logprob": -0.016021729, "special": false, "text": "uri" }, { "id": 13, "logprob": -0.8769531, "special": false, "text": "\n" }, { "id": 3057, "logprob": -0.7583008, "special": false, "text": "Test" }, { "id": 2009, "logprob": -0.29833984, "special": false, "text": " request" } ] }, "generated_text": "_uri\nTest request_uri\nTest request" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -9.6015625, "text": "Test" }, { "id": 2009, "logprob": -9.671875, "text": "request" } ], "seed": null, "tokens": [ { "id": 29918, "logprob": -2.3847656, "special": false, "text": "_" }, { "id": 5338, "logprob": -2.8144531, "special": false, "text": "uri" }, { "id": 13, "logprob": -1.6396484, "special": false, "text": "\n" }, { "id": 3057, "logprob": -1.0527344, "special": false, "text": "Test" }, { "id": 2009, "logprob": -0.65478516, "special": false, "text": " request" }, { "id": 29918, "logprob": -0.056243896, "special": false, "text": "_" }, { "id": 5338, "logprob": -0.016143799, "special": false, "text": "uri" }, { "id": 13, "logprob": -0.8808594, "special": false, "text": "\n" }, { "id": 3057, "logprob": -0.75341797, "special": false, "text": "Test" }, { "id": 2009, "logprob": -0.2956543, "special": false, "text": " request" } ] }, "generated_text": "_uri\nTest request_uri\nTest request" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -9.6015625, "text": "Test" }, { "id": 2009, "logprob": -9.6640625, "text": "request" } ], "seed": null, "tokens": [ { 
"id": 29918, "logprob": -2.3769531, "special": false, "text": "_" }, { "id": 5338, "logprob": -2.8183594, "special": false, "text": "uri" }, { "id": 13, "logprob": -1.6396484, "special": false, "text": "\n" }, { "id": 3057, "logprob": -1.0546875, "special": false, "text": "Test" }, { "id": 2009, "logprob": -0.65478516, "special": false, "text": " request" }, { "id": 29918, "logprob": -0.05557251, "special": false, "text": "_" }, { "id": 5338, "logprob": -0.01612854, "special": false, "text": "uri" }, { "id": 13, "logprob": -0.8730469, "special": false, "text": "\n" }, { "id": 3057, "logprob": -0.7519531, "special": false, "text": "Test" }, { "id": 2009, "logprob": -0.29785156, "special": false, "text": " request" } ] }, "generated_text": "_uri\nTest request_uri\nTest request" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_neox_sharded/test_neox_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.0234375, "text": "What" }, { "id": 310, "logprob": -5.4179688, "text": " is" }, { "id": 247, "logprob": -2.1542969, "text": " a" }, { "id": 1167, "logprob": -5.359375, "text": " mem" }, { "id": 70, "logprob": -0.006038666, "text": "e" }, { "id": 13, "logprob": -7.328125, "text": "," }, { "id": 285, "logprob": -0.3173828, "text": " and" }, { "id": 752, "logprob": -2.0625, "text": " what" }, { "id": 434, "logprob": -5.7734375, "text": "'s" }, { "id": 253, "logprob": -0.74072266, "text": " the" }, { "id": 2892, "logprob": -6.5898438, "text": " history" }, { "id": 3212, "logprob": -2.2949219, "text": " behind" }, { "id": 436, "logprob": -11.40625, "text": " this" }, { "id": 3159, "logprob": -2.1113281, "text": " word" }, { "id": 32, "logprob": -0.008056641, "text": "?" }, { "id": 0, "logprob": -2.3300781, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.28125, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.5878906, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.5498047, "special": false, "text": " word" }, { "id": 346, "logprob": -0.04815674, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.002313614, "special": false, "text": "mem" }, { "id": 70, "logprob": -1.2636185e-05, "special": false, "text": "e" }, { "id": 3, "logprob": -0.0010147095, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.0859375, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12609863, "special": false, "text": " first" }, { "id": 908, "logprob": -0.016601562, "special": false, "text": " used" }, { "id": 275, "logprob": -0.38256836, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.0234375, "text": "What" }, { "id": 310, "logprob": -5.421875, "text": " is" }, { "id": 247, "logprob": -2.1640625, "text": " a" }, { "id": 1167, "logprob": -5.40625, "text": " mem" }, { "id": 70, "logprob": -0.005420685, "text": "e" }, { "id": 13, "logprob": -7.2226562, "text": "," }, { "id": 285, "logprob": -0.26879883, "text": " and" }, { "id": 752, "logprob": -2.1992188, "text": " what" }, { "id": 434, "logprob": -5.46875, "text": "'s" }, { "id": 253, "logprob": -0.8017578, "text": " the" }, { "id": 2892, "logprob": -6.6796875, "text": " history" }, { "id": 3212, "logprob": -2.1972656, "text": " behind" }, { "id": 436, "logprob": -11.4453125, "text": " this" }, { "id": 3159, "logprob": -2.1933594, "text": " word" }, { "id": 32, "logprob": -0.007858276, "text": "?" 
}, { "id": 0, "logprob": -2.328125, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.21875, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.6201172, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.546875, "special": false, "text": " word" }, { "id": 346, "logprob": -0.051879883, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.0020179749, "special": false, "text": "mem" }, { "id": 70, "logprob": -9.059906e-06, "special": false, "text": "e" }, { "id": 3, "logprob": -0.00096797943, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.07940674, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12182617, "special": false, "text": " first" }, { "id": 908, "logprob": -0.017227173, "special": false, "text": " used" }, { "id": 275, "logprob": -0.44482422, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.0234375, "text": "What" }, { "id": 310, "logprob": -5.421875, "text": " is" }, { "id": 247, "logprob": -2.1640625, "text": " a" }, { "id": 1167, "logprob": -5.40625, "text": " mem" }, { "id": 70, "logprob": -0.005420685, "text": "e" }, { "id": 13, "logprob": -7.2226562, "text": "," }, { "id": 285, "logprob": -0.26879883, "text": " and" }, { "id": 752, "logprob": -2.1992188, "text": " what" }, { "id": 434, "logprob": -5.46875, "text": "'s" }, { "id": 253, "logprob": -0.8017578, "text": " the" }, { "id": 2892, "logprob": -6.6796875, "text": " history" }, { "id": 3212, "logprob": -2.1972656, "text": " behind" }, { "id": 436, "logprob": -11.4453125, "text": " this" }, { "id": 3159, "logprob": -2.1933594, "text": " word" }, { "id": 32, "logprob": -0.007858276, "text": "?" 
}, { "id": 0, "logprob": -2.328125, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.21875, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.6201172, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.546875, "special": false, "text": " word" }, { "id": 346, "logprob": -0.051879883, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.0020179749, "special": false, "text": "mem" }, { "id": 70, "logprob": -9.059906e-06, "special": false, "text": "e" }, { "id": 3, "logprob": -0.00096797943, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.07940674, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12182617, "special": false, "text": " first" }, { "id": 908, "logprob": -0.017227173, "special": false, "text": " used" }, { "id": 275, "logprob": -0.44482422, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.0234375, "text": "What" }, { "id": 310, "logprob": -5.421875, "text": " is" }, { "id": 247, "logprob": -2.1640625, "text": " a" }, { "id": 1167, "logprob": -5.40625, "text": " mem" }, { "id": 70, "logprob": -0.005420685, "text": "e" }, { "id": 13, "logprob": -7.2226562, "text": "," }, { "id": 285, "logprob": -0.26879883, "text": " and" }, { "id": 752, "logprob": -2.1992188, "text": " what" }, { "id": 434, "logprob": -5.46875, "text": "'s" }, { "id": 253, "logprob": -0.8017578, "text": " the" }, { "id": 2892, "logprob": -6.6796875, "text": " history" }, { "id": 3212, "logprob": -2.1972656, "text": " behind" }, { "id": 436, "logprob": -11.4453125, "text": " this" }, { "id": 3159, "logprob": -2.1933594, "text": " word" }, { "id": 32, "logprob": -0.007858276, "text": "?" }, { "id": 0, "logprob": -2.328125, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.21875, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.6201172, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.546875, "special": false, "text": " word" }, { "id": 346, "logprob": -0.051879883, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.0020179749, "special": false, "text": "mem" }, { "id": 70, "logprob": -1.04904175e-05, "special": false, "text": "e" }, { "id": 3, "logprob": -0.0009560585, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.08557129, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12084961, "special": false, "text": " first" }, { "id": 908, "logprob": -0.01737976, "special": false, "text": " used" }, { "id": 275, "logprob": -0.4025879, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_neox_sharded/test_neox.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.0234375, "text": "What" }, { "id": 310, "logprob": -5.4179688, "text": " is" }, { "id": 247, "logprob": -2.1542969, "text": " a" }, { "id": 1167, "logprob": -5.359375, "text": " mem" }, { "id": 70, "logprob": -0.006038666, "text": "e" }, { "id": 13, "logprob": -7.328125, "text": "," }, { "id": 285, "logprob": -0.3173828, "text": " and" }, { "id": 752, "logprob": -2.0625, "text": " what" }, { "id": 434, "logprob": -5.7734375, "text": "'s" }, { "id": 253, "logprob": -0.74072266, "text": " the" }, { "id": 2892, "logprob": -6.5898438, "text": " history" }, { "id": 3212, "logprob": -2.2949219, "text": " behind" }, { "id": 436, "logprob": -11.40625, "text": " this" }, { "id": 3159, "logprob": -2.1113281, "text": " word" }, { "id": 32, "logprob": -0.008056641, "text": "?" }, { "id": 0, "logprob": -2.3300781, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.28125, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.5878906, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.5449219, "special": false, "text": " word" }, { "id": 346, "logprob": -0.05038452, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.002292633, "special": false, "text": "mem" }, { "id": 70, "logprob": -1.3828278e-05, "special": false, "text": "e" }, { "id": 3, "logprob": -0.0010242462, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.090270996, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12719727, "special": false, "text": " first" }, { "id": 908, "logprob": -0.016571045, "special": false, "text": " used" }, { "id": 275, "logprob": -0.43432617, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder/test_flash_starcoder.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 1459, "logprob": -5.6289062, "text": " print" }, { "id": 81, "logprob": -1.6005859, "text": "_" }, { "id": 7656, "logprob": -5.9921875, "text": "hello" } ], "seed": null, "tokens": [ { "id": 2262, "logprob": -0.7705078, "special": false, "text": "():" }, { "id": 284, "logprob": -0.2590332, "special": false, "text": "\n " }, { "id": 1459, "logprob": -0.39379883, "special": false, "text": " print" }, { "id": 440, "logprob": -0.61376953, "special": false, "text": "(\"" }, { "id": 8279, "logprob": -0.47338867, "special": false, "text": "Hello" }, { "id": 10896, "logprob": -1.5068359, "special": false, "text": " World" }, { "id": 657, "logprob": -0.80810547, "special": false, "text": "\")" }, { "id": 203, "logprob": -0.7397461, "special": false, "text": "\n" }, { "id": 203, "logprob": -0.35229492, "special": false, "text": "\n" }, { "id": 589, "logprob": -1.0371094, "special": false, "text": "def" } ] }, "generated_text": "():\n print(\"Hello World\")\n\ndef" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder/test_flash_starcoder_default_params.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 60, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 1459, "logprob": -5.6328125, "text": " print" }, { "id": 81, "logprob": -1.6035156, "text": "_" }, { "id": 7656, "logprob": -5.9882812, "text": "hello" } ], "seed": 0, "tokens": [ { "id": 2262, "logprob": -0.042999268, "special": false, "text": "():" }, { "id": 284, "logprob": 0.0, "special": false, "text": "\n " }, { "id": 1459, "logprob": 0.0, "special": false, "text": " print" }, { "id": 440, "logprob": 0.0, "special": false, "text": "(\"" }, { "id": 8279, "logprob": 0.0, "special": false, "text": "Hello" }, { "id": 10896, "logprob": -0.38549805, "special": false, "text": " World" }, { "id": 657, "logprob": -0.5229492, "special": false, "text": "\")" }, { "id": 203, "logprob": -0.10632324, "special": false, "text": "\n" }, { "id": 203, "logprob": 0.0, "special": false, "text": "\n" }, { "id": 589, "logprob": -0.20141602, "special": false, "text": "def" }, { "id": 1459, "logprob": 0.0, "special": false, "text": " print" }, { "id": 81, "logprob": 0.0, "special": false, "text": "_" }, { "id": 7656, "logprob": 0.0, "special": false, "text": "hello" }, { "id": 81, "logprob": 0.0, "special": false, "text": "_" }, { "id": 426, "logprob": 0.0, "special": false, "text": "name" }, { "id": 26, "logprob": 0.0, "special": false, "text": "(" }, { "id": 426, "logprob": 0.0, "special": false, "text": "name" }, { "id": 711, "logprob": 0.0, "special": false, "text": "):" }, { "id": 284, "logprob": 0.0, "special": false, "text": "\n " }, { "id": 1459, "logprob": 0.0, "special": false, "text": " print" }, { "id": 440, "logprob": -0.16027832, "special": false, "text": "(\"" }, { "id": 8279, "logprob": 0.0, "special": false, "text": "Hello" }, { "id": 313, "logprob": 0.0, "special": false, "text": " \"" }, { "id": 474, "logprob": 0.0, "special": false, "text": " +" }, { "id": 636, "logprob": 0.0, "special": false, "text": " name" }, { "id": 27, "logprob": 0.0, "special": false, "text": ")" }, { "id": 203, "logprob": 0.0, "special": false, "text": "\n" }, { "id": 203, "logprob": 0.0, "special": false, "text": "\n" }, { "id": 589, "logprob": 0.0, "special": false, "text": "def" }, { "id": 1459, "logprob": 0.0, "special": false, "text": " print" }, { "id": 81, "logprob": 0.0, "special": false, "text": "_" }, { "id": 7656, "logprob": 0.0, "special": false, "text": "hello" }, { "id": 81, "logprob": 0.0, "special": false, "text": "_" }, { "id": 426, "logprob": 0.0, "special": false, "text": "name" }, { "id": 81, "logprob": 0.0, "special": false, "text": "_" }, { "id": 381, "logprob": 0.0, "special": false, "text": "age" }, { "id": 26, "logprob": 0.0, "special": false, "text": "(" }, { "id": 426, "logprob": 0.0, "special": false, "text": "name" }, { "id": 30, "logprob": 0.0, "special": false, "text": "," }, { "id": 11442, "logprob": 0.0, "special": false, "text": " age" }, { "id": 711, "logprob": 0.0, "special": false, "text": "):" }, { "id": 284, "logprob": 0.0, "special": false, "text": "\n " }, { "id": 1459, "logprob": 0.0, "special": false, "text": " print" }, { "id": 440, "logprob": 0.0, "special": false, "text": "(\"" }, { "id": 8279, "logprob": 0.0, "special": false, "text": "Hello" }, { "id": 313, "logprob": 0.0, "special": false, "text": " \"" }, { "id": 474, "logprob": 0.0, "special": false, "text": " +" }, { "id": 636, "logprob": 0.0, "special": false, "text": " name" }, { "id": 474, "logprob": 0.0, "special": false, "text": " +" }, { "id": 313, 
"logprob": -0.6328125, "special": false, "text": " \"" }, { "id": 313, "logprob": -1.7011719, "special": false, "text": " \"" }, { "id": 474, "logprob": 0.0, "special": false, "text": " +" }, { "id": 596, "logprob": 0.0, "special": false, "text": " str" }, { "id": 26, "logprob": 0.0, "special": false, "text": "(" }, { "id": 381, "logprob": 0.0, "special": false, "text": "age" }, { "id": 490, "logprob": 0.0, "special": false, "text": "))" }, { "id": 203, "logprob": 0.0, "special": false, "text": "\n" }, { "id": 203, "logprob": 0.0, "special": false, "text": "\n" }, { "id": 589, "logprob": 0.0, "special": false, "text": "def" }, { "id": 1459, "logprob": 0.0, "special": false, "text": " print" } ] }, "generated_text": "():\n print(\"Hello World\")\n\ndef print_hello_name(name):\n print(\"Hello \" + name)\n\ndef print_hello_name_age(name, age):\n print(\"Hello \" + name + \" \" + str(age))\n\ndef print" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder/test_flash_starcoder_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 1459, "logprob": -5.6289062, "text": " print" }, { "id": 81, "logprob": -1.6005859, "text": "_" }, { "id": 7656, "logprob": -5.9921875, "text": "hello" } ], "seed": null, "tokens": [ { "id": 2262, "logprob": -0.7705078, "special": false, "text": "():" }, { "id": 284, "logprob": -0.2602539, "special": false, "text": "\n " }, { "id": 1459, "logprob": -0.39282227, "special": false, "text": " print" }, { "id": 440, "logprob": -0.6113281, "special": false, "text": "(\"" }, { "id": 8279, "logprob": -0.4765625, "special": false, "text": "Hello" }, { "id": 10896, "logprob": -1.5068359, "special": false, "text": " World" }, { "id": 657, "logprob": -0.8154297, "special": false, "text": "\")" }, { "id": 203, "logprob": -0.7319336, "special": false, "text": "\n" }, { "id": 203, "logprob": -0.35229492, "special": false, "text": "\n" }, { "id": 589, "logprob": -1.0380859, "special": false, "text": "def" } ] }, "generated_text": "():\n print(\"Hello World\")\n\ndef" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 1459, "logprob": -5.6289062, "text": " print" }, { "id": 81, "logprob": -1.6005859, "text": "_" }, { "id": 7656, "logprob": -5.9921875, "text": "hello" } ], "seed": null, "tokens": [ { "id": 2262, "logprob": -0.7705078, "special": false, "text": "():" }, { "id": 284, "logprob": -0.2602539, "special": false, "text": "\n " }, { "id": 1459, "logprob": -0.39282227, "special": false, "text": " print" }, { "id": 440, "logprob": -0.6113281, "special": false, "text": "(\"" }, { "id": 8279, "logprob": -0.4765625, "special": false, "text": "Hello" }, { "id": 10896, "logprob": -1.5068359, "special": false, "text": " World" }, { "id": 657, "logprob": -0.8154297, "special": false, "text": "\")" }, { "id": 203, "logprob": -0.7319336, "special": false, "text": "\n" }, { "id": 203, "logprob": -0.35229492, "special": false, "text": "\n" }, { "id": 589, "logprob": -1.0380859, "special": false, "text": "def" } ] }, "generated_text": "():\n print(\"Hello World\")\n\ndef" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 1459, "logprob": -5.6289062, "text": " print" }, { "id": 81, "logprob": -1.6005859, "text": "_" }, { "id": 7656, "logprob": -5.9921875, "text": "hello" } ], "seed": null, "tokens": [ { "id": 2262, "logprob": -0.7705078, "special": false, "text": "():" }, { "id": 284, "logprob": -0.2602539, "special": false, "text": "\n " }, { "id": 1459, "logprob": -0.39282227, "special": false, "text": " print" }, { "id": 440, "logprob": -0.6113281, "special": false, "text": "(\"" }, { "id": 8279, "logprob": -0.4765625, "special": false, "text": "Hello" }, { "id": 10896, "logprob": -1.5068359, "special": false, "text": " World" }, { "id": 657, "logprob": -0.8154297, "special": false, "text": "\")" }, { "id": 203, "logprob": -0.7319336, "special": false, "text": "\n" }, { "id": 203, "logprob": -0.35229492, "special": false, "text": "\n" }, { "id": 589, "logprob": -1.0380859, "special": false, "text": "def" } ] }, "generated_text": "():\n print(\"Hello World\")\n\ndef" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 
1459, "logprob": -5.6289062, "text": " print" }, { "id": 81, "logprob": -1.6005859, "text": "_" }, { "id": 7656, "logprob": -5.9921875, "text": "hello" } ], "seed": null, "tokens": [ { "id": 2262, "logprob": -0.7705078, "special": false, "text": "():" }, { "id": 284, "logprob": -0.2602539, "special": false, "text": "\n " }, { "id": 1459, "logprob": -0.39282227, "special": false, "text": " print" }, { "id": 440, "logprob": -0.6113281, "special": false, "text": "(\"" }, { "id": 8279, "logprob": -0.4765625, "special": false, "text": "Hello" }, { "id": 10896, "logprob": -1.5068359, "special": false, "text": " World" }, { "id": 657, "logprob": -0.8154297, "special": false, "text": "\")" }, { "id": 203, "logprob": -0.7319336, "special": false, "text": "\n" }, { "id": 203, "logprob": -0.35229492, "special": false, "text": "\n" }, { "id": 589, "logprob": -1.0380859, "special": false, "text": "def" } ] }, "generated_text": "():\n print(\"Hello World\")\n\ndef" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_neox_sharded/test_flash_neox.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.03125, "text": "What" }, { "id": 310, "logprob": -5.421875, "text": " is" }, { "id": 247, "logprob": -2.1601562, "text": " a" }, { "id": 1167, "logprob": -5.4609375, "text": " mem" }, { "id": 70, "logprob": -0.005657196, "text": "e" }, { "id": 13, "logprob": -7.28125, "text": "," }, { "id": 285, "logprob": -0.2980957, "text": " and" }, { "id": 752, "logprob": -2.1679688, "text": " what" }, { "id": 434, "logprob": -5.6210938, "text": "'s" }, { "id": 253, "logprob": -0.81103516, "text": " the" }, { "id": 2892, "logprob": -6.6640625, "text": " history" }, { "id": 3212, "logprob": -2.265625, "text": " behind" }, { "id": 436, "logprob": -11.5078125, "text": " this" }, { "id": 3159, "logprob": -2.1582031, "text": " word" }, { "id": 32, "logprob": -0.008720398, "text": "?" }, { "id": 0, "logprob": -2.4726562, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.265625, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.63183594, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.5390625, "special": false, "text": " word" }, { "id": 346, "logprob": -0.045684814, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.002090454, "special": false, "text": "mem" }, { "id": 70, "logprob": -1.3589859e-05, "special": false, "text": "e" }, { "id": 3, "logprob": -0.0009455681, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.088012695, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12585449, "special": false, "text": " first" }, { "id": 908, "logprob": -0.017196655, "special": false, "text": " used" }, { "id": 275, "logprob": -0.49731445, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_neox_sharded/test_flash_neox_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.03125, "text": "What" }, { "id": 310, "logprob": -5.421875, "text": " is" }, { "id": 247, "logprob": -2.1601562, "text": " a" }, { "id": 1167, "logprob": -5.4609375, "text": " mem" }, { "id": 70, "logprob": -0.005657196, "text": "e" }, { "id": 13, "logprob": -7.28125, "text": "," }, { "id": 285, "logprob": -0.2980957, "text": " and" }, { "id": 752, "logprob": -2.1679688, "text": " what" }, { "id": 434, "logprob": -5.6210938, "text": "'s" }, { "id": 253, "logprob": -0.81103516, "text": " the" }, { "id": 2892, "logprob": -6.6640625, "text": " history" }, { "id": 3212, "logprob": -2.265625, "text": " behind" }, { "id": 436, "logprob": -11.5078125, "text": " this" }, { "id": 3159, "logprob": -2.1582031, "text": " word" }, { "id": 32, "logprob": -0.008720398, "text": "?" }, { "id": 0, "logprob": -2.4726562, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.265625, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.63183594, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.5488281, "special": false, "text": " word" }, { "id": 346, "logprob": -0.045684814, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.00207901, "special": false, "text": "mem" }, { "id": 70, "logprob": -1.335144e-05, "special": false, "text": "e" }, { "id": 3, "logprob": -0.00097227097, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.0892334, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12463379, "special": false, "text": " first" }, { "id": 908, "logprob": -0.01737976, "special": false, "text": " used" }, { "id": 275, "logprob": -0.50341797, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.03125, "text": "What" }, { "id": 310, "logprob": -5.421875, "text": " is" }, { "id": 247, "logprob": -2.1601562, "text": " a" }, { "id": 1167, "logprob": -5.4609375, "text": " mem" }, { "id": 70, "logprob": -0.005657196, "text": "e" }, { "id": 13, "logprob": -7.28125, "text": "," }, { "id": 285, "logprob": -0.2980957, "text": " and" }, { "id": 752, "logprob": -2.1679688, "text": " what" }, { "id": 434, "logprob": -5.6210938, "text": "'s" }, { "id": 253, "logprob": -0.81103516, "text": " the" }, { "id": 2892, "logprob": -6.6640625, "text": " history" }, { "id": 3212, "logprob": -2.265625, "text": " behind" }, { "id": 436, "logprob": -11.5078125, "text": " this" }, { "id": 3159, "logprob": -2.1582031, "text": " word" }, { "id": 32, "logprob": -0.008720398, "text": "?" 
}, { "id": 0, "logprob": -2.4726562, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.265625, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.63183594, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.5488281, "special": false, "text": " word" }, { "id": 346, "logprob": -0.045684814, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.00207901, "special": false, "text": "mem" }, { "id": 70, "logprob": -1.335144e-05, "special": false, "text": "e" }, { "id": 3, "logprob": -0.00097227097, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.0892334, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12463379, "special": false, "text": " first" }, { "id": 908, "logprob": -0.01737976, "special": false, "text": " used" }, { "id": 275, "logprob": -0.50341797, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.03125, "text": "What" }, { "id": 310, "logprob": -5.421875, "text": " is" }, { "id": 247, "logprob": -2.1601562, "text": " a" }, { "id": 1167, "logprob": -5.4609375, "text": " mem" }, { "id": 70, "logprob": -0.005657196, "text": "e" }, { "id": 13, "logprob": -7.28125, "text": "," }, { "id": 285, "logprob": -0.2980957, "text": " and" }, { "id": 752, "logprob": -2.1679688, "text": " what" }, { "id": 434, "logprob": -5.6210938, "text": "'s" }, { "id": 253, "logprob": -0.81103516, "text": " the" }, { "id": 2892, "logprob": -6.6640625, "text": " history" }, { "id": 3212, "logprob": -2.265625, "text": " behind" }, { "id": 436, "logprob": -11.5078125, "text": " this" }, { "id": 3159, "logprob": -2.1582031, "text": " word" }, { "id": 32, "logprob": -0.008720398, "text": "?" 
}, { "id": 0, "logprob": -2.4726562, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.265625, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.63183594, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.5488281, "special": false, "text": " word" }, { "id": 346, "logprob": -0.045684814, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.00207901, "special": false, "text": "mem" }, { "id": 70, "logprob": -1.335144e-05, "special": false, "text": "e" }, { "id": 3, "logprob": -0.00097227097, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.0892334, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12463379, "special": false, "text": " first" }, { "id": 908, "logprob": -0.01737976, "special": false, "text": " used" }, { "id": 275, "logprob": -0.50341797, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.03125, "text": "What" }, { "id": 310, "logprob": -5.421875, "text": " is" }, { "id": 247, "logprob": -2.1601562, "text": " a" }, { "id": 1167, "logprob": -5.4609375, "text": " mem" }, { "id": 70, "logprob": -0.005657196, "text": "e" }, { "id": 13, "logprob": -7.28125, "text": "," }, { "id": 285, "logprob": -0.2980957, "text": " and" }, { "id": 752, "logprob": -2.1679688, "text": " what" }, { "id": 434, "logprob": -5.6210938, "text": "'s" }, { "id": 253, "logprob": -0.81103516, "text": " the" }, { "id": 2892, "logprob": -6.6640625, "text": " history" }, { "id": 3212, "logprob": -2.265625, "text": " behind" }, { "id": 436, "logprob": -11.5078125, "text": " this" }, { "id": 3159, "logprob": -2.1582031, "text": " word" }, { "id": 32, "logprob": -0.008720398, "text": "?" }, { "id": 0, "logprob": -2.4726562, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.265625, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.63183594, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.5488281, "special": false, "text": " word" }, { "id": 346, "logprob": -0.045684814, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.00207901, "special": false, "text": "mem" }, { "id": 70, "logprob": -1.335144e-05, "special": false, "text": "e" }, { "id": 3, "logprob": -0.00097227097, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.0892334, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12463379, "special": false, "text": " first" }, { "id": 908, "logprob": -0.01737976, "special": false, "text": " used" }, { "id": 275, "logprob": -0.50341797, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_neox/test_flash_neox.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|USER|>" }, { "id": 1276, "logprob": -4.5546875, "text": "What" }, { "id": 434, "logprob": -4.234375, "text": "'s" }, { "id": 634, "logprob": -5.1054688, "text": " your" }, { "id": 12315, "logprob": -9.953125, "text": " mood" }, { "id": 3063, "logprob": -4.0820312, "text": " today" }, { "id": 32, "logprob": -0.15148926, "text": "?" }, { "id": 50279, "logprob": -0.27026367, "text": "<|ASSISTANT|>" } ], "seed": null, "tokens": [ { "id": 42, "logprob": -0.88378906, "special": false, "text": "I" }, { "id": 1353, "logprob": -0.94921875, "special": false, "text": "'m" }, { "id": 417, "logprob": -2.2402344, "special": false, "text": " not" }, { "id": 2119, "logprob": -0.3725586, "special": false, "text": " sure" }, { "id": 13, "logprob": -1.078125, "special": false, "text": "," }, { "id": 534, "logprob": -0.67822266, "special": false, "text": " which" }, { "id": 310, "logprob": -1.3837891, "special": false, "text": " is" }, { "id": 253, "logprob": -1.7050781, "special": false, "text": " the" }, { "id": 1682, "logprob": -0.052001953, "special": false, "text": " best" }, { "id": 1039, "logprob": -2.0390625, "special": false, "text": " way" } ] }, "generated_text": "I'm not sure, which is the best way" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_neox/test_flash_neox_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|USER|>" }, { "id": 1276, "logprob": -4.5546875, "text": "What" }, { "id": 434, "logprob": -4.234375, "text": "'s" }, { "id": 634, "logprob": -5.21875, "text": " your" }, { "id": 12315, "logprob": -9.9375, "text": " mood" }, { "id": 3063, "logprob": -4.1015625, "text": " today" }, { "id": 32, "logprob": -0.15319824, "text": "?" }, { "id": 50279, "logprob": -0.2614746, "text": "<|ASSISTANT|>" } ], "seed": null, "tokens": [ { "id": 42, "logprob": -0.8886719, "special": false, "text": "I" }, { "id": 1353, "logprob": -0.98046875, "special": false, "text": "'m" }, { "id": 417, "logprob": -2.2265625, "special": false, "text": " not" }, { "id": 2119, "logprob": -0.3479004, "special": false, "text": " sure" }, { "id": 13, "logprob": -1.0117188, "special": false, "text": "," }, { "id": 534, "logprob": -0.67871094, "special": false, "text": " which" }, { "id": 310, "logprob": -1.421875, "special": false, "text": " is" }, { "id": 253, "logprob": -1.7382812, "special": false, "text": " the" }, { "id": 1682, "logprob": -0.051330566, "special": false, "text": " best" }, { "id": 1039, "logprob": -2.0390625, "special": false, "text": " way" } ] }, "generated_text": "I'm not sure, which is the best way" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|USER|>" }, { "id": 1276, "logprob": -4.5546875, "text": "What" }, { "id": 434, "logprob": -4.234375, "text": "'s" }, { "id": 634, "logprob": -5.1054688, "text": " your" }, { "id": 12315, "logprob": -9.953125, "text": " mood" }, { "id": 3063, "logprob": -4.0820312, "text": " today" }, { "id": 32, "logprob": -0.15148926, "text": "?" }, { "id": 50279, "logprob": -0.27026367, "text": "<|ASSISTANT|>" } ], "seed": null, "tokens": [ { "id": 42, "logprob": -0.88378906, "special": false, "text": "I" }, { "id": 1353, "logprob": -0.9819336, "special": false, "text": "'m" }, { "id": 417, "logprob": -2.2421875, "special": false, "text": " not" }, { "id": 2119, "logprob": -0.3474121, "special": false, "text": " sure" }, { "id": 13, "logprob": -1.078125, "special": false, "text": "," }, { "id": 534, "logprob": -0.69140625, "special": false, "text": " which" }, { "id": 310, "logprob": -1.4072266, "special": false, "text": " is" }, { "id": 253, "logprob": -1.7041016, "special": false, "text": " the" }, { "id": 1682, "logprob": -0.053375244, "special": false, "text": " best" }, { "id": 1039, "logprob": -2.0351562, "special": false, "text": " way" } ] }, "generated_text": "I'm not sure, which is the best way" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|USER|>" }, { "id": 1276, "logprob": -4.5546875, "text": "What" }, { "id": 434, "logprob": -4.234375, "text": "'s" }, { "id": 634, "logprob": -5.21875, "text": " your" }, { "id": 12315, "logprob": -9.9375, "text": " mood" }, { "id": 3063, "logprob": -4.1015625, "text": " today" }, { "id": 32, "logprob": -0.15319824, "text": "?" 
}, { "id": 50279, "logprob": -0.2614746, "text": "<|ASSISTANT|>" } ], "seed": null, "tokens": [ { "id": 42, "logprob": -0.8886719, "special": false, "text": "I" }, { "id": 1353, "logprob": -0.98046875, "special": false, "text": "'m" }, { "id": 417, "logprob": -2.2265625, "special": false, "text": " not" }, { "id": 2119, "logprob": -0.3479004, "special": false, "text": " sure" }, { "id": 13, "logprob": -1.0117188, "special": false, "text": "," }, { "id": 534, "logprob": -0.67871094, "special": false, "text": " which" }, { "id": 310, "logprob": -1.421875, "special": false, "text": " is" }, { "id": 253, "logprob": -1.7382812, "special": false, "text": " the" }, { "id": 1682, "logprob": -0.051330566, "special": false, "text": " best" }, { "id": 1039, "logprob": -2.0390625, "special": false, "text": " way" } ] }, "generated_text": "I'm not sure, which is the best way" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|USER|>" }, { "id": 1276, "logprob": -4.5546875, "text": "What" }, { "id": 434, "logprob": -4.234375, "text": "'s" }, { "id": 634, "logprob": -5.21875, "text": " your" }, { "id": 12315, "logprob": -9.9375, "text": " mood" }, { "id": 3063, "logprob": -4.1015625, "text": " today" }, { "id": 32, "logprob": -0.15319824, "text": "?" }, { "id": 50279, "logprob": -0.2614746, "text": "<|ASSISTANT|>" } ], "seed": null, "tokens": [ { "id": 42, "logprob": -0.8886719, "special": false, "text": "I" }, { "id": 1353, "logprob": -0.98046875, "special": false, "text": "'m" }, { "id": 417, "logprob": -2.2265625, "special": false, "text": " not" }, { "id": 2119, "logprob": -0.3479004, "special": false, "text": " sure" }, { "id": 13, "logprob": -1.0117188, "special": false, "text": "," }, { "id": 534, "logprob": -0.67871094, "special": false, "text": " which" }, { "id": 310, "logprob": -1.421875, "special": false, "text": " is" }, { "id": 253, "logprob": -1.7382812, "special": false, "text": " the" }, { "id": 1682, "logprob": -0.051330566, "special": false, "text": " best" }, { "id": 1039, "logprob": -2.0390625, "special": false, "text": " way" } ] }, "generated_text": "I'm not sure, which is the best way" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_neox/test_neox_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|USER|>" }, { "id": 1276, "logprob": -4.5546875, "text": "What" }, { "id": 434, "logprob": -4.1953125, "text": "'s" }, { "id": 634, "logprob": -5.125, "text": " your" }, { "id": 12315, "logprob": -9.8828125, "text": " mood" }, { "id": 3063, "logprob": -3.9980469, "text": " today" }, { "id": 32, "logprob": -0.14672852, "text": "?" }, { "id": 50279, "logprob": -0.26489258, "text": "<|ASSISTANT|>" } ], "seed": null, "tokens": [ { "id": 42, "logprob": -0.8618164, "special": false, "text": "I" }, { "id": 1353, "logprob": -0.9506836, "special": false, "text": "'m" }, { "id": 7016, "logprob": -2.1738281, "special": false, "text": " sorry" }, { "id": 13, "logprob": -0.0758667, "special": false, "text": "," }, { "id": 1394, "logprob": -0.9135742, "special": false, "text": "You" }, { "id": 452, "logprob": -1.1445312, "special": false, "text": " have" }, { "id": 247, "logprob": -1.4375, "special": false, "text": " a" }, { "id": 4327, "logprob": -1.1103516, "special": false, "text": " choice" }, { "id": 273, "logprob": -1.0058594, "special": false, "text": " of" }, { "id": 752, "logprob": -1.921875, "special": false, "text": " what" } ] }, "generated_text": "I'm sorry,You have a choice of what" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|USER|>" }, { "id": 1276, "logprob": -4.5546875, "text": "What" }, { "id": 434, "logprob": -4.1953125, "text": "'s" }, { "id": 634, "logprob": -5.125, "text": " your" }, { "id": 12315, "logprob": -9.8828125, "text": " mood" }, { "id": 3063, "logprob": -3.9980469, "text": " today" }, { "id": 32, "logprob": -0.14672852, "text": "?" }, { "id": 50279, "logprob": -0.26489258, "text": "<|ASSISTANT|>" } ], "seed": null, "tokens": [ { "id": 42, "logprob": -0.8618164, "special": false, "text": "I" }, { "id": 1353, "logprob": -0.9506836, "special": false, "text": "'m" }, { "id": 7016, "logprob": -2.1738281, "special": false, "text": " sorry" }, { "id": 13, "logprob": -0.0758667, "special": false, "text": "," }, { "id": 1394, "logprob": -0.9135742, "special": false, "text": "You" }, { "id": 452, "logprob": -1.1445312, "special": false, "text": " have" }, { "id": 247, "logprob": -1.4375, "special": false, "text": " a" }, { "id": 4327, "logprob": -1.1103516, "special": false, "text": " choice" }, { "id": 273, "logprob": -1.0058594, "special": false, "text": " of" }, { "id": 752, "logprob": -1.921875, "special": false, "text": " what" } ] }, "generated_text": "I'm sorry,You have a choice of what" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|USER|>" }, { "id": 1276, "logprob": -4.5546875, "text": "What" }, { "id": 434, "logprob": -4.1953125, "text": "'s" }, { "id": 634, "logprob": -5.125, "text": " your" }, { "id": 12315, "logprob": -9.8828125, "text": " mood" }, { "id": 3063, "logprob": -3.9980469, "text": " today" }, { "id": 32, "logprob": -0.14672852, "text": "?" 
}, { "id": 50279, "logprob": -0.26489258, "text": "<|ASSISTANT|>" } ], "seed": null, "tokens": [ { "id": 42, "logprob": -0.8618164, "special": false, "text": "I" }, { "id": 1353, "logprob": -0.9506836, "special": false, "text": "'m" }, { "id": 7016, "logprob": -2.1738281, "special": false, "text": " sorry" }, { "id": 13, "logprob": -0.0758667, "special": false, "text": "," }, { "id": 1394, "logprob": -0.9135742, "special": false, "text": "You" }, { "id": 452, "logprob": -1.1445312, "special": false, "text": " have" }, { "id": 247, "logprob": -1.4375, "special": false, "text": " a" }, { "id": 4327, "logprob": -1.1103516, "special": false, "text": " choice" }, { "id": 273, "logprob": -1.0058594, "special": false, "text": " of" }, { "id": 752, "logprob": -1.921875, "special": false, "text": " what" } ] }, "generated_text": "I'm sorry,You have a choice of what" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|USER|>" }, { "id": 1276, "logprob": -4.5546875, "text": "What" }, { "id": 434, "logprob": -4.1953125, "text": "'s" }, { "id": 634, "logprob": -5.125, "text": " your" }, { "id": 12315, "logprob": -9.8828125, "text": " mood" }, { "id": 3063, "logprob": -3.9980469, "text": " today" }, { "id": 32, "logprob": -0.14672852, "text": "?" }, { "id": 50279, "logprob": -0.26489258, "text": "<|ASSISTANT|>" } ], "seed": null, "tokens": [ { "id": 42, "logprob": -0.8618164, "special": false, "text": "I" }, { "id": 1353, "logprob": -0.9506836, "special": false, "text": "'m" }, { "id": 7016, "logprob": -2.1738281, "special": false, "text": " sorry" }, { "id": 13, "logprob": -0.0758667, "special": false, "text": "," }, { "id": 1394, "logprob": -0.9135742, "special": false, "text": "You" }, { "id": 452, "logprob": -1.1445312, "special": false, "text": " have" }, { "id": 247, "logprob": -1.4375, "special": false, "text": " a" }, { "id": 4327, "logprob": -1.1103516, "special": false, "text": " choice" }, { "id": 273, "logprob": -1.0058594, "special": false, "text": " of" }, { "id": 752, "logprob": -1.921875, "special": false, "text": " what" } ] }, "generated_text": "I'm sorry,You have a choice of what" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_neox/test_neox.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|USER|>" }, { "id": 1276, "logprob": -4.5546875, "text": "What" }, { "id": 434, "logprob": -4.1992188, "text": "'s" }, { "id": 634, "logprob": -5.125, "text": " your" }, { "id": 12315, "logprob": -9.8984375, "text": " mood" }, { "id": 3063, "logprob": -4.0976562, "text": " today" }, { "id": 32, "logprob": -0.14562988, "text": "?" }, { "id": 50279, "logprob": -0.26733398, "text": "<|ASSISTANT|>" } ], "seed": null, "tokens": [ { "id": 42, "logprob": -0.86279297, "special": false, "text": "I" }, { "id": 1353, "logprob": -0.94921875, "special": false, "text": "'m" }, { "id": 7016, "logprob": -2.1835938, "special": false, "text": " sorry" }, { "id": 13, "logprob": -0.074035645, "special": false, "text": "," }, { "id": 1394, "logprob": -0.86376953, "special": false, "text": "You" }, { "id": 452, "logprob": -1.2070312, "special": false, "text": " have" }, { "id": 247, "logprob": -1.4365234, "special": false, "text": " a" }, { "id": 4327, "logprob": -1.109375, "special": false, "text": " choice" }, { "id": 273, "logprob": -0.93408203, "special": false, "text": " of" }, { "id": 752, "logprob": -1.8808594, "special": false, "text": " what" } ] }, "generated_text": "I'm sorry,You have a choice of what" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_mt0_base/test_mt0_base_all_params.json
{ "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 9, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": 0, "tokens": [ { "id": 16017, "logprob": -0.30908203, "special": false, "text": " blue" }, { "id": 20495, "logprob": 0.0, "special": false, "text": " sky" }, { "id": 259, "logprob": -0.28271484, "special": false, "text": " " }, { "id": 15484, "logprob": -1.7929688, "special": false, "text": "appear" }, { "id": 345, "logprob": -0.8935547, "special": false, "text": "ed" }, { "id": 281, "logprob": 0.0, "special": false, "text": " in" }, { "id": 287, "logprob": 0.0, "special": false, "text": " the" }, { "id": 20495, "logprob": -0.32299805, "special": false, "text": " sky" }, { "id": 1, "logprob": 0.0, "special": true, "text": "</s>" } ] }, "generated_text": "Why is the sky blue?blue sky appeared in the sky" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_mt0_base/test_mt0_base_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 6, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 259, "logprob": -1.3798828, "special": false, "text": " " }, { "id": 39261, "logprob": -0.36328125, "special": false, "text": "Because" }, { "id": 609, "logprob": -1.0947266, "special": false, "text": " it" }, { "id": 339, "logprob": -0.8286133, "special": false, "text": " is" }, { "id": 16017, "logprob": -1.6826172, "special": false, "text": " blue" }, { "id": 1, "logprob": -0.7290039, "special": true, "text": "</s>" } ] }, "generated_text": "Because it is blue" }, { "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 6, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 259, "logprob": -1.3789062, "special": false, "text": " " }, { "id": 39261, "logprob": -0.36279297, "special": false, "text": "Because" }, { "id": 609, "logprob": -1.0966797, "special": false, "text": " it" }, { "id": 339, "logprob": -0.8276367, "special": false, "text": " is" }, { "id": 16017, "logprob": -1.6845703, "special": false, "text": " blue" }, { "id": 1, "logprob": -0.72753906, "special": true, "text": "</s>" } ] }, "generated_text": "Because it is blue" }, { "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 6, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 259, "logprob": -1.3789062, "special": false, "text": " " }, { "id": 39261, "logprob": -0.36279297, "special": false, "text": "Because" }, { "id": 609, "logprob": -1.0966797, "special": false, "text": " it" }, { "id": 339, "logprob": -0.8276367, "special": false, "text": " is" }, { "id": 16017, "logprob": -1.6845703, "special": false, "text": " blue" }, { "id": 1, "logprob": -0.72753906, "special": true, "text": "</s>" } ] }, "generated_text": "Because it is blue" }, { "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 6, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 259, "logprob": -1.3789062, "special": false, "text": " " }, { "id": 39261, "logprob": -0.36279297, "special": false, "text": "Because" }, { "id": 609, "logprob": -1.0966797, "special": false, "text": " it" }, { "id": 339, "logprob": -0.8276367, "special": false, "text": " is" }, { "id": 16017, "logprob": -1.6845703, "special": false, "text": " blue" }, { "id": 1, "logprob": -0.72753906, "special": true, "text": "</s>" } ] }, "generated_text": "Because it is blue" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_mt0_base/test_mt0_base.json
{ "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 5, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": 0, "tokens": [ { "id": 926, "logprob": -4.3554688, "special": false, "text": " To" }, { "id": 18295, "logprob": -7.7734375, "special": false, "text": " sell" }, { "id": 7868, "logprob": -3.9257812, "special": false, "text": " things" }, { "id": 260, "logprob": -2.4179688, "special": false, "text": "." }, { "id": 1, "logprob": 0.0, "special": true, "text": "</s>" } ] }, "generated_text": "To sell things." }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_falcon/test_flash_falcon.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50, "logprob": null, "text": "G" }, { "id": 330, "logprob": -5.96875, "text": "ir" }, { "id": 1622, "logprob": -5.6132812, "text": "af" }, { "id": 249, "logprob": -6.5039062, "text": "at" }, { "id": 1480, "logprob": -8.078125, "text": "ron" }, { "id": 304, "logprob": -2.3261719, "text": " is" }, { "id": 23866, "logprob": -9.59375, "text": " obsessed" }, { "id": 335, "logprob": -0.048339844, "text": " with" }, { "id": 26680, "logprob": -4.0, "text": " gir" }, { "id": 1903, "logprob": -0.07556152, "text": "aff" }, { "id": 255, "logprob": -0.0067749023, "text": "es" }, { "id": 23, "logprob": -1.546875, "text": "," }, { "id": 248, "logprob": -4.3320312, "text": " the" }, { "id": 758, "logprob": -3.734375, "text": " most" }, { "id": 21735, "logprob": -5.109375, "text": " glorious" }, { "id": 5985, "logprob": -2.09375, "text": " animal" }, { "id": 313, "logprob": -1.1835938, "text": " on" }, { "id": 248, "logprob": -0.77685547, "text": " the" }, { "id": 1936, "logprob": -2.3828125, "text": " face" }, { "id": 275, "logprob": -0.004432678, "text": " of" }, { "id": 414, "logprob": -1.9677734, "text": " this" }, { "id": 6490, "logprob": -2.046875, "text": " Earth" }, { "id": 25, "logprob": -0.28198242, "text": "." }, { "id": 401, "logprob": -7.9179688, "text": " G" }, { "id": 6013, "logprob": -2.2753906, "text": "ira" }, { "id": 694, "logprob": -0.6230469, "text": "ft" }, { "id": 1480, "logprob": -0.20874023, "text": "ron" }, { "id": 9369, "logprob": -4.5507812, "text": " believes" }, { "id": 455, "logprob": -4.5664062, "text": " all" }, { "id": 599, "logprob": -2.7402344, "text": " other" }, { "id": 5632, "logprob": -0.21948242, "text": " animals" }, { "id": 362, "logprob": -0.7675781, "text": " are" }, { "id": 23981, "logprob": -5.0, "text": " irrelevant" }, { "id": 635, "logprob": -4.234375, "text": " when" }, { "id": 4354, "logprob": -0.5131836, "text": " compared" }, { "id": 271, "logprob": -0.103637695, "text": " to" }, { "id": 248, "logprob": -0.58447266, "text": " the" }, { "id": 21735, "logprob": -3.6835938, "text": " glorious" }, { "id": 64398, "logprob": -1.8173828, "text": " majesty" }, { "id": 275, "logprob": -0.23510742, "text": " of" }, { "id": 248, "logprob": -0.35473633, "text": " the" }, { "id": 26680, "logprob": -0.24633789, "text": " gir" }, { "id": 23226, "logprob": -0.02960205, "text": "affe" }, { "id": 25, "logprob": -0.17333984, "text": "." }, { "id": 193, "logprob": -1.3935547, "text": "\n" }, { "id": 23626, "logprob": -10.0625, "text": "Daniel" }, { "id": 37, "logprob": -4.59375, "text": ":" }, { "id": 23090, "logprob": -6.9375, "text": " Hello" }, { "id": 23, "logprob": -0.99365234, "text": "," }, { "id": 29033, "logprob": -2.2324219, "text": " Gir" }, { "id": 1622, "logprob": -0.10809326, "text": "af" }, { "id": 249, "logprob": -0.042663574, "text": "at" }, { "id": 1480, "logprob": -0.0024776459, "text": "ron" }, { "id": 12, "logprob": -1.4277344, "text": "!" 
}, { "id": 193, "logprob": -1.1015625, "text": "\n" }, { "id": 50, "logprob": -0.05709839, "text": "G" }, { "id": 330, "logprob": -0.13208008, "text": "ir" }, { "id": 1622, "logprob": -0.0071487427, "text": "af" }, { "id": 249, "logprob": -0.008468628, "text": "at" }, { "id": 1480, "logprob": -0.00068998337, "text": "ron" }, { "id": 37, "logprob": -0.0074691772, "text": ":" } ], "seed": null, "tokens": [ { "id": 23090, "logprob": -1.8251953, "special": false, "text": " Hello" }, { "id": 23, "logprob": -0.3173828, "special": false, "text": "," }, { "id": 8156, "logprob": -0.23803711, "special": false, "text": " Daniel" }, { "id": 12, "logprob": -0.56933594, "special": false, "text": "!" }, { "id": 193, "logprob": -0.61279297, "special": false, "text": "\n" }, { "id": 23626, "logprob": -0.41967773, "special": false, "text": "Daniel" }, { "id": 37, "logprob": -0.0023403168, "special": false, "text": ":" }, { "id": 1634, "logprob": -2.0605469, "special": false, "text": " What" }, { "id": 18, "logprob": -1.5292969, "special": false, "text": "'" }, { "id": 94, "logprob": -0.007904053, "special": false, "text": "s" } ] }, "generated_text": " Hello, Daniel!\nDaniel: What's" }
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_falcon/test_flash_falcon_load.json
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50, "logprob": null, "text": "G" }, { "id": 330, "logprob": -5.96875, "text": "ir" }, { "id": 1622, "logprob": -5.6171875, "text": "af" }, { "id": 249, "logprob": -6.5039062, "text": "at" }, { "id": 1480, "logprob": -8.0703125, "text": "ron" }, { "id": 304, "logprob": -2.328125, "text": " is" }, { "id": 23866, "logprob": -9.59375, "text": " obsessed" }, { "id": 335, "logprob": -0.04837036, "text": " with" }, { "id": 26680, "logprob": -3.9960938, "text": " gir" }, { "id": 1903, "logprob": -0.07525635, "text": "aff" }, { "id": 255, "logprob": -0.006790161, "text": "es" }, { "id": 23, "logprob": -1.546875, "text": "," }, { "id": 248, "logprob": -4.3320312, "text": " the" }, { "id": 758, "logprob": -3.7363281, "text": " most" }, { "id": 21735, "logprob": -5.109375, "text": " glorious" }, { "id": 5985, "logprob": -2.09375, "text": " animal" }, { "id": 313, "logprob": -1.1845703, "text": " on" }, { "id": 248, "logprob": -0.77734375, "text": " the" }, { "id": 1936, "logprob": -2.3828125, "text": " face" }, { "id": 275, "logprob": -0.0044403076, "text": " of" }, { "id": 414, "logprob": -1.9667969, "text": " this" }, { "id": 6490, "logprob": -2.0449219, "text": " Earth" }, { "id": 25, "logprob": -0.28198242, "text": "." }, { "id": 401, "logprob": -7.921875, "text": " G" }, { "id": 6013, "logprob": -2.2714844, "text": "ira" }, { "id": 694, "logprob": -0.62353516, "text": "ft" }, { "id": 1480, "logprob": -0.20947266, "text": "ron" }, { "id": 9369, "logprob": -4.5507812, "text": " believes" }, { "id": 455, "logprob": -4.5625, "text": " all" }, { "id": 599, "logprob": -2.7402344, "text": " other" }, { "id": 5632, "logprob": -0.21899414, "text": " animals" }, { "id": 362, "logprob": -0.76708984, "text": " are" }, { "id": 23981, "logprob": -4.9960938, "text": " irrelevant" }, { "id": 635, "logprob": -4.234375, "text": " when" }, { "id": 4354, "logprob": -0.5131836, "text": " compared" }, { "id": 271, "logprob": -0.103515625, "text": " to" }, { "id": 248, "logprob": -0.58447266, "text": " the" }, { "id": 21735, "logprob": -3.6796875, "text": " glorious" }, { "id": 64398, "logprob": -1.8222656, "text": " majesty" }, { "id": 275, "logprob": -0.23583984, "text": " of" }, { "id": 248, "logprob": -0.3544922, "text": " the" }, { "id": 26680, "logprob": -0.24609375, "text": " gir" }, { "id": 23226, "logprob": -0.02960205, "text": "affe" }, { "id": 25, "logprob": -0.17358398, "text": "." }, { "id": 193, "logprob": -1.3925781, "text": "\n" }, { "id": 23626, "logprob": -10.0625, "text": "Daniel" }, { "id": 37, "logprob": -4.5898438, "text": ":" }, { "id": 23090, "logprob": -6.9375, "text": " Hello" }, { "id": 23, "logprob": -0.99365234, "text": "," }, { "id": 29033, "logprob": -2.2304688, "text": " Gir" }, { "id": 1622, "logprob": -0.107788086, "text": "af" }, { "id": 249, "logprob": -0.04257202, "text": "at" }, { "id": 1480, "logprob": -0.0024871826, "text": "ron" }, { "id": 12, "logprob": -1.4277344, "text": "!" 
}, { "id": 193, "logprob": -1.1005859, "text": "\n" }, { "id": 50, "logprob": -0.056915283, "text": "G" }, { "id": 330, "logprob": -0.1315918, "text": "ir" }, { "id": 1622, "logprob": -0.0071105957, "text": "af" }, { "id": 249, "logprob": -0.008453369, "text": "at" }, { "id": 1480, "logprob": -0.0006928444, "text": "ron" }, { "id": 37, "logprob": -0.0074920654, "text": ":" } ], "seed": null, "tokens": [ { "id": 23090, "logprob": -1.828125, "special": false, "text": " Hello" }, { "id": 23, "logprob": -0.3178711, "special": false, "text": "," }, { "id": 8156, "logprob": -0.23925781, "special": false, "text": " Daniel" }, { "id": 12, "logprob": -0.5698242, "special": false, "text": "!" }, { "id": 193, "logprob": -0.61279297, "special": false, "text": "\n" }, { "id": 23626, "logprob": -0.4177246, "special": false, "text": "Daniel" }, { "id": 37, "logprob": -0.0023345947, "special": false, "text": ":" }, { "id": 1634, "logprob": -2.0605469, "special": false, "text": " What" }, { "id": 18, "logprob": -1.5283203, "special": false, "text": "'" }, { "id": 94, "logprob": -0.007965088, "special": false, "text": "s" } ] }, "generated_text": " Hello, Daniel!\nDaniel: What's" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50, "logprob": null, "text": "G" }, { "id": 330, "logprob": -5.96875, "text": "ir" }, { "id": 1622, "logprob": -5.6171875, "text": "af" }, { "id": 249, "logprob": -6.5, "text": "at" }, { "id": 1480, "logprob": -8.0703125, "text": "ron" }, { "id": 304, "logprob": -2.328125, "text": " is" }, { "id": 23866, "logprob": -9.59375, "text": " obsessed" }, { "id": 335, "logprob": -0.048339844, "text": " with" }, { "id": 26680, "logprob": -4.0, "text": " gir" }, { "id": 1903, "logprob": -0.07531738, "text": "aff" }, { "id": 255, "logprob": -0.006793976, "text": "es" }, { "id": 23, "logprob": -1.5478516, "text": "," }, { "id": 248, "logprob": -4.3320312, "text": " the" }, { "id": 758, "logprob": -3.7363281, "text": " most" }, { "id": 21735, "logprob": -5.1132812, "text": " glorious" }, { "id": 5985, "logprob": -2.0957031, "text": " animal" }, { "id": 313, "logprob": -1.1835938, "text": " on" }, { "id": 248, "logprob": -0.77685547, "text": " the" }, { "id": 1936, "logprob": -2.3808594, "text": " face" }, { "id": 275, "logprob": -0.004436493, "text": " of" }, { "id": 414, "logprob": -1.9638672, "text": " this" }, { "id": 6490, "logprob": -2.0449219, "text": " Earth" }, { "id": 25, "logprob": -0.28198242, "text": "." 
}, { "id": 401, "logprob": -7.9179688, "text": " G" }, { "id": 6013, "logprob": -2.2734375, "text": "ira" }, { "id": 694, "logprob": -0.6230469, "text": "ft" }, { "id": 1480, "logprob": -0.20947266, "text": "ron" }, { "id": 9369, "logprob": -4.5546875, "text": " believes" }, { "id": 455, "logprob": -4.5703125, "text": " all" }, { "id": 599, "logprob": -2.7382812, "text": " other" }, { "id": 5632, "logprob": -0.21948242, "text": " animals" }, { "id": 362, "logprob": -0.7661133, "text": " are" }, { "id": 23981, "logprob": -4.9960938, "text": " irrelevant" }, { "id": 635, "logprob": -4.234375, "text": " when" }, { "id": 4354, "logprob": -0.5131836, "text": " compared" }, { "id": 271, "logprob": -0.10357666, "text": " to" }, { "id": 248, "logprob": -0.58447266, "text": " the" }, { "id": 21735, "logprob": -3.6816406, "text": " glorious" }, { "id": 64398, "logprob": -1.8203125, "text": " majesty" }, { "id": 275, "logprob": -0.23583984, "text": " of" }, { "id": 248, "logprob": -0.35473633, "text": " the" }, { "id": 26680, "logprob": -0.24572754, "text": " gir" }, { "id": 23226, "logprob": -0.029586792, "text": "affe" }, { "id": 25, "logprob": -0.17346191, "text": "." }, { "id": 193, "logprob": -1.3945312, "text": "\n" }, { "id": 23626, "logprob": -10.0625, "text": "Daniel" }, { "id": 37, "logprob": -4.59375, "text": ":" }, { "id": 23090, "logprob": -6.9375, "text": " Hello" }, { "id": 23, "logprob": -0.99316406, "text": "," }, { "id": 29033, "logprob": -2.2324219, "text": " Gir" }, { "id": 1622, "logprob": -0.10797119, "text": "af" }, { "id": 249, "logprob": -0.04248047, "text": "at" }, { "id": 1480, "logprob": -0.0024814606, "text": "ron" }, { "id": 12, "logprob": -1.4277344, "text": "!" }, { "id": 193, "logprob": -1.1005859, "text": "\n" }, { "id": 50, "logprob": -0.056884766, "text": "G" }, { "id": 330, "logprob": -0.1315918, "text": "ir" }, { "id": 1622, "logprob": -0.007095337, "text": "af" }, { "id": 249, "logprob": -0.00844574, "text": "at" }, { "id": 1480, "logprob": -0.00068998337, "text": "ron" }, { "id": 37, "logprob": -0.0074768066, "text": ":" } ], "seed": null, "tokens": [ { "id": 23090, "logprob": -1.8251953, "special": false, "text": " Hello" }, { "id": 23, "logprob": -0.31762695, "special": false, "text": "," }, { "id": 8156, "logprob": -0.2388916, "special": false, "text": " Daniel" }, { "id": 12, "logprob": -0.5698242, "special": false, "text": "!" 
}, { "id": 193, "logprob": -0.6152344, "special": false, "text": "\n" }, { "id": 23626, "logprob": -0.42211914, "special": false, "text": "Daniel" }, { "id": 37, "logprob": -0.002336502, "special": false, "text": ":" }, { "id": 1634, "logprob": -2.0605469, "special": false, "text": " What" }, { "id": 18, "logprob": -1.5292969, "special": false, "text": "'" }, { "id": 94, "logprob": -0.007926941, "special": false, "text": "s" } ] }, "generated_text": " Hello, Daniel!\nDaniel: What's" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50, "logprob": null, "text": "G" }, { "id": 330, "logprob": -5.96875, "text": "ir" }, { "id": 1622, "logprob": -5.6171875, "text": "af" }, { "id": 249, "logprob": -6.5, "text": "at" }, { "id": 1480, "logprob": -8.0703125, "text": "ron" }, { "id": 304, "logprob": -2.328125, "text": " is" }, { "id": 23866, "logprob": -9.59375, "text": " obsessed" }, { "id": 335, "logprob": -0.048339844, "text": " with" }, { "id": 26680, "logprob": -4.0, "text": " gir" }, { "id": 1903, "logprob": -0.07531738, "text": "aff" }, { "id": 255, "logprob": -0.006793976, "text": "es" }, { "id": 23, "logprob": -1.5478516, "text": "," }, { "id": 248, "logprob": -4.3320312, "text": " the" }, { "id": 758, "logprob": -3.7363281, "text": " most" }, { "id": 21735, "logprob": -5.1132812, "text": " glorious" }, { "id": 5985, "logprob": -2.0957031, "text": " animal" }, { "id": 313, "logprob": -1.1835938, "text": " on" }, { "id": 248, "logprob": -0.77685547, "text": " the" }, { "id": 1936, "logprob": -2.3808594, "text": " face" }, { "id": 275, "logprob": -0.004436493, "text": " of" }, { "id": 414, "logprob": -1.9638672, "text": " this" }, { "id": 6490, "logprob": -2.0449219, "text": " Earth" }, { "id": 25, "logprob": -0.28198242, "text": "." }, { "id": 401, "logprob": -7.9179688, "text": " G" }, { "id": 6013, "logprob": -2.2734375, "text": "ira" }, { "id": 694, "logprob": -0.6230469, "text": "ft" }, { "id": 1480, "logprob": -0.20947266, "text": "ron" }, { "id": 9369, "logprob": -4.5546875, "text": " believes" }, { "id": 455, "logprob": -4.5703125, "text": " all" }, { "id": 599, "logprob": -2.7382812, "text": " other" }, { "id": 5632, "logprob": -0.21948242, "text": " animals" }, { "id": 362, "logprob": -0.7661133, "text": " are" }, { "id": 23981, "logprob": -4.9960938, "text": " irrelevant" }, { "id": 635, "logprob": -4.234375, "text": " when" }, { "id": 4354, "logprob": -0.5131836, "text": " compared" }, { "id": 271, "logprob": -0.10357666, "text": " to" }, { "id": 248, "logprob": -0.58447266, "text": " the" }, { "id": 21735, "logprob": -3.6816406, "text": " glorious" }, { "id": 64398, "logprob": -1.8203125, "text": " majesty" }, { "id": 275, "logprob": -0.23583984, "text": " of" }, { "id": 248, "logprob": -0.35473633, "text": " the" }, { "id": 26680, "logprob": -0.24572754, "text": " gir" }, { "id": 23226, "logprob": -0.029586792, "text": "affe" }, { "id": 25, "logprob": -0.17346191, "text": "." }, { "id": 193, "logprob": -1.3945312, "text": "\n" }, { "id": 23626, "logprob": -10.0625, "text": "Daniel" }, { "id": 37, "logprob": -4.59375, "text": ":" }, { "id": 23090, "logprob": -6.9375, "text": " Hello" }, { "id": 23, "logprob": -0.99316406, "text": "," }, { "id": 29033, "logprob": -2.2324219, "text": " Gir" }, { "id": 1622, "logprob": -0.10797119, "text": "af" }, { "id": 249, "logprob": -0.04248047, "text": "at" }, { "id": 1480, "logprob": -0.0024814606, "text": "ron" }, { "id": 12, "logprob": -1.4277344, "text": "!" 
}, { "id": 193, "logprob": -1.1005859, "text": "\n" }, { "id": 50, "logprob": -0.056884766, "text": "G" }, { "id": 330, "logprob": -0.1315918, "text": "ir" }, { "id": 1622, "logprob": -0.007095337, "text": "af" }, { "id": 249, "logprob": -0.00844574, "text": "at" }, { "id": 1480, "logprob": -0.00068998337, "text": "ron" }, { "id": 37, "logprob": -0.0074768066, "text": ":" } ], "seed": null, "tokens": [ { "id": 23090, "logprob": -1.8251953, "special": false, "text": " Hello" }, { "id": 23, "logprob": -0.31762695, "special": false, "text": "," }, { "id": 8156, "logprob": -0.2388916, "special": false, "text": " Daniel" }, { "id": 12, "logprob": -0.5698242, "special": false, "text": "!" }, { "id": 193, "logprob": -0.6152344, "special": false, "text": "\n" }, { "id": 23626, "logprob": -0.42211914, "special": false, "text": "Daniel" }, { "id": 37, "logprob": -0.002336502, "special": false, "text": ":" }, { "id": 1634, "logprob": -2.0605469, "special": false, "text": " What" }, { "id": 18, "logprob": -1.5292969, "special": false, "text": "'" }, { "id": 94, "logprob": -0.007926941, "special": false, "text": "s" } ] }, "generated_text": " Hello, Daniel!\nDaniel: What's" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50, "logprob": null, "text": "G" }, { "id": 330, "logprob": -5.96875, "text": "ir" }, { "id": 1622, "logprob": -5.6171875, "text": "af" }, { "id": 249, "logprob": -6.5, "text": "at" }, { "id": 1480, "logprob": -8.0703125, "text": "ron" }, { "id": 304, "logprob": -2.328125, "text": " is" }, { "id": 23866, "logprob": -9.59375, "text": " obsessed" }, { "id": 335, "logprob": -0.048339844, "text": " with" }, { "id": 26680, "logprob": -4.0, "text": " gir" }, { "id": 1903, "logprob": -0.07531738, "text": "aff" }, { "id": 255, "logprob": -0.006793976, "text": "es" }, { "id": 23, "logprob": -1.5478516, "text": "," }, { "id": 248, "logprob": -4.3320312, "text": " the" }, { "id": 758, "logprob": -3.7363281, "text": " most" }, { "id": 21735, "logprob": -5.1132812, "text": " glorious" }, { "id": 5985, "logprob": -2.0957031, "text": " animal" }, { "id": 313, "logprob": -1.1835938, "text": " on" }, { "id": 248, "logprob": -0.77685547, "text": " the" }, { "id": 1936, "logprob": -2.3808594, "text": " face" }, { "id": 275, "logprob": -0.004436493, "text": " of" }, { "id": 414, "logprob": -1.9638672, "text": " this" }, { "id": 6490, "logprob": -2.0449219, "text": " Earth" }, { "id": 25, "logprob": -0.28198242, "text": "." 
}, { "id": 401, "logprob": -7.9179688, "text": " G" }, { "id": 6013, "logprob": -2.2734375, "text": "ira" }, { "id": 694, "logprob": -0.6230469, "text": "ft" }, { "id": 1480, "logprob": -0.20947266, "text": "ron" }, { "id": 9369, "logprob": -4.5546875, "text": " believes" }, { "id": 455, "logprob": -4.5703125, "text": " all" }, { "id": 599, "logprob": -2.7382812, "text": " other" }, { "id": 5632, "logprob": -0.21948242, "text": " animals" }, { "id": 362, "logprob": -0.7661133, "text": " are" }, { "id": 23981, "logprob": -4.9960938, "text": " irrelevant" }, { "id": 635, "logprob": -4.234375, "text": " when" }, { "id": 4354, "logprob": -0.5131836, "text": " compared" }, { "id": 271, "logprob": -0.10357666, "text": " to" }, { "id": 248, "logprob": -0.58447266, "text": " the" }, { "id": 21735, "logprob": -3.6816406, "text": " glorious" }, { "id": 64398, "logprob": -1.8203125, "text": " majesty" }, { "id": 275, "logprob": -0.23583984, "text": " of" }, { "id": 248, "logprob": -0.35473633, "text": " the" }, { "id": 26680, "logprob": -0.24572754, "text": " gir" }, { "id": 23226, "logprob": -0.029586792, "text": "affe" }, { "id": 25, "logprob": -0.17346191, "text": "." }, { "id": 193, "logprob": -1.3945312, "text": "\n" }, { "id": 23626, "logprob": -10.0625, "text": "Daniel" }, { "id": 37, "logprob": -4.59375, "text": ":" }, { "id": 23090, "logprob": -6.9375, "text": " Hello" }, { "id": 23, "logprob": -0.99316406, "text": "," }, { "id": 29033, "logprob": -2.2324219, "text": " Gir" }, { "id": 1622, "logprob": -0.10797119, "text": "af" }, { "id": 249, "logprob": -0.04248047, "text": "at" }, { "id": 1480, "logprob": -0.0024814606, "text": "ron" }, { "id": 12, "logprob": -1.4277344, "text": "!" }, { "id": 193, "logprob": -1.1005859, "text": "\n" }, { "id": 50, "logprob": -0.056884766, "text": "G" }, { "id": 330, "logprob": -0.1315918, "text": "ir" }, { "id": 1622, "logprob": -0.007095337, "text": "af" }, { "id": 249, "logprob": -0.00844574, "text": "at" }, { "id": 1480, "logprob": -0.00068998337, "text": "ron" }, { "id": 37, "logprob": -0.0074768066, "text": ":" } ], "seed": null, "tokens": [ { "id": 23090, "logprob": -1.8251953, "special": false, "text": " Hello" }, { "id": 23, "logprob": -0.31762695, "special": false, "text": "," }, { "id": 8156, "logprob": -0.2388916, "special": false, "text": " Daniel" }, { "id": 12, "logprob": -0.5698242, "special": false, "text": "!" }, { "id": 193, "logprob": -0.6152344, "special": false, "text": "\n" }, { "id": 23626, "logprob": -0.42211914, "special": false, "text": "Daniel" }, { "id": 37, "logprob": -0.002336502, "special": false, "text": ":" }, { "id": 1634, "logprob": -2.0605469, "special": false, "text": " What" }, { "id": 18, "logprob": -1.5292969, "special": false, "text": "'" }, { "id": 94, "logprob": -0.007926941, "special": false, "text": "s" } ] }, "generated_text": " Hello, Daniel!\nDaniel: What's" } ]
0
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_falcon/test_flash_falcon_all_params.json
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 330, "logprob": null, "text": "ir" }, { "id": 1622, "logprob": -7.8125, "text": "af" }, { "id": 249, "logprob": -4.5, "text": "at" }, { "id": 1480, "logprob": -10.875, "text": "ron" }, { "id": 37, "logprob": -3.6875, "text": ":" } ], "seed": 0, "tokens": [ { "id": 836, "logprob": -1.265625, "special": false, "text": " i" }, { "id": 18, "logprob": -0.119628906, "special": false, "text": "'" }, { "id": 298, "logprob": -2.265625, "special": false, "text": "ve" }, { "id": 650, "logprob": -0.49804688, "special": false, "text": " been" }, { "id": 1241, "logprob": 0.0, "special": false, "text": " using" }, { "id": 334, "logprob": 0.0, "special": false, "text": " it" }, { "id": 312, "logprob": -1.2421875, "special": false, "text": " for" }, { "id": 909, "logprob": -0.99609375, "special": false, "text": " years" }, { "id": 193, "logprob": -0.30273438, "special": false, "text": "\n" }, { "id": 807, "logprob": -1.078125, "special": false, "text": "ik" } ] }, "generated_text": "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron: i've been using it for years\nik" }
0
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/proto/generate.proto
syntax = "proto3"; package generate.v1; service TextGenerationService { /// Model Info rpc Info (InfoRequest) returns (InfoResponse) {} /// Service discovery rpc ServiceDiscovery (ServiceDiscoveryRequest) returns (ServiceDiscoveryResponse) {} /// Empties batch cache rpc ClearCache (ClearCacheRequest) returns (ClearCacheResponse); /// Remove requests from a cached batch rpc FilterBatch (FilterBatchRequest) returns (FilterBatchResponse); /// Warmup the model and compute max cache size rpc Warmup (WarmupRequest) returns (WarmupResponse); /// Prefill batch and decode first token rpc Prefill (PrefillRequest) returns (PrefillResponse); /// Decode token for a list of prefilled batches rpc Decode (DecodeRequest) returns (DecodeResponse); /// Health check rpc Health (HealthRequest) returns (HealthResponse); } message HealthRequest {} message HealthResponse {} /// Empty request message InfoRequest {} message InfoResponse { bool requires_padding = 1; string dtype = 2; string device_type = 3; } /// Empty request message ServiceDiscoveryRequest {} message ServiceDiscoveryResponse { /// Other shards urls repeated string urls = 1; } message ClearCacheRequest { /// Optional batch id optional uint64 id = 1; } /// Empty response message ClearCacheResponse {} message NextTokenChooserParameters { /// exponential scaling output probability distribution float temperature = 1; /// restricting to the k highest probability elements uint32 top_k = 2; /// restricting to top tokens summing to prob_cut_off <= prob_cut_off float top_p = 3; /// restricting to top tokens summing to prob_cut_off <= prob_cut_off float typical_p = 4; /// apply sampling on the logits bool do_sample = 5; /// random seed for sampling uint64 seed = 6; /// repetition penalty float repetition_penalty = 7; /// token watermarking using "A Watermark for Large Language Models" bool watermark = 8; } message StoppingCriteriaParameters { /// Maximum number of generated tokens uint32 max_new_tokens = 1; /// Optional stopping sequences repeated string stop_sequences = 2; /// Ignore end of sequence token /// used for benchmarking bool ignore_eos_token = 3; } message Request { /// Request ID uint64 id = 1; /// The generation context string inputs = 2; /// Context truncation uint32 truncate = 3; /// Next Token Chooser Parameters NextTokenChooserParameters parameters = 4; /// Stopping Criteria Parameters StoppingCriteriaParameters stopping_parameters = 5; /// Return prefill logprobs bool prefill_logprobs = 6; } message Batch { /// Batch ID uint64 id = 1; /// Individual requests repeated Request requests = 2; /// Batch size (==len(requests)) uint32 size = 3; /// Maximum number of tokens this batch will grow to uint32 max_tokens = 4; } message CachedBatch { /// Batch ID uint64 id = 1; /// Individual requests ids repeated uint64 request_ids = 2; /// Batch size (==len(requests)) uint32 size = 3; /// Maximum number of tokens this batch will grow to uint32 max_tokens = 4; } enum FinishReason { FINISH_REASON_LENGTH = 0; FINISH_REASON_EOS_TOKEN = 1; FINISH_REASON_STOP_SEQUENCE = 2; } message GeneratedText { /// Output string text = 1; /// Number of generated tokens uint32 generated_tokens = 2; /// Finish reason FinishReason finish_reason = 3; /// Seed optional uint64 seed = 4; } message PrefillTokens { /// Prefill Token IDs repeated uint32 ids = 1; /// Prefill Logprobs repeated float logprobs = 2; /// Prefill tokens repeated string texts = 3; } message Generation { /// Request ID uint64 request_id = 1; /// Prefill tokens (optional) PrefillTokens prefill_tokens = 2; 
/// Token ID uint32 token_id = 3; /// Logprob float token_logprob = 4; /// Text string token_text = 5; /// Is it a special token bool token_is_special = 6; /// Complete generated text optional GeneratedText generated_text = 7; } message FilterBatchRequest { /// Batch ID uint64 batch_id = 1; /// Requests to keep repeated uint64 request_ids = 2; } message FilterBatchResponse { /// Filtered Batch (cached) CachedBatch batch = 1; } message PrefillRequest { /// Batch Batch batch = 1; } message PrefillResponse { /// Generation repeated Generation generations = 1; /// Next batch (cached) optional CachedBatch batch = 2; } message DecodeRequest { /// Cached batches repeated CachedBatch batches = 1; } message DecodeResponse { /// Decodes repeated Generation generations = 1; /// Next batch (cached) optional CachedBatch batch = 2; } message WarmupRequest { /// Batch to warmup on Batch batch = 1; } /// Empty response message WarmupResponse { /// Maximum number of tokens supported by the model optional uint32 max_supported_total_tokens = 1; }
0
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/benchmark/README.md
<div align="center"> # Text Generation Inference benchmarking tool ![benchmark](../assets/benchmark.png) </div> A lightweight benchmarking tool based inspired by [oha](https://github.com/hatoo/oha) and powered by [tui](https://github.com/tui-rs-revival/ratatui). ## Install ```shell make install-benchmark ``` ## Run First, start `text-generation-inference`: ```shell text-generation-launcher --model-id bigscience/bloom-560m ``` Then run the benchmarking tool: ```shell text-generation-benchmark --tokenizer-name bigscience/bloom-560m ```
0
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/benchmark/Cargo.toml
[package]
name = "text-generation-benchmark"
description = "Text Generation Benchmarking tool"
version.workspace = true
edition.workspace = true
authors.workspace = true
homepage.workspace = true

[lib]
path = "src/lib.rs"

[[bin]]
name = "text-generation-benchmark"
path = "src/main.rs"

[dependencies]
average = "0.13"
clap = { version = "4.1.4", features = ["derive", "env"] }
crossterm = "0.26"
float-ord = "0.3.2"
serde = { version = "1.0.142", features = ["derive"] }
serde_json = "1.0"
tabled = "0.12.0"
text-generation-client = { path = "../router/client" }
thiserror = "1.0.38"
tokenizers = "0.13.3"
tokio = { version = "1.25.0", features = ["rt", "rt-multi-thread", "parking_lot", "signal", "sync"] }
tui = { package = "ratatui", version = "0.20", default-features = false, features = ["crossterm"] }
tracing = "0.1.37"
tracing-subscriber = { version = "0.3.16", features = ["json", "env-filter"] }
0
hf_public_repos/text-generation-inference/benchmark
hf_public_repos/text-generation-inference/benchmark/src/main.rs
/// Text Generation Inference benchmarking tool
///
/// Inspired by the great Oha app: https://github.com/hatoo/oha
/// and: https://github.com/orhun/rust-tui-template
use clap::Parser;
use std::path::Path;
use text_generation_client::ShardedClient;
use tokenizers::{FromPretrainedParameters, Tokenizer};
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
use tracing_subscriber::EnvFilter;

/// App Configuration
#[derive(Parser, Debug)]
#[clap(author, version, about, long_about = None)]
struct Args {
    /// The name of the tokenizer (as in model_id on the huggingface hub, or local path).
    #[clap(short, long, env)]
    tokenizer_name: String,

    /// The revision to use for the tokenizer if on the hub.
    #[clap(default_value = "main", long, env)]
    revision: String,

    /// The various batch sizes to benchmark for, the idea is to get enough
    /// batching to start seeing increased latency, this usually means you're
    /// moving from memory bound (usual as BS=1) to compute bound, and this is
    /// a sweet spot for the maximum batch size for the model under test
    #[clap(short, long)]
    batch_size: Option<Vec<u32>>,

    /// This is the initial prompt sent to the text-generation-server length
    /// in token. Longer prompt will slow down the benchmark. Usually the
    /// latency grows somewhat linearly with this for the prefill step.
    ///
    /// Most importantly, the prefill step is usually not the one dominating
    /// your runtime, so it's ok to keep it short.
    #[clap(default_value = "10", short, long, env)]
    sequence_length: u32,

    /// This is how many tokens will be generated by the server and averaged out
    /// to give the `decode` latency. This is the *critical* number you want to optimize for
    /// LLM spend most of their time doing decoding.
    ///
    /// Decode latency is usually quite stable.
    #[clap(default_value = "8", short, long, env)]
    decode_length: u32,

    /// How many runs should we average from
    #[clap(default_value = "10", short, long, env)]
    runs: usize,

    /// Number of warmup cycles
    #[clap(default_value = "1", short, long, env)]
    warmups: usize,

    /// The location of the grpc socket. This benchmark tool bypasses the router
    /// completely and directly talks to the gRPC processes
    #[clap(default_value = "/tmp/text-generation-server-0", short, long, env)]
    master_shard_uds_path: String,

    /// Generation parameter in case you want to specifically test/debug particular
    /// decoding strategies, for full doc refer to the `text-generation-server`
    #[clap(long, env)]
    temperature: Option<f32>,

    /// Generation parameter in case you want to specifically test/debug particular
    /// decoding strategies, for full doc refer to the `text-generation-server`
    #[clap(long, env)]
    top_k: Option<u32>,

    /// Generation parameter in case you want to specifically test/debug particular
    /// decoding strategies, for full doc refer to the `text-generation-server`
    #[clap(long, env)]
    top_p: Option<f32>,

    /// Generation parameter in case you want to specifically test/debug particular
    /// decoding strategies, for full doc refer to the `text-generation-server`
    #[clap(long, env)]
    typical_p: Option<f32>,

    /// Generation parameter in case you want to specifically test/debug particular
    /// decoding strategies, for full doc refer to the `text-generation-server`
    #[clap(long, env)]
    repetition_penalty: Option<f32>,

    /// Generation parameter in case you want to specifically test/debug particular
    /// decoding strategies, for full doc refer to the `text-generation-server`
    #[clap(long, env)]
    watermark: bool,

    /// Generation parameter in case you want to specifically test/debug particular
    /// decoding strategies, for full doc refer to the `text-generation-server`
    #[clap(long, env)]
    do_sample: bool,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    init_logging();

    // Get args
    let args = Args::parse();
    // Pattern match configuration
    let Args {
        tokenizer_name,
        revision,
        batch_size,
        sequence_length,
        decode_length,
        runs,
        warmups,
        temperature,
        top_k,
        top_p,
        typical_p,
        repetition_penalty,
        watermark,
        do_sample,
        master_shard_uds_path,
    } = args;

    let batch_size = batch_size.unwrap_or(vec![1, 2, 4, 8, 16, 32]);

    // Tokenizer instance
    // This will only be used to validate payloads
    tracing::info!("Loading tokenizer");
    let local_path = Path::new(&tokenizer_name);
    let tokenizer =
        if local_path.exists() && local_path.is_dir() && local_path.join("tokenizer.json").exists()
        {
            // Load local tokenizer
            tracing::info!("Found local tokenizer");
            Tokenizer::from_file(local_path.join("tokenizer.json")).unwrap()
        } else {
            tracing::info!("Downloading tokenizer");

            // Parse Huggingface hub token
            let auth_token = std::env::var("HUGGING_FACE_HUB_TOKEN").ok();

            // Download and instantiate tokenizer
            // We need to download it outside of the Tokio runtime
            let params = FromPretrainedParameters {
                revision,
                auth_token,
                ..Default::default()
            };
            Tokenizer::from_pretrained(tokenizer_name.clone(), Some(params)).unwrap()
        };
    tracing::info!("Tokenizer loaded");

    // Launch Tokio runtime
    tokio::runtime::Builder::new_multi_thread()
        .enable_all()
        .build()
        .unwrap()
        .block_on(async {
            // Instantiate sharded client from the master unix socket
            tracing::info!("Connect to model server");
            let mut sharded_client = ShardedClient::connect_uds(master_shard_uds_path)
                .await
                .expect("Could not connect to server");
            // Clear the cache; useful if the webserver rebooted
            sharded_client
                .clear_cache(None)
                .await
                .expect("Unable to clear cache");
            tracing::info!("Connected");

            // Run app
            text_generation_benchmark::run(
                tokenizer_name,
                tokenizer,
                batch_size,
                sequence_length,
                decode_length,
                runs,
                warmups,
                temperature,
                top_k,
                top_p,
                typical_p,
                repetition_penalty,
                watermark,
                do_sample,
                sharded_client,
            )
            .await
            .unwrap();
        });
    Ok(())
}

/// Init logging using LOG_LEVEL
fn init_logging() {
    // STDOUT/STDERR layer
    let fmt_layer = tracing_subscriber::fmt::layer()
        .with_file(true)
        .with_line_number(true);

    // Filter events with LOG_LEVEL
    let env_filter =
        EnvFilter::try_from_env("LOG_LEVEL").unwrap_or_else(|_| EnvFilter::new("info"));

    tracing_subscriber::registry()
        .with(env_filter)
        .with(fmt_layer)
        .init();
}
0
hf_public_repos/text-generation-inference/benchmark
hf_public_repos/text-generation-inference/benchmark/src/utils.rs
/// MIT License
//
// Copyright (c) 2020 hatoo
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
use std::collections::BTreeMap;

pub(crate) fn histogram(values: &[f64], bins: usize) -> Vec<(f64, usize)> {
    assert!(bins >= 2);
    let mut bucket: Vec<usize> = vec![0; bins];
    let min = values.iter().collect::<average::Min>().min();
    let max = values.iter().collect::<average::Max>().max();
    let step = (max - min) / (bins - 1) as f64;

    for &v in values {
        let i = std::cmp::min(((v - min) / step).ceil() as usize, bins - 1);
        bucket[i] += 1;
    }

    bucket
        .into_iter()
        .enumerate()
        .map(|(i, v)| (min + step * i as f64, v))
        .collect()
}

pub(crate) fn percentiles(values: &[f64], pecents: &[i32]) -> BTreeMap<String, f64> {
    pecents
        .iter()
        .map(|&p| {
            let i = (f64::from(p) / 100.0 * values.len() as f64) as usize;
            (format!("p{p}"), *values.get(i).unwrap_or(&std::f64::NAN))
        })
        .collect()
}
0