# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
General namespace and dataclass related classes
"""
import argparse
import copy
import enum
import functools
import os
import typing
import warnings
from contextlib import contextmanager
from dataclasses import dataclass, field
from datetime import timedelta
from typing import Any, Callable, Dict, Iterable, List, Literal, Optional, Tuple, get_args
import torch
from .constants import FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE
from .environment import str_to_bool
from .imports import is_cuda_available, is_npu_available, is_xpu_available
from .versions import compare_versions
class KwargsHandler:
"""
Internal mixin that implements a `to_kwargs()` method for a dataclass.
"""
def to_dict(self):
return copy.deepcopy(self.__dict__)
def to_kwargs(self):
"""
Returns a dictionary containing the attributes with values different from the default of this class.
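For example, a minimal sketch using one of the handlers defined below (the value is illustrative):
```python
from accelerate.utils import DistributedDataParallelKwargs

kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
# Only the field that differs from its default is returned
assert kwargs.to_kwargs() == {"find_unused_parameters": True}
```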
"""
# import clear_environment here to avoid circular import problem
from .other import clear_environment
with clear_environment():
default_dict = self.__class__().to_dict()
this_dict = self.to_dict()
return {k: v for k, v in this_dict.items() if default_dict[k] != v}
@dataclass
class AutocastKwargs(KwargsHandler):
"""
Use this object in your [`Accelerator`] to customize how `torch.autocast` behaves. Please refer to the
documentation of this [context manager](https://pytorch.org/docs/stable/amp.html#torch.autocast) for more
information on each argument.
Example:
```python
from accelerate import Accelerator
from accelerate.utils import AutocastKwargs
kwargs = AutocastKwargs(cache_enabled=True)
accelerator = Accelerator(kwargs_handlers=[kwargs])
```
"""
enabled: bool = True
cache_enabled: bool = None
@dataclass
class DistributedDataParallelKwargs(KwargsHandler):
"""
Use this object in your [`Accelerator`] to customize how your model is wrapped in a
`torch.nn.parallel.DistributedDataParallel`. Please refer to the documentation of this
[wrapper](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html) for more
information on each argument.
<Tip warning={true}>
`gradient_as_bucket_view` is only available in PyTorch 1.7.0 and later versions.
`static_graph` is only available in PyTorch 1.11.0 and later versions.
</Tip>
Example:
```python
from accelerate import Accelerator
from accelerate.utils import DistributedDataParallelKwargs
kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
accelerator = Accelerator(kwargs_handlers=[kwargs])
```
"""
dim: int = 0
broadcast_buffers: bool = True
bucket_cap_mb: int = 25
find_unused_parameters: bool = False
check_reduction: bool = False
gradient_as_bucket_view: bool = False
static_graph: bool = False
@dataclass
class GradScalerKwargs(KwargsHandler):
"""
Use this object in your [`Accelerator`] to customize the behavior of mixed precision, specifically how the
`torch.cuda.amp.GradScaler` used is created. Please refer to the documentation of this
[scaler](https://pytorch.org/docs/stable/amp.html?highlight=gradscaler) for more information on each argument.
<Tip warning={true}>
`GradScaler` is only available in PyTorch 1.5.0 and later versions.
</Tip>
Example:
```python
from accelerate import Accelerator
from accelerate.utils import GradScalerKwargs
kwargs = GradScalerKwargs(backoff_factor=0.25)
accelerator = Accelerator(kwargs_handlers=[kwargs])
```
"""
init_scale: float = 65536.0
growth_factor: float = 2.0
backoff_factor: float = 0.5
growth_interval: int = 2000
enabled: bool = True
@dataclass
class InitProcessGroupKwargs(KwargsHandler):
"""
Use this object in your [`Accelerator`] to customize the initialization of the distributed processes. Please refer
to the documentation of this
[method](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) for more
information on each argument.
```python
from datetime import timedelta
from accelerate import Accelerator
from accelerate.utils import InitProcessGroupKwargs
kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=800))
accelerator = Accelerator(kwargs_handlers=[kwargs])
```
"""
backend: Optional[str] = "nccl"
init_method: Optional[str] = None
timeout: timedelta = timedelta(seconds=1800)
# Literals
Backend = Literal["MSAMP", "TE"]
OptLevel = Literal["O1", "O2"]
FP8Format = Literal["E4M3", "HYBRID"]
AmaxComputeAlgorithm = Literal["max", "most_recent"]
@dataclass
class FP8RecipeKwargs(KwargsHandler):
"""
Use this object in your [`Accelerator`] to customize the initialization of the recipe for FP8 mixed precision
training with `transformer-engine` or `ms-amp`.
<Tip>
For more information on `transformer-engine` args, please refer to the API
[documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html).
For more information on the `ms-amp` args, please refer to the Optimization Level
[documentation](https://azure.github.io/MS-AMP/docs/user-tutorial/optimization-level).
</Tip>
```python
from accelerate import Accelerator
from accelerate.utils import FP8RecipeKwargs
kwargs = FP8RecipeKwargs(backend="te", fp8_format="HYBRID")
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=[kwargs])
```
To use MS-AMP as an engine, pass `backend="msamp"` and the `opt_level`:
```python
kwargs = FP8RecipeKwargs(backend="msamp", opt_level="O2")
```
Args:
backend (`str`, *optional*, defaults to "msamp"):
Which FP8 engine to use. Must be one of `"msamp"` (MS-AMP) or `"te"` (TransformerEngine).
margin (`int`, *optional*, defaults to 0):
The margin to use for the gradient scaling.
interval (`int`, *optional*, defaults to 1):
The interval to use for how often the scaling factor is recomputed.
fp8_format (`str`, *optional*, defaults to "E4M3"):
The format to use for the FP8 recipe. Must be one of `E4M3` or `HYBRID`.
amax_history_len (`int`, *optional*, defaults to 1024):
The length of the history to use for the scaling factor computation.
amax_compute_algo (`str`, *optional*, defaults to "most_recent"):
The algorithm to use for the scaling factor computation. Must be one of `max` or `most_recent`.
override_linear_precision (`tuple` of three `bool`, *optional*, defaults to `(False, False, False)`):
Whether or not to execute `fprop`, `dgrad`, and `wgrad` GEMMs in higher precision.
opt_level (`str`, *optional*, defaults to "O2"):
What level of 8-bit collective communication should be used with MS-AMP. In general:
* O1: Weight gradients and `all_reduce` communications are done in FP8, reducing GPU
memory usage and communication bandwidth.
* O2: First-order optimizer states are in 8-bit, and second-order states are in FP16.
Only available when using Adam or AdamW. This maintains accuracy and can potentially save the
most memory.
* O3: Specifically for DeepSpeed, implements capabilities so weights and master weights of models
are stored in FP8. If `fp8` is selected and DeepSpeed is enabled, it will be used by default. (Not
currently available.)
"""
backend: Backend = "MSAMP"
opt_level: OptLevel = "O2"
margin: int = 0
interval: int = 1
fp8_format: FP8Format = "E4M3"
amax_history_len: int = 1024
amax_compute_algo: AmaxComputeAlgorithm = "most_recent"
override_linear_precision: Tuple[bool, bool, bool] = (False, False, False)
def __post_init__(self):
if self.backend.upper() not in get_args(Backend):
raise ValueError("`backend` must be 'MSAMP' or 'TE' (TransformerEngine).")
self.backend = self.backend.upper()
# Check TE args
if self.backend == "TE":
self.fp8_format = self.fp8_format.upper()
if self.fp8_format not in get_args(FP8Format):
raise ValueError(f"`fp8_format` must be one of {' or '.join(get_args(FP8Format))}.")
if self.amax_compute_algo not in get_args(AmaxComputeAlgorithm):
raise ValueError(f"`amax_compute_algo` must be one of {' or '.join(get_args(AmaxComputeAlgorithm))}")
elif self.backend == "MSAMP":
if self.opt_level not in get_args(OptLevel):
raise ValueError(f"`optimization_level` must be one of {' or '.join(get_args(OptLevel))}")
class EnumWithContains(enum.EnumMeta):
"A metaclass that adds the ability to check if `self` contains an item with the `in` operator"
def __contains__(cls, item):
try:
cls(item)
except ValueError:
return False
return True
class BaseEnum(enum.Enum, metaclass=EnumWithContains):
"An enum class that can get the value of an item with `str(Enum.key)`"
def __str__(self):
return self.value
@classmethod
def list(cls):
"Method to list all the possible items in `cls`"
return list(map(str, cls))
class DeprecatedFieldDescriptor:
"""
Descriptor for deprecated fields in an enum class.
Args:
field_name (`str`):
The name of the deprecated field.
replaced_with (`str`):
The name of the field that replaces the deprecated one.
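Example (mirroring how `DistributedType` aliases `TPU` to `XLA` below):
```python
class MyEnum(str, enum.Enum):
    XLA = "XLA"
    TPU = DeprecatedFieldDescriptor("TPU", "XLA")


MyEnum.TPU  # emits a FutureWarning and resolves to MyEnum.XLA
```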
"""
def __init__(self, field_name, replaced_with):
self.field_name = field_name
self.replaced_with = replaced_with
def __get__(self, instance, owner):
warnings.warn(
f"The `{self.field_name}` of `{owner}` is deprecated and will be removed in v1.0.0. "
f"Please use the `{self.replaced_with}` instead.",
FutureWarning,
)
return getattr(owner, self.replaced_with)
class DistributedType(str, enum.Enum):
"""
Represents a type of distributed environment.
Values:
- **NO** -- Not a distributed environment, just a single process.
- **MULTI_CPU** -- Distributed on multiple CPU nodes.
- **MULTI_GPU** -- Distributed on multiple GPUs.
- **MULTI_MLU** -- Distributed on multiple MLUs.
- **MULTI_NPU** -- Distributed on multiple NPUs.
- **MULTI_XPU** -- Distributed on multiple XPUs.
- **DEEPSPEED** -- Using DeepSpeed.
- **FSDP** -- Using Fully Sharded Data Parallelism.
- **MEGATRON_LM** -- Using Megatron-LM.
- **XLA** -- Using TorchXLA.
- **TPU** -- This field will be deprecated in v0.27.0. Use XLA instead.
"""
# Subclassing str as well as Enum allows the `DistributedType` to be JSON-serializable out of the box.
NO = "NO"
MULTI_CPU = "MULTI_CPU"
MULTI_GPU = "MULTI_GPU"
MULTI_NPU = "MULTI_NPU"
MULTI_MLU = "MULTI_MLU"
MULTI_XPU = "MULTI_XPU"
DEEPSPEED = "DEEPSPEED"
FSDP = "FSDP"
XLA = "XLA"
MEGATRON_LM = "MEGATRON_LM"
TPU = DeprecatedFieldDescriptor("TPU", "XLA")
class SageMakerDistributedType(str, enum.Enum):
"""
Represents a type of distributed environment.
Values:
- **NO** -- Not a distributed environment, just a single process.
- **DATA_PARALLEL** -- using sagemaker distributed data parallelism.
- **MODEL_PARALLEL** -- using sagemaker distributed model parallelism.
"""
# Subclassing str as well as Enum allows the `SageMakerDistributedType` to be JSON-serializable out of the box.
NO = "NO"
DATA_PARALLEL = "DATA_PARALLEL"
MODEL_PARALLEL = "MODEL_PARALLEL"
class ComputeEnvironment(str, enum.Enum):
"""
Represents a type of the compute environment.
Values:
- **LOCAL_MACHINE** -- private/custom cluster hardware.
- **AMAZON_SAGEMAKER** -- Amazon SageMaker as compute environment.
"""
# Subclassing str as well as Enum allows the `ComputeEnvironment` to be JSON-serializable out of the box.
LOCAL_MACHINE = "LOCAL_MACHINE"
AMAZON_SAGEMAKER = "AMAZON_SAGEMAKER"
class DynamoBackend(str, BaseEnum):
"""
Represents a dynamo backend (see https://pytorch.org/docs/stable/torch.compiler.html).
Values:
- **NO** -- Do not use torch dynamo.
- **EAGER** -- Uses PyTorch to run the extracted GraphModule. This is quite useful in debugging TorchDynamo
issues.
- **AOT_EAGER** -- Uses AotAutograd with no compiler, i.e, just using PyTorch eager for the AotAutograd's
extracted forward and backward graphs. This is useful for debugging, and unlikely to give speedups.
- **INDUCTOR** -- Uses TorchInductor backend with AotAutograd and cudagraphs by leveraging codegened Triton
kernels. [Read
more](https://dev-discuss.pytorch.org/t/torchinductor-a-pytorch-native-compiler-with-define-by-run-ir-and-symbolic-shapes/747)
- **AOT_TS_NVFUSER** -- nvFuser with AotAutograd/TorchScript. [Read
more](https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-1-nvfuser-and-its-primitives/593)
- **NVPRIMS_NVFUSER** -- nvFuser with PrimTorch. [Read
more](https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-1-nvfuser-and-its-primitives/593)
- **CUDAGRAPHS** -- cudagraphs with AotAutograd. [Read more](https://github.com/pytorch/torchdynamo/pull/757)
- **OFI** -- Uses Torchscript optimize_for_inference. Inference only. [Read
more](https://pytorch.org/docs/stable/generated/torch.jit.optimize_for_inference.html)
- **FX2TRT** -- Uses Nvidia TensorRT for inference optimizations. Inference only. [Read
more](https://github.com/pytorch/TensorRT/blob/master/docsrc/tutorials/getting_started_with_fx_path.rst)
- **ONNXRT** -- Uses ONNXRT for inference on CPU/GPU. Inference only. [Read more](https://onnxruntime.ai/)
- **TENSORRT** -- Uses ONNXRT to run TensorRT for inference optimizations. [Read
more](https://github.com/onnx/onnx-tensorrt)
- **IPEX** -- Uses IPEX for inference on CPU. Inference only. [Read
more](https://github.com/intel/intel-extension-for-pytorch).
- **TVM** -- Uses Apache TVM for inference optimizations. [Read more](https://tvm.apache.org/)
"""
# Subclassing str as well as Enum allows the `DynamoBackend` to be JSON-serializable out of the box.
NO = "NO"
EAGER = "EAGER"
AOT_EAGER = "AOT_EAGER"
INDUCTOR = "INDUCTOR"
AOT_TS_NVFUSER = "AOT_TS_NVFUSER"
NVPRIMS_NVFUSER = "NVPRIMS_NVFUSER"
CUDAGRAPHS = "CUDAGRAPHS"
OFI = "OFI"
FX2TRT = "FX2TRT"
ONNXRT = "ONNXRT"
TENSORRT = "TENSORRT"
IPEX = "IPEX"
TVM = "TVM"
class LoggerType(BaseEnum):
"""Represents a type of supported experiment tracker
Values:
- **ALL** -- all available trackers in the environment that are supported
- **AIM** -- aim as an experiment tracker
- **TENSORBOARD** -- TensorBoard as an experiment tracker
- **WANDB** -- wandb as an experiment tracker
- **COMETML** -- comet_ml as an experiment tracker
- **MLFLOW** -- mlflow as an experiment tracker
- **CLEARML** -- clearml as an experiment tracker
- **DVCLIVE** -- dvclive as an experiment tracker
"""
ALL = "all"
AIM = "aim"
TENSORBOARD = "tensorboard"
WANDB = "wandb"
COMETML = "comet_ml"
MLFLOW = "mlflow"
CLEARML = "clearml"
DVCLIVE = "dvclive"
class PrecisionType(BaseEnum):
"""Represents a type of precision used on floating point values
Values:
- **NO** -- using full precision (FP32)
- **FP8** -- using 8-bit floating point precision
- **FP16** -- using half precision
- **BF16** -- using brain floating point precision
"""
NO = "no"
FP8 = "fp8"
FP16 = "fp16"
BF16 = "bf16"
class RNGType(BaseEnum):
TORCH = "torch"
CUDA = "cuda"
MLU = "mlu"
NPU = "npu"
XLA = "xla"
XPU = "xpu"
GENERATOR = "generator"
class CustomDtype(enum.Enum):
r"""
An enum that contains multiple custom dtypes that can be used for `infer_auto_device_map`.
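A minimal sketch of intended usage (assuming `infer_auto_device_map` accepts a `special_dtypes` mapping; the model and module name are illustrative):
```python
import torch

from accelerate import infer_auto_device_map
from accelerate.utils import CustomDtype

model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 2))
# Account for the second layer as INT4 when sizing the device map
device_map = infer_auto_device_map(model, special_dtypes={"1": CustomDtype.INT4})
```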
"""
FP8 = "fp8"
INT4 = "int4"
INT2 = "int2"
# data classes
@dataclass
class TensorInformation:
shape: torch.Size
dtype: torch.dtype
@dataclass
class DataLoaderConfiguration:
"""
Configuration for dataloader-related items when calling `accelerator.prepare`.
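Example (a minimal sketch; assumes an `Accelerator` version that accepts a `dataloader_config` argument):
```python
from accelerate import Accelerator
from accelerate.utils import DataLoaderConfiguration

dataloader_config = DataLoaderConfiguration(split_batches=True, even_batches=True)
accelerator = Accelerator(dataloader_config=dataloader_config)
```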
"""
split_batches: bool = field(
default=False,
metadata={
"help": "Whether or not the accelerator should split the batches yielded by the dataloaders across the devices. If"
" `True` the actual batch size used will be the same on any kind of distributed processes, but it must be a"
" round multiple of the `num_processes` you are using. If `False`, actual batch size used will be the one set"
" in your script multiplied by the number of processes."
},
)
dispatch_batches: bool = field(
default=None,
metadata={
"help": "If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process"
" and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose"
" underlying dataset is an `IterableDataslet`, `False` otherwise."
},
)
even_batches: bool = field(
default=True,
metadata={
"help": "If set to `True`, in cases where the total batch size across all processes does not exactly divide the"
" dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among"
" all workers."
},
)
use_seedable_sampler: bool = field(
default=False,
metadata={
"help": "Whether or not use a fully seedable random sampler ([`data_loader.SeedableRandomSampler`])."
"Ensures training results are fully reproducable using a different sampling technique. "
"While seed-to-seed results may differ, on average the differences are neglible when using"
"multiple different seeds to compare. Should also be ran with [`~utils.set_seed`] for the best results."
},
)
@dataclass
class ProjectConfiguration:
"""
Configuration for the Accelerator object based on inner-project needs.
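Example (a minimal sketch; the paths and limits are illustrative):
```python
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

project_config = ProjectConfiguration(project_dir="outputs", automatic_checkpoint_naming=True, total_limit=5)
accelerator = Accelerator(project_config=project_config)
```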
"""
project_dir: str = field(default=None, metadata={"help": "A path to a directory for storing data."})
logging_dir: str = field(
default=None,
metadata={
"help": "A path to a directory for storing logs of locally-compatible loggers. If None, defaults to `project_dir`."
},
)
automatic_checkpoint_naming: bool = field(
default=False,
metadata={"help": "Whether saved states should be automatically iteratively named."},
)
total_limit: int = field(
default=None,
metadata={"help": "The maximum number of total saved states to keep."},
)
iteration: int = field(
default=0,
metadata={"help": "The current save iteration."},
)
save_on_each_node: bool = field(
default=False,
metadata={
"help": (
"When doing multi-node distributed training, whether to save models and checkpoints on each node, or"
" only on the main one"
)
},
)
def set_directories(self, project_dir: str = None):
"Sets `self.project_dir` and `self.logging_dir` to the appropriate values."
self.project_dir = project_dir
if self.logging_dir is None:
self.logging_dir = project_dir
def __post_init__(self):
self.set_directories(self.project_dir)
@dataclass
class GradientAccumulationPlugin(KwargsHandler):
"""
A plugin to configure gradient accumulation behavior. You can only pass one of `gradient_accumulation_plugin` or
`gradient_accumulation_steps` to [`Accelerator`]. Passing both raises an error.
Parameters:
num_steps (`int`):
The number of steps to accumulate gradients for.
adjust_scheduler (`bool`, *optional*, defaults to `True`):
Whether to adjust the scheduler steps to account for the number of steps being accumulated. Should be
`True` if the used scheduler was not adjusted for gradient accumulation.
sync_with_dataloader (`bool`, *optional*, defaults to `True`):
Whether to synchronize setting the gradients when at the end of the dataloader.
sync_each_batch (`bool`, *optional*):
Whether to synchronize setting the gradients at each data batch. Setting to `True` may reduce memory
requirements when using gradient accumulation with distributed training, at the expense of speed.
Example:
```python
from accelerate.utils import GradientAccumulationPlugin
gradient_accumulation_plugin = GradientAccumulationPlugin(num_steps=2)
accelerator = Accelerator(gradient_accumulation_plugin=gradient_accumulation_plugin)
```
"""
num_steps: int = field(default=None, metadata={"help": "The number of steps to accumulate gradients for."})
adjust_scheduler: bool = field(
default=True,
metadata={
"help": "Whether to adjust the scheduler steps to account for the number of steps being accumulated. Should be `True` if the used scheduler was not adjusted for gradient accumulation."
},
)
sync_with_dataloader: bool = field(
default=True,
metadata={
"help": "Whether to synchronize setting the gradients when at the end of the dataloader. Should only be set to `False` if you know what you're doing."
},
)
sync_each_batch: bool = field(
default=False,
metadata={
"help": "Whether to synchronize setting the gradients at each data batch. Setting to `True` may reduce memory requirements when using gradient accumulation with distributed training, at expense of speed."
},
)
@dataclass
class TorchDynamoPlugin(KwargsHandler):
"""
This plugin is used to compile a model with PyTorch 2.0
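Example (a minimal sketch constructing the plugin directly; the backend and mode values are illustrative):
```python
from accelerate.utils import TorchDynamoPlugin

dynamo_plugin = TorchDynamoPlugin(backend="inductor", mode="max-autotune")
print(dynamo_plugin.to_dict())
```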
"""
backend: DynamoBackend = field(
default=None,
metadata={"help": f"Possible options are {[b.value.lower() for b in DynamoBackend]}"},
)
mode: str = field(
default=None, metadata={"help": "Possible options are 'default', 'reduce-overhead' or 'max-autotune'"}
)
fullgraph: bool = field(default=None, metadata={"help": "Whether it is ok to break model into several subgraphs"})
dynamic: bool = field(default=None, metadata={"help": "Whether to use dynamic shape for tracing"})
options: Any = field(default=None, metadata={"help": "A dictionary of options to pass to the backend."})
disable: bool = field(default=False, metadata={"help": "Turn torch.compile() into a no-op for testing"})
def __post_init__(self):
prefix = "ACCELERATE_DYNAMO_"
if self.backend is None:
self.backend = os.environ.get(prefix + "BACKEND", "no")
self.backend = DynamoBackend(self.backend.upper())
if self.mode is None:
self.mode = os.environ.get(prefix + "MODE", "default")
if self.fullgraph is None:
self.fullgraph = str_to_bool(os.environ.get(prefix + "USE_FULLGRAPH", "False")) == 1
if self.dynamic is None:
self.dynamic = str_to_bool(os.environ.get(prefix + "USE_DYNAMIC", "False")) == 1
def to_dict(self):
dynamo_config = copy.deepcopy(self.__dict__)
dynamo_config["backend"] = dynamo_config["backend"].value.lower()
return dynamo_config
@dataclass
class DeepSpeedPlugin:
"""
This plugin is used to integrate DeepSpeed.
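Example (a minimal sketch; the ZeRO stage and accumulation steps are illustrative):
```python
from accelerate import Accelerator, DeepSpeedPlugin

deepspeed_plugin = DeepSpeedPlugin(zero_stage=2, gradient_accumulation_steps=2)
accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)
```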
"""
hf_ds_config: Any = field(
default=None,
metadata={
"help": "path to DeepSpeed config file or dict or an object of class `accelerate.utils.deepspeed.HfDeepSpeedConfig`."
},
)
gradient_accumulation_steps: int = field(
default=None,
metadata={
"help": "Number of steps to accumulate gradients before updating optimizer states. If not set, will use the value from the `Accelerator` directly."
},
)
gradient_clipping: float = field(default=None, metadata={"help": "Enable gradient clipping with value"})
zero_stage: int = field(
default=None,
metadata={"help": "Possible options are 0,1,2,3; Default will be taken from environment variable"},
)
is_train_batch_min: bool = field(
default=True,
metadata={"help": "If both train & eval dataloaders are specified, this will decide the train_batch_size"},
)
offload_optimizer_device: str = field(
default=None,
metadata={"help": "Possible options are none|cpu|nvme. Only applicable with ZeRO Stages 2 and 3."},
)
offload_param_device: str = field(
default=None,
metadata={"help": "Possible options are none|cpu|nvme. Only applicable with ZeRO Stage 3."},
)
offload_optimizer_nvme_path: str = field(
default=None,
metadata={"help": "Possible options are /nvme|/local_nvme. Only applicable with ZeRO Stage 3."},
)
offload_param_nvme_path: str = field(
default=None,
metadata={"help": "Possible options are /nvme|/local_nvme. Only applicable with ZeRO Stage 3."},
)
zero3_init_flag: bool = field(
default=None,
metadata={
"help": "Flag to indicate whether to enable `deepspeed.zero.Init` for constructing massive models."
"Only applicable with ZeRO Stage-3."
},
)
zero3_save_16bit_model: bool = field(
default=None,
metadata={"help": "Flag to indicate whether to save 16-bit model. Only applicable with ZeRO Stage-3."},
)
def __post_init__(self):
from .deepspeed import HfDeepSpeedConfig
if self.gradient_accumulation_steps is None:
gas = os.environ.get("ACCELERATE_GRADIENT_ACCUMULATION_STEPS", "auto")
self.gradient_accumulation_steps = int(gas) if gas.isdigit() else gas
if self.gradient_clipping is None:
gradient_clipping = os.environ.get("ACCELERATE_GRADIENT_CLIPPING", "none")
if gradient_clipping != "none":
self.gradient_clipping = float(gradient_clipping)
if self.zero_stage is None:
self.zero_stage = int(os.environ.get("ACCELERATE_DEEPSPEED_ZERO_STAGE", 2))
if self.offload_optimizer_device is None:
self.offload_optimizer_device = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE", "none")
if self.offload_param_device is None:
self.offload_param_device = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE", "none")
if self.offload_optimizer_nvme_path is None:
self.offload_optimizer_nvme_path = os.environ.get(
"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_NVME_PATH", "none"
)
if self.offload_param_nvme_path is None:
self.offload_param_nvme_path = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_NVME_PATH", "none")
if self.zero3_save_16bit_model is None:
self.zero3_save_16bit_model = (
os.environ.get("ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL", "false") == "true"
)
if self.hf_ds_config is None:
self.hf_ds_config = os.environ.get("ACCELERATE_DEEPSPEED_CONFIG_FILE", "none")
if (
isinstance(self.hf_ds_config, dict)
or (isinstance(self.hf_ds_config, str) and self.hf_ds_config != "none")
or isinstance(self.hf_ds_config, HfDeepSpeedConfig)
):
if not isinstance(self.hf_ds_config, HfDeepSpeedConfig):
self.hf_ds_config = HfDeepSpeedConfig(self.hf_ds_config)
if "gradient_accumulation_steps" not in self.hf_ds_config.config:
self.hf_ds_config.config["gradient_accumulation_steps"] = 1
if "zero_optimization" not in self.hf_ds_config.config:
raise ValueError("Please specify the ZeRO optimization config in the DeepSpeed config.")
self._deepspeed_config_checks()
plugin_to_config_mapping = {
"gradient_accumulation_steps": "gradient_accumulation_steps",
"gradient_clipping": "gradient_clipping",
"zero_stage": "zero_optimization.stage",
"offload_optimizer_device": "zero_optimization.offload_optimizer.device",
"offload_param_device": "zero_optimization.offload_param.device",
"offload_param_nvme_path": "zero_optimization.offload_param.nvme_path",
"offload_optimizer_nvme_path": "zero_optimization.offload_optimizer.nvme_path",
"zero3_save_16bit_model": "zero_optimization.stage3_gather_16bit_weights_on_model_save",
}
kwargs = {v: getattr(self, k) for k, v in plugin_to_config_mapping.items() if getattr(self, k) is not None}
for key in kwargs.keys():
self.fill_match(key, **kwargs, must_match=False)
self.hf_ds_config.set_stage_and_offload()
# filling the missing values in the class attributes from the DeepSpeed config
# when using the DeepSpeed config file.
for key, value in plugin_to_config_mapping.items():
config_value = self.hf_ds_config.get_value(value)
if config_value is not None and config_value != "auto":
setattr(self, key, config_value)
else:
config = {
"train_batch_size": "auto",
"train_micro_batch_size_per_gpu": "auto",
"gradient_accumulation_steps": self.gradient_accumulation_steps,
"zero_optimization": {
"stage": self.zero_stage,
"offload_optimizer": {
"device": self.offload_optimizer_device,
"nvme_path": self.offload_optimizer_nvme_path
if self.offload_optimizer_device == "nvme"
else None,
},
"offload_param": {
"device": self.offload_param_device,
"nvme_path": self.offload_param_nvme_path if self.offload_param_device == "nvme" else None,
},
"stage3_gather_16bit_weights_on_model_save": self.zero3_save_16bit_model,
},
}
if self.gradient_clipping:
config["gradient_clipping"] = self.gradient_clipping
self.hf_ds_config = HfDeepSpeedConfig(config)
self.deepspeed_config = self.hf_ds_config.config
self.deepspeed_config["steps_per_print"] = float("inf") # this will stop deepspeed from logging @ stdout
if self.zero3_init_flag is None:
self.zero3_init_flag = (
str_to_bool(os.environ.get("ACCELERATE_DEEPSPEED_ZERO3_INIT", str(self.hf_ds_config.is_zero3()))) == 1
)
if self.zero3_init_flag and not self.hf_ds_config.is_zero3():
warnings.warn("DeepSpeed Zero3 Init flag is only applicable for ZeRO Stage 3. Setting it to False.")
self.zero3_init_flag = False
def fill_match(self, ds_key_long, mismatches=None, must_match=True, **kwargs):
mismatches = [] if mismatches is None else mismatches
config, ds_key = self.hf_ds_config.find_config_node(ds_key_long)
if config is None:
return
if config.get(ds_key) == "auto":
if ds_key_long in kwargs:
config[ds_key] = kwargs[ds_key_long]
return
else:
raise ValueError(
f"`{ds_key_long}` not found in kwargs. "
f"Please specify `{ds_key_long}` without `auto` (set to correct value) in the DeepSpeed config file or "
"pass it in kwargs."
)
if not must_match:
return
ds_val = config.get(ds_key)
if ds_val is not None and ds_key_long in kwargs:
if ds_val != kwargs[ds_key_long]:
mismatches.append(f"- ds {ds_key_long}={ds_val} vs arg {ds_key_long}={kwargs[ds_key_long]}")
def is_auto(self, ds_key_long):
val = self.hf_ds_config.get_value(ds_key_long)
if val is None:
return False
else:
return val == "auto"
def get_value(self, ds_key_long, default=None):
return self.hf_ds_config.get_value(ds_key_long, default)
def deepspeed_config_process(self, prefix="", mismatches=None, config=None, must_match=True, **kwargs):
"""Process the DeepSpeed config with the values from the kwargs."""
mismatches = [] if mismatches is None else mismatches
if config is None:
config = self.deepspeed_config
for key, value in config.items():
if isinstance(value, dict):
self.deepspeed_config_process(
prefix=prefix + key + ".", mismatches=mismatches, config=value, must_match=must_match, **kwargs
)
else:
self.fill_match(prefix + key, mismatches, must_match=must_match, **kwargs)
if len(mismatches) > 0 and prefix == "":
mismatches_msg = "\n".join(mismatches)
raise ValueError(
"Please correct the following DeepSpeed config values that mismatch kwargs "
f" values:\n{mismatches_msg}\nThe easiest method is to set these DeepSpeed config values to 'auto'."
)
def set_mixed_precision(self, mixed_precision):
ds_config = self.deepspeed_config
kwargs = {
"fp16.enabled": mixed_precision == "fp16",
"bf16.enabled": mixed_precision == "bf16",
}
if mixed_precision == "fp16":
if "fp16" not in ds_config:
ds_config["fp16"] = {"enabled": True, "auto_cast": True}
elif mixed_precision == "bf16":
if "bf16" not in ds_config:
ds_config["bf16"] = {"enabled": True}
if mixed_precision != "no":
diff_dtype = "bf16" if mixed_precision == "fp16" else "fp16"
if str(ds_config.get(diff_dtype, {}).get("enabled", "False")).lower() == "true":
raise ValueError(
f"`--mixed_precision` arg cannot be set to `{mixed_precision}` when `{diff_dtype}` is set in the DeepSpeed config file."
)
for dtype in ["fp16", "bf16"]:
if dtype not in ds_config:
ds_config[dtype] = {"enabled": False}
self.fill_match("fp16.enabled", must_match=False, **kwargs)
self.fill_match("bf16.enabled", must_match=False, **kwargs)
def set_deepspeed_weakref(self):
from .imports import is_transformers_available
if self.zero3_init_flag:
if not is_transformers_available():
raise Exception(
"When `zero3_init_flag` is set, it requires Transformers to be installed. "
"Please run `pip install transformers`."
)
ds_config = copy.deepcopy(self.deepspeed_config)
if "gradient_accumulation_steps" not in ds_config or ds_config["gradient_accumulation_steps"] == "auto":
ds_config["gradient_accumulation_steps"] = 1
if (
"train_micro_batch_size_per_gpu" not in ds_config
or ds_config["train_micro_batch_size_per_gpu"] == "auto"
):
ds_config["train_micro_batch_size_per_gpu"] = 1
if ds_config.get("train_batch_size", None) == "auto":
del ds_config["train_batch_size"]
if compare_versions("transformers", "<", "4.33"):
from transformers.deepspeed import HfDeepSpeedConfig
else:
from transformers.integrations import HfDeepSpeedConfig
self.dschf = HfDeepSpeedConfig(ds_config) # keep this object alive # noqa
def is_zero3_init_enabled(self):
return self.zero3_init_flag
@contextmanager
def zero3_init_context_manager(self, enable=False):
old = self.zero3_init_flag
if old == enable:
yield
else:
self.zero3_init_flag = enable
self.dschf = None
self.set_deepspeed_weakref()
yield
self.zero3_init_flag = old
self.dschf = None
self.set_deepspeed_weakref()
def _deepspeed_config_checks(self):
env_variable_names_to_ignore = [
"ACCELERATE_GRADIENT_ACCUMULATION_STEPS",
"ACCELERATE_GRADIENT_CLIPPING",
"ACCELERATE_DEEPSPEED_ZERO_STAGE",
"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE",
"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE",
"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_NVME_PATH",
"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_NVME_PATH",
"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL",
"ACCELERATE_MIXED_PRECISION",
]
env_variable_names_to_ignore = [
name.replace("ACCELERATE_", "").replace("DEEPSPEED_", "").lower() for name in env_variable_names_to_ignore
]
deepspeed_fields_from_accelerate_config = os.environ.get("ACCELERATE_CONFIG_DS_FIELDS", "").split(",")
if any(name in env_variable_names_to_ignore for name in deepspeed_fields_from_accelerate_config):
raise ValueError(
f"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\n"
"Please specify them appropriately in the DeepSpeed config file.\n"
"If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\n"
"The easiest method is to create a new config following the questionnaire via `accelerate config`.\n"
"It will only ask for the necessary config variables when using `deepspeed_config_file`."
)
@dataclass
class FullyShardedDataParallelPlugin:
"""
This plugin is used to enable fully sharded data parallelism.
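Example (a minimal sketch; the state dict configs shown are illustrative):
```python
from torch.distributed.fsdp.fully_sharded_data_parallel import FullOptimStateDictConfig, FullStateDictConfig

from accelerate import Accelerator, FullyShardedDataParallelPlugin

fsdp_plugin = FullyShardedDataParallelPlugin(
    state_dict_config=FullStateDictConfig(offload_to_cpu=False, rank0_only=False),
    optim_state_dict_config=FullOptimStateDictConfig(offload_to_cpu=False, rank0_only=False),
)
accelerator = Accelerator(fsdp_plugin=fsdp_plugin)
```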
"""
sharding_strategy: "typing.Any" = field(
default=None,
metadata={
"help": "FSDP Sharding Strategy of type `torch.distributed.fsdp.fully_sharded_data_parallel.ShardingStrategy`"
},
)
backward_prefetch: "typing.Any" = field(
default=None,
metadata={
"help": "FSDP Backward Prefetch of type `torch.distributed.fsdp.fully_sharded_data_parallel.BackwardPrefetch`"
},
)
mixed_precision_policy: "typing.Any" = field(
default=None,
metadata={
"help": "A config to enable mixed precision training with FullyShardedDataParallel. "
"The 3 flags that are set are `param_dtype`, `reduce_dtype`, `buffer_dtype`. "
"Each flag expects `torch.dtype` as the value. "
"It is of type `torch.distributed.fsdp.fully_sharded_data_parallel.MixedPrecision`."
},
)
auto_wrap_policy: Optional[Callable] = field(
default=None,
metadata={"help": "A callable specifying a policy to recursively wrap layers with FSDP"},
)
cpu_offload: "typing.Any" = field(
default=None,
metadata={
"help": "Decides Whether to offload parameters and gradients to CPU. "
"It is of type `torch.distributed.fsdp.fully_sharded_data_parallel.CPUOffload`."
},
)
ignored_modules: Optional[Iterable[torch.nn.Module]] = field(
default=None,
metadata={"help": "A list of modules to ignore for FSDP."},
)
state_dict_type: "typing.Any" = field(
default=None,
metadata={
"help": "FSDP State Dict Type of type `torch.distributed.fsdp.fully_sharded_data_parallel.StateDictType`"
},
)
state_dict_config: "typing.Any" = field(
default=None,
metadata={
"help": "FSDP State Dict Config of type `torch.distributed.fsdp.fully_sharded_data_parallel.StateDictConfig`"
},
)
optim_state_dict_config: "typing.Any" = field(
default=None,
metadata={
"help": "FSDP Optimizer State Dict Config of type `torch.distributed.fsdp.fully_sharded_data_parallel.OptimStateDictConfig`"
},
)
limit_all_gathers: bool = field(
default=True,
metadata={
"help": "If False, then FSDP allows the CPU thread to schedule all-gathers "
"without any extra synchronization. If True, then FSDP explicitly synchronizes the CPU thread to prevent "
"too many in-flight all-gathers. This bool only affects the sharded strategies that schedule all-gathers. "
"Enabling this can help lower the number of CUDA malloc retries."
},
)
use_orig_params: bool = field(
default=True,
metadata={
"help": "If `True`, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable parameters. "
"Useful in cases such as parameter-efficient fine-tuning. "
"Please refer this [blog](https://dev-discuss.pytorch.org/t/rethinking-pytorch-fully-sharded-data-parallel-fsdp-from-first-principles/1019). "
"This also enables multiple optimizer param groups. This should be `True` when creating an optimizer object before preparing/wrapping the model with FSDP."
},
)
param_init_fn: Optional[Callable[[torch.nn.Module], None]] = field(
default=None,
metadata={
"help": "A Callable[torch.nn.Module] -> None that specifies how modules "
"that are currently on the meta device should be initialized onto an actual device."
},
)
sync_module_states: bool = field(
default=True,
metadata={
"help": "If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0 "
"to ensure they are the same across all ranks after initialization"
},
)
forward_prefetch: bool = field(
default=False,
metadata={
"help": "If True, then FSDP explicitly prefetches the next upcoming "
"all-gather while executing in the forward pass. only use with Static graphs."
},
)
activation_checkpointing: bool = field(
default=False,
metadata={
"help": "If True, activation checkpointing is a technique to reduce memory usage by clearing activations of "
"certain layers and recomputing them during a backward pass. Effectively, this trades extra computation time "
"for reduced memory usage."
},
)
def __post_init__(self):
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch, CPUOffload, ShardingStrategy
prefix = "FSDP_"
if self.sharding_strategy is None:
sharding_strategy = os.environ.get(prefix + "SHARDING_STRATEGY", "FULL_SHARD")
sharding_strategy = (
FSDP_SHARDING_STRATEGY.index(sharding_strategy) + 1
if not sharding_strategy.isdigit()
else int(sharding_strategy)
)
self.sharding_strategy = ShardingStrategy(sharding_strategy)
if self.cpu_offload is None:
if str_to_bool(os.environ.get(prefix + "OFFLOAD_PARAMS", "False")) == 1:
self.cpu_offload = CPUOffload(offload_params=True)
else:
self.cpu_offload = CPUOffload(offload_params=False)
if self.backward_prefetch is None:
prefetch_policy = os.environ.get(prefix + "BACKWARD_PREFETCH", "NO_PREFETCH")
if prefetch_policy != FSDP_BACKWARD_PREFETCH[-1]:
self.backward_prefetch = BackwardPrefetch(FSDP_BACKWARD_PREFETCH.index(prefetch_policy) + 1)
if self.state_dict_type is None:
state_dict_type_policy = os.environ.get(prefix + "STATE_DICT_TYPE", "FULL_STATE_DICT")
self.set_state_dict_type(state_dict_type_policy)
self.use_orig_params = str_to_bool(os.environ.get(prefix + "USE_ORIG_PARAMS", "False")) == 1
self.sync_module_states = str_to_bool(os.environ.get(prefix + "SYNC_MODULE_STATES", "True")) == 1
self.forward_prefetch = str_to_bool(os.environ.get(prefix + "FORWARD_PREFETCH", "False")) == 1
self.activation_checkpointing = str_to_bool(os.environ.get(prefix + "ACTIVATION_CHECKPOINTING", "False")) == 1
if self.sync_module_states:
if is_npu_available():
device = torch.npu.current_device()
elif is_cuda_available():
device = torch.cuda.current_device()
elif is_xpu_available():
device = torch.xpu.current_device()
else:
raise RuntimeError(
"There are currently no available devices found, must be one of 'XPU', 'CUDA', or 'NPU'."
)
self.param_init_fn = lambda x: x.to_empty(device=device, recurse=False)
@staticmethod
def get_module_class_from_name(module, name):
"""
Gets a class from a module by its name.
Args:
module (`torch.nn.Module`): The module to get the class from.
name (`str`): The name of the class.
"""
modules_children = list(module.children())
if module.__class__.__name__ == name:
return module.__class__
elif len(modules_children) == 0:
return
else:
for child_module in modules_children:
module_class = FullyShardedDataParallelPlugin.get_module_class_from_name(child_module, name)
if module_class is not None:
return module_class
def set_auto_wrap_policy(self, model):
from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy
default_transformer_cls_names_to_wrap = (
",".join(model._no_split_modules) if getattr(model, "_no_split_modules", None) is not None else ""
)
if self.auto_wrap_policy is None:
auto_wrap_policy = os.environ.get("FSDP_AUTO_WRAP_POLICY", "NO_WRAP")
if auto_wrap_policy == FSDP_AUTO_WRAP_POLICY[0]:
transformer_cls_names_to_wrap = os.environ.get(
"FSDP_TRANSFORMER_CLS_TO_WRAP", default_transformer_cls_names_to_wrap
).split(",")
transformer_cls_to_wrap = set()
for layer_class in transformer_cls_names_to_wrap:
transformer_cls = FullyShardedDataParallelPlugin.get_module_class_from_name(model, layer_class)
if transformer_cls is None:
raise Exception("Could not find the transformer layer class to wrap in the model.")
else:
transformer_cls_to_wrap.add(transformer_cls)
self.auto_wrap_policy = functools.partial(
transformer_auto_wrap_policy,
# Transformer layer class to wrap
transformer_layer_cls=transformer_cls_to_wrap,
)
elif auto_wrap_policy == FSDP_AUTO_WRAP_POLICY[1]:
min_num_params = int(os.environ.get("FSDP_MIN_NUM_PARAMS", 0))
if min_num_params > 0:
self.auto_wrap_policy = functools.partial(
size_based_auto_wrap_policy, min_num_params=min_num_params
)
def set_mixed_precision(self, mixed_precision, buffer_autocast=False, override=False):
if isinstance(mixed_precision, str):
if mixed_precision == "fp16":
dtype = torch.float16
elif mixed_precision == "bf16":
dtype = torch.bfloat16
elif mixed_precision == "fp32":
dtype = torch.float32
else:
raise ValueError(f"Unknown mixed precision value: {mixed_precision}")
else:
dtype = mixed_precision
buffer_dtype = torch.float32 if buffer_autocast else dtype
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
if self.mixed_precision_policy is None or override:
self.mixed_precision_policy = MixedPrecision(
param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=buffer_dtype
)
def set_state_dict_type(self, state_dict_type_policy):
from torch.distributed.fsdp.fully_sharded_data_parallel import (
FullOptimStateDictConfig,
FullStateDictConfig,
StateDictType,
)
self.state_dict_type = StateDictType(FSDP_STATE_DICT_TYPE.index(state_dict_type_policy) + 1)
if self.state_dict_type == StateDictType.FULL_STATE_DICT:
if self.state_dict_config is None:
self.state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
if self.optim_state_dict_config is None:
self.optim_state_dict_config = FullOptimStateDictConfig(offload_to_cpu=True, rank0_only=True)
@dataclass
class MegatronLMPlugin:
"""
Plugin for Megatron-LM to enable tensor, pipeline, sequence and data parallelism. Also to enable selective
activation recomputation and optimized fused kernels.
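Example (a minimal sketch; the parallelism degrees are illustrative):
```python
from accelerate import Accelerator
from accelerate.utils import MegatronLMPlugin

megatron_lm_plugin = MegatronLMPlugin(tp_degree=2, pp_degree=2, num_micro_batches=2)
accelerator = Accelerator(megatron_lm_plugin=megatron_lm_plugin)
```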
"""
tp_degree: int = field(default=None, metadata={"help": "tensor parallelism degree."})
pp_degree: int = field(default=None, metadata={"help": "pipeline parallelism degree."})
num_micro_batches: int = field(default=None, metadata={"help": "number of micro-batches."})
gradient_clipping: float = field(
default=None, metadata={"help": "gradient clipping value based on global L2 Norm (0 to disable)"}
)
sequence_parallelism: bool = field(
default=None,
metadata={"help": "enable sequence parallelism"},
)
recompute_activations: bool = field(
default=None,
metadata={"help": "enable selective activation recomputation"},
)
use_distributed_optimizer: bool = field(
default=None,
metadata={"help": "enable distributed optimizer"},
)
pipeline_model_parallel_split_rank: int = field(
default=None, metadata={"help": "Rank where encoder and decoder should be split."}
)
num_layers_per_virtual_pipeline_stage: int = field(
default=None, metadata={"help": "Number of layers per virtual pipeline stage."}
)
is_train_batch_min: bool = field(
default=True,
metadata={"help": "If both train & eval dataloaders are specified, this will decide the micro_batch_size"},
)
train_iters: int = field(
default=None,
metadata={
"help": "Total number of iterations to train over all training runs. "
"Note that either train-iters or train-samples should be provided when using `MegatronLMDummyScheduler`"
},
)
train_samples: int = field(
default=None,
metadata={
"help": "Total number of samples to train over all training runs. "
"Note that either train-iters or train-samples should be provided when using `MegatronLMDummyScheduler`"
},
)
weight_decay_incr_style: str = field(
default="constant",
metadata={"help": 'Weight decay increment function. choices=["constant", "linear", "cosine"]. '},
)
start_weight_decay: float = field(
default=None,
metadata={"help": "Initial weight decay coefficient for L2 regularization."},
)
end_weight_decay: float = field(
default=None,
metadata={"help": "End of run weight decay coefficient for L2 regularization."},
)
lr_decay_style: str = field(
default="linear",
metadata={"help": "Learning rate decay function. choices=['constant', 'linear', 'cosine']."},
)
lr_decay_iters: int = field(
default=None,
metadata={"help": "Number of iterations for learning rate decay. If None defaults to `train_iters`."},
)
lr_decay_samples: int = field(
default=None,
metadata={"help": "Number of samples for learning rate decay. If None defaults to `train_samples`."},
)
lr_warmup_iters: int = field(
default=None,
metadata={"help": "number of iterations to linearly warmup learning rate over."},
)
lr_warmup_samples: int = field(
default=None,
metadata={"help": "number of samples to linearly warmup learning rate over."},
)
lr_warmup_fraction: float = field(
default=None,
metadata={"help": "fraction of lr-warmup-(iters/samples) to linearly warmup learning rate over."},
)
min_lr: float = field(
default=0,
metadata={"help": "Minumum value for learning rate. The scheduler clip values below this threshold."},
)
consumed_samples: List[int] = field(
default=None,
metadata={
"help": "Number of samples consumed in the same order as the dataloaders to `accelerator.prepare` call."
},
)
no_wd_decay_cond: Optional[Callable] = field(default=None, metadata={"help": "Condition to disable weight decay."})
scale_lr_cond: Optional[Callable] = field(default=None, metadata={"help": "Condition to scale learning rate."})
lr_mult: float = field(default=1.0, metadata={"help": "Learning rate multiplier."})
megatron_dataset_flag: bool = field(
default=False,
metadata={"help": "Whether the format of dataset follows Megatron-LM Indexed/Cached/MemoryMapped format."},
)
seq_length: int = field(
default=None,
metadata={"help": "Maximum sequence length to process."},
)
encoder_seq_length: int = field(
default=None,
metadata={"help": "Maximum sequence length to process for the encoder."},
)
decoder_seq_length: int = field(
default=None,
metadata={"help": "Maximum sequence length to process for the decoder."},
)
tensorboard_dir: str = field(
default=None,
metadata={"help": "Path to save tensorboard logs."},
)
set_all_logging_options: bool = field(
default=False,
metadata={"help": "Whether to set all logging options."},
)
eval_iters: int = field(
default=100, metadata={"help": "Number of iterations to run for evaluation validation/test for."}
)
eval_interval: int = field(
default=1000, metadata={"help": "Interval between running evaluation on validation set."}
)
return_logits: bool = field(
default=False,
metadata={"help": "Whether to return logits from the model."},
)
# custom train step args
custom_train_step_class: Optional[Any] = field(
default=None,
metadata={"help": "Custom train step class."},
)
custom_train_step_kwargs: Optional[Dict[str, Any]] = field(
default=None,
metadata={"help": "Custom train step kwargs."},
)
# custom model args
custom_model_provider_function: Optional[Callable] = field(
default=None,
metadata={"help": "Custom model provider function."},
)
custom_prepare_model_function: Optional[Callable] = field(
default=None,
metadata={"help": "Custom prepare model function."},
)
# remaining args such as enabling Alibi/ROPE positional embeddings,
# wandb logging, Multi-Query Attention, etc.
other_megatron_args: Optional[Dict[str, Any]] = field(
default=None,
metadata={"help": "Other Megatron-LM arguments. Please refer Megatron-LM"},
)
def __post_init__(self):
prefix = "MEGATRON_LM_"
if self.tp_degree is None:
self.tp_degree = int(os.environ.get(prefix + "TP_DEGREE", 1))
if self.pp_degree is None:
self.pp_degree = int(os.environ.get(prefix + "PP_DEGREE", 1))
if self.num_micro_batches is None:
self.num_micro_batches = int(os.environ.get(prefix + "NUM_MICRO_BATCHES", 1))
if self.gradient_clipping is None:
self.gradient_clipping = float(os.environ.get(prefix + "GRADIENT_CLIPPING", 1.0))
if self.recompute_activations is None:
self.recompute_activations = str_to_bool(os.environ.get(prefix + "RECOMPUTE_ACTIVATIONS", "False")) == 1
if self.use_distributed_optimizer is None:
self.use_distributed_optimizer = (
str_to_bool(os.environ.get(prefix + "USE_DISTRIBUTED_OPTIMIZER", "False")) == 1
)
if self.sequence_parallelism is None:
self.sequence_parallelism = str_to_bool(os.environ.get(prefix + "SEQUENCE_PARALLELISM", "False")) == 1
if self.pp_degree > 1 or self.use_distributed_optimizer:
self.DDP_impl = "local"
else:
self.DDP_impl = "torch"
if self.consumed_samples is not None:
if len(self.consumed_samples) == 1:
self.consumed_samples.extend([0, 0])
elif len(self.consumed_samples) == 2:
self.consumed_samples.append(0)
self.megatron_lm_default_args = {
"tensor_model_parallel_size": self.tp_degree,
"pipeline_model_parallel_size": self.pp_degree,
"pipeline_model_parallel_split_rank": self.pipeline_model_parallel_split_rank,
"num_layers_per_virtual_pipeline_stage": self.num_layers_per_virtual_pipeline_stage,
"DDP_impl": self.DDP_impl,
"use_distributed_optimizer": self.use_distributed_optimizer,
"sequence_parallel": self.sequence_parallelism,
"clip_grad": self.gradient_clipping,
"num_micro_batches": self.num_micro_batches,
"consumed_samples": self.consumed_samples,
"no_wd_decay_cond": self.no_wd_decay_cond,
"scale_lr_cond": self.scale_lr_cond,
"lr_mult": self.lr_mult,
"megatron_dataset_flag": self.megatron_dataset_flag,
"eval_iters": self.eval_iters,
"eval_interval": self.eval_interval,
}
if self.recompute_activations:
self.megatron_lm_default_args["recompute_granularity"] = "selective"
if self.tensorboard_dir is not None:
self.megatron_lm_default_args["tensorboard_dir"] = self.tensorboard_dir
if self.set_all_logging_options:
self.set_tensorboard_logging_options()
if self.other_megatron_args is not None:
self.megatron_lm_default_args.update(self.other_megatron_args)
def set_network_size_args(self, model, batch_data=None):
# Check if the model is either BERT, GPT or T5 else raise error
# set 'num_layers', 'hidden_size', 'num_attention_heads', 'max_position_embeddings'
if "megatron-bert" in model.config.model_type.lower():
model_type_name = "bert"
num_layers = model.config.num_hidden_layers
hidden_size = model.config.hidden_size
num_attention_heads = model.config.num_attention_heads
max_position_embeddings = model.config.max_position_embeddings
num_labels = model.config.num_labels
orig_vocab_size = model.config.vocab_size
if "maskedlm" in model.__class__.__name__.lower():
pretraining_flag = True
if self.seq_length is not None:
if self.encoder_seq_length is not None:
warnings.warn("Both `seq_length` and `encoder_seq_length` are set. Using `encoder_seq_length`.")
self.seq_length = self.encoder_seq_length
elif self.encoder_seq_length is not None:
self.seq_length = self.encoder_seq_length
elif batch_data is not None:
self.seq_length = batch_data["input_ids"].shape[1]
else:
self.seq_length = max_position_embeddings
self.megatron_lm_default_args["seq_length"] = self.seq_length
elif "gpt2" in model.config.model_type.lower():
model_type_name = "gpt"
num_layers = model.config.n_layer
hidden_size = model.config.n_embd
num_attention_heads = model.config.n_head
max_position_embeddings = model.config.n_positions
orig_vocab_size = model.config.vocab_size
pretraining_flag = True
if self.seq_length is not None:
if self.decoder_seq_length is not None:
warnings.warn("Both `seq_length` and `decoder_seq_length` are set. Using `decoder_seq_length`.")
self.seq_length = self.decoder_seq_length
elif self.decoder_seq_length is not None:
self.seq_length = self.decoder_seq_length
elif batch_data is not None:
self.seq_length = batch_data["input_ids"].shape[1]
else:
self.seq_length = max_position_embeddings
self.megatron_lm_default_args["seq_length"] = self.seq_length
self.megatron_lm_default_args["return_logits"] = self.return_logits
self.megatron_lm_default_args["tokenizer_type"] = "GPT2BPETokenizer"
elif "t5" in model.config.model_type.lower():
model_type_name = "t5"
num_layers = model.config.num_layers
hidden_size = model.config.d_model
num_attention_heads = model.config.num_heads
max_position_embeddings = model.config.n_positions if hasattr(model.config, "n_positions") else 1024
orig_vocab_size = model.config.vocab_size
pretraining_flag = True
if self.encoder_seq_length is None:
if batch_data is not None:
self.encoder_seq_length = batch_data["input_ids"].shape[1]
else:
self.encoder_seq_length = max_position_embeddings
if self.decoder_seq_length is None:
if batch_data is not None:
self.decoder_seq_length = batch_data["labels"].shape[1]
else:
self.decoder_seq_length = max_position_embeddings
self.megatron_lm_default_args["encoder_seq_length"] = self.encoder_seq_length
self.megatron_lm_default_args["decoder_seq_length"] = self.decoder_seq_length
else:
raise ValueError(
"🤗 Accelerate Megatron-LM integration supports only BERT, GPT and T5 models. "
"Please check that the model you are using is one of those."
)
self.megatron_lm_default_args["model_type_name"] = model_type_name
self.megatron_lm_default_args["num_layers"] = num_layers
self.megatron_lm_default_args["hidden_size"] = hidden_size
self.megatron_lm_default_args["num_attention_heads"] = num_attention_heads
self.megatron_lm_default_args["max_position_embeddings"] = max_position_embeddings
self.megatron_lm_default_args["pretraining_flag"] = pretraining_flag
self.megatron_lm_default_args["orig_vocab_size"] = orig_vocab_size
self.megatron_lm_default_args["model_return_dict"] = model.config.return_dict
if model_type_name == "bert":
self.megatron_lm_default_args["num_labels"] = num_labels
def set_mixed_precision(self, mixed_precision):
if mixed_precision == "fp16":
self.megatron_lm_default_args["fp16"] = True
elif mixed_precision == "bf16":
self.megatron_lm_default_args["bf16"] = True
self.DDP_impl = "local"
self.megatron_lm_default_args["DDP_impl"] = self.DDP_impl
def set_training_args(self, micro_batch_size, dp_degree):
self.data_parallel_size = dp_degree
self.micro_batch_size = micro_batch_size
self.global_batch_size = dp_degree * micro_batch_size * self.num_micro_batches
self.megatron_lm_default_args["data_parallel_size"] = self.data_parallel_size
self.megatron_lm_default_args["micro_batch_size"] = self.micro_batch_size
self.megatron_lm_default_args["global_batch_size"] = self.global_batch_size
def set_optimizer_type(self, optimizer):
optimizer_name = optimizer.__class__.__name__.lower()
if "adam" in optimizer_name:
self.megatron_lm_default_args["optimizer"] = "adam"
self.megatron_lm_default_args["adam_beta1"] = optimizer.defaults["betas"][0]
self.megatron_lm_default_args["adam_beta2"] = optimizer.defaults["betas"][1]
self.megatron_lm_default_args["adam_eps"] = optimizer.defaults["eps"]
elif "sgd" in optimizer_name:
self.megatron_lm_default_args["optimizer"] = "sgd"
self.megatron_lm_default_args["sgd_momentum"] = optimizer.defaults["momentum"]
else:
raise ValueError(f"Optimizer {optimizer_name} is not supported by Megatron-LM")
self.megatron_lm_default_args["lr"] = optimizer.defaults["lr"]
self.megatron_lm_default_args["weight_decay"] = optimizer.defaults["weight_decay"]
def set_scheduler_args(self, scheduler):
if self.train_iters is None:
self.train_iters = scheduler.total_num_steps // self.megatron_lm_default_args["data_parallel_size"]
if self.train_samples is not None:
self.train_samples = None
warnings.warn(
"Ignoring `train_samples` as `train_iters` based on scheduler is being used for training."
)
if self.lr_warmup_iters is None:
self.lr_warmup_iters = scheduler.warmup_num_steps // self.megatron_lm_default_args["data_parallel_size"]
if self.lr_warmup_samples is not None:
warnings.warn(
"Ignoring `lr_warmup_samples` as `lr_warmup_iters` based on scheduler is being used for training."
)
self.lr_warmup_samples = 0
self.megatron_lm_default_args["train_iters"] = self.train_iters
self.megatron_lm_default_args["lr_warmup_iters"] = self.lr_warmup_iters
self.megatron_lm_default_args["train_samples"] = self.train_samples
self.megatron_lm_default_args["lr_warmup_samples"] = self.lr_warmup_samples
self.megatron_lm_default_args["lr_decay_iters"] = self.lr_decay_iters
self.megatron_lm_default_args["lr_decay_samples"] = self.lr_decay_samples
self.megatron_lm_default_args["lr_warmup_fraction"] = self.lr_warmup_fraction
self.megatron_lm_default_args["lr_decay_style"] = self.lr_decay_style
self.megatron_lm_default_args["weight_decay_incr_style"] = self.weight_decay_incr_style
self.megatron_lm_default_args["start_weight_decay"] = self.start_weight_decay
self.megatron_lm_default_args["end_weight_decay"] = self.end_weight_decay
self.megatron_lm_default_args["min_lr"] = self.min_lr
def set_tensorboard_logging_options(self):
from megatron.arguments import _add_logging_args
parser = argparse.ArgumentParser()
parser = _add_logging_args(parser)
logging_args = parser.parse_known_args()
self.dataset_args = vars(logging_args[0])
for key, value in self.dataset_args.items():
if key.startswith("log_"):
self.megatron_lm_default_args[key] = True
elif key.startswith("no_log_"):
self.megatron_lm_default_args[key.replace("no_", "")] = True
@dataclass
class BnbQuantizationConfig:
"""
A plugin to enable BitsAndBytes 4bit and 8bit quantization
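    Example (illustrative sketch of constructing the config):
    ```python
    from accelerate.utils import BnbQuantizationConfig
    bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
    ```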
"""
load_in_8bit: bool = field(default=False, metadata={"help": "enable 8bit quantization."})
llm_int8_threshold: float = field(
        default=6.0, metadata={"help": "value of the outlier threshold. Only relevant when load_in_8bit=True"}
)
load_in_4bit: bool = field(default=False, metadata={"help": "enable 4bit quantization."})
bnb_4bit_quant_type: str = field(
default="fp4",
metadata={
"help": "set the quantization data type in the `bnb.nn.Linear4Bit` layers. Options are {'fp4','np4'}."
},
)
bnb_4bit_use_double_quant: bool = field(
default=False,
metadata={
"help": "enable nested quantization where the quantization constants from the first quantization are quantized again."
},
)
    bnb_4bit_compute_dtype: str = field(
default="fp16",
metadata={
"help": "This sets the computational type which might be different than the input time. For example, inputs might be "
"fp32, but computation can be set to bf16 for speedups. Options are {'fp32','fp16','bf16'}."
},
)
torch_dtype: torch.dtype = field(
default=None,
metadata={
"help": "this sets the dtype of the remaining non quantized layers. `bitsandbytes` library suggests to set the value"
"to `torch.float16` for 8 bit model and use the same dtype as the compute dtype for 4 bit model "
},
)
skip_modules: List[str] = field(
default=None,
metadata={
"help": "an explicit list of the modules that we don't quantize. The dtype of these modules will be `torch_dtype`."
},
)
keep_in_fp32_modules: List[str] = field(
default=None,
metadata={"help": "an explicit list of the modules that we don't quantize. We keep them in `torch.float32`."},
)
def __post_init__(self):
"""
Safety checker that arguments are correct - also replaces some NoneType arguments with their default values.
"""
if not isinstance(self.load_in_8bit, bool):
raise ValueError("load_in_8bit must be a boolean")
if not isinstance(self.load_in_4bit, bool):
raise ValueError("load_in_4bit must be a boolean")
if self.load_in_4bit and self.load_in_8bit:
raise ValueError("load_in_4bit and load_in_8 can't be both True")
if not self.load_in_4bit and not self.load_in_8bit:
raise ValueError("load_in_4bit and load_in_8 can't be both False")
if not isinstance(self.llm_int8_threshold, (int, float)):
raise ValueError("llm_int8_threshold must be a float or an int")
if not isinstance(self.bnb_4bit_quant_type, str):
raise ValueError("bnb_4bit_quant_type must be a string")
elif self.bnb_4bit_quant_type not in ["fp4", "nf4"]:
raise ValueError(f"bnb_4bit_quant_type must be in ['fp4','nf4'] but found {self.bnb_4bit_quant_type}")
if not isinstance(self.bnb_4bit_use_double_quant, bool):
raise ValueError("bnb_4bit_use_double_quant must be a boolean")
if isinstance(self.bnb_4bit_compute_dtype, str):
if self.bnb_4bit_compute_dtype == "fp32":
self.bnb_4bit_compute_dtype = torch.float32
elif self.bnb_4bit_compute_dtype == "fp16":
self.bnb_4bit_compute_dtype = torch.float16
elif self.bnb_4bit_compute_dtype == "bf16":
self.bnb_4bit_compute_dtype = torch.bfloat16
else:
raise ValueError(
f"bnb_4bit_compute_dtype must be in ['fp32','fp16','bf16'] but found {self.bnb_4bit_compute_dtype}"
)
elif not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")
if self.skip_modules is not None and not isinstance(self.skip_modules, list):
raise ValueError("skip_modules must be a list of strings")
if self.keep_in_fp32_modules is not None and not isinstance(self.keep_in_fp32_modules, list):
raise ValueError("keep_in_fp_32_modules must be a list of strings")
if self.load_in_4bit:
self.target_dtype = CustomDtype.INT4
if self.load_in_8bit:
self.target_dtype = torch.int8
if self.load_in_4bit and self.llm_int8_threshold != 6.0:
warnings.warn("llm_int8_threshold can only be used for model loaded in 8bit")
if isinstance(self.torch_dtype, str):
if self.torch_dtype == "fp32":
self.torch_dtype = torch.float32
elif self.torch_dtype == "fp16":
self.torch_dtype = torch.float16
elif self.torch_dtype == "bf16":
self.torch_dtype = torch.bfloat16
else:
raise ValueError(f"torch_dtype must be in ['fp32','fp16','bf16'] but found {self.torch_dtype}")
if self.load_in_8bit and self.torch_dtype is None:
self.torch_dtype = torch.float16
if self.load_in_4bit and self.torch_dtype is None:
self.torch_dtype = self.bnb_4bit_compute_dtype
if not isinstance(self.torch_dtype, torch.dtype):
raise ValueError("torch_dtype must be a torch.dtype")
| accelerate/src/accelerate/utils/dataclasses.py/0 | {
"file_path": "accelerate/src/accelerate/utils/dataclasses.py",
"repo_id": "accelerate",
"token_count": 32006
} | 9 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.nn as nn
from .imports import is_fp8_available
if is_fp8_available():
import transformer_engine.pytorch as te
def convert_model(model, to_transformer_engine=True, _convert_linear=True, _convert_ln=True):
"""
    Recursively converts the linear and layernorm layers of a model to their `transformer_engine` counterpart.
"""
if not is_fp8_available():
raise ImportError("Using `convert_model` requires transformer_engine to be installed.")
for name, module in model.named_children():
if isinstance(module, nn.Linear) and to_transformer_engine and _convert_linear:
# Return early if the linear layer weights are not multiples of 16
if any(p % 16 != 0 for p in module.weight.shape):
return
has_bias = module.bias is not None
te_module = te.Linear(
module.in_features, module.out_features, bias=has_bias, params_dtype=module.weight.dtype
)
te_module.weight.copy_(module.weight)
if has_bias:
te_module.bias.copy_(module.bias)
setattr(model, name, te_module)
elif isinstance(module, nn.LayerNorm) and to_transformer_engine and _convert_ln:
te_module = te.LayerNorm(module.normalized_shape[0], eps=module.eps, params_dtype=module.weight.dtype)
te_module.weight.copy_(module.weight)
te_module.bias.copy_(module.bias)
setattr(model, name, te_module)
elif isinstance(module, te.Linear) and not to_transformer_engine and _convert_linear:
has_bias = module.bias is not None
new_module = nn.Linear(
module.in_features, module.out_features, bias=has_bias, params_dtype=module.weight.dtype
)
new_module.weight.copy_(module.weight)
if has_bias:
new_module.bias.copy_(module.bias)
setattr(model, name, new_module)
elif isinstance(module, te.LayerNorm) and not to_transformer_engine and _convert_ln:
new_module = nn.LayerNorm(module.normalized_shape[0], eps=module.eps, params_dtype=module.weight.dtype)
new_module.weight.copy_(module.weight)
new_module.bias.copy_(module.bias)
setattr(model, name, new_module)
else:
convert_model(
module,
to_transformer_engine=to_transformer_engine,
_convert_linear=_convert_linear,
_convert_ln=_convert_ln,
)
def has_transformer_engine_layers(model):
"""
Returns whether a given model has some `transformer_engine` layer or not.
"""
if not is_fp8_available():
raise ImportError("Using `has_transformer_engine_layers` requires transformer_engine to be installed.")
for m in model.modules():
if isinstance(m, (te.LayerNorm, te.Linear, te.TransformerLayer)):
return True
return False
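# Illustrative usage sketch (assumes `transformer_engine` is installed and the model's linear
# layer dimensions are multiples of 16; `model` is a placeholder for any `nn.Module`):
#
# convert_model(model, to_transformer_engine=True)
# assert has_transformer_engine_layers(model)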
| accelerate/src/accelerate/utils/transformer_engine.py/0 | {
"file_path": "accelerate/src/accelerate/utils/transformer_engine.py",
"repo_id": "accelerate",
"token_count": 1481
} | 10 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
# For testing, an iterable dataset of random length
def __init__(self, p_stop=0.01, max_length=1000):
self.p_stop = p_stop
self.max_length = max_length
def __iter__(self):
count = 0
stop = False
while not stop and count < self.max_length:
yield count
count += 1
stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
batch_sampler_shards = [
BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
for i in range(2)
]
batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
assert [len(shard) for shard in batch_sampler_shards] == [len(e) for e in expected]
assert batch_sampler_lists == expected
def test_batch_sampler_shards_with_no_splits(self):
# Check the shards when the dataset is a round multiple of total batch size.
batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
expected = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(batch_sampler, expected)
batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
# Expected shouldn't change
self.check_batch_sampler_shards(batch_sampler, expected)
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
expected = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(batch_sampler, expected)
batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
expected = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(batch_sampler, expected)
        # Check the shards when the dataset is not a round multiple of batch size but its number of batches
        # is a multiple of num_processes.
batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
expected = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(batch_sampler, expected)
batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
expected = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(batch_sampler, expected)
        # Check the shards when the dataset is not a round multiple of batch size and its number of batches
        # is not a multiple of num_processes.
batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
expected = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(batch_sampler, expected)
batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
expected = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(batch_sampler, expected)
# Check the shards when the dataset is very small.
batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
expected = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(batch_sampler, expected)
batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
expected = [[], []]
self.check_batch_sampler_shards(batch_sampler, expected)
def test_batch_sampler_shards_with_splits(self):
# Check the shards when the dataset is a round multiple of batch size.
batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
expected = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
# Expected shouldn't change
self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
# Check the shards when the dataset is not a round multiple of batch size.
batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
expected = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
expected = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
expected = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
expected = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
# Check the shards when the dataset is very small.
batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
expected = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
expected = [[], []]
self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
def test_batch_sampler_shards_with_no_splits_no_even(self):
# Check the shards when the dataset is a round multiple of total batch size.
batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
expected = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
# Expected shouldn't change
self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
expected = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
expected = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
        # Check the shards when the dataset is not a round multiple of batch size but its number of batches
        # is a multiple of num_processes.
batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
expected = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
expected = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
        # Check the shards when the dataset is not a round multiple of batch size and its number of batches
        # is not a multiple of num_processes.
batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
expected = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
expected = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
# Check the shards when the dataset is very small.
batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
expected = [[[0, 1]], []]
self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
expected = [[], []]
self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
def test_batch_sampler_shards_with_splits_no_even(self):
# Check the shards when the dataset is a round multiple of batch size.
batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
expected = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
# Expected shouldn't change
self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
# Check the shards when the dataset is not a round multiple of batch size.
batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
expected = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
expected = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
expected = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
expected = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
# Check the shards when the dataset is very small.
batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
expected = [[[0, 1]], []]
self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
expected = [[], []]
self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
def test_batch_sampler_with_varying_batch_size(self):
batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]
assert len(batch_sampler_shards[0]) == 3
assert len(batch_sampler_shards[1]) == 2
assert list(batch_sampler_shards[0]) == [[0, 1, 2], [5, 6, 7, 8], [12, 13]]
assert list(batch_sampler_shards[1]) == [[3, 4], [9, 10, 11]]
def check_iterable_dataset_shards(
self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
):
random.seed(seed)
reference = list(dataset)
iterable_dataset_shards = [
IterableDatasetShard(
dataset,
batch_size=batch_size,
drop_last=drop_last,
num_processes=num_processes,
process_index=i,
split_batches=split_batches,
)
for i in range(num_processes)
]
iterable_dataset_lists = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(seed)
iterable_dataset_lists.append(list(iterable_dataset_shard))
shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
first_list = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
assert len(l) == len(first_list)
assert (len(l) % shard_batch_size) == 0
observed = []
for idx in range(0, len(first_list), shard_batch_size):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(reference) < len(observed):
reference += reference
assert observed == reference[: len(observed)]
def test_iterable_dataset_shard(self):
seed = 42
dataset = RandomIterableDataset()
self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
# Edge case with a very small dataset
dataset = RandomIterableDataset(max_length=2)
self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
def test_skip_batch_sampler(self):
batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
assert list(new_batch_sampler) == [[8, 9, 10, 11], [12, 13, 14, 15]]
def test_skip_data_loader(self):
dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
assert [t.tolist() for t in dataloader] == [[8, 9, 10, 11], [12, 13, 14, 15]]
def test_skip_first_batches(self):
dataloader = DataLoader(list(range(16)), batch_size=4)
new_dataloader = skip_first_batches(dataloader, num_batches=2)
assert [t.tolist() for t in new_dataloader] == [[8, 9, 10, 11], [12, 13, 14, 15]]
def test_end_of_dataloader(self):
dataloader = DataLoaderShard(list(range(16)), batch_size=4)
for idx, _ in enumerate(dataloader):
assert dataloader.end_of_dataloader == (idx == 3)
# Test it also works on the second iteration
for idx, _ in enumerate(dataloader):
assert dataloader.end_of_dataloader == (idx == 3)
def test_end_of_dataloader_dispatcher(self):
Accelerator()
dataloader = DataLoaderDispatcher(range(16), batch_size=4)
for idx, _ in enumerate(dataloader):
assert dataloader.end_of_dataloader == (idx == 3)
# Test it also works on the second iteration
for idx, _ in enumerate(dataloader):
assert dataloader.end_of_dataloader == (idx == 3)
| accelerate/tests/test_data_loader.py/0 | {
"file_path": "accelerate/tests/test_data_loader.py",
"repo_id": "accelerate",
"token_count": 8488
} | 11 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from functools import partial
import torch
from accelerate import Accelerator, debug_launcher
from accelerate.state import AcceleratorState, GradientState
from accelerate.test_utils import require_cpu, require_huggingface_suite
from accelerate.utils import GradientAccumulationPlugin
def one_cycle_test(num_processes=2, step_scheduler_with_optimizer=True, split_batches=False):
accelerator = Accelerator(step_scheduler_with_optimizer=step_scheduler_with_optimizer, split_batches=split_batches)
model = torch.nn.Linear(2, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
# Optimizer has stepped
scheduler.step()
if step_scheduler_with_optimizer or (num_processes == 1):
assert (
scheduler.scheduler.last_epoch == num_processes
), f"Last Epoch ({scheduler.scheduler.last_epoch}) != Num Processes ({num_processes})"
else:
assert (
scheduler.scheduler.last_epoch != num_processes
), f"Last Epoch ({scheduler.scheduler.last_epoch}) == Num Processes ({num_processes})"
def lambda_test(num_processes=2, step_scheduler_with_optimizer=True, split_batches=False):
accelerator = Accelerator(step_scheduler_with_optimizer=step_scheduler_with_optimizer, split_batches=split_batches)
model = torch.nn.Linear(2, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda n: 1 - n / 10)
model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
# Optimizer has stepped
optimizer._is_overflow = False
scheduler.step()
expected_lr = 1 - (num_processes if (step_scheduler_with_optimizer and not split_batches) else 1) / 10
assert (
scheduler.get_last_lr()[0] == expected_lr
), f"Wrong lr found at first step, expected {expected_lr}, got {scheduler.get_last_lr()[0]}"
# Optimizer has not stepped
optimizer._is_overflow = True
scheduler.step()
if not step_scheduler_with_optimizer:
expected_lr = 1 - 2 / 10
assert (
scheduler.get_last_lr()[0] == expected_lr
), f"Wrong lr found at second step, expected {expected_lr}, got {scheduler.get_last_lr()[0]}"
def accumulation_test(num_processes: int = 2):
"""
    With this test, an observed batch size of 64 should result in negligible
differences in the scheduler after going through the correct number of steps.
Uses single, two, and four steps to test.
"""
from transformers import get_linear_schedule_with_warmup
steps = [1, 2, 4]
for num_steps in steps:
plugin = GradientAccumulationPlugin(num_steps=num_steps, adjust_scheduler=num_steps > 1)
accelerator = Accelerator(gradient_accumulation_plugin=plugin)
model = torch.nn.Linear(2, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=10.0)
scheduler = get_linear_schedule_with_warmup(optimizer=optimizer, num_warmup_steps=0, num_training_steps=20)
model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
for i in range(10 * num_steps):
with accelerator.accumulate(model):
optimizer.step()
scheduler.step()
if i == (10 * num_steps - 2):
assert (
scheduler.get_last_lr()[0] != 0
), f"Wrong lr found at second-to-last step, expected non-zero, got {scheduler.get_last_lr()[0]}. num_steps: {num_steps}"
assert (
scheduler.get_last_lr()[0] == 0
), f"Wrong lr found at last step, expected 0, got {scheduler.get_last_lr()[0]}"
GradientState._reset_state()
@require_cpu
class SchedulerTester(unittest.TestCase):
def test_lambda_scheduler_steps_with_optimizer_single_process(self):
debug_launcher(partial(lambda_test, num_processes=1), num_processes=1)
debug_launcher(partial(lambda_test, num_processes=1, split_batches=True), num_processes=1)
def test_one_cycle_scheduler_steps_with_optimizer_single_process(self):
debug_launcher(partial(one_cycle_test, num_processes=1), num_processes=1)
debug_launcher(partial(one_cycle_test, num_processes=1, split_batches=True), num_processes=1)
def test_lambda_scheduler_not_step_with_optimizer_single_process(self):
debug_launcher(partial(lambda_test, num_processes=1, step_scheduler_with_optimizer=False), num_processes=1)
def test_one_cycle_scheduler_not_step_with_optimizer_single_process(self):
debug_launcher(partial(one_cycle_test, num_processes=1, step_scheduler_with_optimizer=False), num_processes=1)
def test_lambda_scheduler_steps_with_optimizer_multiprocess(self):
AcceleratorState._reset_state(True)
debug_launcher(lambda_test)
debug_launcher(partial(lambda_test, num_processes=1, split_batches=True), num_processes=1)
def test_one_cycle_scheduler_steps_with_optimizer_multiprocess(self):
AcceleratorState._reset_state(True)
debug_launcher(one_cycle_test)
debug_launcher(partial(one_cycle_test, num_processes=1, split_batches=True), num_processes=1)
def test_lambda_scheduler_not_step_with_optimizer_multiprocess(self):
AcceleratorState._reset_state(True)
debug_launcher(partial(lambda_test, step_scheduler_with_optimizer=False))
def test_one_cycle_scheduler_not_step_with_optimizer_multiprocess(self):
AcceleratorState._reset_state(True)
debug_launcher(partial(one_cycle_test, step_scheduler_with_optimizer=False))
@require_huggingface_suite
def test_accumulation(self):
AcceleratorState._reset_state(True)
debug_launcher(partial(accumulation_test, num_processes=1))
debug_launcher(accumulation_test)
| accelerate/tests/test_scheduler.py/0 | {
"file_path": "accelerate/tests/test_scheduler.py",
"repo_id": "accelerate",
"token_count": 2538
} | 12 |
# Constitutional AI
This repo includes the recipe for training the following models:
* https://huggingface.co/HuggingFaceH4/mistral-7b-anthropic
* https://huggingface.co/HuggingFaceH4/mistral-7b-grok
## Full training examples
You will require 8 GPUs (80GB of VRAM) to train the full model.
```shell
# Step 1 - SFT
ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/deepspeed_zero3.yaml scripts/run_sft.py recipes/constitutional-ai/sft/config_{grok,anthropic}.yaml
# Step 2 - DPO
ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/deepspeed_zero3.yaml scripts/run_dpo.py recipes/constitutional-ai/dpo/config_anthropic.yaml
# Note that we did not include the DPO recipe for grok, as that model seems overtrained and too snarky.
```
## Advanced: generating your own dataset
To generate the constitutional AI dataset, see https://github.com/huggingface/llm-swarm/tree/main/examples/constitutional-ai for detailed instructions if you want to build or customize the dataset.
| alignment-handbook/recipes/constitutional-ai/README.md/0 | {
"file_path": "alignment-handbook/recipes/constitutional-ai/README.md",
"repo_id": "alignment-handbook",
"token_count": 326
} | 13 |
# Instructions to Replicate Zephyr-7b-β
As described in the Zephyr [technical report](https://huggingface.co/papers/2310.16944), training this model proceeds in two steps:
1. Apply SFT to fine-tune Mistral 7B on a filtered version of the UltraChat dataset ([link](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k)). The result is an SFT model like [`zephyr-7b-sft-full`](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full) or [`zephyr-7b-sft-qlora`](https://huggingface.co/alignment-handbook/zephyr-7b-sft-qlora).
2. Align the SFT model to AI feedback via DPO on a preprocessed version of the UltraFeedback dataset ([link](https://huggingface.co/datasets/HuggingFaceH4/ultrafeedback_binarized)). The result is a DPO model like [`zephyr-7b-dpo-full`](https://huggingface.co/alignment-handbook/zephyr-7b-dpo-full) or [`zephyr-7b-dpo-qlora`](https://huggingface.co/alignment-handbook/zephyr-7b-dpo-qlora).
**Note:** after the release of Zephyr, the team at [Argilla](https://argilla.io) found that the source UltraFeedback dataset had a few thousand incorrect preference labels from GPT-4. Additionally, TRL's `SFTTrainer` had a bug in the learning rate scheduler which terminated training early. Accounting for these changes led us to find a better set of hyperparameters than those described in the technical report. In particular, for DPO training we found that training for 1 epoch with `beta=0.01` was sufficient to achieve comparable performance to `zephyr-7b-beta` (vs. 3 epochs with `beta=0.1`).
See below for commands to train these models using either DeepSpeed ZeRO-3 or LoRA.
## Full training examples
You will require 8 GPUs (80GB of VRAM) to train the full model.
```shell
# Step 1 - SFT
ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/deepspeed_zero3.yaml scripts/run_sft.py recipes/zephyr-7b-beta/sft/config_full.yaml
# Step 2 - DPO
ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/deepspeed_zero3.yaml scripts/run_dpo.py recipes/zephyr-7b-beta/dpo/config_full.yaml
```
## QLoRA training examples
```shell
# Step 1 - SFT
ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/multi_gpu.yaml --num_processes=1 scripts/run_sft.py recipes/zephyr-7b-beta/sft/config_qlora.yaml --load_in_4bit=true
# Step 2 - DPO
ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/multi_gpu.yaml --num_processes=1 scripts/run_dpo.py recipes/zephyr-7b-beta/dpo/config_qlora.yaml
``` | alignment-handbook/recipes/zephyr-7b-beta/README.md/0 | {
"file_path": "alignment-handbook/recipes/zephyr-7b-beta/README.md",
"repo_id": "alignment-handbook",
"token_count": 888
} | 14 |
# coding=utf-8
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List, Literal, Optional
from datasets import DatasetDict, concatenate_datasets, load_dataset, load_from_disk
from datasets.builder import DatasetGenerationError
from .configs import DataArguments
DEFAULT_CHAT_TEMPLATE = "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}"
COLUMNS_TO_KEEP = ["messages", "chosen", "rejected", "prompt", "completion", "label"]
def maybe_insert_system_message(messages, tokenizer):
if messages[0]["role"] == "system":
return
# chat template can be one of two attributes, we check in order
chat_template = tokenizer.chat_template
if chat_template is None:
chat_template = tokenizer.default_chat_template
# confirm the jinja template refers to a system message before inserting
if "system" in chat_template or "<|im_start|>" in chat_template:
messages.insert(0, {"role": "system", "content": ""})
def apply_chat_template(
example,
tokenizer,
task: Literal["sft", "generation", "rm", "dpo"],
auto_insert_empty_system_msg: bool = True,
):
if task in ["sft", "generation"]:
messages = example["messages"]
# We add an empty system message if there is none
if auto_insert_empty_system_msg:
maybe_insert_system_message(messages, tokenizer)
example["text"] = tokenizer.apply_chat_template(
messages, tokenize=False, add_generation_prompt=True if task == "generation" else False
)
elif task == "rm":
if all(k in example.keys() for k in ("chosen", "rejected")):
chosen_messages = example["chosen"]
rejected_messages = example["rejected"]
# We add an empty system message if there is none
if auto_insert_empty_system_msg:
maybe_insert_system_message(chosen_messages, tokenizer)
maybe_insert_system_message(rejected_messages, tokenizer)
example["text_chosen"] = tokenizer.apply_chat_template(chosen_messages, tokenize=False)
example["text_rejected"] = tokenizer.apply_chat_template(rejected_messages, tokenize=False)
else:
raise ValueError(
f"Could not format example as dialogue for `rm` task! Require `[chosen, rejected]` keys but found {list(example.keys())}"
)
elif task == "dpo":
if all(k in example.keys() for k in ("chosen", "rejected")):
# For DPO, the inputs are triples of (prompt, chosen, rejected), where `chosen` and `rejected` are the final turn of a dialogue
# We therefore need to extract the N-1 turns to form the prompt
prompt_messages = example["chosen"][:-1]
# Prepend a system message if the first message is not a system message
if auto_insert_empty_system_msg:
maybe_insert_system_message(prompt_messages, tokenizer)
# Now we extract the final turn to define chosen/rejected responses
chosen_messages = example["chosen"][-1:]
rejected_messages = example["rejected"][-1:]
example["text_chosen"] = tokenizer.apply_chat_template(chosen_messages, tokenize=False)
example["text_rejected"] = tokenizer.apply_chat_template(rejected_messages, tokenize=False)
example["text_prompt"] = tokenizer.apply_chat_template(prompt_messages, tokenize=False)
else:
raise ValueError(
f"Could not format example as dialogue for `dpo` task! Require `[chosen, rejected]` keys but found {list(example.keys())}"
)
else:
raise ValueError(
f"Task {task} not supported, please ensure that the provided task is one of {['sft', 'generation', 'rm', 'dpo']}"
)
return example
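# Illustrative usage sketch (`raw_datasets` and `tokenizer` are placeholders for objects created elsewhere):
#
# raw_datasets = raw_datasets.map(apply_chat_template, fn_kwargs={"tokenizer": tokenizer, "task": "sft"})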
def get_datasets(
data_config: DataArguments | dict,
splits: List[str] = ["train", "test"],
configs: Optional[List[str]] = None,
shuffle: bool = True,
) -> DatasetDict:
"""
Loads one or more datasets with varying training set proportions.
Args:
data_config (`DataArguments` or `dict`):
Dataset configuration and split proportions.
splits (`List[str]`, *optional*, defaults to `['train', 'test']`):
Dataset splits to load and mix. Assumes the splits exist in all datasets and have a `train_` or `test_` prefix.
shuffle (`bool`, *optional*, defaults to `True`):
Whether to shuffle the training and testing/validation data.
Returns
[`DatasetDict`]: The dataset dictionary containing the loaded datasets.
"""
if type(data_config) is DataArguments:
# Structure of the config to read the datasets and their mix
# datasets_mixer:
# - 'dataset1': 0.5
# - 'dataset2': 0.3
# - 'dataset3': 0.2
dataset_mixer = data_config.dataset_mixer
elif isinstance(data_config, dict):
# Structure of the input is:
# dataset_mixer = {
# "dataset1": 0.5,
# "dataset1": 0.3,
# "dataset1": 0.2,
# }
dataset_mixer = data_config
else:
raise ValueError(f"Data config {data_config} not recognized.")
raw_datasets = mix_datasets(dataset_mixer, splits=splits, configs=configs, shuffle=shuffle)
return raw_datasets
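# Illustrative usage sketch (dataset names, proportions and split names below are placeholders):
#
# raw_datasets = get_datasets(
#     {"org/dataset_a": 0.5, "org/dataset_b": 0.5},
#     splits=["train_sft", "test_sft"],
# )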
def mix_datasets(
dataset_mixer: dict, configs: Optional[List[str]] = None, splits: Optional[List[str]] = None, shuffle=True
) -> DatasetDict:
"""
Loads and mixes datasets according to proportions specified in `dataset_mixer`.
Args:
dataset_mixer (`dict`):
Dictionary containing the dataset names and their training proportions. By default, all test proportions are 1.
configs (Optional[List[str]], *optional*, defaults to `None`):
List of dataset config names. If given must be the same length as 'dataset_mixer' keys.
splits (Optional[List[str]], *optional*, defaults to `None`):
Dataset splits to load and mix. Assumes the splits exist in all datasets and have a `train_` or `test_` prefix.
shuffle (`bool`, *optional*, defaults to `True`):
Whether to shuffle the training and testing/validation data.
"""
configs = [None] * len(dataset_mixer) if not configs else configs
if configs is not None and len(configs) != len(dataset_mixer):
raise ValueError("The number of given dataset config names must be the same as the given number of datasets.")
raw_datasets = DatasetDict()
raw_train_datasets = []
raw_val_datasets = []
fracs = []
for (ds, frac), ds_config in zip(dataset_mixer.items(), configs):
fracs.append(frac)
for split in splits:
try:
# Try first if dataset on a Hub repo
dataset = load_dataset(ds, ds_config, split=split)
except DatasetGenerationError:
# If not, check local dataset
dataset = load_from_disk(os.path.join(ds, split))
# Remove redundant columns to avoid schema conflicts on load
dataset = dataset.remove_columns([col for col in dataset.column_names if col not in COLUMNS_TO_KEEP])
if "train" in split:
raw_train_datasets.append(dataset)
elif "test" in split:
raw_val_datasets.append(dataset)
else:
raise ValueError(f"Split type {split} not recognized as one of test or train.")
if any(frac < 0 for frac in fracs):
raise ValueError("Dataset fractions cannot be negative.")
if len(raw_train_datasets) > 0:
train_subsets = []
for dataset, frac in zip(raw_train_datasets, fracs):
train_subset = dataset.select(range(int(frac * len(dataset))))
train_subsets.append(train_subset)
if shuffle:
raw_datasets["train"] = concatenate_datasets(train_subsets).shuffle(seed=42)
else:
raw_datasets["train"] = concatenate_datasets(train_subsets)
# No subsampling for test datasets to enable fair comparison across models
if len(raw_val_datasets) > 0:
if shuffle:
raw_datasets["test"] = concatenate_datasets(raw_val_datasets).shuffle(seed=42)
else:
raw_datasets["test"] = concatenate_datasets(raw_val_datasets)
if len(raw_datasets) == 0:
raise ValueError(
f"Dataset {dataset_mixer} not recognized with split {split}. Check the dataset has been correctly formatted."
)
return raw_datasets
| alignment-handbook/src/alignment/data.py/0 | {
"file_path": "alignment-handbook/src/alignment/data.py",
"repo_id": "alignment-handbook",
"token_count": 3848
} | 15 |
{
"[python]": {
"editor.defaultFormatter": "ms-python.black-formatter"
},
"python.formatting.provider": "none",
"python.testing.pytestArgs": [
"candle-pyo3"
],
"python.testing.unittestEnabled": false,
"python.testing.pytestEnabled": true
} | candle/.vscode/settings.json/0 | {
"file_path": "candle/.vscode/settings.json",
"repo_id": "candle",
"token_count": 123
} | 16 |
# Creating a WASM app
| candle/candle-book/src/apps/wasm.md/0 | {
"file_path": "candle/candle-book/src/apps/wasm.md",
"repo_id": "candle",
"token_count": 7
} | 17 |
# Fine-tuning
| candle/candle-book/src/training/finetuning.md/0 | {
"file_path": "candle/candle-book/src/training/finetuning.md",
"repo_id": "candle",
"token_count": 6
} | 18 |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use std::str::FromStr;
use anyhow::Result;
use candle_core::{Device, Tensor};
fn cos_sin(n: usize, device: &Device) -> Result<Tensor> {
let thetas: Vec<_> = (0..n).map(|i| (i as f32 / n as f32)).collect();
let xs: Vec<_> = thetas.iter().map(|t| t.cos().abs()).collect();
let ys: Vec<_> = thetas.iter().map(|t| t.sin().abs()).collect();
let xs = Tensor::from_vec(xs, (n, 1), device)?;
let ys = Tensor::from_vec(ys, (1, n), device)?;
let ys = Tensor::cat(&[&ys, &ys, &ys, &ys, &ys, &ys], 1)?;
Ok(xs.matmul(&ys)?)
}
fn main() -> Result<()> {
let device = Device::new_cuda(0)?;
let args = std::env::args().collect::<Vec<String>>();
let n = if args.len() < 2 {
2000usize
} else {
usize::from_str(&args[1])?
};
let xys_cpu = cos_sin(n, &Device::Cpu)?;
let xys = cos_sin(n, &device)?;
println!("{xys_cpu:?} {xys:?}");
let sum_keepdim_cpu = xys_cpu.sum_keepdim(1)?;
println!("{sum_keepdim_cpu}");
let sum_keepdim = xys.sum_keepdim(1)?;
println!("{sum_keepdim}");
let start = std::time::Instant::now();
let n_iters = 100;
let mut v = 0f32;
for _i in 0..n_iters {
let sum_keepdim = xys.sum_keepdim(1)?;
let sum_keepdim = sum_keepdim.sum_keepdim(0)?;
let sum_keepdim: f32 = sum_keepdim.reshape(&[])?.to_scalar()?;
v += sum_keepdim;
}
let elapsed = start.elapsed();
if v > 0. {
println!(
"ran {n_iters} iterations, time per iter: {:?} ({v})",
elapsed.div_f64(n_iters as f64)
);
}
Ok(())
}
| candle/candle-core/examples/cuda_sum_benchmark.rs/0 | {
"file_path": "candle/candle-core/examples/cuda_sum_benchmark.rs",
"repo_id": "candle",
"token_count": 827
} | 19 |
use crate::backend::BackendDevice;
use crate::cpu_backend::CpuDevice;
use crate::{CpuStorage, DType, Result, Shape, Storage, WithDType};
/// A `DeviceLocation` represents a physical device whereas multiple `Device`
/// can live on the same location (typically for cuda devices).
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum DeviceLocation {
Cpu,
Cuda { gpu_id: usize },
Metal { gpu_id: usize },
}
#[derive(Debug, Clone)]
pub enum Device {
Cpu,
Cuda(crate::CudaDevice),
Metal(crate::MetalDevice),
}
pub trait NdArray {
fn shape(&self) -> Result<Shape>;
fn to_cpu_storage(&self) -> CpuStorage;
}
impl<S: WithDType> NdArray for S {
fn shape(&self) -> Result<Shape> {
Ok(Shape::from(()))
}
fn to_cpu_storage(&self) -> CpuStorage {
S::to_cpu_storage(&[*self])
}
}
impl<S: WithDType, const N: usize> NdArray for &[S; N] {
fn shape(&self) -> Result<Shape> {
Ok(Shape::from(self.len()))
}
fn to_cpu_storage(&self) -> CpuStorage {
S::to_cpu_storage(self.as_slice())
}
}
impl<S: WithDType> NdArray for &[S] {
fn shape(&self) -> Result<Shape> {
Ok(Shape::from(self.len()))
}
fn to_cpu_storage(&self) -> CpuStorage {
S::to_cpu_storage(self)
}
}
impl<S: WithDType, const N: usize, const M: usize> NdArray for &[[S; N]; M] {
fn shape(&self) -> Result<Shape> {
Ok(Shape::from((M, N)))
}
fn to_cpu_storage(&self) -> CpuStorage {
S::to_cpu_storage_owned(self.concat())
}
}
impl<S: WithDType, const N1: usize, const N2: usize, const N3: usize> NdArray
for &[[[S; N3]; N2]; N1]
{
fn shape(&self) -> Result<Shape> {
Ok(Shape::from((N1, N2, N3)))
}
fn to_cpu_storage(&self) -> CpuStorage {
let mut vec = Vec::with_capacity(N1 * N2 * N3);
for i1 in 0..N1 {
for i2 in 0..N2 {
vec.extend(self[i1][i2])
}
}
S::to_cpu_storage_owned(vec)
}
}
impl<S: WithDType, const N1: usize, const N2: usize, const N3: usize, const N4: usize> NdArray
for &[[[[S; N4]; N3]; N2]; N1]
{
fn shape(&self) -> Result<Shape> {
Ok(Shape::from((N1, N2, N3, N4)))
}
fn to_cpu_storage(&self) -> CpuStorage {
let mut vec = Vec::with_capacity(N1 * N2 * N3 * N4);
for i1 in 0..N1 {
for i2 in 0..N2 {
for i3 in 0..N3 {
vec.extend(self[i1][i2][i3])
}
}
}
S::to_cpu_storage_owned(vec)
}
}
impl<S: NdArray> NdArray for Vec<S> {
fn shape(&self) -> Result<Shape> {
if self.is_empty() {
crate::bail!("empty array")
}
let shape0 = self[0].shape()?;
let n = self.len();
for v in self.iter() {
let shape = v.shape()?;
if shape != shape0 {
crate::bail!("two elements have different shapes {shape:?} {shape0:?}")
}
}
Ok(Shape::from([[n].as_slice(), shape0.dims()].concat()))
}
fn to_cpu_storage(&self) -> CpuStorage {
// This allocates intermediary memory and shouldn't be necessary.
let storages = self.iter().map(|v| v.to_cpu_storage()).collect::<Vec<_>>();
CpuStorage::concat(storages.as_slice()).unwrap()
}
}
impl Device {
pub fn new_cuda(ordinal: usize) -> Result<Self> {
Ok(Self::Cuda(crate::CudaDevice::new(ordinal)?))
}
pub fn new_metal(ordinal: usize) -> Result<Self> {
Ok(Self::Metal(crate::MetalDevice::new(ordinal)?))
}
pub fn set_seed(&self, seed: u64) -> Result<()> {
match self {
Self::Cpu => CpuDevice.set_seed(seed),
Self::Cuda(c) => c.set_seed(seed),
Self::Metal(m) => m.set_seed(seed),
}
}
pub fn same_device(&self, rhs: &Self) -> bool {
match (self, rhs) {
(Self::Cpu, Self::Cpu) => true,
(Self::Cuda(lhs), Self::Cuda(rhs)) => lhs.same_device(rhs),
(Self::Metal(lhs), Self::Metal(rhs)) => lhs.same_device(rhs),
_ => false,
}
}
pub fn location(&self) -> DeviceLocation {
match self {
Self::Cpu => DeviceLocation::Cpu,
Self::Cuda(device) => device.location(),
Device::Metal(device) => device.location(),
}
}
pub fn is_cpu(&self) -> bool {
matches!(self, Self::Cpu)
}
pub fn is_cuda(&self) -> bool {
matches!(self, Self::Cuda(_))
}
pub fn is_metal(&self) -> bool {
matches!(self, Self::Metal(_))
}
pub fn cuda_if_available(ordinal: usize) -> Result<Self> {
if crate::utils::cuda_is_available() {
Self::new_cuda(ordinal)
} else {
Ok(Self::Cpu)
}
}
pub(crate) fn rand_uniform_f64(
&self,
lo: f64,
up: f64,
shape: &Shape,
dtype: DType,
) -> Result<Storage> {
match self {
Device::Cpu => {
let storage = CpuDevice.rand_uniform(shape, dtype, lo, up)?;
Ok(Storage::Cpu(storage))
}
Device::Cuda(device) => {
// TODO: Remove the special case if we start supporting generating f16/bf16 directly.
if dtype == DType::F16 || dtype == DType::BF16 {
let storage = device.rand_uniform(shape, DType::F32, lo, up)?;
Storage::Cuda(storage).to_dtype(&crate::Layout::contiguous(shape), dtype)
} else {
let storage = device.rand_uniform(shape, dtype, lo, up)?;
Ok(Storage::Cuda(storage))
}
}
Device::Metal(device) => {
let storage = device.rand_uniform(shape, dtype, lo, up)?;
Ok(Storage::Metal(storage))
}
}
}
pub(crate) fn rand_uniform<T: crate::FloatDType>(
&self,
lo: T,
up: T,
shape: &Shape,
) -> Result<Storage> {
self.rand_uniform_f64(lo.to_f64(), up.to_f64(), shape, T::DTYPE)
}
pub(crate) fn rand_normal_f64(
&self,
mean: f64,
std: f64,
shape: &Shape,
dtype: DType,
) -> Result<Storage> {
match self {
Device::Cpu => {
let storage = CpuDevice.rand_normal(shape, dtype, mean, std)?;
Ok(Storage::Cpu(storage))
}
Device::Cuda(device) => {
// TODO: Remove the special case if we start supporting generating f16/bf16 directly.
if dtype == DType::F16 || dtype == DType::BF16 {
let storage = device.rand_normal(shape, DType::F32, mean, std)?;
Storage::Cuda(storage).to_dtype(&crate::Layout::contiguous(shape), dtype)
} else {
let storage = device.rand_normal(shape, dtype, mean, std)?;
Ok(Storage::Cuda(storage))
}
}
Device::Metal(device) => {
let storage = device.rand_normal(shape, dtype, mean, std)?;
Ok(Storage::Metal(storage))
}
}
}
pub(crate) fn rand_normal<T: crate::FloatDType>(
&self,
mean: T,
std: T,
shape: &Shape,
) -> Result<Storage> {
self.rand_normal_f64(mean.to_f64(), std.to_f64(), shape, T::DTYPE)
}
pub(crate) fn ones(&self, shape: &Shape, dtype: DType) -> Result<Storage> {
match self {
Device::Cpu => {
let storage = CpuDevice.ones_impl(shape, dtype)?;
Ok(Storage::Cpu(storage))
}
Device::Cuda(device) => {
let storage = device.ones_impl(shape, dtype)?;
Ok(Storage::Cuda(storage))
}
Device::Metal(device) => {
let storage = device.ones_impl(shape, dtype)?;
Ok(Storage::Metal(storage))
}
}
}
pub(crate) fn zeros(&self, shape: &Shape, dtype: DType) -> Result<Storage> {
match self {
Device::Cpu => {
let storage = CpuDevice.zeros_impl(shape, dtype)?;
Ok(Storage::Cpu(storage))
}
Device::Cuda(device) => {
let storage = device.zeros_impl(shape, dtype)?;
Ok(Storage::Cuda(storage))
}
Device::Metal(device) => {
let storage = device.zeros_impl(shape, dtype)?;
Ok(Storage::Metal(storage))
}
}
}
pub(crate) fn storage<A: NdArray>(&self, array: A) -> Result<Storage> {
match self {
Device::Cpu => Ok(Storage::Cpu(array.to_cpu_storage())),
Device::Cuda(device) => {
let storage = array.to_cpu_storage();
let storage = device.storage_from_cpu_storage(&storage)?;
Ok(Storage::Cuda(storage))
}
Device::Metal(device) => {
let storage = array.to_cpu_storage();
let storage = device.storage_from_cpu_storage(&storage)?;
Ok(Storage::Metal(storage))
}
}
}
pub(crate) fn storage_owned<S: WithDType>(&self, data: Vec<S>) -> Result<Storage> {
match self {
Device::Cpu => Ok(Storage::Cpu(S::to_cpu_storage_owned(data))),
Device::Cuda(device) => {
let storage = S::to_cpu_storage_owned(data);
let storage = device.storage_from_cpu_storage(&storage)?;
Ok(Storage::Cuda(storage))
}
Device::Metal(device) => {
let storage = S::to_cpu_storage_owned(data);
let storage = device.storage_from_cpu_storage(&storage)?;
Ok(Storage::Metal(storage))
}
}
}
}
| candle/candle-core/src/device.rs/0 | {
"file_path": "candle/candle-core/src/device.rs",
"repo_id": "candle",
"token_count": 5149
} | 20 |
#![allow(unused)]
use super::GgmlDType;
use crate::{CudaDevice, CudaStorage, Error, Result};
pub struct QCudaStorage {
dtype: GgmlDType,
device: CudaDevice,
}
impl QCudaStorage {
pub fn zeros(_: &CudaDevice, _: usize, _: GgmlDType) -> Result<Self> {
Err(Error::NotCompiledWithCudaSupport)
}
pub fn dtype(&self) -> GgmlDType {
self.dtype
}
pub fn device(&self) -> &CudaDevice {
&self.device
}
pub fn dequantize(&self, _elem_count: usize) -> Result<CudaStorage> {
Err(Error::NotCompiledWithCudaSupport)
}
pub fn quantize(&mut self, _src: &CudaStorage) -> Result<()> {
Err(Error::NotCompiledWithCudaSupport)
}
pub fn storage_size_in_bytes(&self) -> usize {
0
}
pub fn fwd(
&self,
_self_shape: &crate::Shape,
_storage: &CudaStorage,
_layout: &crate::Layout,
) -> Result<(CudaStorage, crate::Shape)> {
Err(Error::NotCompiledWithCudaSupport)
}
}
pub fn load_quantized<T: super::GgmlType + Send + Sync + 'static>(
_device: &CudaDevice,
_data: &[T],
) -> Result<super::QStorage> {
Err(Error::NotCompiledWithCudaSupport)
}
| candle/candle-core/src/quantized/dummy_cuda.rs/0 | {
"file_path": "candle/candle-core/src/quantized/dummy_cuda.rs",
"repo_id": "candle",
"token_count": 537
} | 21 |
use crate::{shape::Dim, Error, Result, Shape, Tensor};
impl Tensor {
/// Concatenates two or more tensors along a particular dimension.
///
    /// All tensors must be of the same rank, and the output will have
    /// the same rank.
///
/// ```rust
/// # use candle_core::{Tensor, DType, Device};
/// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?;
/// let b = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?;
///
/// let c = Tensor::cat(&[&a, &b], 0)?;
/// assert_eq!(c.shape().dims(), &[4, 3]);
///
/// let c = Tensor::cat(&[&a, &b], 1)?;
/// assert_eq!(c.shape().dims(), &[2, 6]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn cat<A: AsRef<Tensor>, D: Dim>(args: &[A], dim: D) -> Result<Self> {
if args.is_empty() {
Err(Error::OpRequiresAtLeastOneTensor { op: "cat" }.bt())?
}
let arg0 = args[0].as_ref();
if args.len() == 1 {
return Ok(arg0.clone());
}
let dim = dim.to_index(arg0.shape(), "cat")?;
for arg in args {
arg.as_ref().check_dim(dim, "cat")?;
}
for (arg_idx, arg) in args.iter().enumerate() {
let arg = arg.as_ref();
if arg0.rank() != arg.rank() {
Err(Error::UnexpectedNumberOfDims {
expected: arg0.rank(),
got: arg.rank(),
shape: arg.shape().clone(),
}
.bt())?
}
for (dim_idx, (v1, v2)) in arg0
.shape()
.dims()
.iter()
.zip(arg.shape().dims().iter())
.enumerate()
{
if dim_idx != dim && v1 != v2 {
Err(Error::ShapeMismatchCat {
dim: dim_idx,
first_shape: arg0.shape().clone(),
n: arg_idx + 1,
nth_shape: arg.shape().clone(),
}
.bt())?
}
}
}
if dim == 0 {
Self::cat0(args)
} else {
let all_contiguous = args.iter().all(|v| v.as_ref().is_contiguous());
if all_contiguous {
Self::cat_contiguous(args, dim)
} else {
let args: Vec<Tensor> = args
.iter()
.map(|a| a.as_ref().transpose(0, dim))
.collect::<Result<Vec<_>>>()?;
let cat = Self::cat0(&args)?;
cat.transpose(0, dim)
}
}
}
fn cat0<A: AsRef<Tensor>>(args: &[A]) -> Result<Self> {
if args.is_empty() {
Err(Error::OpRequiresAtLeastOneTensor { op: "cat" }.bt())?
}
let arg0 = args[0].as_ref();
if args.len() == 1 {
return Ok(arg0.clone());
}
let rank = arg0.rank();
let device = arg0.device();
let dtype = arg0.dtype();
let first_dims = arg0.shape().dims();
let mut cat_dims = first_dims.to_vec();
cat_dims[0] = 0;
let mut offsets = vec![0usize];
for (arg_idx, arg) in args.iter().enumerate() {
let arg = arg.as_ref();
if arg.dtype() != dtype {
Err(Error::DTypeMismatchBinaryOp {
lhs: dtype,
rhs: arg.dtype(),
op: "cat",
}
.bt())?
}
if arg.device().location() != device.location() {
Err(Error::DeviceMismatchBinaryOp {
lhs: device.location(),
rhs: arg.device().location(),
op: "cat",
}
.bt())?
}
if rank != arg.rank() {
Err(Error::UnexpectedNumberOfDims {
expected: rank,
got: arg.rank(),
shape: arg.shape().clone(),
}
.bt())?
}
for (dim_idx, (v1, v2)) in arg0
.shape()
.dims()
.iter()
.zip(arg.shape().dims().iter())
.enumerate()
{
if dim_idx == 0 {
cat_dims[0] += v2;
}
if dim_idx != 0 && v1 != v2 {
Err(Error::ShapeMismatchCat {
dim: dim_idx,
first_shape: arg0.shape().clone(),
n: arg_idx + 1,
nth_shape: arg.shape().clone(),
}
.bt())?
}
}
let next_offset = offsets.last().unwrap() + arg.elem_count();
offsets.push(next_offset);
}
let shape = Shape::from(cat_dims);
let op = crate::op::BackpropOp::new(args, |args| crate::op::Op::Cat(args, 0));
let mut storage = device.zeros(&shape, dtype)?;
for (arg, &offset) in args.iter().zip(offsets.iter()) {
let arg = arg.as_ref();
arg.storage()
.copy_strided_src(&mut storage, offset, arg.layout())?;
}
Ok(crate::tensor::from_storage(storage, shape, op, false))
}
fn cat_contiguous<A: AsRef<Tensor>>(args: &[A], dim: usize) -> Result<Self> {
if args.is_empty() {
Err(Error::OpRequiresAtLeastOneTensor { op: "cat" }.bt())?
}
let arg0 = args[0].as_ref();
if args.len() == 1 {
return Ok(arg0.clone());
}
let rank = arg0.rank();
let device = arg0.device();
let dtype = arg0.dtype();
let first_dims = arg0.shape().dims();
let mut cat_dims = first_dims.to_vec();
cat_dims[dim] = 0;
for (arg_idx, arg) in args.iter().enumerate() {
let arg = arg.as_ref();
if arg.dtype() != dtype {
Err(Error::DTypeMismatchBinaryOp {
lhs: dtype,
rhs: arg.dtype(),
op: "cat",
}
.bt())?
}
if arg.device().location() != device.location() {
Err(Error::DeviceMismatchBinaryOp {
lhs: device.location(),
rhs: arg.device().location(),
op: "cat",
}
.bt())?
}
if rank != arg.rank() {
Err(Error::UnexpectedNumberOfDims {
expected: rank,
got: arg.rank(),
shape: arg.shape().clone(),
}
.bt())?
}
for (dim_idx, (v1, v2)) in arg0
.shape()
.dims()
.iter()
.zip(arg.shape().dims().iter())
.enumerate()
{
if dim_idx == dim {
cat_dims[dim] += v2;
}
if dim_idx != dim && v1 != v2 {
Err(Error::ShapeMismatchCat {
dim: dim_idx,
first_shape: arg0.shape().clone(),
n: arg_idx + 1,
nth_shape: arg.shape().clone(),
}
.bt())?
}
}
}
let cat_target_dim_len = cat_dims[dim];
let block_size: usize = cat_dims.iter().skip(1 + dim).product();
let shape = Shape::from(cat_dims);
let op = crate::op::BackpropOp::new(args, |args| crate::op::Op::Cat(args, dim));
let mut storage = device.zeros(&shape, dtype)?;
let mut dst_o = 0;
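        // Each argument is copied as a 2D block: d1 "rows" (the product of the dimensions
        // before the concatenation axis) of d2 contiguous elements (the concatenation axis
        // together with all trailing dimensions). In the destination, consecutive rows are
        // spaced by dst_s = block_size * cat_target_dim_len so that the blocks coming from
        // the different arguments end up interleaved along the concatenation axis.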
for arg in args.iter() {
let arg = arg.as_ref();
let arg_dims = arg.shape().dims();
let d1: usize = arg_dims.iter().take(dim).product();
let d2 = block_size * arg_dims[dim];
let dst_s = block_size * cat_target_dim_len;
let src_o = arg.layout().start_offset();
arg.storage().copy2d(
&mut storage,
d1,
d2,
/* src_s */ d2,
dst_s,
src_o,
dst_o,
)?;
dst_o += d2;
}
Ok(crate::tensor::from_storage(storage, shape, op, false))
}
}
| candle/candle-core/src/tensor_cat.rs/0 | {
"file_path": "candle/candle-core/src/tensor_cat.rs",
"repo_id": "candle",
"token_count": 5110
} | 22 |
use candle_core::{DType, Result, Tensor};
#[test]
fn npy() -> Result<()> {
let npy = Tensor::read_npy("tests/test.npy")?;
assert_eq!(
npy.to_dtype(DType::U8)?.to_vec1::<u8>()?,
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
);
Ok(())
}
#[test]
fn npz() -> Result<()> {
let npz = Tensor::read_npz("tests/test.npz")?;
assert_eq!(npz.len(), 2);
assert_eq!(npz[0].0, "x");
assert_eq!(npz[1].0, "x_plus_one");
assert_eq!(
npz[1].1.to_dtype(DType::U8)?.to_vec1::<u8>()?,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
);
Ok(())
}
| candle/candle-core/tests/serialization_tests.rs/0 | {
"file_path": "candle/candle-core/tests/serialization_tests.rs",
"repo_id": "candle",
"token_count": 333
} | 23 |
[package]
name = "candle-examples"
version.workspace = true
edition.workspace = true
description.workspace = true
repository.workspace = true
keywords.workspace = true
categories.workspace = true
license.workspace = true
readme = "README.md"
[dependencies]
accelerate-src = { workspace = true, optional = true }
candle = { workspace = true }
candle-datasets = { workspace = true, optional = true }
candle-nn = { workspace = true }
candle-transformers = { workspace = true }
candle-flash-attn = { workspace = true, optional = true }
candle-onnx = { workspace = true, optional = true }
csv = "1.3.0"
cudarc = { workspace = true, optional = true }
half = { workspace = true, optional = true }
hf-hub = { workspace = true, features = ["tokio"] }
image = { workspace = true }
intel-mkl-src = { workspace = true, optional = true }
num-traits = { workspace = true }
pyo3 = { version = "0.20.0", features = ["auto-initialize"], optional = true }
rayon = { workspace = true }
rubato = { version = "0.15.0", optional = true }
safetensors = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
symphonia = { version = "0.5.3", features = ["all"], optional = true }
tokenizers = { workspace = true, features = ["onig"] }
cpal= { version = "0.15.2", optional = true }
[dev-dependencies]
anyhow = { workspace = true }
byteorder = { workspace = true }
clap = { workspace = true }
imageproc = { workspace = true }
memmap2 = { workspace = true }
rand = { workspace = true }
rusttype = { workspace = true }
tracing = { workspace = true }
tracing-chrome = { workspace = true }
tracing-subscriber = { workspace = true }
# Necessary to disambiguate with tokio in wasm examples which are 1.28.1
tokio = "1.29.1"
[build-dependencies]
anyhow = { workspace = true }
bindgen_cuda = { version = "0.1.1", optional = true }
[features]
default = []
accelerate = ["dep:accelerate-src", "candle/accelerate", "candle-nn/accelerate", "candle-transformers/accelerate"]
cuda = ["candle/cuda", "candle-nn/cuda", "candle-transformers/cuda", "dep:bindgen_cuda"]
cudnn = ["candle/cudnn"]
flash-attn = ["cuda", "candle-transformers/flash-attn", "dep:candle-flash-attn"]
mkl = ["dep:intel-mkl-src", "candle/mkl", "candle-nn/mkl", "candle-transformers/mkl"]
nccl = ["cuda", "cudarc/nccl", "dep:half"]
onnx = ["candle-onnx"]
metal = ["candle/metal", "candle-nn/metal"]
microphone = ["cpal"]
encodec = ["cpal", "symphonia", "rubato"]
[[example]]
name = "llama_multiprocess"
required-features = ["cuda", "nccl", "flash-attn"]
[[example]]
name = "reinforcement-learning"
required-features = ["pyo3"]
[[example]]
name = "onnx"
required-features = ["onnx"]
[[example]]
name = "onnx_basics"
required-features = ["onnx"]
[[example]]
name = "whisper"
required-features = ["symphonia"]
[[example]]
name = "whisper-microphone"
required-features = ["microphone"]
[[example]]
name = "mnist-training"
required-features = ["candle-datasets"]
[[example]]
name = "llama2-c"
required-features = ["candle-datasets"]
[[example]]
name = "encodec"
required-features = ["encodec"]
| candle/candle-examples/Cargo.toml/0 | {
"file_path": "candle/candle-examples/Cargo.toml",
"repo_id": "candle",
"token_count": 1090
} | 24 |
// This example illustrates how to implement custom operations. These operations can provide their
// own forward pass (CPU and GPU versions) as well as their backward pass.
//
// In this example we add the RMS normalization operation and implement it for f32.
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[rustfmt::skip]
#[cfg(feature = "cuda")]
mod cuda_kernels;
use clap::Parser;
use candle::{CpuStorage, CustomOp1, Layout, Result, Shape, Tensor};
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
}
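// Despite its name, this custom op implements the RMS normalization described above: each row is
// scaled by 1 / sqrt(mean(x^2) + eps) and no mean is subtracted, matching both the CPU forward
// pass below and the "rms_f32" CUDA kernel.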
struct LayerNorm {
eps: f32,
}
impl CustomOp1 for LayerNorm {
fn name(&self) -> &'static str {
"layer-norm"
}
fn cpu_fwd(&self, storage: &CpuStorage, layout: &Layout) -> Result<(CpuStorage, Shape)> {
let (dim1, dim2) = layout.shape().dims2()?;
let slice = storage.as_slice::<f32>()?;
let src = match layout.contiguous_offsets() {
None => candle::bail!("input has to be contiguous"),
Some((o1, o2)) => &slice[o1..o2],
};
let mut dst = Vec::with_capacity(dim1 * dim2);
for idx1 in 0..dim1 {
let src = &src[idx1 * dim2..(idx1 + 1) * dim2];
let variance = src.iter().map(|x| x * x).sum::<f32>();
let s_variance = 1f32 / (variance / dim2 as f32 + self.eps).sqrt();
dst.extend(src.iter().map(|x| x * s_variance))
}
let storage = candle::WithDType::to_cpu_storage_owned(dst);
Ok((storage, layout.shape().clone()))
}
#[cfg(feature = "cuda")]
fn cuda_fwd(
&self,
storage: &candle::CudaStorage,
layout: &Layout,
) -> Result<(candle::CudaStorage, Shape)> {
use candle::backend::BackendStorage;
use candle::cuda_backend::cudarc::driver::{LaunchAsync, LaunchConfig};
use candle::cuda_backend::WrapErr;
let (d1, d2) = layout.shape().dims2()?;
let d1 = d1 as u32;
let d2 = d2 as u32;
let dev = storage.device().clone();
let slice = storage.as_cuda_slice::<f32>()?;
let slice = match layout.contiguous_offsets() {
None => candle::bail!("input has to be contiguous"),
Some((o1, o2)) => slice.slice(o1..o2),
};
let elem_count = layout.shape().elem_count();
let dst = unsafe { dev.alloc::<f32>(elem_count) }.w()?;
let func = dev.get_or_load_func("rms_f32", cuda_kernels::LAYERNORM_KERNELS)?;
let params = (&dst, &slice, self.eps, d1, d2);
let cfg = LaunchConfig {
grid_dim: (d1, 1, 1),
block_dim: (d2, 1, 1),
shared_mem_bytes: 0,
};
unsafe { func.launch(cfg, params) }.w()?;
let dst = candle::CudaStorage::wrap_cuda_slice(dst, dev);
Ok((dst, layout.shape().clone()))
}
}
fn main() -> anyhow::Result<()> {
let args = Args::parse();
let device = candle_examples::device(args.cpu)?;
let t = Tensor::arange(0f32, 14f32, &device)?.reshape((2, 7))?;
println!("{t}");
let t = t.apply_op1(LayerNorm { eps: 1e-5 })?;
println!("{t}");
Ok(())
}
| candle/candle-examples/examples/custom-ops/main.rs/0 | {
"file_path": "candle/candle-examples/examples/custom-ops/main.rs",
"repo_id": "candle",
"token_count": 1475
} | 25 |
# candle-jina-bert
Jina-Bert is a general-purpose text embedding model with a context size of 8192
tokens ([model card](https://huggingface.co/jinaai/jina-embeddings-v2-base-en)).
In this example it can be used for two different tasks:
- Compute sentence embeddings for a prompt.
- Compute similarities between a set of sentences.
## Sentence embeddings
Jina-Bert is used to compute the sentence embeddings for a prompt. The model weights
are downloaded from the hub on the first run.
```bash
cargo run --example jina-bert --release -- --prompt "Here is a test sentence"
> [[[ 0.1595, -0.9885, 0.6494, ..., 0.3003, -0.6901, -1.2355],
> [ 0.0374, -0.1798, 1.3359, ..., 0.6731, 0.2133, -1.6807],
> [ 0.1700, -0.8534, 0.8924, ..., -0.1785, -0.0727, -1.5087],
> ...
> [-0.3113, -1.3665, 0.2027, ..., -0.2519, 0.1711, -1.5811],
> [ 0.0907, -1.0492, 0.5382, ..., 0.0242, -0.7077, -1.0830],
> [ 0.0369, -0.6343, 0.6105, ..., 0.0671, 0.3778, -1.1505]]]
> Tensor[[1, 7, 768], f32]
```
## Similarities
In this example, Jina-Bert computes the sentence embeddings for a set of
sentences (hardcoded in the example). Cosine similarities are then computed for
each sentence pair and reported in decreasing order, so the first reported pair
contains the two most similar sentences.
The sentence embeddings are computed by average pooling over all the sentence
tokens, including any padding tokens (see the sketch at the end of this
section).
```bash
cargo run --example jina-bert --release
> score: 0.94 'The new movie is awesome' 'The new movie is so great'
> score: 0.81 'The cat sits outside' 'The cat plays in the garden'
> score: 0.78 'I love pasta' 'Do you like pizza?'
> score: 0.68 'I love pasta' 'The new movie is awesome'
> score: 0.67 'A man is playing guitar' 'A woman watches TV'
```
| candle/candle-examples/examples/jina-bert/README.md/0 | {
"file_path": "candle/candle-examples/examples/jina-bert/README.md",
"repo_id": "candle",
"token_count": 663
} | 26 |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use anyhow::Result;
use clap::Parser;
use std::io::Write;
use candle_transformers::generation::LogitsProcessor;
use candle_transformers::models::encodec;
use candle_transformers::models::metavoice::{adapters, gpt, tokenizers, transformer};
use candle_transformers::models::quantized_metavoice::transformer as qtransformer;
use candle::{DType, IndexOp, Tensor};
use candle_nn::VarBuilder;
use hf_hub::api::sync::Api;
use rand::{distributions::Distribution, SeedableRng};
pub const ENCODEC_NTOKENS: u32 = 1024;
#[derive(Clone, Debug, Copy, PartialEq, Eq, clap::ValueEnum)]
enum ArgDType {
F32,
F16,
Bf16,
}
enum Transformer {
Normal(transformer::Model),
Quantized(qtransformer::Model),
}
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
#[arg(long)]
prompt: String,
/// Use the quantized version of the model.
#[arg(long)]
quantized: bool,
/// The guidance scale.
#[arg(long, default_value_t = 3.0)]
guidance_scale: f64,
/// The temperature used to generate samples.
#[arg(long, default_value_t = 1.0)]
temperature: f64,
/// The seed to use when generating random samples.
#[arg(long, default_value_t = 299792458)]
seed: u64,
/// The maximum number of tokens to generate for the first stage.
#[arg(long, default_value_t = 2000)]
max_tokens: u64,
/// The output file using the wav format.
#[arg(long, default_value = "out.wav")]
out_file: String,
#[arg(long)]
first_stage_meta: Option<String>,
#[arg(long)]
first_stage_weights: Option<String>,
#[arg(long)]
second_stage_weights: Option<String>,
#[arg(long)]
encodec_weights: Option<String>,
#[arg(long)]
spk_emb: Option<String>,
#[arg(long, default_value = "f32")]
dtype: ArgDType,
}
fn main() -> Result<()> {
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let args = Args::parse();
let _guard = if args.tracing {
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
Some(guard)
} else {
None
};
println!(
"avx: {}, neon: {}, simd128: {}, f16c: {}",
candle::utils::with_avx(),
candle::utils::with_neon(),
candle::utils::with_simd128(),
candle::utils::with_f16c()
);
let device = candle_examples::device(args.cpu)?;
let api = Api::new()?;
let repo = api.model("lmz/candle-metavoice".to_string());
let first_stage_meta = match &args.first_stage_meta {
Some(w) => std::path::PathBuf::from(w),
None => repo.get("first_stage.meta.json")?,
};
let first_stage_meta: serde_json::Value =
serde_json::from_reader(&std::fs::File::open(first_stage_meta)?)?;
let first_stage_tokenizer = match first_stage_meta.as_object() {
None => anyhow::bail!("not a json object"),
Some(j) => match j.get("tokenizer") {
None => anyhow::bail!("no tokenizer key"),
Some(j) => j,
},
};
let fs_tokenizer = tokenizers::BPE::from_json(first_stage_tokenizer, 512)?;
let second_stage_weights = match &args.second_stage_weights {
Some(w) => std::path::PathBuf::from(w),
None => repo.get("second_stage.safetensors")?,
};
let encodec_weights = match args.encodec_weights {
Some(w) => std::path::PathBuf::from(w),
None => Api::new()?
.model("facebook/encodec_24khz".to_string())
.get("model.safetensors")?,
};
let dtype = match args.dtype {
ArgDType::F32 => DType::F32,
ArgDType::F16 => DType::F16,
ArgDType::Bf16 => DType::BF16,
};
let first_stage_config = transformer::Config::cfg1b_v0_1();
let mut first_stage_model = if args.quantized {
let filename = match &args.first_stage_weights {
Some(w) => std::path::PathBuf::from(w),
None => repo.get("first_stage_q4k.gguf")?,
};
let vb =
candle_transformers::quantized_var_builder::VarBuilder::from_gguf(filename, &device)?;
let first_stage_model = qtransformer::Model::new(&first_stage_config, vb)?;
Transformer::Quantized(first_stage_model)
} else {
let first_stage_weights = match &args.first_stage_weights {
Some(w) => std::path::PathBuf::from(w),
None => repo.get("first_stage.safetensors")?,
};
let first_stage_vb =
unsafe { VarBuilder::from_mmaped_safetensors(&[first_stage_weights], dtype, &device)? };
let first_stage_model = transformer::Model::new(&first_stage_config, first_stage_vb)?;
Transformer::Normal(first_stage_model)
};
let second_stage_vb =
unsafe { VarBuilder::from_mmaped_safetensors(&[second_stage_weights], dtype, &device)? };
let second_stage_config = gpt::Config::cfg1b_v0_1();
let second_stage_model = gpt::Model::new(second_stage_config.clone(), second_stage_vb)?;
let encodec_device = if device.is_metal() {
&candle::Device::Cpu
} else {
&device
};
let encodec_vb =
unsafe { VarBuilder::from_mmaped_safetensors(&[encodec_weights], dtype, encodec_device)? };
let encodec_config = encodec::Config::default();
let encodec_model = encodec::Model::new(&encodec_config, encodec_vb)?;
println!("prompt: '{}'", args.prompt);
let prompt_tokens = fs_tokenizer.encode(&args.prompt)?;
let mut tokens = prompt_tokens.clone();
println!("{tokens:?}");
let spk_emb_file = match &args.spk_emb {
Some(w) => std::path::PathBuf::from(w),
None => repo.get("spk_emb.safetensors")?,
};
let spk_emb = candle::safetensors::load(&spk_emb_file, &candle::Device::Cpu)?;
let spk_emb = match spk_emb.get("spk_emb") {
None => anyhow::bail!("missing spk_emb tensor in {spk_emb_file:?}"),
Some(spk_emb) => spk_emb.to_dtype(dtype)?,
};
let spk_emb = spk_emb.to_device(&device)?;
let mut logits_processor = LogitsProcessor::new(args.seed, Some(args.temperature), Some(0.95));
// First stage generation.
for index in 0..args.max_tokens {
let context_size = if index > 0 { 1 } else { tokens.len() };
let start_pos = tokens.len().saturating_sub(context_size);
let ctxt = &tokens[start_pos..];
let input = Tensor::new(ctxt, &device)?;
let input = Tensor::stack(&[&input, &input], 0)?;
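        // The context is duplicated along the batch dimension so that a single forward pass
        // returns both sets of logits needed for classifier-free guidance; they are mixed
        // below using `guidance_scale`.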
let logits = match &mut first_stage_model {
Transformer::Normal(m) => m.forward(&input, &spk_emb, tokens.len() - context_size)?,
Transformer::Quantized(m) => {
m.forward(&input, &spk_emb, tokens.len() - context_size)?
}
};
let logits0 = logits.i((0, 0))?;
let logits1 = logits.i((1, 0))?;
let logits = ((logits0 * args.guidance_scale)? + logits1 * (1. - args.guidance_scale))?;
let logits = logits.to_dtype(DType::F32)?;
let next_token = logits_processor.sample(&logits)?;
tokens.push(next_token);
print!(".");
std::io::stdout().flush()?;
if next_token == 2048 {
break;
}
}
println!();
let fie2c = adapters::FlattenedInterleavedEncodec2Codebook::new(ENCODEC_NTOKENS);
let (text_ids, ids1, ids2) = fie2c.decode(&tokens);
println!("text ids len: {}", text_ids.len());
let mut rng = rand::rngs::StdRng::seed_from_u64(args.seed + 1337);
// TODO: Use the config rather than hardcoding the offset here.
let encoded_text: Vec<_> = prompt_tokens.iter().map(|v| v - 1024).collect();
let mut hierarchies_in1 =
[encoded_text.as_slice(), ids1.as_slice(), &[ENCODEC_NTOKENS]].concat();
let mut hierarchies_in2 = [
vec![ENCODEC_NTOKENS; encoded_text.len()].as_slice(),
ids2.as_slice(),
&[ENCODEC_NTOKENS],
]
.concat();
hierarchies_in1.resize(second_stage_config.block_size, ENCODEC_NTOKENS);
hierarchies_in2.resize(second_stage_config.block_size, ENCODEC_NTOKENS);
let in_x1 = Tensor::new(hierarchies_in1, &device)?;
let in_x2 = Tensor::new(hierarchies_in2, &device)?;
let in_x = Tensor::stack(&[in_x1, in_x2], 0)?.unsqueeze(0)?;
let logits = second_stage_model.forward(&in_x)?;
println!("sampling from logits...");
let mut codes = vec![];
for logits in logits.iter() {
let logits = logits.squeeze(0)?;
let (seq_len, _) = logits.dims2()?;
let mut codes_ = Vec::with_capacity(seq_len);
for step in 0..seq_len {
let logits = logits.i(step)?.to_dtype(DType::F32)?;
let logits = &(&logits / 1.0)?;
let prs = candle_nn::ops::softmax_last_dim(logits)?.to_vec1::<f32>()?;
let distr = rand::distributions::WeightedIndex::new(prs.as_slice())?;
let sample = distr.sample(&mut rng) as u32;
codes_.push(sample)
}
codes.push(codes_)
}
let codes = Tensor::new(codes, &device)?.unsqueeze(0)?;
let codes = Tensor::cat(&[in_x, codes], 1)?;
println!("codes: {codes}");
let tilted_encodec = adapters::TiltedEncodec::new(ENCODEC_NTOKENS);
let codes = codes.i(0)?.to_vec2::<u32>()?;
let (text_ids, audio_ids) = tilted_encodec.decode(&codes);
println!("text_ids len: {:?}", text_ids.len());
let audio_ids = Tensor::new(audio_ids, encodec_device)?.unsqueeze(0)?;
println!("audio_ids shape: {:?}", audio_ids.shape());
let pcm = encodec_model.decode(&audio_ids)?;
println!("output pcm shape: {:?}", pcm.shape());
let pcm = pcm.i(0)?.i(0)?.to_dtype(DType::F32)?;
let pcm = candle_examples::audio::normalize_loudness(&pcm, 24_000, true)?;
let pcm = pcm.to_vec1::<f32>()?;
let mut output = std::fs::File::create(&args.out_file)?;
candle_examples::wav::write_pcm_as_wav(&mut output, &pcm, 24_000)?;
Ok(())
}
| candle/candle-examples/examples/metavoice/main.rs/0 | {
"file_path": "candle/candle-examples/examples/metavoice/main.rs",
"repo_id": "candle",
"token_count": 4560
} | 27 |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use std::io::Write;
use std::path::PathBuf;
use candle_transformers::models::quantized_t5 as t5;
use anyhow::{Error as E, Result};
use candle::{Device, Tensor};
use candle_transformers::generation::LogitsProcessor;
use clap::{Parser, ValueEnum};
use hf_hub::{api::sync::Api, api::sync::ApiRepo, Repo, RepoType};
use tokenizers::Tokenizer;
#[derive(Clone, Debug, Copy, ValueEnum)]
enum Which {
T5Small,
FlanT5Small,
FlanT5Base,
FlanT5Large,
FlanT5Xl,
FlanT5Xxl,
}
#[derive(Parser, Debug, Clone)]
#[command(author, version, about, long_about = None)]
struct Args {
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
/// The model repository to use on the HuggingFace hub.
#[arg(long)]
model_id: Option<String>,
#[arg(long)]
revision: Option<String>,
#[arg(long)]
weight_file: Option<String>,
#[arg(long)]
config_file: Option<String>,
    /// Disable the key-value cache during decoding.
#[arg(long, default_value = "false")]
disable_cache: bool,
    /// The prompt to be used for generation.
#[arg(long)]
prompt: String,
/// The temperature used to generate samples.
#[arg(long, default_value_t = 0.8)]
temperature: f64,
/// Nucleus sampling probability cutoff.
#[arg(long)]
top_p: Option<f64>,
/// Penalty to be applied for repeating tokens, 1. means no penalty.
#[arg(long, default_value_t = 1.1)]
repeat_penalty: f32,
/// The context size to consider for the repeat penalty.
#[arg(long, default_value_t = 64)]
repeat_last_n: usize,
/// The model size to use.
#[arg(long, default_value = "t5-small")]
which: Which,
}
struct T5ModelBuilder {
device: Device,
config: t5::Config,
weights_filename: PathBuf,
}
impl T5ModelBuilder {
pub fn load(args: &Args) -> Result<(Self, Tokenizer)> {
let device = Device::Cpu;
let default_model = "lmz/candle-quantized-t5".to_string();
let (model_id, revision) = match (args.model_id.to_owned(), args.revision.to_owned()) {
(Some(model_id), Some(revision)) => (model_id, revision),
(Some(model_id), None) => (model_id, "main".to_string()),
(None, Some(revision)) => (default_model, revision),
(None, None) => (default_model, "main".to_string()),
};
let repo = Repo::with_revision(model_id, RepoType::Model, revision);
let api = Api::new()?;
let api = api.repo(repo);
let config_filename = match &args.config_file {
Some(filename) => Self::get_local_or_remote_file(filename, &api)?,
None => match args.which {
Which::T5Small => api.get("config.json")?,
Which::FlanT5Small => api.get("config-flan-t5-small.json")?,
Which::FlanT5Base => api.get("config-flan-t5-base.json")?,
Which::FlanT5Large => api.get("config-flan-t5-large.json")?,
Which::FlanT5Xl => api.get("config-flan-t5-xl.json")?,
Which::FlanT5Xxl => api.get("config-flan-t5-xxl.json")?,
},
};
let tokenizer_filename = api.get("tokenizer.json")?;
let weights_filename = match &args.weight_file {
Some(filename) => Self::get_local_or_remote_file(filename, &api)?,
None => match args.which {
Which::T5Small => api.get("model.gguf")?,
Which::FlanT5Small => api.get("model-flan-t5-small.gguf")?,
Which::FlanT5Base => api.get("model-flan-t5-base.gguf")?,
Which::FlanT5Large => api.get("model-flan-t5-large.gguf")?,
Which::FlanT5Xl => api.get("model-flan-t5-xl.gguf")?,
Which::FlanT5Xxl => api.get("model-flan-t5-xxl.gguf")?,
},
};
let config = std::fs::read_to_string(config_filename)?;
let mut config: t5::Config = serde_json::from_str(&config)?;
config.use_cache = !args.disable_cache;
let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?;
Ok((
Self {
device,
config,
weights_filename,
},
tokenizer,
))
}
pub fn build_model(&self) -> Result<t5::T5ForConditionalGeneration> {
let device = Device::Cpu;
let vb = t5::VarBuilder::from_gguf(&self.weights_filename, &device)?;
Ok(t5::T5ForConditionalGeneration::load(vb, &self.config)?)
}
fn get_local_or_remote_file(filename: &str, api: &ApiRepo) -> Result<PathBuf> {
let local_filename = std::path::PathBuf::from(filename);
if local_filename.exists() {
Ok(local_filename)
} else {
Ok(api.get(filename)?)
}
}
}
fn main() -> Result<()> {
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let args = Args::parse();
let _guard = if args.tracing {
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
Some(guard)
} else {
None
};
let (builder, mut tokenizer) = T5ModelBuilder::load(&args)?;
let device = &builder.device;
let tokenizer = tokenizer
.with_padding(None)
.with_truncation(None)
.map_err(E::msg)?;
let tokens = tokenizer
.encode(args.prompt, true)
.map_err(E::msg)?
.get_ids()
.to_vec();
let input_token_ids = Tensor::new(&tokens[..], device)?.unsqueeze(0)?;
let mut model = builder.build_model()?;
let mut output_token_ids = [builder
.config
.decoder_start_token_id
.unwrap_or(builder.config.pad_token_id) as u32]
.to_vec();
let temperature = if args.temperature <= 0. {
None
} else {
Some(args.temperature)
};
let mut logits_processor = LogitsProcessor::new(299792458, temperature, args.top_p);
let encoder_output = model.encode(&input_token_ids)?;
let start = std::time::Instant::now();
for index in 0.. {
if output_token_ids.len() > 512 {
break;
}
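        // With the key-value cache enabled, only the last generated token has to be fed to
        // the decoder after the first step; otherwise the whole output sequence is
        // re-processed at every iteration.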
let decoder_token_ids = if index == 0 || !builder.config.use_cache {
Tensor::new(output_token_ids.as_slice(), device)?.unsqueeze(0)?
} else {
let last_token = *output_token_ids.last().unwrap();
Tensor::new(&[last_token], device)?.unsqueeze(0)?
};
let logits = model
.decode(&decoder_token_ids, &encoder_output)?
.squeeze(0)?;
let logits = if args.repeat_penalty == 1. {
logits
} else {
let start_at = output_token_ids.len().saturating_sub(args.repeat_last_n);
candle_transformers::utils::apply_repeat_penalty(
&logits,
args.repeat_penalty,
&output_token_ids[start_at..],
)?
};
let next_token_id = logits_processor.sample(&logits)?;
if next_token_id as usize == builder.config.eos_token_id {
break;
}
output_token_ids.push(next_token_id);
if let Some(text) = tokenizer.id_to_token(next_token_id) {
            let text = text.replace('▁', " ").replace("<0x0A>", "\n");
print!("{text}");
std::io::stdout().flush()?;
}
}
let dt = start.elapsed();
println!(
"\n{} tokens generated ({:.2} token/s)\n",
output_token_ids.len(),
output_token_ids.len() as f64 / dt.as_secs_f64(),
);
Ok(())
}
| candle/candle-examples/examples/quantized-t5/main.rs/0 | {
"file_path": "candle/candle-examples/examples/quantized-t5/main.rs",
"repo_id": "candle",
"token_count": 3631
} | 28 |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use clap::{Parser, ValueEnum};
use candle::{DType, IndexOp, D};
use candle_nn::{Module, VarBuilder};
use candle_transformers::models::repvgg;
#[derive(Clone, Copy, Debug, ValueEnum)]
enum Which {
A0,
A1,
A2,
B0,
B1,
B2,
B3,
B1G4,
B2G4,
B3G4,
}
impl Which {
fn model_filename(&self) -> String {
let name = match self {
Self::A0 => "a0",
Self::A1 => "a1",
Self::A2 => "a2",
Self::B0 => "b0",
Self::B1 => "b1",
Self::B2 => "b2",
Self::B3 => "b3",
Self::B1G4 => "b1g4",
Self::B2G4 => "b2g4",
Self::B3G4 => "b3g4",
};
format!("timm/repvgg_{}.rvgg_in1k", name)
}
fn config(&self) -> repvgg::Config {
match self {
Self::A0 => repvgg::Config::a0(),
Self::A1 => repvgg::Config::a1(),
Self::A2 => repvgg::Config::a2(),
Self::B0 => repvgg::Config::b0(),
Self::B1 => repvgg::Config::b1(),
Self::B2 => repvgg::Config::b2(),
Self::B3 => repvgg::Config::b3(),
Self::B1G4 => repvgg::Config::b1g4(),
Self::B2G4 => repvgg::Config::b2g4(),
Self::B3G4 => repvgg::Config::b3g4(),
}
}
}
#[derive(Parser)]
struct Args {
#[arg(long)]
model: Option<String>,
#[arg(long)]
image: String,
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
#[arg(value_enum, long, default_value_t=Which::A0)]
which: Which,
}
pub fn main() -> anyhow::Result<()> {
let args = Args::parse();
let device = candle_examples::device(args.cpu)?;
let image = candle_examples::imagenet::load_image224(args.image)?.to_device(&device)?;
println!("loaded image {image:?}");
let model_file = match args.model {
None => {
let model_name = args.which.model_filename();
let api = hf_hub::api::sync::Api::new()?;
let api = api.model(model_name);
api.get("model.safetensors")?
}
Some(model) => model.into(),
};
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, &device)? };
let model = repvgg::repvgg(&args.which.config(), 1000, vb)?;
println!("model built");
let logits = model.forward(&image.unsqueeze(0)?)?;
let prs = candle_nn::ops::softmax(&logits, D::Minus1)?
.i(0)?
.to_vec1::<f32>()?;
let mut prs = prs.iter().enumerate().collect::<Vec<_>>();
prs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1));
for &(category_idx, pr) in prs.iter().take(5) {
println!(
"{:24}: {:.2}%",
candle_examples::imagenet::CLASSES[category_idx],
100. * pr
);
}
Ok(())
}
| candle/candle-examples/examples/repvgg/main.rs/0 | {
"file_path": "candle/candle-examples/examples/repvgg/main.rs",
"repo_id": "candle",
"token_count": 1525
} | 29 |
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
use candle_transformers::models::stable_diffusion;
use anyhow::{Error as E, Result};
use candle::{DType, Device, IndexOp, Module, Tensor, D};
use clap::Parser;
use tokenizers::Tokenizer;
#[derive(Parser)]
#[command(author, version, about, long_about = None)]
struct Args {
/// The prompt to be used for image generation.
#[arg(
long,
default_value = "A very realistic photo of a rusty robot walking on a sandy beach"
)]
prompt: String,
#[arg(long, default_value = "")]
uncond_prompt: String,
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
/// The height in pixels of the generated image.
#[arg(long)]
height: Option<usize>,
/// The width in pixels of the generated image.
#[arg(long)]
width: Option<usize>,
/// The UNet weight file, in .safetensors format.
#[arg(long, value_name = "FILE")]
unet_weights: Option<String>,
/// The CLIP weight file, in .safetensors format.
#[arg(long, value_name = "FILE")]
clip_weights: Option<String>,
/// The VAE weight file, in .safetensors format.
#[arg(long, value_name = "FILE")]
vae_weights: Option<String>,
#[arg(long, value_name = "FILE")]
    /// The file specifying the tokenizer to be used for tokenization.
tokenizer: Option<String>,
/// The size of the sliced attention or 0 for automatic slicing (disabled by default)
#[arg(long)]
sliced_attention_size: Option<usize>,
/// The number of steps to run the diffusion for.
#[arg(long)]
n_steps: Option<usize>,
/// The number of samples to generate.
#[arg(long, default_value_t = 1)]
num_samples: i64,
/// The name of the final image to generate.
#[arg(long, value_name = "FILE", default_value = "sd_final.png")]
final_image: String,
#[arg(long, value_enum, default_value = "v2-1")]
sd_version: StableDiffusionVersion,
/// Generate intermediary images at each step.
#[arg(long, action)]
intermediary_images: bool,
#[arg(long)]
use_flash_attn: bool,
#[arg(long)]
use_f16: bool,
#[arg(long)]
guidance_scale: Option<f64>,
#[arg(long, value_name = "FILE")]
img2img: Option<String>,
    /// The strength indicates how much to transform the initial image. The
    /// value must be between 0 and 1; a value of 1 discards the initial image
    /// information.
#[arg(long, default_value_t = 0.8)]
img2img_strength: f64,
/// The seed to use when generating random samples.
#[arg(long)]
seed: Option<u64>,
}
#[derive(Debug, Clone, Copy, clap::ValueEnum, PartialEq, Eq)]
enum StableDiffusionVersion {
V1_5,
V2_1,
Xl,
Turbo,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ModelFile {
Tokenizer,
Tokenizer2,
Clip,
Clip2,
Unet,
Vae,
}
impl StableDiffusionVersion {
fn repo(&self) -> &'static str {
match self {
Self::Xl => "stabilityai/stable-diffusion-xl-base-1.0",
Self::V2_1 => "stabilityai/stable-diffusion-2-1",
Self::V1_5 => "runwayml/stable-diffusion-v1-5",
Self::Turbo => "stabilityai/sdxl-turbo",
}
}
fn unet_file(&self, use_f16: bool) -> &'static str {
match self {
Self::V1_5 | Self::V2_1 | Self::Xl | Self::Turbo => {
if use_f16 {
"unet/diffusion_pytorch_model.fp16.safetensors"
} else {
"unet/diffusion_pytorch_model.safetensors"
}
}
}
}
fn vae_file(&self, use_f16: bool) -> &'static str {
match self {
Self::V1_5 | Self::V2_1 | Self::Xl | Self::Turbo => {
if use_f16 {
"vae/diffusion_pytorch_model.fp16.safetensors"
} else {
"vae/diffusion_pytorch_model.safetensors"
}
}
}
}
fn clip_file(&self, use_f16: bool) -> &'static str {
match self {
Self::V1_5 | Self::V2_1 | Self::Xl | Self::Turbo => {
if use_f16 {
"text_encoder/model.fp16.safetensors"
} else {
"text_encoder/model.safetensors"
}
}
}
}
fn clip2_file(&self, use_f16: bool) -> &'static str {
match self {
Self::V1_5 | Self::V2_1 | Self::Xl | Self::Turbo => {
if use_f16 {
"text_encoder_2/model.fp16.safetensors"
} else {
"text_encoder_2/model.safetensors"
}
}
}
}
}
impl ModelFile {
fn get(
&self,
filename: Option<String>,
version: StableDiffusionVersion,
use_f16: bool,
) -> Result<std::path::PathBuf> {
use hf_hub::api::sync::Api;
match filename {
Some(filename) => Ok(std::path::PathBuf::from(filename)),
None => {
let (repo, path) = match self {
Self::Tokenizer => {
let tokenizer_repo = match version {
StableDiffusionVersion::V1_5 | StableDiffusionVersion::V2_1 => {
"openai/clip-vit-base-patch32"
}
StableDiffusionVersion::Xl | StableDiffusionVersion::Turbo => {
                                // This seems similar to the patch32 version except for some
                                // very small differences in the split regex.
"openai/clip-vit-large-patch14"
}
};
(tokenizer_repo, "tokenizer.json")
}
Self::Tokenizer2 => {
("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", "tokenizer.json")
}
Self::Clip => (version.repo(), version.clip_file(use_f16)),
Self::Clip2 => (version.repo(), version.clip2_file(use_f16)),
Self::Unet => (version.repo(), version.unet_file(use_f16)),
Self::Vae => {
// Override for SDXL when using f16 weights.
// See https://github.com/huggingface/candle/issues/1060
if matches!(
version,
StableDiffusionVersion::Xl | StableDiffusionVersion::Turbo,
) && use_f16
{
(
"madebyollin/sdxl-vae-fp16-fix",
"diffusion_pytorch_model.safetensors",
)
} else {
(version.repo(), version.vae_file(use_f16))
}
}
};
let filename = Api::new()?.model(repo.to_string()).get(path)?;
Ok(filename)
}
}
}
}
fn output_filename(
basename: &str,
sample_idx: i64,
num_samples: i64,
timestep_idx: Option<usize>,
) -> String {
let filename = if num_samples > 1 {
match basename.rsplit_once('.') {
None => format!("{basename}.{sample_idx}.png"),
Some((filename_no_extension, extension)) => {
format!("{filename_no_extension}.{sample_idx}.{extension}")
}
}
} else {
basename.to_string()
};
match timestep_idx {
None => filename,
Some(timestep_idx) => match filename.rsplit_once('.') {
None => format!("{filename}-{timestep_idx}.png"),
Some((filename_no_extension, extension)) => {
format!("{filename_no_extension}-{timestep_idx}.{extension}")
}
},
}
}
#[allow(clippy::too_many_arguments)]
fn text_embeddings(
prompt: &str,
uncond_prompt: &str,
tokenizer: Option<String>,
clip_weights: Option<String>,
sd_version: StableDiffusionVersion,
sd_config: &stable_diffusion::StableDiffusionConfig,
use_f16: bool,
device: &Device,
dtype: DType,
use_guide_scale: bool,
first: bool,
) -> Result<Tensor> {
let tokenizer_file = if first {
ModelFile::Tokenizer
} else {
ModelFile::Tokenizer2
};
let tokenizer = tokenizer_file.get(tokenizer, sd_version, use_f16)?;
let tokenizer = Tokenizer::from_file(tokenizer).map_err(E::msg)?;
let pad_id = match &sd_config.clip.pad_with {
Some(padding) => *tokenizer.get_vocab(true).get(padding.as_str()).unwrap(),
None => *tokenizer.get_vocab(true).get("<|endoftext|>").unwrap(),
};
println!("Running with prompt \"{prompt}\".");
let mut tokens = tokenizer
.encode(prompt, true)
.map_err(E::msg)?
.get_ids()
.to_vec();
while tokens.len() < sd_config.clip.max_position_embeddings {
tokens.push(pad_id)
}
let tokens = Tensor::new(tokens.as_slice(), device)?.unsqueeze(0)?;
println!("Building the Clip transformer.");
let clip_weights_file = if first {
ModelFile::Clip
} else {
ModelFile::Clip2
};
let clip_weights = clip_weights_file.get(clip_weights, sd_version, false)?;
let clip_config = if first {
&sd_config.clip
} else {
sd_config.clip2.as_ref().unwrap()
};
let text_model =
stable_diffusion::build_clip_transformer(clip_config, clip_weights, device, DType::F32)?;
let text_embeddings = text_model.forward(&tokens)?;
let text_embeddings = if use_guide_scale {
let mut uncond_tokens = tokenizer
.encode(uncond_prompt, true)
.map_err(E::msg)?
.get_ids()
.to_vec();
while uncond_tokens.len() < sd_config.clip.max_position_embeddings {
uncond_tokens.push(pad_id)
}
let uncond_tokens = Tensor::new(uncond_tokens.as_slice(), device)?.unsqueeze(0)?;
let uncond_embeddings = text_model.forward(&uncond_tokens)?;
Tensor::cat(&[uncond_embeddings, text_embeddings], 0)?.to_dtype(dtype)?
} else {
text_embeddings.to_dtype(dtype)?
};
Ok(text_embeddings)
}
fn image_preprocess<T: AsRef<std::path::Path>>(path: T) -> anyhow::Result<Tensor> {
let img = image::io::Reader::open(path)?.decode()?;
let (height, width) = (img.height() as usize, img.width() as usize);
let height = height - height % 32;
let width = width - width % 32;
let img = img.resize_to_fill(
width as u32,
height as u32,
image::imageops::FilterType::CatmullRom,
);
let img = img.to_rgb8();
let img = img.into_raw();
let img = Tensor::from_vec(img, (height, width, 3), &Device::Cpu)?
.permute((2, 0, 1))?
.to_dtype(DType::F32)?
.affine(2. / 255., -1.)?
.unsqueeze(0)?;
Ok(img)
}
fn run(args: Args) -> Result<()> {
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let Args {
prompt,
uncond_prompt,
cpu,
height,
width,
n_steps,
tokenizer,
final_image,
sliced_attention_size,
num_samples,
sd_version,
clip_weights,
vae_weights,
unet_weights,
tracing,
use_f16,
guidance_scale,
use_flash_attn,
img2img,
img2img_strength,
seed,
..
} = args;
if !(0. ..=1.).contains(&img2img_strength) {
anyhow::bail!("img2img-strength should be between 0 and 1, got {img2img_strength}")
}
let _guard = if tracing {
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
Some(guard)
} else {
None
};
let guidance_scale = match guidance_scale {
Some(guidance_scale) => guidance_scale,
None => match sd_version {
StableDiffusionVersion::V1_5
| StableDiffusionVersion::V2_1
| StableDiffusionVersion::Xl => 7.5,
StableDiffusionVersion::Turbo => 0.,
},
};
let n_steps = match n_steps {
Some(n_steps) => n_steps,
None => match sd_version {
StableDiffusionVersion::V1_5
| StableDiffusionVersion::V2_1
| StableDiffusionVersion::Xl => 30,
StableDiffusionVersion::Turbo => 1,
},
};
let dtype = if use_f16 { DType::F16 } else { DType::F32 };
let sd_config = match sd_version {
StableDiffusionVersion::V1_5 => {
stable_diffusion::StableDiffusionConfig::v1_5(sliced_attention_size, height, width)
}
StableDiffusionVersion::V2_1 => {
stable_diffusion::StableDiffusionConfig::v2_1(sliced_attention_size, height, width)
}
StableDiffusionVersion::Xl => {
stable_diffusion::StableDiffusionConfig::sdxl(sliced_attention_size, height, width)
}
StableDiffusionVersion::Turbo => stable_diffusion::StableDiffusionConfig::sdxl_turbo(
sliced_attention_size,
height,
width,
),
};
let scheduler = sd_config.build_scheduler(n_steps)?;
let device = candle_examples::device(cpu)?;
if let Some(seed) = seed {
device.set_seed(seed)?;
}
let use_guide_scale = guidance_scale > 1.0;
let which = match sd_version {
StableDiffusionVersion::Xl | StableDiffusionVersion::Turbo => vec![true, false],
_ => vec![true],
};
let text_embeddings = which
.iter()
.map(|first| {
text_embeddings(
&prompt,
&uncond_prompt,
tokenizer.clone(),
clip_weights.clone(),
sd_version,
&sd_config,
use_f16,
&device,
dtype,
use_guide_scale,
*first,
)
})
.collect::<Result<Vec<_>>>()?;
let text_embeddings = Tensor::cat(&text_embeddings, D::Minus1)?;
println!("{text_embeddings:?}");
println!("Building the autoencoder.");
let vae_weights = ModelFile::Vae.get(vae_weights, sd_version, use_f16)?;
let vae = sd_config.build_vae(vae_weights, &device, dtype)?;
let init_latent_dist = match &img2img {
None => None,
Some(image) => {
let image = image_preprocess(image)?.to_device(&device)?;
Some(vae.encode(&image)?)
}
};
println!("Building the unet.");
let unet_weights = ModelFile::Unet.get(unet_weights, sd_version, use_f16)?;
let unet = sd_config.build_unet(unet_weights, &device, 4, use_flash_attn, dtype)?;
let t_start = if img2img.is_some() {
n_steps - (n_steps as f64 * img2img_strength) as usize
} else {
0
};
let bsize = 1;
let vae_scale = match sd_version {
StableDiffusionVersion::V1_5
| StableDiffusionVersion::V2_1
| StableDiffusionVersion::Xl => 0.18215,
StableDiffusionVersion::Turbo => 0.13025,
};
for idx in 0..num_samples {
let timesteps = scheduler.timesteps();
let latents = match &init_latent_dist {
Some(init_latent_dist) => {
let latents = (init_latent_dist.sample()? * vae_scale)?.to_device(&device)?;
if t_start < timesteps.len() {
let noise = latents.randn_like(0f64, 1f64)?;
scheduler.add_noise(&latents, noise, timesteps[t_start])?
} else {
latents
}
}
None => {
let latents = Tensor::randn(
0f32,
1f32,
(bsize, 4, sd_config.height / 8, sd_config.width / 8),
&device,
)?;
// scale the initial noise by the standard deviation required by the scheduler
(latents * scheduler.init_noise_sigma())?
}
};
let mut latents = latents.to_dtype(dtype)?;
println!("starting sampling");
for (timestep_index, ×tep) in timesteps.iter().enumerate() {
if timestep_index < t_start {
continue;
}
let start_time = std::time::Instant::now();
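            // For classifier-free guidance the latents are duplicated along the batch
            // dimension so that a single UNet pass produces both the unconditional and the
            // text-conditioned noise predictions, which are combined below using
            // `guidance_scale`.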
let latent_model_input = if use_guide_scale {
Tensor::cat(&[&latents, &latents], 0)?
} else {
latents.clone()
};
let latent_model_input = scheduler.scale_model_input(latent_model_input, timestep)?;
let noise_pred =
unet.forward(&latent_model_input, timestep as f64, &text_embeddings)?;
let noise_pred = if use_guide_scale {
let noise_pred = noise_pred.chunk(2, 0)?;
let (noise_pred_uncond, noise_pred_text) = (&noise_pred[0], &noise_pred[1]);
(noise_pred_uncond + ((noise_pred_text - noise_pred_uncond)? * guidance_scale)?)?
} else {
noise_pred
};
latents = scheduler.step(&noise_pred, timestep, &latents)?;
let dt = start_time.elapsed().as_secs_f32();
println!("step {}/{n_steps} done, {:.2}s", timestep_index + 1, dt);
if args.intermediary_images {
let image = vae.decode(&(&latents / vae_scale)?)?;
let image = ((image / 2.)? + 0.5)?.to_device(&Device::Cpu)?;
let image = (image * 255.)?.to_dtype(DType::U8)?.i(0)?;
let image_filename =
output_filename(&final_image, idx + 1, num_samples, Some(timestep_index + 1));
candle_examples::save_image(&image, image_filename)?
}
}
println!(
"Generating the final image for sample {}/{}.",
idx + 1,
num_samples
);
let image = vae.decode(&(&latents / vae_scale)?)?;
let image = ((image / 2.)? + 0.5)?.to_device(&Device::Cpu)?;
let image = (image.clamp(0f32, 1.)? * 255.)?.to_dtype(DType::U8)?.i(0)?;
let image_filename = output_filename(&final_image, idx + 1, num_samples, None);
candle_examples::save_image(&image, image_filename)?
}
Ok(())
}
fn main() -> Result<()> {
let args = Args::parse();
run(args)
}
| candle/candle-examples/examples/stable-diffusion/main.rs/0 | {
"file_path": "candle/candle-examples/examples/stable-diffusion/main.rs",
"repo_id": "candle",
"token_count": 9556
} | 30 |
# candle-yolo-v8: Object Detection and Pose Estimation
This is a port of [Ultralytics
YOLOv8](https://github.com/ultralytics/ultralytics). The implementation is based
on the [tinygrad
version](https://github.com/tinygrad/tinygrad/blob/master/examples/yolov8.py)
and on the model architecture described in this
[issue](https://github.com/ultralytics/ultralytics/issues/189). The supported
tasks are object detection and pose estimation.
You can try this model online on the [Candle YOLOv8
Space](https://huggingface.co/spaces/lmz/candle-yolo). The model runs fully in
your browser using WebAssembly, so a custom image you upload never leaves your
phone or computer!
## Running some example
### Object Detection
```bash
cargo run --example yolo-v8 --release -- candle-examples/examples/yolo-v8/assets/bike.jpg
```
This prints details about the detected objects and generates a `bike.pp.jpg` file.

Image source:
[wikimedia](https://commons.wikimedia.org/wiki/File:Leading_group,_Giro_d%27Italia_2021,_Stage_15.jpg).

### Pose Estimation
```bash
cargo run --example yolo-v8 --release -- \
candle-examples/examples/yolo-v8/assets/bike.jpg --task pose
```

### Command-line flags
- `--which`: select the model variant to be used, `n`, `s` , `m`, `l`, or `x` by
increasing size and quality.
- `--task`: `detect` for object detection and `pose` for pose estimation.
- `--legend-size`: the size of the characters to print.
- `--model`: use a local model file rather than downloading it from the hub.
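These flags compose with the commands shown above; for instance, to run pose estimation with
the medium-sized variant:
```bash
cargo run --example yolo-v8 --release -- \
  candle-examples/examples/yolo-v8/assets/bike.jpg --which m --task pose
```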
| candle/candle-examples/examples/yolo-v8/README.md/0 | {
"file_path": "candle/candle-examples/examples/yolo-v8/README.md",
"repo_id": "candle",
"token_count": 562
} | 31 |
// Build script to run nvcc and generate the C glue code for launching the flash-attention kernel.
// The cuda build time is very long so one can set the CANDLE_FLASH_ATTN_BUILD_DIR environment
// variable in order to cache the compiled artifacts and avoid recompiling too often.
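// For instance (path purely illustrative):
//   CANDLE_FLASH_ATTN_BUILD_DIR=$HOME/.cache/candle-flash-attn cargo build --features flash-attn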
use anyhow::{Context, Result};
use std::path::PathBuf;
const KERNEL_FILES: [&str; 17] = [
"kernels/flash_api.cu",
"kernels/flash_fwd_hdim128_fp16_sm80.cu",
"kernels/flash_fwd_hdim160_fp16_sm80.cu",
"kernels/flash_fwd_hdim192_fp16_sm80.cu",
"kernels/flash_fwd_hdim224_fp16_sm80.cu",
"kernels/flash_fwd_hdim256_fp16_sm80.cu",
"kernels/flash_fwd_hdim32_fp16_sm80.cu",
"kernels/flash_fwd_hdim64_fp16_sm80.cu",
"kernels/flash_fwd_hdim96_fp16_sm80.cu",
"kernels/flash_fwd_hdim128_bf16_sm80.cu",
"kernels/flash_fwd_hdim160_bf16_sm80.cu",
"kernels/flash_fwd_hdim192_bf16_sm80.cu",
"kernels/flash_fwd_hdim224_bf16_sm80.cu",
"kernels/flash_fwd_hdim256_bf16_sm80.cu",
"kernels/flash_fwd_hdim32_bf16_sm80.cu",
"kernels/flash_fwd_hdim64_bf16_sm80.cu",
"kernels/flash_fwd_hdim96_bf16_sm80.cu",
];
fn main() -> Result<()> {
println!("cargo:rerun-if-changed=build.rs");
for kernel_file in KERNEL_FILES.iter() {
println!("cargo:rerun-if-changed={kernel_file}");
}
println!("cargo:rerun-if-changed=kernels/flash_fwd_kernel.h");
println!("cargo:rerun-if-changed=kernels/flash_fwd_launch_template.h");
println!("cargo:rerun-if-changed=kernels/flash.h");
println!("cargo:rerun-if-changed=kernels/philox.cuh");
println!("cargo:rerun-if-changed=kernels/softmax.h");
println!("cargo:rerun-if-changed=kernels/utils.h");
println!("cargo:rerun-if-changed=kernels/kernel_traits.h");
println!("cargo:rerun-if-changed=kernels/block_info.h");
println!("cargo:rerun-if-changed=kernels/static_switch.h");
let out_dir = PathBuf::from(std::env::var("OUT_DIR").context("OUT_DIR not set")?);
let build_dir = match std::env::var("CANDLE_FLASH_ATTN_BUILD_DIR") {
Err(_) =>
{
#[allow(clippy::redundant_clone)]
out_dir.clone()
}
Ok(build_dir) => {
let path = PathBuf::from(build_dir);
path.canonicalize().expect(&format!(
"Directory doesn't exists: {} (the current directory is {})",
&path.display(),
std::env::current_dir()?.display()
))
}
};
let kernels = KERNEL_FILES.iter().collect();
let builder = bindgen_cuda::Builder::default()
.kernel_paths(kernels)
.out_dir(build_dir.clone())
.arg("-std=c++17")
.arg("-O3")
.arg("-U__CUDA_NO_HALF_OPERATORS__")
.arg("-U__CUDA_NO_HALF_CONVERSIONS__")
.arg("-U__CUDA_NO_HALF2_OPERATORS__")
.arg("-U__CUDA_NO_BFLOAT16_CONVERSIONS__")
.arg("-Icutlass/include")
.arg("--expt-relaxed-constexpr")
.arg("--expt-extended-lambda")
.arg("--use_fast_math")
.arg("--verbose");
let out_file = build_dir.join("libflashattention.a");
builder.build_lib(out_file);
println!("cargo:rustc-link-search={}", build_dir.display());
println!("cargo:rustc-link-lib=flashattention");
println!("cargo:rustc-link-lib=dylib=cudart");
println!("cargo:rustc-link-lib=dylib=stdc++");
Ok(())
}
| candle/candle-flash-attn/build.rs/0 | {
"file_path": "candle/candle-flash-attn/build.rs",
"repo_id": "candle",
"token_count": 1604
} | 32 |
[package]
name = "candle-kernels"
version = "0.4.2"
edition = "2021"
description = "CUDA kernels for Candle"
repository = "https://github.com/huggingface/candle"
keywords = ["blas", "tensor", "machine-learning"]
categories = ["science"]
license = "MIT OR Apache-2.0"
[dependencies]
[build-dependencies]
bindgen_cuda = "0.1.1"
| candle/candle-kernels/Cargo.toml/0 | {
"file_path": "candle/candle-kernels/Cargo.toml",
"repo_id": "candle",
"token_count": 126
} | 33 |
#define _USE_MATH_DEFINES
#include<math.h>
#include<stdint.h>
#include "cuda_utils.cuh"
#define UNARY_OP(TYPENAME, FN_NAME, FUNC) \
extern "C" __global__ void FN_NAME( \
const size_t numel, \
const size_t num_dims, \
const size_t *info, \
const TYPENAME *inp, \
TYPENAME *out \
) { \
const size_t *dims = info; \
const size_t *strides = info + num_dims; \
if (info == nullptr || is_contiguous(num_dims, dims, strides)) { \
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
TYPENAME x = inp ? inp[i] : out[i]; \
out[i] = FUNC; \
} \
} \
else { \
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
unsigned strided_i = get_strided_index(i, num_dims, dims, strides); \
TYPENAME x = inp ? inp[strided_i] : out[i]; \
out[i] = FUNC; \
} \
} \
} \
template<typename T>
__device__ __forceinline__ T gelu_erf_fwd(T x) {
return x * normcdfg(x);
}
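// Tanh approximation of GELU: 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))),
// where sqrt(2/pi) is written as M_2_SQRTPI * M_SQRT1_2.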
template<typename T>
__device__ __forceinline__ T gelu_fwd(T x) {
T x_sq = x * x;
T x_cube = x_sq * x;
T alpha = x + static_cast<T>(0.044715) * x_cube;
return static_cast<T>(0.5) * x * (static_cast<T>(1.0) + tanhg(static_cast<T>(M_2_SQRTPI * M_SQRT1_2) * alpha));
}
template<typename T>
__device__ __forceinline__ T elu_fwd(T x, T alpha) {
if (x > static_cast<T>(0)) {
return x;
}
return alpha * (expg(x) - static_cast<T>(1));
}
template<typename T>
__device__ __forceinline__ T relu_fwd(T x) {
T zero = 0.;
return maxg(x, zero);
}
template<typename T>
__device__ __forceinline__ T silu_fwd(T x) {
return x / (static_cast<T>(1) + expg(-x));
}
#define UNARY_OP1(TYPENAME, FN_NAME, FUNC) \
extern "C" __global__ void FN_NAME( \
const size_t numel, \
const size_t num_dims, \
const size_t *info, \
const TYPENAME param, \
const TYPENAME *inp, \
TYPENAME *out \
) { \
const size_t *dims = info; \
const size_t *strides = info + num_dims; \
if (info == nullptr || is_contiguous(num_dims, dims, strides)) { \
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
TYPENAME x = inp ? inp[i] : out[i]; \
out[i] = FUNC; \
} \
} \
else { \
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
unsigned strided_i = get_strided_index(i, num_dims, dims, strides); \
TYPENAME x = inp ? inp[strided_i] : out[i]; \
out[i] = FUNC; \
} \
} \
} \
#if __CUDA_ARCH__ >= 800
UNARY_OP(__nv_bfloat16, ucopy_bf16, x)
UNARY_OP(__nv_bfloat16, uneg_bf16, -x)
UNARY_OP(__nv_bfloat16, urecip_bf16, recipg(x))
UNARY_OP(__nv_bfloat16, uexp_bf16, expg(x))
UNARY_OP(__nv_bfloat16, ulog_bf16, logg(x))
UNARY_OP(__nv_bfloat16, usin_bf16, sing(x))
UNARY_OP(__nv_bfloat16, ucos_bf16, cosg(x))
UNARY_OP(__nv_bfloat16, utanh_bf16, tanhg(x))
UNARY_OP(__nv_bfloat16, uerf_bf16, erfg(x))
UNARY_OP(__nv_bfloat16, uceil_bf16, ceilg(x))
UNARY_OP(__nv_bfloat16, ufloor_bf16, floorg(x))
UNARY_OP(__nv_bfloat16, uround_bf16, roundg(x))
UNARY_OP(__nv_bfloat16, unormcdf_bf16, normcdfg(x))
UNARY_OP(__nv_bfloat16, uabs_bf16, absg(x))
UNARY_OP(__nv_bfloat16, usqr_bf16, x*x)
UNARY_OP(__nv_bfloat16, usqrt_bf16, sqrtg(x))
UNARY_OP(__nv_bfloat16, ugelu_bf16, gelu_fwd(x))
UNARY_OP(__nv_bfloat16, ugelu_erf_bf16, gelu_erf_fwd(x))
UNARY_OP(__nv_bfloat16, urelu_bf16, relu_fwd(x))
UNARY_OP1(__nv_bfloat16, uelu_bf16, elu_fwd(x, param))
UNARY_OP(__nv_bfloat16, usilu_bf16, silu_fwd(x))
UNARY_OP1(__nv_bfloat16, upowf_bf16, powg(x, param))
#endif
#if __CUDA_ARCH__ >= 530
UNARY_OP(__half, ucopy_f16, x)
UNARY_OP(__half, uneg_f16, -x)
UNARY_OP(__half, urecip_f16, recipg(x))
UNARY_OP(__half, uexp_f16, expg(x))
UNARY_OP(__half, ulog_f16, logg(x))
UNARY_OP(__half, usin_f16, sing(x))
UNARY_OP(__half, ucos_f16, cosg(x))
UNARY_OP(__half, utanh_f16, tanhg(x))
UNARY_OP(__half, uerf_f16, erfg(x))
UNARY_OP(__half, uceil_f16, ceilg(x))
UNARY_OP(__half, ufloor_f16, floorg(x))
UNARY_OP(__half, uround_f16, roundg(x))
UNARY_OP(__half, unormcdf_f16, normcdfg(x))
UNARY_OP(__half, uabs_f16, absg(x))
UNARY_OP(__half, usqr_f16, x*x)
UNARY_OP(__half, usqrt_f16, sqrtg(x))
UNARY_OP(__half, ugelu_f16, gelu_fwd(x))
UNARY_OP(__half, ugelu_erf_f16, gelu_erf_fwd(x))
UNARY_OP(__half, urelu_f16, relu_fwd(x))
UNARY_OP1(__half, uelu_f16, elu_fwd(x, param))
UNARY_OP(__half, usilu_f16, silu_fwd(x))
UNARY_OP1(__half, upowf_f16, powg(x, param))
#endif
UNARY_OP(uint8_t, ucopy_u8, x)
UNARY_OP(uint32_t, ucopy_u32, x)
UNARY_OP(int64_t, ucopy_i64, x)
UNARY_OP(float, ucopy_f32, x)
UNARY_OP(double, ucopy_f64, x)
UNARY_OP(float, uneg_f32, -x)
UNARY_OP(double, uneg_f64, -x)
UNARY_OP(float, urecip_f32, recipg(x))
UNARY_OP(double, urecip_f64, recipg(x))
UNARY_OP(float, uexp_f32, expg(x))
UNARY_OP(double, uexp_f64, expg(x))
UNARY_OP(float, ulog_f32, logg(x))
UNARY_OP(double, ulog_f64, logg(x))
UNARY_OP(float, usin_f32, sing(x))
UNARY_OP(double, usin_f64, sing(x))
UNARY_OP(float, ucos_f32, cosg(x))
UNARY_OP(double, ucos_f64, cosg(x))
UNARY_OP(float, utanh_f32, tanhg(x))
UNARY_OP(double, utanh_f64, tanhg(x))
UNARY_OP(float, uerf_f32, erfg(x))
UNARY_OP(double, uerf_f64, erfg(x))
UNARY_OP(float, uceil_f32, ceilg(x))
UNARY_OP(double, uceil_f64, ceilg(x))
UNARY_OP(float, ufloor_f32, floorg(x))
UNARY_OP(double, ufloor_f64, floorg(x))
UNARY_OP(float, uround_f32, roundg(x))
UNARY_OP(double, uround_f64, roundg(x))
UNARY_OP(float, unormcdf_f32, normcdfg(x))
UNARY_OP(double, unormcdf_f64, normcdfg(x))
UNARY_OP(float, uabs_f32, absg(x))
UNARY_OP(double, uabs_f64, absg(x))
UNARY_OP(float, usqr_f32, x*x)
UNARY_OP(double, usqr_f64, x*x)
UNARY_OP(float, usqrt_f32, sqrtg(x))
UNARY_OP(double, usqrt_f64, sqrtg(x))
UNARY_OP(float, ugelu_f32, gelu_fwd(x))
UNARY_OP(double, ugelu_f64, gelu_fwd(x))
UNARY_OP(float, ugelu_erf_f32, gelu_erf_fwd(x))
UNARY_OP(double, ugelu_erf_f64, gelu_erf_fwd(x))
UNARY_OP(float, urelu_f32, relu_fwd(x))
UNARY_OP(double, urelu_f64, relu_fwd(x))
UNARY_OP1(float, uelu_f32, elu_fwd(x, param))
UNARY_OP1(double, uelu_f64, elu_fwd(x, param))
UNARY_OP(float, usilu_f32, silu_fwd(x))
UNARY_OP(double, usilu_f64, silu_fwd(x))
UNARY_OP1(float, upowf_f32, powg(x, param))
UNARY_OP1(double, upowf_f64, powg(x, param))
| candle/candle-kernels/src/unary.cu/0 | {
"file_path": "candle/candle-kernels/src/unary.cu",
"repo_id": "candle",
"token_count": 3386
} | 34 |
use candle_metal_kernels::{call_affine, Kernels};
use metal::objc::rc::autoreleasepool;
use metal::{Device, MTLResourceOptions};
use rand;
use std::any::type_name;
use std::time::Instant;
fn main() {
let device = Device::system_default().unwrap();
let kernels = Kernels::new();
let f32_1k = (0..1000).map(|_| rand::random::<f32>()).collect::<Vec<_>>();
let f32_10k = (0..10000)
.map(|_| rand::random::<f32>())
.collect::<Vec<_>>();
let f32_100k = (0..100000)
.map(|_| rand::random::<f32>())
.collect::<Vec<_>>();
println!(
"{0: <5} | {1: <19} | {2: <6} | {3: <5} | {4: <11} | {5: <11}",
"dtype", "kernel", "size", "runs", "total time", "avg time"
);
// f32
run_affine_bench(&device, &kernels, &f32_1k);
run_affine_bench(&device, &kernels, &f32_10k);
run_affine_bench(&device, &kernels, &f32_100k);
}
fn run_affine_bench<T: Clone>(device: &Device, kernels: &Kernels, v: &[T]) {
let command_queue = device.new_command_queue();
let options = MTLResourceOptions::StorageModeManaged;
let iterations = 10000;
let input = device.new_buffer_with_data(
v.as_ptr() as *const core::ffi::c_void,
core::mem::size_of_val(v) as u64,
options,
);
let mut output = device.new_buffer(core::mem::size_of_val(v) as u64, options);
let mul: f32 = 1.2345;
let add: f32 = 2.3456;
let total_time = autoreleasepool(|| {
let command_buffer = command_queue.new_command_buffer();
let start = Instant::now();
for _ in 0..iterations {
call_affine(
&device,
command_buffer,
&kernels,
"affine_float",
v.len(),
&input,
&mut output,
mul,
add,
)
.unwrap();
}
command_buffer.commit();
command_buffer.wait_until_completed();
start.elapsed()
});
println!(
"{0: <5} | {1: <19} | {2: <6} | {3: <5} | {4: <11?} | {5: <11?}",
type_name::<T>().split("::").last().unwrap(),
"affine",
v.len(),
iterations,
total_time,
total_time / iterations
);
}
| candle/candle-metal-kernels/tmp/affine.rs/0 | {
"file_path": "candle/candle-metal-kernels/tmp/affine.rs",
"repo_id": "candle",
"token_count": 1154
} | 35 |
//! Layer Normalization.
//!
//! This layer applies Layer Normalization over a mini-batch of inputs as described in [`Layer
//! Normalization`]. The input is expected to have three dimensions: a batch dimension, a length,
//! and a hidden size; the normalization is applied over the last dimension.
//!
//! # Example
//!
//! ```rust
//! use candle::{Tensor, Device::Cpu, test_utils::to_vec3_round};
//! use candle_nn::{LayerNorm, Module};
//! # fn main() -> candle::Result<()> {
//!
//! let w = Tensor::new(1f32, &Cpu)?;
//! let b = Tensor::new(0f32, &Cpu)?;
//! let layer = LayerNorm::new(w, b, 1e-5);
//!
//! let xs = Tensor::new(
//! &[[[1f32, 2., 3.], [4., 5., 6.], [9., 8., 7.]]],
//! &Cpu)?;
//! let ys = layer.forward(&xs)?;
//! assert_eq!(
//! to_vec3_round(&ys, 4)?,
//! &[[[-1.2247, 0.0, 1.2247],
//! [-1.2247, 0.0, 1.2247],
//! [ 1.2247, 0.0, -1.2247]]]);
//! # Ok(()) }
//! ```
//!
//! [`Layer Normalization`]: https://arxiv.org/abs/1607.06450
use candle::{DType, Result, Tensor, D};
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct LayerNormConfig {
pub eps: f64,
    /// Whether to remove the mean or not. The default is true; when set to false, this turns
    /// this layer into RmsNorm.
pub remove_mean: bool,
pub affine: bool,
}
impl Default for LayerNormConfig {
fn default() -> Self {
Self {
eps: 1e-5,
remove_mean: true,
affine: true,
}
}
}
impl From<f64> for LayerNormConfig {
fn from(eps: f64) -> Self {
Self {
eps,
remove_mean: true,
affine: true,
}
}
}
// This layer norm version handles both a weight and a bias and removes the mean.
#[derive(Clone, Debug)]
pub struct LayerNorm {
weight: Tensor,
bias: Option<Tensor>,
remove_mean: bool,
eps: f64,
}
impl LayerNorm {
pub fn new(weight: Tensor, bias: Tensor, eps: f64) -> Self {
Self {
weight,
bias: Some(bias),
remove_mean: true,
eps,
}
}
pub fn new_no_bias(weight: Tensor, eps: f64) -> Self {
Self {
weight,
bias: None,
remove_mean: true,
eps,
}
}
pub fn rms_norm(weight: Tensor, eps: f64) -> Self {
Self {
weight,
bias: None,
remove_mean: false,
eps,
}
}
pub fn weight(&self) -> &Tensor {
&self.weight
}
pub fn bias(&self) -> Option<&Tensor> {
self.bias.as_ref()
}
}
impl crate::Module for LayerNorm {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let x_dtype = x.dtype();
let internal_dtype = match x_dtype {
DType::F16 | DType::BF16 => DType::F32,
d => d,
};
let hidden_size = x.dim(D::Minus1)?;
let x = x.to_dtype(internal_dtype)?;
let x = if self.remove_mean {
let mean_x = (x.sum_keepdim(D::Minus1)? / hidden_size as f64)?;
x.broadcast_sub(&mean_x)?
} else {
x
};
let norm_x = (x.sqr()?.sum_keepdim(D::Minus1)? / hidden_size as f64)?;
let x_normed = x.broadcast_div(&(norm_x + self.eps)?.sqrt()?)?;
let x = x_normed.to_dtype(x_dtype)?.broadcast_mul(&self.weight)?;
match &self.bias {
None => Ok(x),
Some(bias) => x.broadcast_add(bias),
}
}
}
pub fn layer_norm<C: Into<LayerNormConfig>>(
size: usize,
config: C,
vb: crate::VarBuilder,
) -> Result<LayerNorm> {
let config = config.into();
let weight = vb.get_with_hints(size, "weight", crate::Init::Const(1.))?;
let bias = if config.affine {
Some(vb.get_with_hints(size, "bias", crate::Init::Const(0.))?)
} else {
None
};
Ok(LayerNorm {
weight,
bias,
remove_mean: config.remove_mean,
eps: config.eps,
})
}
/// RmsNorm is a specialized version of the LayerNorm module.
#[derive(Clone, Debug)]
pub struct RmsNorm(LayerNorm);
impl RmsNorm {
pub fn new(weight: Tensor, eps: f64) -> Self {
Self(LayerNorm::rms_norm(weight, eps))
}
pub fn into_inner(self) -> LayerNorm {
self.0
}
}
impl crate::Module for RmsNorm {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
self.0.forward(xs)
}
}
pub fn rms_norm(size: usize, eps: f64, vb: crate::VarBuilder) -> Result<RmsNorm> {
let config = LayerNormConfig {
eps,
remove_mean: false,
affine: false,
};
Ok(RmsNorm(layer_norm(size, config, vb)?))
}
| candle/candle-nn/src/layer_norm.rs/0 | {
"file_path": "candle/candle-nn/src/layer_norm.rs",
"repo_id": "candle",
"token_count": 2263
} | 36 |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use candle::test_utils::{to_vec0_round, to_vec2_round};
use anyhow::Result;
use candle::{DType, Device, Tensor, Var};
use candle_nn::{AdamW, Linear, Module, Optimizer, ParamsAdamW, SGD};
#[test]
fn sgd_optim() -> Result<()> {
let x = Var::new(0f32, &Device::Cpu)?;
let mut sgd = SGD::new(vec![x.clone()], 0.1)?;
let xt = x.as_tensor();
for _step in 0..100 {
let loss = ((xt - 4.2)? * (xt - 4.2)?)?;
sgd.backward_step(&loss)?
}
assert_eq!(x.to_scalar::<f32>()?, 4.199999);
Ok(())
}
/* The results of this test have been checked against the following PyTorch code.
import torch
from torch import optim
w_gen = torch.tensor([[3., 1.]])
b_gen = torch.tensor([-2.])
sample_xs = torch.tensor([[2., 1.], [7., 4.], [-4., 12.], [5., 8.]])
sample_ys = sample_xs.matmul(w_gen.t()) + b_gen
m = torch.nn.Linear(2, 1)
with torch.no_grad():
m.weight.zero_()
m.bias.zero_()
optimizer = optim.SGD(m.parameters(), lr=0.004, momentum=0.)
for _step in range(1000):
optimizer.zero_grad()
ys = m(sample_xs)
loss = ((ys - sample_ys)**2).sum()
loss.backward()
optimizer.step()
print(m.weight)
print(m.bias)
*/
#[test]
fn sgd_linear_regression() -> Result<()> {
    // Generate some linear data, y = 3 * x1 + x2 - 2.
let w_gen = Tensor::new(&[[3f32, 1.]], &Device::Cpu)?;
let b_gen = Tensor::new(-2f32, &Device::Cpu)?;
let gen = Linear::new(w_gen, Some(b_gen));
let sample_xs = Tensor::new(&[[2f32, 1.], [7., 4.], [-4., 12.], [5., 8.]], &Device::Cpu)?;
let sample_ys = gen.forward(&sample_xs)?;
// Now use backprop to run a linear regression between samples and get the coefficients back.
let w = Var::new(&[[0f32, 0.]], &Device::Cpu)?;
let b = Var::new(0f32, &Device::Cpu)?;
let mut sgd = SGD::new(vec![w.clone(), b.clone()], 0.004)?;
let lin = Linear::new(w.as_tensor().clone(), Some(b.as_tensor().clone()));
for _step in 0..1000 {
let ys = lin.forward(&sample_xs)?;
let loss = ys.sub(&sample_ys)?.sqr()?.sum_all()?;
sgd.backward_step(&loss)?;
}
assert_eq!(w.to_vec2::<f32>()?, &[[2.9983196, 0.99790204]]);
assert_eq!(b.to_scalar::<f32>()?, -1.9796902);
Ok(())
}
/* The following test returns the same values as the PyTorch code below.
import torch
from torch import optim
w_gen = torch.tensor([[3., 1.]])
b_gen = torch.tensor([-2.])
sample_xs = torch.tensor([[2., 1.], [7., 4.], [-4., 12.], [5., 8.]])
sample_ys = sample_xs.matmul(w_gen.t()) + b_gen
m = torch.nn.Linear(2, 1)
with torch.no_grad():
m.weight.zero_()
m.bias.zero_()
optimizer = optim.AdamW(m.parameters(), lr=0.1)
for _step in range(100):
optimizer.zero_grad()
ys = m(sample_xs)
loss = ((ys - sample_ys)**2).sum()
loss.backward()
optimizer.step()
print(m.weight)
print(m.bias)
*/
#[test]
fn adamw_linear_regression() -> Result<()> {
let w_gen = Tensor::new(&[[3f32, 1.]], &Device::Cpu)?;
let b_gen = Tensor::new(-2f32, &Device::Cpu)?;
let gen = Linear::new(w_gen, Some(b_gen));
let sample_xs = Tensor::new(&[[2f32, 1.], [7., 4.], [-4., 12.], [5., 8.]], &Device::Cpu)?;
let sample_ys = gen.forward(&sample_xs)?;
// Now use backprop to run a linear regression between samples and get the coefficients back.
let w = Var::new(&[[0f32, 0.]], &Device::Cpu)?;
let b = Var::new(0f32, &Device::Cpu)?;
let params = ParamsAdamW {
lr: 0.1,
..Default::default()
};
let mut opt = AdamW::new(vec![w.clone(), b.clone()], params)?;
let lin = Linear::new(w.as_tensor().clone(), Some(b.as_tensor().clone()));
for _step in 0..100 {
let ys = lin.forward(&sample_xs)?;
let loss = ys.sub(&sample_ys)?.sqr()?.sum_all()?;
opt.backward_step(&loss)?;
}
assert_eq!(to_vec2_round(w.as_tensor(), 4)?, &[[2.7257, 0.7097]]);
assert_eq!(to_vec0_round(b.as_tensor(), 4)?, 0.7873);
Ok(())
}
#[test]
fn adamw_linear_regression_varmap() -> Result<()> {
use candle_nn::Init::Const;
// Similar as the previous test but using a VarMap.
let w_gen = Tensor::new(&[[3f32, 1.]], &Device::Cpu)?;
let b_gen = Tensor::new(-2f32, &Device::Cpu)?;
let gen = Linear::new(w_gen, Some(b_gen));
let sample_xs = Tensor::new(&[[2f32, 1.], [7., 4.], [-4., 12.], [5., 8.]], &Device::Cpu)?;
let sample_ys = gen.forward(&sample_xs)?;
let mut var_map = candle_nn::VarMap::new();
let w = var_map.get((1, 2), "w", Const(0.), DType::F32, &Device::Cpu)?;
let b = var_map.get((), "b", Const(0.), DType::F32, &Device::Cpu)?;
let params = ParamsAdamW {
lr: 0.1,
..Default::default()
};
let mut opt = AdamW::new(var_map.all_vars(), params)?;
let lin = Linear::new(w, Some(b));
for _step in 0..100 {
let ys = lin.forward(&sample_xs)?;
let loss = ys.sub(&sample_ys)?.sqr()?.sum_all()?;
opt.backward_step(&loss)?;
}
assert_eq!(to_vec2_round(lin.weight(), 4)?, &[[2.7257, 0.7097]]);
assert_eq!(to_vec0_round(lin.bias().unwrap(), 4)?, 0.7873);
var_map.set([("w", Tensor::zeros((1, 2), DType::F32, &Device::Cpu)?)].into_iter())?;
var_map.set([("b", Tensor::ones((), DType::F32, &Device::Cpu)?)].into_iter())?;
assert_eq!(to_vec2_round(lin.weight(), 4)?, &[[0., 0.]]);
assert_eq!(to_vec0_round(lin.bias().unwrap(), 4)?, 1.);
Ok(())
}
| candle/candle-nn/tests/optim.rs/0 | {
"file_path": "candle/candle-nn/tests/optim.rs",
"repo_id": "candle",
"token_count": 2568
} | 37 |
import logging
try:
from .candle import *
except ImportError as e:
# If we are in development mode, or we did not bundle the DLLs, we try to locate them here
    # PyO3 won't give us any information about which DLLs are missing, so we can only try to load
# the DLLs and re-import the module
logging.warning("DLLs were not bundled with this package. Trying to locate them...")
import os
import platform
def locate_cuda_dlls():
logging.warning("Locating CUDA DLLs...")
# Try to locate CUDA_PATH environment variable
cuda_path = os.environ.get("CUDA_PATH", None)
if cuda_path:
logging.warning(f"Found CUDA_PATH environment variable: {cuda_path}")
if platform.system() == "Windows":
cuda_path = os.path.join(cuda_path, "bin")
else:
cuda_path = os.path.join(cuda_path, "lib64")
logging.warning(f"Adding {cuda_path} to DLL search path...")
os.add_dll_directory(cuda_path)
else:
logging.warning("CUDA_PATH environment variable not found!")
def locate_mkl_dlls():
# Try to locate ONEAPI_ROOT environment variable
oneapi_root = os.environ.get("ONEAPI_ROOT", None)
if oneapi_root:
if platform.system() == "Windows":
mkl_path = os.path.join(
oneapi_root, "compiler", "latest", "windows", "redist", "intel64_win", "compiler"
)
else:
mkl_path = os.path.join(oneapi_root, "mkl", "latest", "lib", "intel64")
logging.warning(f"Adding {mkl_path} to DLL search path...")
os.add_dll_directory(mkl_path)
else:
logging.warning("ONEAPI_ROOT environment variable not found!")
locate_cuda_dlls()
locate_mkl_dlls()
try:
from .candle import *
except ImportError as inner_e:
raise ImportError("Could not locate DLLs. Please check the documentation for more information.")
__doc__ = candle.__doc__
if hasattr(candle, "__all__"):
__all__ = candle.__all__
| candle/candle-pyo3/py_src/candle/__init__.py/0 | {
"file_path": "candle/candle-pyo3/py_src/candle/__init__.py",
"repo_id": "candle",
"token_count": 919
} | 38 |
# Generated content DO NOT EDIT
from .. import utils
cuda_is_available = utils.cuda_is_available
get_num_threads = utils.get_num_threads
has_accelerate = utils.has_accelerate
has_mkl = utils.has_mkl
load_ggml = utils.load_ggml
load_gguf = utils.load_gguf
load_safetensors = utils.load_safetensors
save_gguf = utils.save_gguf
save_safetensors = utils.save_safetensors
| candle/candle-pyo3/py_src/candle/utils/__init__.py/0 | {
"file_path": "candle/candle-pyo3/py_src/candle/utils/__init__.py",
"repo_id": "candle",
"token_count": 150
} | 39 |
import candle
from candle import Tensor
from candle.utils import cuda_is_available
from candle.testing import assert_equal
import pytest
def test_tensor_can_be_constructed():
t = Tensor(42.0)
assert t.values() == 42.0
def test_tensor_can_be_constructed_from_list():
t = Tensor([3.0, 1, 4, 1, 5, 9, 2, 6])
assert t.values() == [3.0, 1, 4, 1, 5, 9, 2, 6]
def test_tensor_can_be_constructed_from_list_of_lists():
t = Tensor([[3.0, 1, 4, 1], [5, 9, 2, 6]])
assert t.values() == [[3.0, 1, 4, 1], [5, 9, 2, 6]]
def test_tensor_can_be_quantized():
t = candle.randn((16, 256))
for format in [
"q4_0",
"q4_1",
"q5_0",
"q5_1",
"q8_0",
"q2k",
"q3k",
"q4k",
"q5k",
"q8k",
]:
for formatted_format in [format.upper(), format.lower()]:
quant_t = t.quantize(formatted_format)
assert quant_t.ggml_dtype.lower() == format.lower()
assert quant_t.shape == t.shape
def test_tensor_can_be_indexed():
t = Tensor([[3.0, 1, 4, 1], [5, 9, 2, 6]])
assert t[0].values() == [3.0, 1.0, 4.0, 1.0]
assert t[1].values() == [5.0, 9.0, 2.0, 6.0]
assert t[-1].values() == [5.0, 9.0, 2.0, 6.0]
assert t[-2].values() == [3.0, 1.0, 4.0, 1.0]
def test_tensor_can_be_sliced():
t = Tensor([3.0, 1, 4, 10, 5, 9, 2, 6])
assert t[0:4].values() == [3.0, 1.0, 4.0, 10.0]
assert t[4:8].values() == [5.0, 9.0, 2.0, 6.0]
assert t[-4:].values() == [5.0, 9.0, 2.0, 6.0]
assert t[:-4].values() == [3.0, 1.0, 4.0, 10.0]
assert t[-4:-2].values() == [5.0, 9.0]
assert t[...].values() == t.values()
def test_tensor_can_be_sliced_2d():
t = Tensor([[3.0, 1, 4, 1], [5, 9, 2, 6]])
assert t[:, 0].values() == [3.0, 5]
assert t[:, 1].values() == [1.0, 9.0]
assert t[0, 0].values() == 3.0
assert t[:, -1].values() == [1.0, 6.0]
assert t[:, -4].values() == [3.0, 5]
assert t[..., 0].values() == [3.0, 5]
def test_tensor_can_be_sliced_3d():
t = Tensor([[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], [13, 14, 15, 16]]])
assert t[:, :, 0].values() == [[1, 5], [9, 13]]
assert t[:, :, 0:2].values() == [[[1, 2], [5, 6]], [[9, 10], [13, 14]]]
assert t[:, 0, 0].values() == [1, 9]
assert t[..., 0].values() == [[1, 5], [9, 13]]
assert t[..., 0:2].values() == [[[1, 2], [5, 6]], [[9, 10], [13, 14]]]
def assert_bool(t: Tensor, expected: bool):
assert t.shape == ()
assert str(t.dtype) == str(candle.u8)
assert bool(t.values()) == expected
def test_tensor_supports_equality_operations_with_scalars():
t = Tensor(42.0)
assert_bool(t == 42.0, True)
assert_bool(t == 43.0, False)
assert_bool(t != 42.0, False)
assert_bool(t != 43.0, True)
assert_bool(t > 41.0, True)
assert_bool(t > 42.0, False)
assert_bool(t >= 41.0, True)
assert_bool(t >= 42.0, True)
assert_bool(t < 43.0, True)
assert_bool(t < 42.0, False)
assert_bool(t <= 43.0, True)
assert_bool(t <= 42.0, True)
def test_tensor_supports_equality_operations_with_tensors():
t = Tensor(42.0)
same = Tensor(42.0)
other = Tensor(43.0)
assert_bool(t == same, True)
assert_bool(t == other, False)
assert_bool(t != same, False)
assert_bool(t != other, True)
assert_bool(t > same, False)
assert_bool(t > other, False)
assert_bool(t >= same, True)
assert_bool(t >= other, False)
assert_bool(t < same, False)
assert_bool(t < other, True)
assert_bool(t <= same, True)
assert_bool(t <= other, True)
def test_tensor_equality_operations_can_broadcast():
# Create a decoder attention mask as a test case
# e.g.
# [[1,0,0]
# [1,1,0]
# [1,1,1]]
mask_cond = candle.Tensor([0, 1, 2])
mask = mask_cond < (mask_cond + 1).reshape((3, 1))
assert mask.shape == (3, 3)
assert_equal(mask, Tensor([[1, 0, 0], [1, 1, 0], [1, 1, 1]]).to_dtype(candle.u8))
def test_tensor_can_be_hashed():
t = Tensor(42.0)
other = Tensor(42.0)
# Hash should represent a unique tensor
assert hash(t) != hash(other)
assert hash(t) == hash(t)
def test_tensor_can_be_expanded_with_none():
t = candle.rand((12, 12))
b = t[None]
assert b.shape == (1, 12, 12)
c = t[:, None, None, :]
assert c.shape == (12, 1, 1, 12)
d = t[None, :, None, :]
assert d.shape == (1, 12, 1, 12)
e = t[None, None, :, :]
assert e.shape == (1, 1, 12, 12)
f = t[:, :, None]
assert f.shape == (12, 12, 1)
def test_tensor_can_be_index_via_tensor():
t = candle.Tensor([[1, 2, 1, 2], [3, 4, 3, 4], [5, 6, 5, 6]])
indexed = t[candle.Tensor([0, 2])]
assert indexed.shape == (2, 4)
assert indexed.values() == [[1, 2, 1, 2], [5, 6, 5, 6]]
indexed = t[:, candle.Tensor([0, 2])]
assert indexed.shape == (3, 2)
assert indexed.values() == [[1, 1], [3, 3], [5, 5]]
def test_tensor_can_be_index_via_list():
t = candle.Tensor([[1, 2, 1, 2], [3, 4, 3, 4], [5, 6, 5, 6]])
indexed = t[[0, 2]]
assert indexed.shape == (2, 4)
assert indexed.values() == [[1, 2, 1, 2], [5, 6, 5, 6]]
indexed = t[:, [0, 2]]
assert indexed.shape == (3, 2)
assert indexed.values() == [[1, 1], [3, 3], [5, 5]]
def test_tensor_can_be_cast_via_to():
t = Tensor(42.0)
assert str(t.dtype) == str(candle.f32)
t_new_args = t.to(candle.f64)
assert str(t_new_args.dtype) == str(candle.f64)
t_new_kwargs = t.to(dtype=candle.f64)
assert str(t_new_kwargs.dtype) == str(candle.f64)
pytest.raises(TypeError, lambda: t.to("not a dtype"))
pytest.raises(TypeError, lambda: t.to(dtype="not a dtype"))
pytest.raises(TypeError, lambda: t.to(candle.f64, "not a dtype"))
pytest.raises(TypeError, lambda: t.to())
pytest.raises(ValueError, lambda: t.to(candle.f16, dtype=candle.f64))
pytest.raises(ValueError, lambda: t.to(candle.f16, candle.f16))
other = Tensor(42.0).to(candle.f64)
t_new_other_args = t.to(other)
assert str(t_new_other_args.dtype) == str(candle.f64)
t_new_other_kwargs = t.to(other=other)
assert str(t_new_other_kwargs.dtype) == str(candle.f64)
@pytest.mark.skipif(not cuda_is_available(), reason="CUDA is not available")
def test_tensor_can_be_moved_via_to():
t = Tensor(42.0)
assert t.device == "cpu"
t_new_args = t.to("cuda")
assert t_new_args.device == "cuda"
t_new_kwargs = t.to(device="cuda")
assert t_new_kwargs.device == "cuda"
pytest.raises(TypeError, lambda: t.to("not a device"))
pytest.raises(TypeError, lambda: t.to(device="not a device"))
pytest.raises(TypeError, lambda: t.to("cuda", "not a device"))
pytest.raises(TypeError, lambda: t.to())
pytest.raises(ValueError, lambda: t.to("cuda", device="cpu"))
pytest.raises(ValueError, lambda: t.to("cuda", "cuda"))
other = Tensor(42.0).to("cuda")
t_new_other_args = t.to(other)
assert t_new_other_args.device == "cuda"
t_new_other_kwargs = t.to(other=other)
assert t_new_other_kwargs.device == "cuda"
@pytest.mark.skipif(not cuda_is_available(), reason="CUDA is not available")
def test_tensor_can_be_moved_and_cast_via_to():
t = Tensor(42.0)
assert t.device == "cpu"
assert str(t.dtype) == str(candle.f32)
t_new_args = t.to("cuda", candle.f64)
assert t_new_args.device == "cuda"
assert str(t_new_args.dtype) == str(candle.f64)
t_new_kwargs = t.to(device="cuda", dtype=candle.f64)
assert t_new_kwargs.device == "cuda"
assert str(t_new_kwargs.dtype) == str(candle.f64)
other = Tensor(42.0).to("cuda").to(candle.f64)
t_new_other_args = t.to(other)
assert t_new_other_args.device == "cuda"
assert str(t_new_other_args.dtype) == str(candle.f64)
t_new_other_kwargs = t.to(other=other)
assert t_new_other_kwargs.device == "cuda"
assert str(t_new_other_kwargs.dtype) == str(candle.f64)
def test_tensor_can_be_added():
t = Tensor(42.0)
result = t + t
assert result.values() == 84.0
result = t + 2.0
assert result.values() == 44.0
a = candle.rand((3, 1, 4))
b = candle.rand((2, 1))
c_native = a.broadcast_add(b)
c = a + b
assert c.shape == (3, 2, 4)
assert c.values() == c_native.values()
with pytest.raises(ValueError):
d = candle.rand((3, 4, 5))
e = candle.rand((4, 6))
f = d + e
def test_tensor_can_be_subtracted():
t = Tensor(42.0)
result = t - t
assert result.values() == 0
result = t - 2.0
assert result.values() == 40.0
a = candle.rand((3, 1, 4))
b = candle.rand((2, 1))
c_native = a.broadcast_sub(b)
c = a - b
assert c.shape == (3, 2, 4)
assert c.values() == c_native.values()
with pytest.raises(ValueError):
d = candle.rand((3, 4, 5))
e = candle.rand((4, 6))
f = d - e
def test_tensor_can_be_multiplied():
t = Tensor(42.0)
result = t * t
assert result.values() == 1764.0
result = t * 2.0
assert result.values() == 84.0
a = candle.rand((3, 1, 4))
b = candle.rand((2, 1))
c_native = a.broadcast_mul(b)
c = a * b
assert c.shape == (3, 2, 4)
assert c.values() == c_native.values()
with pytest.raises(ValueError):
d = candle.rand((3, 4, 5))
e = candle.rand((4, 6))
f = d * e
def test_tensor_can_be_divided():
t = Tensor(42.0)
result = t / t
assert result.values() == 1.0
result = t / 2.0
assert result.values() == 21.0
a = candle.rand((3, 1, 4))
b = candle.rand((2, 1))
c_native = a.broadcast_div(b)
c = a / b
assert c.shape == (3, 2, 4)
assert c.values() == c_native.values()
with pytest.raises(ValueError):
d = candle.rand((3, 4, 5))
e = candle.rand((4, 6))
f = d / e
| candle/candle-pyo3/tests/native/test_tensor.py/0 | {
"file_path": "candle/candle-pyo3/tests/native/test_tensor.py",
"repo_id": "candle",
"token_count": 4688
} | 40 |
//! EfficientViT (MSRA) inference implementation based on timm.
//!
//! See "Ef๏ฌcientViT: Memory Ef๏ฌcient Vision Transformer with Cascaded Group Attention"
//! https://arxiv.org/abs/2305.07027
//! https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/efficientvit_msra.py
use candle::{Result, Tensor, D};
use candle_nn::{
batch_norm, conv2d, conv2d_no_bias, linear, ops::sigmoid, ops::softmax, Conv2dConfig, Func,
VarBuilder,
};
#[derive(Clone)]
pub struct Config {
channels: [usize; 3],
blocks: [usize; 3],
heads: [usize; 3],
kernels: [usize; 4],
}
impl Config {
pub fn m0() -> Self {
Self {
channels: [64, 128, 192],
blocks: [1, 2, 3],
heads: [4, 4, 4],
kernels: [5, 5, 5, 5],
}
}
pub fn m1() -> Self {
Self {
channels: [128, 144, 192],
blocks: [1, 2, 3],
heads: [2, 3, 3],
kernels: [7, 5, 3, 3],
}
}
pub fn m2() -> Self {
Self {
channels: [128, 192, 224],
blocks: [1, 2, 3],
heads: [4, 3, 2],
kernels: [7, 5, 3, 3],
}
}
pub fn m3() -> Self {
Self {
channels: [128, 240, 320],
blocks: [1, 2, 3],
heads: [4, 3, 4],
kernels: [5, 5, 5, 5],
}
}
pub fn m4() -> Self {
Self {
channels: [128, 256, 384],
blocks: [1, 2, 3],
heads: [4, 4, 4],
kernels: [7, 5, 3, 3],
}
}
pub fn m5() -> Self {
Self {
channels: [192, 288, 384],
blocks: [1, 3, 4],
heads: [3, 3, 4],
kernels: [7, 5, 3, 3],
}
}
}
fn efficientvit_stemblock(
in_channels: usize,
out_channels: usize,
vb: VarBuilder,
) -> Result<Func<'static>> {
let conv2d_cfg = Conv2dConfig {
stride: 2,
padding: 1,
..Default::default()
};
let bn = batch_norm(out_channels, 1e-5, vb.pp("bn"))?;
let conv = conv2d_no_bias(in_channels, out_channels, 3, conv2d_cfg, vb.pp("conv"))?;
Ok(Func::new(move |xs| {
let xs = xs.apply(&conv)?.apply_t(&bn, false)?;
Ok(xs)
}))
}
fn efficientvit_stem(dim: usize, vb: VarBuilder) -> Result<Func<'static>> {
let conv1 = efficientvit_stemblock(3, dim / 8, vb.pp("conv1"))?;
let conv2 = efficientvit_stemblock(dim / 8, dim / 4, vb.pp("conv2"))?;
let conv3 = efficientvit_stemblock(dim / 4, dim / 2, vb.pp("conv3"))?;
let conv4 = efficientvit_stemblock(dim / 2, dim, vb.pp("conv4"))?;
Ok(Func::new(move |xs| {
let xs = xs
.apply(&conv1)?
.relu()?
.apply(&conv2)?
.relu()?
.apply(&conv3)?
.relu()?
.apply(&conv4)?;
Ok(xs)
}))
}
fn depthwise_conv(
channels: usize,
kernel: usize,
stride: usize,
padding: usize,
vb: VarBuilder,
) -> Result<Func<'static>> {
let conv2d_cfg = Conv2dConfig {
stride,
padding,
groups: channels,
..Default::default()
};
let bn = batch_norm(channels, 1e-5, vb.pp("bn"))?;
let conv = conv2d_no_bias(channels, channels, kernel, conv2d_cfg, vb.pp("conv"))?;
Ok(Func::new(move |xs| xs.apply(&conv)?.apply_t(&bn, false)))
}
fn pointwise_conv(
in_channels: usize,
out_channels: usize,
vb: VarBuilder,
) -> Result<Func<'static>> {
let conv2d_cfg = Conv2dConfig {
..Default::default()
};
let bn = batch_norm(out_channels, 1e-5, vb.pp("bn"))?;
let conv = conv2d_no_bias(in_channels, out_channels, 1, conv2d_cfg, vb.pp("conv"))?;
Ok(Func::new(move |xs| xs.apply(&conv)?.apply_t(&bn, false)))
}
fn conv_mlp(in_channels: usize, out_channels: usize, vb: VarBuilder) -> Result<Func<'static>> {
let pw1 = pointwise_conv(in_channels, out_channels, vb.pp("pw1"))?;
let pw2 = pointwise_conv(out_channels, in_channels, vb.pp("pw2"))?;
Ok(Func::new(move |xs| {
let xs = xs.apply(&pw1)?.relu()?.apply(&pw2)?;
Ok(xs)
}))
}
// Fixed per-stage resolutions
const RESOLUTIONS: [usize; 3] = [14, 7, 4];
// Attention block
fn efficientvit_attn(
cfg: &Config,
stage: usize,
in_channels: usize,
vb: VarBuilder,
) -> Result<Func<'static>> {
let cga = cascaded_group_attn(cfg, stage, in_channels, vb)?;
Ok(Func::new(move |xs| {
let mut xs = xs.clone();
let (b, c, h, w) = xs.dims4()?;
let win_res = 7; // Fixed window resolution
let pad_b = (win_res - h % win_res) % win_res;
let pad_r = (win_res - w % win_res) % win_res;
let ph = h + pad_b;
let pw = w + pad_r;
let nh = ph / win_res;
let nw = pw / win_res;
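        // Pad the feature map to a multiple of the window size and split it into
        // non-overlapping win_res x win_res windows so the attention below stays local.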
if RESOLUTIONS[stage] > win_res {
xs = xs.permute((0, 2, 3, 1))?;
xs = xs.pad_with_zeros(D::Minus1, 0, pad_r)?;
xs = xs.pad_with_zeros(D::Minus2, 0, pad_b)?;
xs = xs
.reshape((b, nh, win_res, nw, win_res, c))?
.transpose(2, 3)?;
xs = xs
.reshape((b * nh * nw, win_res, win_res, c))?
.permute((0, 3, 1, 2))?;
}
xs = xs.apply(&cga)?;
if RESOLUTIONS[stage] > win_res {
xs = xs
.permute((0, 2, 3, 1))?
.reshape((b, nh, nw, win_res, win_res, c))?;
xs = xs.transpose(2, 3)?.reshape((b, ph, pw, c))?;
xs = xs.permute((0, 3, 1, 2))?;
}
Ok(xs)
}))
}
// Cascaded group attention
fn cascaded_group_attn(
cfg: &Config,
stage: usize,
in_channels: usize,
vb: VarBuilder,
) -> Result<Func<'static>> {
let heads = cfg.heads[stage];
let key_dim = 16;
let val_dim = in_channels / heads;
let scale = (key_dim as f64).powf(-0.5);
let mut dws = Vec::with_capacity(heads);
let mut qkvs = Vec::with_capacity(heads);
for i in 0..heads {
dws.push(depthwise_conv(
key_dim,
cfg.kernels[i],
1,
cfg.kernels[i] / 2,
vb.pp(format!("dws.{i}")),
)?);
qkvs.push(pointwise_conv(
in_channels / heads,
in_channels / heads + 2 * key_dim,
vb.pp(format!("qkvs.{i}")),
)?);
}
let proj = pointwise_conv(in_channels, in_channels, vb.pp("proj.1"))?;
Ok(Func::new(move |xs| {
let (b, _, h, w) = xs.dims4()?;
let feats_in = xs.chunk(heads, 1)?;
let mut feats_out = Vec::with_capacity(heads);
let mut feat = feats_in[0].clone();
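        // Cascade: each head receives its own input chunk plus the previous head's output.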
for i in 0..heads {
if i > 0 {
feat = (&feat + &feats_in[i])?;
}
feat = feat.apply(&qkvs[i])?;
let res = feat.reshape((b, (), h, w))?;
let q = res.narrow(1, 0, key_dim)?;
let k = res.narrow(1, key_dim, key_dim)?;
let v = res.narrow(1, 2 * key_dim, val_dim)?;
let q = q.apply(&dws[i])?;
let q = q.flatten_from(2)?;
let k = k.flatten_from(2)?;
let v = v.flatten_from(2)?;
let q = (q * scale)?;
let att = q.transpose(D::Minus2, D::Minus1)?.matmul(&k)?;
let att = softmax(&att, D::Minus1)?;
feat = v.matmul(&att.transpose(D::Minus2, D::Minus1)?)?;
feat = feat.reshape((b, val_dim, h, w))?;
feats_out.push(feat.clone());
}
let xs = Tensor::cat(&feats_out, 1)?;
let xs = xs.relu()?.apply(&proj)?;
Ok(xs)
}))
}
// Used by the downsampling layer
fn squeeze_and_excitation(
in_channels: usize,
squeeze_channels: usize,
vb: VarBuilder,
) -> Result<Func<'static>> {
let conv2d_cfg = Conv2dConfig {
..Default::default()
};
let fc1 = conv2d(in_channels, squeeze_channels, 1, conv2d_cfg, vb.pp("fc1"))?;
let fc2 = conv2d(squeeze_channels, in_channels, 1, conv2d_cfg, vb.pp("fc2"))?;
Ok(Func::new(move |xs| {
let residual = xs;
let xs = xs.mean_keepdim(D::Minus2)?.mean_keepdim(D::Minus1)?;
let xs = sigmoid(&xs.apply(&fc1)?.relu()?.apply(&fc2)?)?;
residual.broadcast_mul(&xs)
}))
}
// Used by the downsampling layer
fn patchmerge(in_channels: usize, out_channels: usize, vb: VarBuilder) -> Result<Func<'static>> {
let dim = in_channels;
let hid_dim = in_channels * 4;
let conv1 = pointwise_conv(dim, hid_dim, vb.pp("conv1"))?;
let conv2 = depthwise_conv(hid_dim, 3, 2, 1, vb.pp("conv2"))?;
let conv3 = pointwise_conv(hid_dim, out_channels, vb.pp("conv3"))?;
let se = squeeze_and_excitation(hid_dim, hid_dim / 4, vb.pp("se"))?;
Ok(Func::new(move |xs| {
let xs = xs
.apply(&conv1)?
.relu()?
.apply(&conv2)?
.relu()?
.apply(&se)?
.apply(&conv3)?;
Ok(xs)
}))
}
// Used by the downsampling layer
fn res(dim: usize, vb: VarBuilder) -> Result<Func<'static>> {
let dw = depthwise_conv(dim, 3, 1, 1, vb.pp("0.m"))?;
let mlp = conv_mlp(dim, dim * 2, vb.pp("1.m"))?;
Ok(Func::new(move |xs| {
let mut xs = xs.clone();
xs = (&xs + &xs.apply(&dw)?)?;
xs = (&xs + &xs.apply(&mlp)?)?;
Ok(xs)
}))
}
// Downsampling
fn efficientvit_downsample(
in_channels: usize,
out_channels: usize,
vb: VarBuilder,
) -> Result<Func<'static>> {
let res1 = res(in_channels, vb.pp("res1"))?;
let res2 = res(out_channels, vb.pp("res2"))?;
let patchmerge = patchmerge(in_channels, out_channels, vb.pp("patchmerge"))?;
Ok(Func::new(move |xs| {
let xs = xs.apply(&res1)?.apply(&patchmerge)?.apply(&res2)?;
Ok(xs)
}))
}
fn efficientvit_block(
cfg: &Config,
stage: usize,
dim: usize,
vb: VarBuilder,
) -> Result<Func<'static>> {
let dw0 = depthwise_conv(dim, 3, 1, 1, vb.pp("dw0.m"))?;
let dw1 = depthwise_conv(dim, 3, 1, 1, vb.pp("dw1.m"))?;
let ffn0 = conv_mlp(dim, dim * 2, vb.pp("ffn0.m"))?;
let ffn1 = conv_mlp(dim, dim * 2, vb.pp("ffn1.m"))?;
let attn = efficientvit_attn(cfg, stage, dim, vb.pp("mixer.m.attn"))?;
Ok(Func::new(move |xs| {
let mut xs = xs.clone();
xs = (&xs + &xs.apply(&dw0)?)?;
xs = (&xs + &xs.apply(&ffn0)?)?;
xs = (&xs + &xs.apply(&attn)?)?;
xs = (&xs + &xs.apply(&dw1)?)?;
xs = (&xs + &xs.apply(&ffn1)?)?;
Ok(xs)
}))
}
// Each stage is made of blocks. There is a downsampling layer between stages.
fn efficientvit_stage(cfg: &Config, stage: usize, vb: VarBuilder) -> Result<Func<'static>> {
let nblocks = cfg.blocks[stage];
let mut blocks = Vec::with_capacity(nblocks + 1);
let in_channels = if stage > 0 {
cfg.channels[stage - 1]
} else {
cfg.channels[0]
};
let out_channels = cfg.channels[stage];
if stage > 0 {
blocks.push(efficientvit_downsample(
in_channels,
out_channels,
vb.pp("downsample"),
)?);
}
for i in 0..nblocks {
blocks.push(efficientvit_block(
cfg,
stage,
out_channels,
vb.pp(format!("blocks.{i}")),
)?);
}
Ok(Func::new(move |xs| {
let mut xs = xs.clone();
for block in blocks.iter() {
xs = xs.apply(block)?
}
Ok(xs)
}))
}
// Classification head.
fn efficientvit_head(outputs: usize, nclasses: usize, vb: VarBuilder) -> Result<Func<'static>> {
let norm = batch_norm(outputs, 1e-6, vb.pp("bn"))?;
let linear = linear(outputs, nclasses, vb.pp("linear"))?;
Ok(Func::new(move |xs| {
xs.apply_t(&norm, false)?.apply(&linear)
}))
}
// Build an EfficientViT model for a given configuration.
fn efficientvit_model(
config: &Config,
nclasses: Option<usize>,
vb: VarBuilder,
) -> Result<Func<'static>> {
let cls = match nclasses {
None => None,
Some(nclasses) => {
let outputs = config.channels[2];
let head = efficientvit_head(outputs, nclasses, vb.pp("head"))?;
Some(head)
}
};
let stem_dim = config.channels[0];
let stem = efficientvit_stem(stem_dim, vb.pp("patch_embed"))?;
let vb = vb.pp("stages");
let stage1 = efficientvit_stage(config, 0, vb.pp(0))?;
let stage2 = efficientvit_stage(config, 1, vb.pp(1))?;
let stage3 = efficientvit_stage(config, 2, vb.pp(2))?;
Ok(Func::new(move |xs| {
let xs = xs
.apply(&stem)?
.apply(&stage1)?
.apply(&stage2)?
.apply(&stage3)?
.mean(D::Minus2)?
.mean(D::Minus1)?;
match &cls {
None => Ok(xs),
Some(cls) => xs.apply(cls),
}
}))
}
pub fn efficientvit(cfg: &Config, nclasses: usize, vb: VarBuilder) -> Result<Func<'static>> {
efficientvit_model(cfg, Some(nclasses), vb)
}
pub fn efficientvit_no_final_layer(cfg: &Config, vb: VarBuilder) -> Result<Func<'static>> {
efficientvit_model(cfg, None, vb)
}
| candle/candle-transformers/src/models/efficientvit.rs/0 | {
"file_path": "candle/candle-transformers/src/models/efficientvit.rs",
"repo_id": "candle",
"token_count": 6985
} | 41 |
use crate::models::with_tracing::{linear_no_bias, Embedding, Linear};
/// MPT model used by replit-code-v1_5-3b
/// https://huggingface.co/replit/replit-code-v1_5-3b/blob/main/modeling_mpt.py
use candle::{DType, Device, IndexOp, Module, Result, Tensor, D};
use candle_nn::{layer_norm, LayerNorm, VarBuilder};
// https://huggingface.co/replit/replit-code-v1_5-3b/blob/main/configuration_mpt.py
#[derive(Debug, Clone, PartialEq)]
pub struct Config {
pub(crate) d_model: usize,
pub(crate) n_heads: usize,
pub(crate) n_layers: usize,
pub(crate) expansion_ratio: usize,
pub(crate) max_seq_len: usize,
pub(crate) vocab_size: usize,
pub(crate) kv_n_heads: usize,
pub(crate) attn_prefix_lm: bool,
pub(crate) attn_alibi: bool,
pub(crate) attn_alibi_bias_max: usize,
}
impl Config {
pub fn replit_code_v1_5_3b() -> Self {
Self {
d_model: 3072,
n_heads: 24,
n_layers: 32,
expansion_ratio: 4,
max_seq_len: 4096,
vocab_size: 32768,
kv_n_heads: 8,
attn_prefix_lm: false,
attn_alibi: true,
attn_alibi_bias_max: 8,
}
}
pub fn is_causal(&self) -> bool {
!self.attn_prefix_lm
}
}
#[derive(Debug, Clone)]
struct GroupedQueryAttention {
wqkv: Linear,
out_proj: Linear,
kv_cache: Option<(Tensor, Tensor)>,
softmax_scale: f64,
head_dim: usize,
d_model: usize,
n_heads: usize,
kv_n_heads: usize,
attn_bias: Tensor,
span: tracing::Span,
}
impl GroupedQueryAttention {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let head_dim = cfg.d_model / cfg.n_heads;
let wqkv_size = cfg.d_model + 2 * cfg.kv_n_heads * head_dim;
let wqkv = linear_no_bias(cfg.d_model, wqkv_size, vb.pp("Wqkv"))?;
let softmax_scale = 1f64 / (head_dim as f64).sqrt();
let out_proj = linear_no_bias(cfg.d_model, cfg.d_model, vb.pp("out_proj"))?;
let attn_bias = build_alibi_bias(cfg)?.to_device(vb.device())?;
Ok(Self {
wqkv,
out_proj,
kv_cache: None,
softmax_scale,
head_dim,
d_model: cfg.d_model,
n_heads: cfg.n_heads,
kv_n_heads: cfg.kv_n_heads,
attn_bias,
span: tracing::span!(tracing::Level::TRACE, "gqa"),
})
}
fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> {
let _enter = self.span.enter();
let (b_size, seq_len, _n_embd) = xs.dims3()?;
let qkv = self.wqkv.forward(xs)?;
let query = qkv.narrow(2, 0, self.d_model)?;
let kv_size = self.kv_n_heads * self.head_dim;
let key = qkv.narrow(2, self.d_model, kv_size)?;
let value = qkv.narrow(2, self.d_model + kv_size, kv_size)?;
// scaled_multihead_dot_product_attention
let query = query
.reshape((b_size, seq_len, self.n_heads, ()))?
.transpose(1, 2)?; // b,h,s,d
let key = key
.reshape((b_size, seq_len, self.kv_n_heads, ()))?
.permute((0, 2, 3, 1))?; // b,h,d,s
let value = value
.reshape((b_size, seq_len, self.kv_n_heads, ()))?
.transpose(1, 2)?; // b,h,s,d
let (key, value) = match &self.kv_cache {
None => (key, value),
Some((prev_k, prev_v)) => {
let k = Tensor::cat(&[prev_k, &key], 3)?;
let v = Tensor::cat(&[prev_v, &value], 2)?;
(k, v)
}
};
self.kv_cache = Some((key.clone(), value.clone()));
let query = query.contiguous()?;
let key = repeat_kv(key, self.n_heads / self.kv_n_heads)?.contiguous()?;
let value = repeat_kv(value, self.n_heads / self.kv_n_heads)?.contiguous()?;
let attn_weights = (query.matmul(&key)? * self.softmax_scale)?;
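        // Slice the precomputed ALiBi bias down to the current query/key lengths, keeping the
        // trailing rows/columns so cached positions stay aligned with the new tokens.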
let attn_bias = {
let s_q = query.dim(D::Minus2)?;
let s_k = key.dim(D::Minus1)?;
let (_, _, a_q, a_k) = self.attn_bias.dims4()?;
let start_q = a_q.saturating_sub(s_q);
let start_k = a_k.saturating_sub(s_k);
self.attn_bias.i((.., .., start_q.., start_k..))?
};
let attn_weights = attn_weights.broadcast_add(&attn_bias)?;
let attn_weights = match mask {
None => attn_weights,
Some(mask) => masked_fill(
&attn_weights,
&mask.broadcast_as(attn_weights.shape())?,
f32::NEG_INFINITY,
)?,
};
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
let attn_output = attn_weights
.matmul(&value)?
.transpose(1, 2)?
.flatten_from(D::Minus2)?;
let out = attn_output.apply(&self.out_proj)?;
Ok(out)
}
}
// This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep).
// The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to
// (batch, num_attention_heads, seqlen, head_dim)
pub(crate) fn repeat_kv(xs: Tensor, n_rep: usize) -> Result<Tensor> {
if n_rep == 1 {
Ok(xs)
} else {
let (b_sz, num_kv_heads, seq_len, head_dim) = xs.dims4()?;
xs.unsqueeze(2)?
.expand((b_sz, num_kv_heads, n_rep, seq_len, head_dim))?
.reshape((b_sz, num_kv_heads * n_rep, seq_len, head_dim))
}
}
#[derive(Debug, Clone)]
struct Ffn {
up_proj: Linear,
down_proj: Linear,
}
impl Ffn {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden = cfg.d_model * cfg.expansion_ratio;
let up_proj = linear_no_bias(cfg.d_model, hidden, vb.pp("up_proj"))?;
let down_proj = linear_no_bias(hidden, cfg.d_model, vb.pp("down_proj"))?;
Ok(Self { up_proj, down_proj })
}
}
impl Module for Ffn {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.up_proj)?.gelu_erf()?.apply(&self.down_proj)
}
}
#[derive(Debug, Clone)]
struct MPTBlock {
norm1: LayerNorm, // Do we need the low-precision variant?
attn: GroupedQueryAttention,
norm2: LayerNorm,
ffn: Ffn,
}
impl MPTBlock {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let ln_cfg = candle_nn::LayerNormConfig {
affine: false,
..Default::default()
};
let norm1 = layer_norm(cfg.d_model, ln_cfg, vb.pp("norm_1"))?;
let norm2 = layer_norm(cfg.d_model, ln_cfg, vb.pp("norm_2"))?;
let attn = GroupedQueryAttention::new(cfg, vb.pp("attn"))?;
let ffn = Ffn::new(cfg, vb.pp("ffn"))?;
Ok(Self {
norm1,
attn,
norm2,
ffn,
})
}
fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> {
let residual = xs;
let xs = xs.apply(&self.norm1)?;
let xs = self.attn.forward(&xs, mask)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = xs.apply(&self.norm2)?.apply(&self.ffn)?;
xs + residual
}
}
pub(crate) fn build_alibi_bias(cfg: &Config) -> Result<Tensor> {
let full = !cfg.is_causal();
let seq_len = cfg.max_seq_len;
let alibi_bias = Tensor::arange(1 - seq_len as i64, 1, &Device::Cpu)?;
let alibi_bias = if full {
let a1 = alibi_bias.reshape((1, 1, 1, seq_len))?;
let a2 = alibi_bias.reshape((1, 1, seq_len, 1))?;
a1.broadcast_sub(&a2)?.abs()?.neg()?
} else {
alibi_bias.reshape((1, 1, 1, seq_len))?
};
let mut n_heads2 = 1;
while n_heads2 < cfg.n_heads {
n_heads2 *= 2
}
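    // ALiBi slopes: 1 / 2^(i * alibi_bias_max / n_heads2) for i in 1..=n_heads2; when the head
    // count is not a power of two, the slopes are interleaved and truncated below.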
let slopes = (1..=n_heads2)
.map(|v| 1f32 / 2f32.powf((v * cfg.attn_alibi_bias_max) as f32 / n_heads2 as f32))
.collect::<Vec<_>>();
let slopes = if n_heads2 == cfg.n_heads {
slopes
} else {
slopes
.iter()
.skip(1)
.step_by(2)
.chain(slopes.iter().step_by(2))
.take(cfg.n_heads)
.cloned()
.collect::<Vec<f32>>()
};
let slopes = Tensor::new(slopes, &Device::Cpu)?.reshape((1, (), 1, 1))?;
alibi_bias.to_dtype(DType::F32)?.broadcast_mul(&slopes)
}
#[derive(Debug, Clone)]
pub struct Model {
wte: Embedding,
blocks: Vec<MPTBlock>,
norm_f: LayerNorm,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let wte = Embedding::new(cfg.vocab_size, cfg.d_model, vb.pp("wte"))?;
let vb_b = vb.pp("blocks");
let mut blocks = Vec::with_capacity(cfg.n_layers);
for i in 0..cfg.n_layers {
let block = MPTBlock::new(cfg, vb_b.pp(i))?;
blocks.push(block)
}
let ln_cfg = candle_nn::LayerNormConfig {
affine: false,
..Default::default()
};
let norm_f = candle_nn::layer_norm(cfg.d_model, ln_cfg, vb.pp("norm_f"))?;
Ok(Self {
wte,
blocks,
norm_f,
})
}
pub fn forward(&mut self, xs: &Tensor) -> Result<Tensor> {
let (_b_size, seq_len) = xs.dims2()?;
let mut xs = xs.apply(&self.wte)?;
let mask = if seq_len <= 1 {
None
} else {
Some(get_mask(seq_len, xs.device())?)
};
for block in self.blocks.iter_mut() {
xs = block.forward(&xs, mask.as_ref())?;
}
let xs = xs.apply(&self.norm_f)?;
let logits = xs
.narrow(1, seq_len - 1, 1)?
.squeeze(1)?
.matmul(&self.wte.embeddings().t()?)?
.squeeze(1)?;
Ok(logits)
}
}
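// Causal mask: entry (i, j) is 1 when j > i, i.e. future positions that get filled with -inf.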
pub(crate) fn get_mask(size: usize, device: &Device) -> Result<Tensor> {
let mask: Vec<_> = (0..size)
.flat_map(|i| (0..size).map(move |j| u8::from(j > i)))
.collect();
Tensor::from_slice(&mask, (size, size), device)
}
pub(crate) fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> {
let shape = mask.shape();
let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?;
let m = mask.where_cond(&on_true, on_false)?;
Ok(m)
}
| candle/candle-transformers/src/models/mpt.rs/0 | {
"file_path": "candle/candle-transformers/src/models/mpt.rs",
"repo_id": "candle",
"token_count": 5485
} | 42 |
//! RepVGG inference implementation
//!
//! See "RepVGG: Making VGG-style ConvNets Great Again" Ding et al. 2021
//! https://arxiv.org/abs/2101.03697
use candle::{Result, Tensor, D};
use candle_nn::{
batch_norm, conv2d_no_bias, linear, BatchNorm, Conv2d, Conv2dConfig, Func, VarBuilder,
};
const CHANNELS_PER_STAGE: [usize; 5] = [64, 64, 128, 256, 512];
#[derive(Clone)]
pub struct Config {
a: f32,
b: f32,
groups: usize,
stages: [usize; 4],
}
impl Config {
pub fn a0() -> Self {
Self {
a: 0.75,
b: 2.5,
groups: 1,
stages: [2, 4, 14, 1],
}
}
pub fn a1() -> Self {
Self {
a: 1.0,
b: 2.5,
groups: 1,
stages: [2, 4, 14, 1],
}
}
pub fn a2() -> Self {
Self {
a: 1.5,
b: 2.75,
groups: 1,
stages: [2, 4, 14, 1],
}
}
pub fn b0() -> Self {
Self {
a: 1.0,
b: 2.5,
groups: 1,
stages: [4, 6, 16, 1],
}
}
pub fn b1() -> Self {
Self {
a: 2.0,
b: 4.0,
groups: 1,
stages: [4, 6, 16, 1],
}
}
pub fn b2() -> Self {
Self {
a: 2.5,
b: 5.0,
groups: 1,
stages: [4, 6, 16, 1],
}
}
pub fn b3() -> Self {
Self {
a: 3.0,
b: 5.0,
groups: 1,
stages: [4, 6, 16, 1],
}
}
pub fn b1g4() -> Self {
Self {
a: 2.0,
b: 4.0,
groups: 4,
stages: [4, 6, 16, 1],
}
}
pub fn b2g4() -> Self {
Self {
a: 2.5,
b: 5.0,
groups: 4,
stages: [4, 6, 16, 1],
}
}
pub fn b3g4() -> Self {
Self {
a: 3.0,
b: 5.0,
groups: 4,
stages: [4, 6, 16, 1],
}
}
}
// fuses a convolutional kernel and a batchnorm layer into a convolutional layer
// based on the _fuse_bn_tensor method in timm
// see https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/byobnet.py#L602
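// The fused parameters are w' = w * gamma / sqrt(var + eps) and b' = beta - mean * gamma / sqrt(var + eps).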
fn fuse_conv_bn(weights: &Tensor, bn: BatchNorm) -> Result<(Tensor, Tensor)> {
let (gamma, beta) = bn.weight_and_bias().unwrap();
let mu = bn.running_mean();
let sigma = (bn.running_var() + bn.eps())?.sqrt();
let gps = (gamma / sigma)?;
let bias = (beta - mu * &gps)?;
let weights = weights.broadcast_mul(&gps.reshape(((), 1, 1, 1))?)?;
Ok((weights, bias))
}
// A RepVGG layer has a different training time and inference time architecture.
// The latter is a simple and efficient equivalent transformation of the former
// realized by a structural reparameterization technique, where 3x3 and 1x1 convolutions
// along with identity branches and batchnorm layers are fused into a single 3x3 convolution.
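// The fused kernel is w = w_3x3 + pad(w_1x1) + w_identity with bias b = b_3x3 + b_1x1 + b_identity.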
fn repvgg_layer(
has_identity: bool,
dim: usize,
stride: usize,
in_channels: usize,
out_channels: usize,
groups: usize,
vb: VarBuilder,
) -> Result<Func<'static>> {
let conv2d_cfg = Conv2dConfig {
stride,
groups,
padding: 1,
..Default::default()
};
// read and reparameterize the 1x1 conv and bn into w1 and b1
// based on https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/byobnet.py#L543
let conv1x1_bn = batch_norm(dim, 1e-5, vb.pp("conv_1x1.bn"))?;
let conv1x1 = conv2d_no_bias(
in_channels,
out_channels,
1,
conv2d_cfg,
vb.pp("conv_1x1.conv"),
)?;
let (mut w1, b1) = fuse_conv_bn(conv1x1.weight(), conv1x1_bn)?;
// resize to 3x3
w1 = w1.pad_with_zeros(D::Minus1, 1, 1)?;
w1 = w1.pad_with_zeros(D::Minus2, 1, 1)?;
// read and reparameterize the 3x3 conv and bn into w3 and b3
let convkxk_bn = batch_norm(dim, 1e-5, vb.pp("conv_kxk.bn"))?;
let conv3x3 = conv2d_no_bias(
in_channels,
out_channels,
3,
conv2d_cfg,
vb.pp("conv_kxk.conv"),
)?;
let (w3, b3) = fuse_conv_bn(conv3x3.weight(), convkxk_bn)?;
let mut w = (w1 + w3)?;
let mut b = (b1 + b3)?;
// read and reparameterize the identity bn into wi and bi
if has_identity {
let identity_bn = batch_norm(dim, 1e-5, vb.pp("identity"))?;
// create a 3x3 convolution equivalent to the identity branch
let mut weights: Vec<f32> = vec![0.0; conv3x3.weight().elem_count()];
// https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/byobnet.py#L620
let in_dim = in_channels / groups;
for i in 0..in_channels {
weights[i * in_dim * 3 * 3 + (i % in_dim) * 3 * 3 + 4] = 1.0;
}
let weights = &Tensor::from_vec(weights, w.shape(), w.device())?;
let (wi, bi) = fuse_conv_bn(weights, identity_bn)?;
w = (w + wi)?;
b = (b + bi)?;
}
// create the 3x3 conv equivalent to the sum of 3x3, 1x1 and identity branches
let reparam_conv = Conv2d::new(w, Some(b), conv2d_cfg);
Ok(Func::new(move |xs| {
let xs = xs.apply(&reparam_conv)?.relu()?;
Ok(xs)
}))
}
// Get the number of output channels per stage taking into account the multipliers
fn output_channels_per_stage(a: f32, b: f32, stage: usize) -> usize {
let channels = CHANNELS_PER_STAGE[stage] as f32;
match stage {
0 => std::cmp::min(64, (channels * a) as usize),
4 => (channels * b) as usize,
_ => (channels * a) as usize,
}
}
// Each stage is made of layers. The first layer always downsamples with stride 2.
// All but the first layer have a residual connection.
// The G4 variants have a groupwise convolution instead of a dense one on odd layers
// counted across stage boundaries, so we keep track of which layer we are in the
// full model.
fn repvgg_stage(cfg: &Config, idx: usize, vb: VarBuilder) -> Result<Func<'static>> {
let nlayers = cfg.stages[idx - 1];
let mut layers = Vec::with_capacity(nlayers);
let prev_layers: usize = cfg.stages[..idx - 1].iter().sum();
let out_channels_prev = output_channels_per_stage(cfg.a, cfg.b, idx - 1);
let out_channels = output_channels_per_stage(cfg.a, cfg.b, idx);
for layer_idx in 0..nlayers {
let (has_identity, stride, in_channels) = if layer_idx == 0 {
(false, 2, out_channels_prev)
} else {
(true, 1, out_channels)
};
let groups = if (prev_layers + layer_idx) % 2 == 1 {
cfg.groups
} else {
1
};
layers.push(repvgg_layer(
has_identity,
out_channels,
stride,
in_channels,
out_channels,
groups,
vb.pp(layer_idx),
)?)
}
Ok(Func::new(move |xs| {
let mut xs = xs.clone();
for layer in layers.iter() {
xs = xs.apply(layer)?
}
Ok(xs)
}))
}
// Build a RepVGG model for a given configuration.
fn repvgg_model(config: &Config, nclasses: Option<usize>, vb: VarBuilder) -> Result<Func<'static>> {
let cls = match nclasses {
None => None,
Some(nclasses) => {
let outputs = output_channels_per_stage(config.a, config.b, 4);
let linear = linear(outputs, nclasses, vb.pp("head.fc"))?;
Some(linear)
}
};
let stem_dim = output_channels_per_stage(config.a, config.b, 0);
let stem = repvgg_layer(false, stem_dim, 2, 3, stem_dim, 1, vb.pp("stem"))?;
let vb = vb.pp("stages");
let stage1 = repvgg_stage(config, 1, vb.pp(0))?;
let stage2 = repvgg_stage(config, 2, vb.pp(1))?;
let stage3 = repvgg_stage(config, 3, vb.pp(2))?;
let stage4 = repvgg_stage(config, 4, vb.pp(3))?;
Ok(Func::new(move |xs| {
let xs = xs
.apply(&stem)?
.apply(&stage1)?
.apply(&stage2)?
.apply(&stage3)?
.apply(&stage4)?
.mean(D::Minus1)?
.mean(D::Minus1)?;
match &cls {
None => Ok(xs),
Some(cls) => xs.apply(cls),
}
}))
}
pub fn repvgg(cfg: &Config, nclasses: usize, vb: VarBuilder) -> Result<Func<'static>> {
repvgg_model(cfg, Some(nclasses), vb)
}
pub fn repvgg_no_final_layer(cfg: &Config, vb: VarBuilder) -> Result<Func<'static>> {
repvgg_model(cfg, None, vb)
}
| candle/candle-transformers/src/models/repvgg.rs/0 | {
"file_path": "candle/candle-transformers/src/models/repvgg.rs",
"repo_id": "candle",
"token_count": 4371
} | 43 |
use candle::{Result, Tensor, D};
use candle_nn as nn;
use candle_nn::Module;
#[derive(Debug)]
pub struct TimestepEmbedding {
linear_1: nn::Linear,
linear_2: nn::Linear,
}
impl TimestepEmbedding {
// act_fn: "silu"
pub fn new(vs: nn::VarBuilder, channel: usize, time_embed_dim: usize) -> Result<Self> {
let linear_1 = nn::linear(channel, time_embed_dim, vs.pp("linear_1"))?;
let linear_2 = nn::linear(time_embed_dim, time_embed_dim, vs.pp("linear_2"))?;
Ok(Self { linear_1, linear_2 })
}
}
impl Module for TimestepEmbedding {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs = nn::ops::silu(&self.linear_1.forward(xs)?)?;
self.linear_2.forward(&xs)
}
}
#[derive(Debug)]
pub struct Timesteps {
num_channels: usize,
flip_sin_to_cos: bool,
downscale_freq_shift: f64,
}
impl Timesteps {
pub fn new(num_channels: usize, flip_sin_to_cos: bool, downscale_freq_shift: f64) -> Self {
Self {
num_channels,
flip_sin_to_cos,
downscale_freq_shift,
}
}
}
impl Module for Timesteps {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let half_dim = (self.num_channels / 2) as u32;
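        // Standard sinusoidal timestep embedding: exp(-ln(10000) * i / (half_dim - shift)) scaled
        // by the timestep, then concatenated as [cos, sin] (or [sin, cos] when not flipped).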
let exponent = (Tensor::arange(0, half_dim, xs.device())?.to_dtype(candle::DType::F32)?
* -f64::ln(10000.))?;
let exponent = (exponent / (half_dim as f64 - self.downscale_freq_shift))?;
let emb = exponent.exp()?.to_dtype(xs.dtype())?;
// emb = timesteps[:, None].float() * emb[None, :]
let emb = xs.unsqueeze(D::Minus1)?.broadcast_mul(&emb.unsqueeze(0)?)?;
let (cos, sin) = (emb.cos()?, emb.sin()?);
let emb = if self.flip_sin_to_cos {
Tensor::cat(&[&cos, &sin], D::Minus1)?
} else {
Tensor::cat(&[&sin, &cos], D::Minus1)?
};
if self.num_channels % 2 == 1 {
emb.pad_with_zeros(D::Minus2, 0, 1)
} else {
Ok(emb)
}
}
}
| candle/candle-transformers/src/models/stable_diffusion/embeddings.rs/0 | {
"file_path": "candle/candle-transformers/src/models/stable_diffusion/embeddings.rs",
"repo_id": "candle",
"token_count": 1008
} | 44 |
pub mod audio;
pub mod model;
pub mod quantized_model;
use serde::Deserialize;
// The names in comments correspond to the original implementation:
// https://github.com/openai/whisper/blob/f572f2161ba831bae131364c3bffdead7af6d210/whisper/model.py#L17
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct Config {
pub num_mel_bins: usize, // n_mels
pub max_source_positions: usize, // n_audio_ctx
pub d_model: usize, // n_audio_state
pub encoder_attention_heads: usize, // n_audio_head
pub encoder_layers: usize, // n_audio_layer
pub vocab_size: usize, // n_vocab
pub max_target_positions: usize, // n_text_ctx
// pub n_text_state: usize,
pub decoder_attention_heads: usize, // n_text_head
pub decoder_layers: usize, // n_text_layer
#[serde(default)]
pub suppress_tokens: Vec<u32>,
}
pub const DTYPE: candle::DType = candle::DType::F32;
// Audio parameters.
pub const SAMPLE_RATE: usize = 16000;
pub const N_FFT: usize = 400;
pub const HOP_LENGTH: usize = 160;
pub const CHUNK_LENGTH: usize = 30;
pub const N_SAMPLES: usize = CHUNK_LENGTH * SAMPLE_RATE; // 480000 samples in a 30-second chunk
pub const N_FRAMES: usize = N_SAMPLES / HOP_LENGTH; // 3000 frames in a mel spectrogram input
pub const NO_SPEECH_THRESHOLD: f64 = 0.6;
pub const LOGPROB_THRESHOLD: f64 = -1.0;
pub const TEMPERATURES: [f64; 6] = [0.0, 0.2, 0.4, 0.6, 0.8, 1.0];
pub const COMPRESSION_RATIO_THRESHOLD: f64 = 2.4;
// Tokenizer dependent bits.
pub const SOT_TOKEN: &str = "<|startoftranscript|>";
pub const TRANSCRIBE_TOKEN: &str = "<|transcribe|>";
pub const TRANSLATE_TOKEN: &str = "<|translate|>";
pub const NO_TIMESTAMPS_TOKEN: &str = "<|notimestamps|>";
pub const EOT_TOKEN: &str = "<|endoftext|>";
pub const NO_SPEECH_TOKENS: [&str; 2] = ["<|nocaptions|>", "<|nospeech|>"];
| candle/candle-transformers/src/models/whisper/mod.rs/0 | {
"file_path": "candle/candle-transformers/src/models/whisper/mod.rs",
"repo_id": "candle",
"token_count": 812
} | 45 |
use candle::quantized::QTensor;
use candle::{Device, Result, Shape};
use std::sync::Arc;
// VarBuilder specialized for QTensors
#[derive(Clone)]
pub struct VarBuilder {
data: Arc<std::collections::HashMap<String, Arc<QTensor>>>,
path: Vec<String>,
device: Device,
}
impl VarBuilder {
pub fn from_gguf<P: AsRef<std::path::Path>>(p: P, device: &Device) -> Result<Self> {
let mut file = std::fs::File::open(p)?;
let content = candle::quantized::gguf_file::Content::read(&mut file)?;
let mut data = std::collections::HashMap::new();
for tensor_name in content.tensor_infos.keys() {
let tensor = content.tensor(&mut file, tensor_name, device)?;
data.insert(tensor_name.to_string(), Arc::new(tensor));
}
Ok(Self {
data: Arc::new(data),
path: Vec::new(),
device: device.clone(),
})
}
pub fn from_gguf_buffer(buffer: &[u8], device: &Device) -> Result<Self> {
let mut cursor = std::io::Cursor::new(buffer);
let content = candle::quantized::gguf_file::Content::read(&mut cursor)?;
let mut data = std::collections::HashMap::new();
for tensor_name in content.tensor_infos.keys() {
let tensor = content.tensor(&mut cursor, tensor_name, device)?;
data.insert(tensor_name.to_string(), Arc::new(tensor));
}
Ok(Self {
data: Arc::new(data),
path: Vec::new(),
device: device.clone(),
})
}
pub fn pp<S: ToString>(&self, s: S) -> Self {
let mut path = self.path.clone();
path.push(s.to_string());
Self {
data: self.data.clone(),
path,
device: self.device.clone(),
}
}
fn path(&self, tensor_name: &str) -> String {
if self.path.is_empty() {
tensor_name.to_string()
} else {
[&self.path.join("."), tensor_name].join(".")
}
}
pub fn get<S: Into<Shape>>(&self, s: S, name: &str) -> Result<Arc<QTensor>> {
let path = self.path(name);
match self.data.get(&path) {
None => {
candle::bail!("cannot find tensor {name}")
}
Some(qtensor) => {
let shape = s.into();
if qtensor.shape() != &shape {
candle::bail!(
"shape mismatch for {name}, got {:?}, expected {shape:?}",
qtensor.shape()
)
}
Ok(qtensor.clone())
}
}
}
pub fn get_no_shape(&self, name: &str) -> Result<Arc<QTensor>> {
let path = self.path(name);
match self.data.get(&path) {
None => {
candle::bail!("cannot find tensor {name}")
}
Some(qtensor) => Ok(qtensor.clone()),
}
}
pub fn device(&self) -> &Device {
&self.device
}
pub fn contains_key(&self, key: &str) -> bool {
self.data.contains_key(key)
}
}
| candle/candle-transformers/src/quantized_var_builder.rs/0 | {
"file_path": "candle/candle-transformers/src/quantized_var_builder.rs",
"repo_id": "candle",
"token_count": 1559
} | 46 |
use candle::{DType, Device, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::generation::LogitsProcessor;
use candle_transformers::models::blip;
use candle_transformers::models::quantized_blip;
use candle_wasm_example_blip::console_log;
use candle_wasm_example_blip::token_output_stream::TokenOutputStream;
use js_sys::Date;
use tokenizers::Tokenizer;
use wasm_bindgen::prelude::*;
enum SelectedModel {
M(blip::BlipForConditionalGeneration),
Q(quantized_blip::BlipForConditionalGeneration),
}
impl SelectedModel {
fn text_decoder_forward(&mut self, xs: &Tensor, img_xs: &Tensor) -> Result<Tensor, JsError> {
match self {
Self::M(m) => m
.text_decoder()
.forward(xs, img_xs)
.map_err(|e| JsError::new(&e.to_string())),
Self::Q(m) => m
.text_decoder()
.forward(xs, img_xs)
.map_err(|e| JsError::new(&e.to_string())),
}
}
fn reset_kv_cache(&mut self) {
match self {
Self::M(m) => m.reset_kv_cache(),
Self::Q(m) => m.reset_kv_cache(),
}
}
}
#[wasm_bindgen]
pub struct Model {
model: SelectedModel,
tokenizer: TokenOutputStream,
}
const SEP_TOKEN_ID: u32 = 102;
#[wasm_bindgen]
impl Model {
#[wasm_bindgen(constructor)]
pub fn load(
weights: Vec<u8>,
tokenizer: Vec<u8>,
config: Vec<u8>,
quantized: bool,
) -> Result<Model, JsError> {
console_error_panic_hook::set_once();
console_log!("loading model");
let tokenizer =
Tokenizer::from_bytes(&tokenizer).map_err(|m| JsError::new(&m.to_string()))?;
let tokenizer = TokenOutputStream::new(tokenizer);
let config: blip::Config = serde_json::from_slice(&config)?;
let device = Device::Cpu;
let start = Date::now();
let model: SelectedModel = if quantized {
let vb = quantized_blip::VarBuilder::from_gguf_buffer(&weights, &device)?;
let model = quantized_blip::BlipForConditionalGeneration::new(&config, vb)?;
SelectedModel::Q(model)
} else {
let vb = VarBuilder::from_buffered_safetensors(weights, DType::F32, &device)?;
let model = blip::BlipForConditionalGeneration::new(&config, vb)?;
SelectedModel::M(model)
};
console_log!("model loaded in {:?}s", (Date::now() - start) / 1000.);
Ok(Self { model, tokenizer })
}
#[wasm_bindgen]
pub fn generate_caption_from_image(&mut self, image: Vec<u8>) -> Result<String, JsError> {
self.model.reset_kv_cache();
let device = Device::Cpu;
console_log!("loading image as tensor");
let start = Date::now();
let image: Tensor = self.load_image(image)?.to_device(&device)?;
console_log!("image loaded in {:?}s", (Date::now() - start) / 1000.);
let start = Date::now();
let image_embeds: Tensor = match &mut self.model {
SelectedModel::M(m) => image.unsqueeze(0)?.apply(m.vision_model())?,
SelectedModel::Q(m) => image.unsqueeze(0)?.apply(m.vision_model())?,
};
console_log!("image embedded in {:?}s", (Date::now() - start) / 1000.);
let mut logits_processor = LogitsProcessor::new(299792458, None, None);
let mut token_ids = vec![30522u32];
let mut text: String = "".to_string();
let start = Date::now();
for index in 0..1000 {
let context_size = if index > 0 { 1 } else { token_ids.len() };
let start_pos = token_ids.len().saturating_sub(context_size);
let input_ids = Tensor::new(&token_ids[start_pos..], &device)?.unsqueeze(0)?;
let logits = self.model.text_decoder_forward(&input_ids, &image_embeds)?;
let logits = logits.squeeze(0)?;
let logits = logits.get(logits.dim(0)? - 1)?;
let token = logits_processor.sample(&logits)?;
if token == SEP_TOKEN_ID {
break;
}
token_ids.push(token);
if let Some(t) = self.tokenizer.next_token(token)? {
text.push_str(&t);
}
}
if let Some(rest) = self
.tokenizer
.decode_rest()
.map_err(|m| JsError::new(&m.to_string()))?
{
text.push_str(&rest);
}
console_log!("caption generated in {:?}s", (Date::now() - start) / 1000.);
Ok(text)
}
}
impl Model {
fn load_image(&self, image: Vec<u8>) -> Result<Tensor, JsError> {
let device = &Device::Cpu;
let img = image::io::Reader::new(std::io::Cursor::new(image))
.with_guessed_format()?
.decode()
.map_err(|e| JsError::new(&e.to_string()))?
.resize_to_fill(384, 384, image::imageops::FilterType::Triangle);
let img = img.to_rgb8();
let data = img.into_raw();
let data = Tensor::from_vec(data, (384, 384, 3), device)?.permute((2, 0, 1))?;
let mean =
Tensor::new(&[0.48145466f32, 0.4578275, 0.40821073], device)?.reshape((3, 1, 1))?;
let std =
Tensor::new(&[0.26862954f32, 0.261_302_6, 0.275_777_1], device)?.reshape((3, 1, 1))?;
(data.to_dtype(candle::DType::F32)? / 255.)?
.broadcast_sub(&mean)?
.broadcast_div(&std)
.map_err(|e| JsError::new(&e.to_string()))
}
}
fn main() {
console_error_panic_hook::set_once();
}
| candle/candle-wasm-examples/blip/src/bin/m.rs/0 | {
"file_path": "candle/candle-wasm-examples/blip/src/bin/m.rs",
"repo_id": "candle",
"token_count": 2699
} | 47 |
// load the Candle T5 wasm module
let init, ModelConditionalGeneration;
async function fetchArrayBuffer(url) {
const cacheName = "t5-candle-cache";
const cache = await caches.open(cacheName);
const cachedResponse = await cache.match(url);
if (cachedResponse) {
const data = await cachedResponse.arrayBuffer();
return new Uint8Array(data);
}
const res = await fetch(url, { cache: "force-cache" });
cache.put(url, res.clone());
return new Uint8Array(await res.arrayBuffer());
}
class ConditionalGeneration {
static instance = {};
static async getInstance(weightsURL, tokenizerURL, configURL, modelID) {
if (modelID.includes("quantized")) {
({ default: init, ModelConditionalGeneration } = await import(
"./build/m-quantized.js"
));
} else {
({ default: init, ModelConditionalGeneration } = await import(
"./build/m.js"
));
}
if (!this.instance[modelID]) {
await init();
self.postMessage({ status: "loading", message: "Loading Model" });
const [weightsArrayU8, tokenizerArrayU8, configArrayU8] =
await Promise.all([
fetchArrayBuffer(weightsURL),
fetchArrayBuffer(tokenizerURL),
fetchArrayBuffer(configURL),
]);
this.instance[modelID] = new ModelConditionalGeneration(
weightsArrayU8,
tokenizerArrayU8,
configArrayU8
);
} else {
self.postMessage({ status: "ready", message: "Model Already Loaded" });
}
return this.instance[modelID];
}
}
self.addEventListener("message", async (event) => {
const { weightsURL, tokenizerURL, configURL, modelID, prompt, params } =
event.data;
let {
temperature = 0.0,
seed = 299792458,
repeat_penalty = 1.1,
repeat_last_n = 64,
top_p = 1,
} = { ...params };
try {
self.postMessage({
status: "ready",
message: "Starting T5 Conditional Generation",
});
const model = await ConditionalGeneration.getInstance(
weightsURL,
tokenizerURL,
configURL,
modelID
);
self.postMessage({
status: "decoding",
message: "Decoding Prompt",
});
const output = model.decode({
prompt,
temperature,
seed,
top_p,
repeat_penalty,
repeat_last_n,
});
self.postMessage({
status: "complete",
message: "complete",
output: output,
});
} catch (e) {
self.postMessage({ error: e });
}
});
| candle/candle-wasm-examples/t5/T5ModelConditionalGeneration.js/0 | {
"file_path": "candle/candle-wasm-examples/t5/T5ModelConditionalGeneration.js",
"repo_id": "candle",
"token_count": 980
} | 48 |
fn main() {
wasm_logger::init(wasm_logger::Config::new(log::Level::Trace));
yew::Renderer::<candle_wasm_example_whisper::App>::new().render();
}
| candle/candle-wasm-examples/whisper/src/bin/app.rs/0 | {
"file_path": "candle/candle-wasm-examples/whisper/src/bin/app.rs",
"repo_id": "candle",
"token_count": 67
} | 49 |
module.exports = {
root: true,
parser: "@typescript-eslint/parser",
extends: [
"eslint:recommended",
"plugin:@typescript-eslint/recommended",
"plugin:svelte/recommended",
"prettier",
],
plugins: ["@typescript-eslint"],
ignorePatterns: ["*.cjs"],
overrides: [
{
files: ["*.svelte"],
parser: "svelte-eslint-parser",
parserOptions: {
parser: "@typescript-eslint/parser",
},
},
],
parserOptions: {
sourceType: "module",
ecmaVersion: 2020,
extraFileExtensions: [".svelte"],
},
rules: {
"no-shadow": ["error"],
"@typescript-eslint/no-explicit-any": "error",
"@typescript-eslint/no-non-null-assertion": "error",
"@typescript-eslint/no-unused-vars": [
// prevent variables with a _ prefix from being marked as unused
"error",
{
argsIgnorePattern: "^_",
},
],
"object-shorthand": ["error", "always"],
},
env: {
browser: true,
es2017: true,
node: true,
},
};
| chat-ui/.eslintrc.cjs/0 | {
"file_path": "chat-ui/.eslintrc.cjs",
"repo_id": "chat-ui",
"token_count": 419
} | 50 |
engine-strict=true
| chat-ui/.npmrc/0 | {
"file_path": "chat-ui/.npmrc",
"repo_id": "chat-ui",
"token_count": 7
} | 51 |
import fs from "fs";
const HF_DEPLOYMENT_TOKEN = process.env.HF_DEPLOYMENT_TOKEN; // token used for pushing to hub
const SERPER_API_KEY = process.env.SERPER_API_KEY;
const OPENID_CONFIG = process.env.OPENID_CONFIG;
const MONGODB_URL = process.env.MONGODB_URL;
const HF_TOKEN = process.env.HF_TOKEN ?? process.env.HF_ACCESS_TOKEN; // token used for API requests in prod
const WEBHOOK_URL_REPORT_ASSISTANT = process.env.WEBHOOK_URL_REPORT_ASSISTANT; // slack webhook url used to get "report assistant" events
const ADMIN_API_SECRET = process.env.ADMIN_API_SECRET;
const USAGE_LIMITS = process.env.USAGE_LIMITS;
const MESSAGES_BEFORE_LOGIN = process.env.MESSAGES_BEFORE_LOGIN;
// Read the content of the file .env.template
const PUBLIC_CONFIG = fs.readFileSync(".env.template", "utf8");
// Prepend the content of the env variable SECRET_CONFIG
const full_config = `${PUBLIC_CONFIG}
MONGODB_URL=${MONGODB_URL}
OPENID_CONFIG=${OPENID_CONFIG}
SERPER_API_KEY=${SERPER_API_KEY}
HF_TOKEN=${HF_TOKEN}
WEBHOOK_URL_REPORT_ASSISTANT=${WEBHOOK_URL_REPORT_ASSISTANT}
ADMIN_API_SECRET=${ADMIN_API_SECRET}
USAGE_LIMITS=${USAGE_LIMITS}
MESSAGES_BEFORE_LOGIN=${MESSAGES_BEFORE_LOGIN}
`;
// Make an HTTP POST request to add the space secrets
fetch(`https://huggingface.co/api/spaces/huggingchat/chat-ui/secrets`, {
method: "POST",
body: JSON.stringify({
key: "DOTENV_LOCAL",
value: full_config,
description: `Env variable for HuggingChat. Last updated ${new Date().toISOString()}`,
}),
headers: {
Authorization: `Bearer ${HF_DEPLOYMENT_TOKEN}`,
"Content-Type": "application/json",
},
});
| chat-ui/scripts/updateProdEnv.ts/0 | {
"file_path": "chat-ui/scripts/updateProdEnv.ts",
"repo_id": "chat-ui",
"token_count": 628
} | 52 |
<script lang="ts">
import { navigating } from "$app/stores";
import { createEventDispatcher } from "svelte";
import { browser } from "$app/environment";
import { base } from "$app/paths";
import { page } from "$app/stores";
import CarbonClose from "~icons/carbon/close";
import CarbonTextAlignJustify from "~icons/carbon/text-align-justify";
import IconNew from "$lib/components/icons/IconNew.svelte";
export let isOpen = false;
export let title: string | undefined;
$: title = title ?? "New Chat";
let closeEl: HTMLButtonElement;
let openEl: HTMLButtonElement;
const dispatch = createEventDispatcher();
$: if ($navigating) {
dispatch("toggle", false);
}
$: if (isOpen && closeEl) {
closeEl.focus();
} else if (!isOpen && browser && document.activeElement === closeEl) {
openEl.focus();
}
</script>
<nav
class="flex h-12 items-center justify-between border-b bg-gray-50 px-3 md:hidden dark:border-gray-800 dark:bg-gray-800/70"
>
<button
type="button"
class="-ml-3 flex size-12 shrink-0 items-center justify-center text-lg"
on:click={() => dispatch("toggle", true)}
aria-label="Open menu"
bind:this={openEl}><CarbonTextAlignJustify /></button
>
<span class="truncate px-4">{title}</span>
<a
class:invisible={!$page.params.id}
href="{base}/"
class="-mr-3 flex size-12 shrink-0 items-center justify-center text-lg"><IconNew /></a
>
</nav>
<nav
class="fixed inset-0 z-30 grid max-h-screen grid-cols-1 grid-rows-[auto,auto,1fr,auto] bg-white dark:bg-gray-900 {isOpen
? 'block'
: 'hidden'}"
>
<div class="flex h-12 items-center px-4">
<button
type="button"
class="-mr-3 ml-auto flex size-12 items-center justify-center text-lg"
on:click={() => dispatch("toggle", false)}
aria-label="Close menu"
bind:this={closeEl}><CarbonClose /></button
>
</div>
<slot />
</nav>
| chat-ui/src/lib/components/MobileNav.svelte/0 | {
"file_path": "chat-ui/src/lib/components/MobileNav.svelte",
"repo_id": "chat-ui",
"token_count": 692
} | 53 |
<script lang="ts">
import CarbonUpload from "~icons/carbon/upload";
export let classNames = "";
export let files: File[];
let filelist: FileList;
$: if (filelist) {
files = Array.from(filelist);
}
</script>
<button
class="btn relative h-8 rounded-lg border bg-white px-3 py-1 text-sm text-gray-500 shadow-sm transition-all hover:bg-gray-100 dark:border-gray-600 dark:bg-gray-700 dark:text-gray-300 dark:hover:bg-gray-600 {classNames}"
>
<input
bind:files={filelist}
class="absolute w-full cursor-pointer opacity-0"
type="file"
accept="image/*"
/>
<CarbonUpload class="mr-2 text-xs " /> Upload image
</button>
| chat-ui/src/lib/components/UploadBtn.svelte/0 | {
"file_path": "chat-ui/src/lib/components/UploadBtn.svelte",
"repo_id": "chat-ui",
"token_count": 233
} | 54 |
export const PUBLIC_SEP_TOKEN = "</s>";
| chat-ui/src/lib/constants/publicSepToken.ts/0 | {
"file_path": "chat-ui/src/lib/constants/publicSepToken.ts",
"repo_id": "chat-ui",
"token_count": 16
} | 55 |
import type { Conversation } from "$lib/types/Conversation";
import type { TextGenerationStreamOutput } from "@huggingface/inference";
import { endpointTgi, endpointTgiParametersSchema } from "./tgi/endpointTgi";
import { z } from "zod";
import endpointAws, { endpointAwsParametersSchema } from "./aws/endpointAws";
import { endpointOAIParametersSchema, endpointOai } from "./openai/endpointOai";
import endpointLlamacpp, { endpointLlamacppParametersSchema } from "./llamacpp/endpointLlamacpp";
import endpointOllama, { endpointOllamaParametersSchema } from "./ollama/endpointOllama";
import {
endpointAnthropic,
endpointAnthropicParametersSchema,
} from "./anthropic/endpointAnthropic";
// parameters passed when generating text
export interface EndpointParameters {
messages: Omit<Conversation["messages"][0], "id">[];
preprompt?: Conversation["preprompt"];
continueMessage?: boolean; // used to signal that the last message will be extended
}
interface CommonEndpoint {
weight: number;
}
// type signature for the endpoint
export type Endpoint = (
params: EndpointParameters
) => Promise<AsyncGenerator<TextGenerationStreamOutput, void, void>>;
// generator function that takes in parameters for defining the endpoint and return the endpoint
export type EndpointGenerator<T extends CommonEndpoint> = (parameters: T) => Endpoint;
// list of all endpoint generators
export const endpoints = {
tgi: endpointTgi,
anthropic: endpointAnthropic,
aws: endpointAws,
openai: endpointOai,
llamacpp: endpointLlamacpp,
ollama: endpointOllama,
};
export const endpointSchema = z.discriminatedUnion("type", [
endpointAnthropicParametersSchema,
endpointAwsParametersSchema,
endpointOAIParametersSchema,
endpointTgiParametersSchema,
endpointLlamacppParametersSchema,
endpointOllamaParametersSchema,
]);
export default endpoints;
| chat-ui/src/lib/server/endpoints/endpoints.ts/0 | {
"file_path": "chat-ui/src/lib/server/endpoints/endpoints.ts",
"repo_id": "chat-ui",
"token_count": 552
} | 56 |
import { z } from "zod";
import { USAGE_LIMITS, RATE_LIMIT } from "$env/static/private";
import JSON5 from "json5";
// RATE_LIMIT is the legacy way to define messages per minute limit
export const usageLimitsSchema = z
.object({
conversations: z.coerce.number().optional(), // how many conversations
messages: z.coerce.number().optional(), // how many messages in a conversation
assistants: z.coerce.number().optional(), // how many assistants
messageLength: z.coerce.number().optional(), // how long can a message be before we cut it off
messagesPerMinute: z
.preprocess((val) => {
if (val === undefined) {
return RATE_LIMIT;
}
return val;
}, z.coerce.number().optional())
.optional(), // how many messages per minute
})
.optional();
export const usageLimits = usageLimitsSchema.parse(JSON5.parse(USAGE_LIMITS));
| chat-ui/src/lib/server/usageLimits.ts/0 | {
"file_path": "chat-ui/src/lib/server/usageLimits.ts",
"repo_id": "chat-ui",
"token_count": 297
} | 57 |
// Ideally shouldn't be needed, see https://github.com/huggingface/chat-ui/pull/88#issuecomment-1523173850
import type { Conversation } from "./Conversation";
import type { Timestamps } from "./Timestamps";
export interface AbortedGeneration extends Timestamps {
conversationId: Conversation["_id"];
}
| chat-ui/src/lib/types/AbortedGeneration.ts/0 | {
"file_path": "chat-ui/src/lib/types/AbortedGeneration.ts",
"repo_id": "chat-ui",
"token_count": 93
} | 58 |
export interface Timestamps {
createdAt: Date;
updatedAt: Date;
}
| chat-ui/src/lib/types/Timestamps.ts/0 | {
"file_path": "chat-ui/src/lib/types/Timestamps.ts",
"repo_id": "chat-ui",
"token_count": 23
} | 59 |
import { PUBLIC_APP_ASSETS } from "$env/static/public";
export const isHuggingChat = PUBLIC_APP_ASSETS === "huggingchat";
| chat-ui/src/lib/utils/isHuggingChat.ts/0 | {
"file_path": "chat-ui/src/lib/utils/isHuggingChat.ts",
"repo_id": "chat-ui",
"token_count": 40
} | 60 |
import { collections } from "$lib/server/database";
import { ObjectId } from "mongodb";
import { describe, expect, it } from "vitest";
import {
insertLegacyConversation,
insertLinearBranchConversation,
insertSideBranchesConversation,
} from "./treeHelpers.spec";
import { buildSubtree } from "./buildSubtree";
describe("buildSubtree", () => {
it("a subtree in a legacy conversation should be just a slice", async () => {
const convId = await insertLegacyConversation();
const conv = await collections.conversations.findOne({ _id: new ObjectId(convId) });
if (!conv) throw new Error("Conversation not found");
// check middle
const id = conv.messages[2].id;
const subtree = buildSubtree(conv, id);
expect(subtree).toEqual(conv.messages.slice(0, 3));
// check zero
const id2 = conv.messages[0].id;
const subtree2 = buildSubtree(conv, id2);
expect(subtree2).toEqual(conv.messages.slice(0, 1));
//check full length
const id3 = conv.messages[conv.messages.length - 1].id;
const subtree3 = buildSubtree(conv, id3);
expect(subtree3).toEqual(conv.messages);
});
it("a subtree in a linear branch conversation should be the ancestors and the message", async () => {
const convId = await insertLinearBranchConversation();
const conv = await collections.conversations.findOne({ _id: new ObjectId(convId) });
if (!conv) throw new Error("Conversation not found");
// check middle
const id = conv.messages[1].id;
const subtree = buildSubtree(conv, id);
expect(subtree).toEqual([conv.messages[0], conv.messages[1]]);
// check zero
const id2 = conv.messages[0].id;
const subtree2 = buildSubtree(conv, id2);
expect(subtree2).toEqual([conv.messages[0]]);
//check full length
const id3 = conv.messages[conv.messages.length - 1].id;
const subtree3 = buildSubtree(conv, id3);
expect(subtree3).toEqual(conv.messages);
});
it("should throw an error if the message is not found", async () => {
const convId = await insertLinearBranchConversation();
const conv = await collections.conversations.findOne({ _id: new ObjectId(convId) });
if (!conv) throw new Error("Conversation not found");
const id = "not-a-real-id-test";
expect(() => buildSubtree(conv, id)).toThrow("Message not found");
});
it("should throw an error if the ancestor is not found", async () => {
const convId = await insertLinearBranchConversation();
const conv = await collections.conversations.findOne({ _id: new ObjectId(convId) });
if (!conv) throw new Error("Conversation not found");
const id = "1-1-1-1-2";
conv.messages[1].ancestors = ["not-a-real-id-test"];
expect(() => buildSubtree(conv, id)).toThrow("Ancestor not found");
});
it("should work on empty conversations", () => {
const conv = {
_id: new ObjectId(),
rootMessageId: undefined,
messages: [],
};
const subtree = buildSubtree(conv, "not-a-real-id-test");
expect(subtree).toEqual([]);
});
it("should work for conversation with subtrees", async () => {
const convId = await insertSideBranchesConversation();
const conv = await collections.conversations.findOne({ _id: new ObjectId(convId) });
if (!conv) throw new Error("Conversation not found");
const subtree = buildSubtree(conv, "1-1-1-1-2");
expect(subtree).toEqual([conv.messages[0], conv.messages[1]]);
const subtree2 = buildSubtree(conv, "1-1-1-1-4");
expect(subtree2).toEqual([
conv.messages[0],
conv.messages[1],
conv.messages[2],
conv.messages[3],
]);
const subtree3 = buildSubtree(conv, "1-1-1-1-6");
expect(subtree3).toEqual([conv.messages[0], conv.messages[4], conv.messages[5]]);
const subtree4 = buildSubtree(conv, "1-1-1-1-7");
expect(subtree4).toEqual([conv.messages[0], conv.messages[4], conv.messages[6]]);
});
});
| chat-ui/src/lib/utils/tree/buildSubtree.spec.ts/0 | {
"file_path": "chat-ui/src/lib/utils/tree/buildSubtree.spec.ts",
"repo_id": "chat-ui",
"token_count": 1375
} | 61 |
export async function GET({ locals }) {
if (locals.user) {
const res = {
id: locals.user._id,
username: locals.user.username,
name: locals.user.name,
email: locals.user.email,
avatarUrl: locals.user.avatarUrl,
hfUserId: locals.user.hfUserId,
};
return Response.json(res);
}
return Response.json({ message: "Must be signed in" }, { status: 401 });
}
| chat-ui/src/routes/api/user/+server.ts/0 | {
"file_path": "chat-ui/src/routes/api/user/+server.ts",
"repo_id": "chat-ui",
"token_count": 148
} | 62 |
import { base } from "$app/paths";
import { authCondition } from "$lib/server/auth";
import { collections } from "$lib/server/database";
import { redirect } from "@sveltejs/kit";
export const actions = {
async delete({ locals }) {
// double check we have a user to delete conversations for
if (locals.user?._id || locals.sessionId) {
await collections.conversations.deleteMany({
...authCondition(locals),
});
}
throw redirect(303, `${base}/`);
},
};
| chat-ui/src/routes/conversations/+page.server.ts/0 | {
"file_path": "chat-ui/src/routes/conversations/+page.server.ts",
"repo_id": "chat-ui",
"token_count": 158
} | 63 |
<script lang="ts">
import { page } from "$app/stores";
import { base } from "$app/paths";
import { PUBLIC_ORIGIN } from "$env/static/public";
import type { BackendModel } from "$lib/server/models";
import { useSettingsStore } from "$lib/stores/settings";
import CopyToClipBoardBtn from "$lib/components/CopyToClipBoardBtn.svelte";
import CarbonArrowUpRight from "~icons/carbon/arrow-up-right";
import CarbonLink from "~icons/carbon/link";
const settings = useSettingsStore();
$: if ($settings.customPrompts[$page.params.model] === undefined) {
$settings.customPrompts = {
...$settings.customPrompts,
[$page.params.model]:
$page.data.models.find((el: BackendModel) => el.id === $page.params.model)?.preprompt || "",
};
}
$: hasCustomPreprompt =
$settings.customPrompts[$page.params.model] !==
$page.data.models.find((el: BackendModel) => el.id === $page.params.model)?.preprompt;
$: isActive = $settings.activeModel === $page.params.model;
$: model = $page.data.models.find((el: BackendModel) => el.id === $page.params.model);
</script>
<div class="flex flex-col items-start">
<div class="mb-5 flex flex-col gap-1.5">
<h2 class="text-lg font-semibold md:text-xl">
{$page.params.model}
</h2>
{#if model.description}
<p class="whitespace-pre-wrap text-gray-600">
{model.description}
</p>
{/if}
</div>
<div class="flex flex-wrap items-center gap-2 md:gap-4">
{#if model.modelUrl}
<a
href={model.modelUrl || "https://huggingface.co/" + model.name}
target="_blank"
rel="noreferrer"
class="flex items-center truncate underline underline-offset-2"
>
<CarbonArrowUpRight class="mr-1.5 shrink-0 text-xs " />
Model page
</a>
{/if}
{#if model.datasetName || model.datasetUrl}
<a
href={model.datasetUrl || "https://huggingface.co/datasets/" + model.datasetName}
target="_blank"
rel="noreferrer"
class="flex items-center truncate underline underline-offset-2"
>
<CarbonArrowUpRight class="mr-1.5 shrink-0 text-xs " />
Dataset page
</a>
{/if}
{#if model.websiteUrl}
<a
href={model.websiteUrl}
target="_blank"
class="flex items-center truncate underline underline-offset-2"
rel="noreferrer"
>
<CarbonArrowUpRight class="mr-1.5 shrink-0 text-xs " />
Model website
</a>
{/if}
<CopyToClipBoardBtn
value="{PUBLIC_ORIGIN || $page.url.origin}{base}/models/{model.id}"
classNames="!border-none !shadow-none !py-0 !px-1 !rounded-md"
>
<div class="flex items-center gap-1.5 hover:underline">
<CarbonLink />Copy direct link to model
</div>
</CopyToClipBoardBtn>
</div>
<button
class="{isActive
? 'bg-gray-100'
: 'bg-black text-white'} my-8 flex items-center rounded-full px-3 py-1"
disabled={isActive}
name="Activate model"
on:click|stopPropagation={() => {
$settings.activeModel = $page.params.model;
}}
>
{isActive ? "Active model" : "Activate"}
</button>
<div class="flex w-full flex-col gap-2">
<div class="flex w-full flex-row content-between">
<h3 class="mb-1.5 text-lg font-semibold text-gray-800">System Prompt</h3>
{#if hasCustomPreprompt}
<button
class="ml-auto underline decoration-gray-300 hover:decoration-gray-700"
on:click|stopPropagation={() =>
($settings.customPrompts[$page.params.model] = model.preprompt)}
>
Reset
</button>
{/if}
</div>
<textarea
rows="10"
class="w-full resize-none rounded-md border-2 bg-gray-100 p-2"
bind:value={$settings.customPrompts[$page.params.model]}
/>
</div>
</div>
| chat-ui/src/routes/settings/(nav)/[...model]/+page.svelte/0 | {
"file_path": "chat-ui/src/routes/settings/(nav)/[...model]/+page.svelte",
"repo_id": "chat-ui",
"token_count": 1518
} | 64 |
# How to add a new dataset
Add datasets directly to the 🤗 Hugging Face Hub!
You can share your dataset on https://huggingface.co/datasets directly using your account, see the documentation:
* [Create a dataset and upload files on the website](https://huggingface.co/docs/datasets/upload_dataset)
* [Advanced guide using the CLI](https://huggingface.co/docs/datasets/share)
| datasets/ADD_NEW_DATASET.md/0 | {
"file_path": "datasets/ADD_NEW_DATASET.md",
"repo_id": "datasets",
"token_count": 113
} | 65 |
# Differences between Dataset and IterableDataset
There are two types of dataset objects, a [`Dataset`] and an [`IterableDataset`].
Which type of dataset you choose to use or create depends on the size of the dataset.
In general, an [`IterableDataset`] is ideal for big datasets (think hundreds of GBs!) due to its lazy behavior and speed advantages, while a [`Dataset`] is great for everything else.
This page will compare the differences between a [`Dataset`] and an [`IterableDataset`] to help you pick the right dataset object for you.
## Downloading and streaming
When you have a regular [`Dataset`], you can access it using `my_dataset[0]`. This provides random access to the rows.
Such datasets are also called "map-style" datasets.
For example you can download ImageNet-1k like this and access any row:
```python
from datasets import load_dataset
imagenet = load_dataset("imagenet-1k", split="train") # downloads the full dataset
print(imagenet[0])
```
But one caveat is that you must have the entire dataset stored on your disk or in memory, which blocks you from accessing datasets bigger than the disk.
Because it can become inconvenient for big datasets, there exists another type of dataset, the [`IterableDataset`].
When you have an `IterableDataset`, you can access it using a `for` loop to load the data progressively as you iterate over the dataset.
This way, only a small fraction of examples is loaded in memory, and you don't write anything on disk.
For example, you can stream the ImageNet-1k dataset without downloading it on disk:
```python
from datasets import load_dataset
imagenet = load_dataset("imagenet-1k", split="train", streaming=True) # will start loading the data when iterated over
for example in imagenet:
print(example)
break
```
Streaming can read online data without writing any file to disk.
For example, you can stream datasets made out of multiple shards, each of which is hundreds of gigabytes like [C4](https://huggingface.co/datasets/c4), [OSCAR](https://huggingface.co/datasets/oscar) or [LAION-2B](https://huggingface.co/datasets/laion/laion2B-en).
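For example, here is a minimal sketch of streaming one of these sharded datasets (the `"en"` configuration name for C4 is an assumption based on its default configuration):

```python
from datasets import load_dataset

# starts reading immediately, nothing is written to disk
c4 = load_dataset("c4", "en", split="train", streaming=True)
print(next(iter(c4)))
```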
Learn more about how to stream a dataset in the [Dataset Streaming Guide](./stream).
This is not the only difference though, because the "lazy" behavior of an `IterableDataset` is also present when it comes to dataset creation and processing.
## Creating map-style datasets and iterable datasets
You can create a [`Dataset`] using lists or dictionaries, and the data is entirely converted to Arrow so you can easily access any row:
```python
my_dataset = Dataset.from_dict({"col_1": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]})
print(my_dataset[0])
```
To create an `IterableDataset` on the other hand, you must provide a "lazy" way to load the data.
In Python, we generally use generator functions. These functions `yield` one example at a time, which means you can't access a row by slicing it like a regular `Dataset`:
```python
def my_generator(n):
for i in range(n):
yield {"col_1": i}
my_iterable_dataset = IterableDataset.from_generator(my_generator, gen_kwargs={"n": 10})
for example in my_iterable_dataset:
print(example)
break
```
## Loading local files entirely and progressively
It is possible to convert local or remote data files to an Arrow [`Dataset`] using [`load_dataset`]:
```python
data_files = {"train": ["path/to/data.csv"]}
my_dataset = load_dataset("csv", data_files=data_files, split="train")
print(my_dataset[0])
```
However, this requires a conversion step from CSV to Arrow format, which takes time and disk space if your dataset is big.
To save disk space and skip the conversion step, you can define an `IterableDataset` by streaming from the local files directly.
This way, the data is read progressively from the local files as you iterate over the dataset:
```python
data_files = {"train": ["path/to/data.csv"]}
my_iterable_dataset = load_dataset("csv", data_files=data_files, split="train", streaming=True)
for example in my_iterable_dataset: # this reads the CSV file progressively as you iterate over the dataset
print(example)
break
```
Many file formats are supported, like CSV, JSONL, and Parquet, as well as image and audio files.
You can find more information in the corresponding guides for loading [tabular](./tabular_load), [text](./nlp_load), [vision](./image_load), and [audio](./audio_load) datasets.
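For instance, here is a sketch of streaming local JSON Lines files with the `json` builder (the file paths are hypothetical):

```python
data_files = {"train": ["path/to/data_0.jsonl", "path/to/data_1.jsonl"]}
my_iterable_dataset = load_dataset("json", data_files=data_files, split="train", streaming=True)
for example in my_iterable_dataset:  # reads the JSON Lines files progressively
    print(example)
    break
```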
## Eager data processing and lazy data processing
When you process a [`Dataset`] object using [`Dataset.map`], the entire dataset is processed immediately and returned.
This is similar to how `pandas` works for example.
```python
my_dataset = my_dataset.map(process_fn) # process_fn is applied on all the examples of the dataset
print(my_dataset[0])
```
On the other hand, due to the "lazy" nature of an `IterableDataset`, calling [`IterableDataset.map`] does not apply your `map` function over the full dataset.
Instead, your `map` function is applied on-the-fly.
Because of that, you can chain multiple processing steps and they will all run at once when you start iterating over the dataset:
```python
my_iterable_dataset = my_iterable_dataset.map(process_fn_1)
my_iterable_dataset = my_iterable_dataset.filter(filter_fn)
my_iterable_dataset = my_iterable_dataset.map(process_fn_2)
# process_fn_1, filter_fn and process_fn_2 are applied on-the-fly when iterating over the dataset
for example in my_iterable_dataset:
print(example)
break
```
## Exact and fast approximate shuffling
When you shuffle a [`Dataset`] using [`Dataset.shuffle`], you apply an exact shuffling of the dataset.
It works by taking a list of indices `[0, 1, 2, ... len(my_dataset) - 1]` and shuffling this list.
Then, accessing `my_dataset[0]` returns the row and index defined by the first element of the indices mapping that has been shuffled:
```python
my_dataset = my_dataset.shuffle(seed=42)
print(my_dataset[0])
```
Since we don't have random access to the rows in the case of an `IterableDataset`, we can't use a shuffled list of indices and access a row at an arbitrary position.
This prevents the use of exact shuffling.
Instead, a fast approximate shuffling is used in [`IterableDataset.shuffle`].
It uses a shuffle buffer to sample random examples iteratively from the dataset.
Since the dataset is still read iteratively, it provides excellent speed performance:
```python
my_iterable_dataset = my_iterable_dataset.shuffle(seed=42, buffer_size=100)
for example in my_iterable_dataset:
print(example)
break
```
But using a shuffle buffer is not enough to provide a satisfactory shuffling for machine learning model training. So [`IterableDataset.shuffle`] also shuffles the dataset shards if your dataset is made of multiple files or sources:
```python
# Stream from the internet
my_iterable_dataset = load_dataset("deepmind/code_contests", split="train", streaming=True)
my_iterable_dataset.n_shards # 39
# Stream from local files
data_files = {"train": [f"path/to/data_{i}.csv" for i in range(1024)]}
my_iterable_dataset = load_dataset("csv", data_files=data_files, split="train", streaming=True)
my_iterable_dataset.n_shards # 1024
# From a generator function
def my_generator(n, sources):
for source in sources:
for example_id_for_current_source in range(n):
yield {"example_id": f"{source}_{example_id_for_current_source}"}
gen_kwargs = {"n": 10, "sources": [f"path/to/data_{i}" for i in range(1024)]}
my_iterable_dataset = IterableDataset.from_generator(my_generator, gen_kwargs=gen_kwargs)
my_iterable_dataset.n_shards # 1024
```
## Speed differences
Regular [`Dataset`] objects are based on Arrow which provides fast random access to the rows.
Thanks to memory mapping and the fact that Arrow is an in-memory format, reading data from disk doesn't do expensive system calls and deserialization.
It provides even faster data loading when iterating with a `for` loop, since it reads contiguous Arrow record batches.
However, as soon as your [`Dataset`] has an indices mapping (via [`Dataset.shuffle`] for example), the speed can become 10x slower.
This is because there is an extra step to get the row index to read using the indices mapping, and most importantly, you aren't reading contiguous chunks of data anymore.
To restore the speed, you'd need to rewrite the entire dataset on your disk again using [`Dataset.flatten_indices`], which removes the indices mapping.
This may take a lot of time depending on the size of your dataset though:
```python
my_dataset[0] # fast
my_dataset = my_dataset.shuffle(seed=42)
my_dataset[0] # up to 10x slower
my_dataset = my_dataset.flatten_indices() # rewrite the shuffled dataset on disk as contiguous chunks of data
my_dataset[0] # fast again
```
In this case, we recommend switching to an [`IterableDataset`] and leveraging its fast approximate shuffling method [`IterableDataset.shuffle`].
It only shuffles the shards order and adds a shuffle buffer to your dataset, which keeps the speed of your dataset optimal.
You can also reshuffle the dataset easily:
```python
for example in enumerate(my_iterable_dataset): # fast
pass
shuffled_iterable_dataset = my_iterable_dataset.shuffle(seed=42, buffer_size=100)
for example in enumerate(shuffled_iterable_dataset): # as fast as before
pass
shuffled_iterable_dataset = my_iterable_dataset.shuffle(seed=1337, buffer_size=100) # reshuffling using another seed is instantaneous
for example in enumerate(shuffled_iterable_dataset): # still as fast as before
pass
```
If you're using your dataset on multiple epochs, the effective seed used to shuffle the shards' order in the shuffle buffer is `seed + epoch`.
It makes it easy to reshuffle a dataset between epochs:
```python
for epoch in range(n_epochs):
my_iterable_dataset.set_epoch(epoch)
for example in my_iterable_dataset: # fast + reshuffled at each epoch using `effective_seed = seed + epoch`
pass
```
## Switch from map-style to iterable
If you want to benefit from the "lazy" behavior of an [`IterableDataset`] or their speed advantages, you can switch your map-style [`Dataset`] to an [`IterableDataset`]:
```python
my_iterable_dataset = my_dataset.to_iterable_dataset()
```
If you want to shuffle your dataset or [use it with a PyTorch DataLoader](./use_with_pytorch#stream-data), we recommend generating a sharded [`IterableDataset`]:
```python
my_iterable_dataset = my_dataset.to_iterable_dataset(num_shards=1024)
my_iterable_dataset.n_shards # 1024
```
| datasets/docs/source/about_mapstyle_vs_iterable.mdx/0 | {
"file_path": "datasets/docs/source/about_mapstyle_vs_iterable.mdx",
"repo_id": "datasets",
"token_count": 3261
} | 66 |
# Image classification
Image classification datasets are used to train a model to classify an entire image. These datasets enable a wide variety of applications, such as identifying endangered wildlife species or screening for disease in medical images. This guide will show you how to apply transformations to an image classification dataset.
Before you start, make sure you have up-to-date versions of `albumentations` and `cv2` installed:
```bash
pip install -U albumentations opencv-python
```
This guide uses the [Beans](https://huggingface.co/datasets/beans) dataset for identifying the type of bean plant disease based on an image of its leaf.
Load the dataset and take a look at an example:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("beans")
>>> dataset["train"][10]
{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=500x500 at 0x7F8D2F4D7A10>,
'image_file_path': '/root/.cache/huggingface/datasets/downloads/extracted/b0a21163f78769a2cf11f58dfc767fb458fc7cea5c05dccc0144a2c0f0bc1292/train/angular_leaf_spot/angular_leaf_spot_train.204.jpg',
'labels': 0}
```
The dataset has three fields:
* `image`: a PIL image object.
* `image_file_path`: the path to the image file.
* `labels`: the label or category of the image.
Next, check out an image:
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/img_clf.png">
</div>
Now apply some augmentations with `albumentations`. You'll randomly crop the image, flip it horizontally, and adjust its brightness.
```py
>>> import cv2
>>> import albumentations
>>> import numpy as np
>>> transform = albumentations.Compose([
... albumentations.RandomCrop(width=256, height=256),
... albumentations.HorizontalFlip(p=0.5),
... albumentations.RandomBrightnessContrast(p=0.2),
... ])
```
Create a function to apply the transformation to the images:
```py
>>> def transforms(examples):
... examples["pixel_values"] = [
... transform(image=np.array(image))["image"] for image in examples["image"]
... ]
...
... return examples
```
Use the [`~Dataset.set_transform`] function to apply the transformation on-the-fly to batches of the dataset to consume less disk space:
```py
>>> dataset.set_transform(transforms)
```
You can verify the transformation worked by indexing into the `pixel_values` of the first example:
```py
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> img = dataset["train"][0]["pixel_values"]
>>> plt.imshow(img)
```
<div class="flex justify-center">
<img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/img_clf_aug.png">
<img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/img_clf_aug.png"/>
</div>
<Tip>
Now that you know how to process a dataset for image classification, learn
[how to train an image classification model](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb)
and use it for inference.
</Tip> | datasets/docs/source/image_classification.mdx/0 | {
"file_path": "datasets/docs/source/image_classification.mdx",
"repo_id": "datasets",
"token_count": 1043
} | 67 |
# Main classes
## DatasetInfo
[[autodoc]] datasets.DatasetInfo
## Dataset
The base class [`Dataset`] implements a Dataset backed by an Apache Arrow table.
[[autodoc]] datasets.Dataset
- add_column
- add_item
- from_file
- from_buffer
- from_pandas
- from_dict
- from_generator
- data
- cache_files
- num_columns
- num_rows
- column_names
- shape
- unique
- flatten
- cast
- cast_column
- remove_columns
- rename_column
- rename_columns
- select_columns
- class_encode_column
- __len__
- __iter__
- iter
- formatted_as
- set_format
- set_transform
- reset_format
- with_format
- with_transform
- __getitem__
- cleanup_cache_files
- map
- filter
- select
- sort
- shuffle
- train_test_split
- shard
- to_tf_dataset
- push_to_hub
- save_to_disk
- load_from_disk
- flatten_indices
- to_csv
- to_pandas
- to_dict
- to_json
- to_parquet
- to_sql
- to_iterable_dataset
- add_faiss_index
- add_faiss_index_from_external_arrays
- save_faiss_index
- load_faiss_index
- add_elasticsearch_index
- load_elasticsearch_index
- list_indexes
- get_index
- drop_index
- search
- search_batch
- get_nearest_examples
- get_nearest_examples_batch
- info
- split
- builder_name
- citation
- config_name
- dataset_size
- description
- download_checksums
- download_size
- features
- homepage
- license
- size_in_bytes
- supervised_keys
- version
- from_csv
- from_json
- from_parquet
- from_text
- from_sql
- prepare_for_task
- align_labels_with_mapping
[[autodoc]] datasets.concatenate_datasets
[[autodoc]] datasets.interleave_datasets
[[autodoc]] datasets.distributed.split_dataset_by_node
[[autodoc]] datasets.enable_caching
[[autodoc]] datasets.disable_caching
[[autodoc]] datasets.is_caching_enabled
## DatasetDict
Dictionary with split names as keys ('train', 'test' for example), and `Dataset` objects as values.
It also has dataset transform methods like map or filter, to process all the splits at once.
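For example, a minimal sketch using a public dataset that ships several splits (the `rotten_tomatoes` dataset is only an illustration):

```python
from datasets import load_dataset

ds_dict = load_dataset("rotten_tomatoes")  # DatasetDict with "train", "validation" and "test" splits
ds_dict = ds_dict.map(lambda example: {"length": len(example["text"])})  # applied to every split at once
```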
[[autodoc]] datasets.DatasetDict
- data
- cache_files
- num_columns
- num_rows
- column_names
- shape
- unique
- cleanup_cache_files
- map
- filter
- sort
- shuffle
- set_format
- reset_format
- formatted_as
- with_format
- with_transform
- flatten
- cast
- cast_column
- remove_columns
- rename_column
- rename_columns
- select_columns
- class_encode_column
- push_to_hub
- save_to_disk
- load_from_disk
- from_csv
- from_json
- from_parquet
- from_text
- prepare_for_task
<a id='package_reference_features'></a>
## IterableDataset
The base class [`IterableDataset`] implements an iterable Dataset backed by python generators.
[[autodoc]] datasets.IterableDataset
- from_generator
- remove_columns
- select_columns
- cast_column
- cast
- __iter__
- iter
- map
- rename_column
- filter
- shuffle
- skip
- take
- info
- split
- builder_name
- citation
- config_name
- dataset_size
- description
- download_checksums
- download_size
- features
- homepage
- license
- size_in_bytes
- supervised_keys
- version
## IterableDatasetDict
Dictionary with split names as keys ('train', 'test' for example), and `IterableDataset` objects as values.
[[autodoc]] datasets.IterableDatasetDict
- map
- filter
- shuffle
- with_format
- cast
- cast_column
- remove_columns
- rename_column
- rename_columns
- select_columns
## Features
[[autodoc]] datasets.Features
[[autodoc]] datasets.Sequence
[[autodoc]] datasets.ClassLabel
[[autodoc]] datasets.Value
[[autodoc]] datasets.Translation
[[autodoc]] datasets.TranslationVariableLanguages
[[autodoc]] datasets.Array2D
[[autodoc]] datasets.Array3D
[[autodoc]] datasets.Array4D
[[autodoc]] datasets.Array5D
[[autodoc]] datasets.Audio
[[autodoc]] datasets.Image
## MetricInfo
[[autodoc]] datasets.MetricInfo
## Metric
The base class `Metric` implements a Metric backed by one or several [`Dataset`].
[[autodoc]] datasets.Metric
## Filesystems
[[autodoc]] datasets.filesystems.S3FileSystem
[[autodoc]] datasets.filesystems.extract_path_from_uri
[[autodoc]] datasets.filesystems.is_remote_filesystem
## Fingerprint
[[autodoc]] datasets.fingerprint.Hasher
| datasets/docs/source/package_reference/main_classes.mdx/0 | {
"file_path": "datasets/docs/source/package_reference/main_classes.mdx",
"repo_id": "datasets",
"token_count": 1908
} | 68 |
# Use with PyTorch
This document is a quick introduction to using `datasets` with PyTorch, with a particular focus on how to get
`torch.Tensor` objects out of our datasets, and how to use a PyTorch `DataLoader` and a Hugging Face `Dataset`
with the best performance.
## Dataset format
By default, datasets return regular python objects: integers, floats, strings, lists, etc.
To get PyTorch tensors instead, you can set the format of the dataset to `pytorch` using [`Dataset.with_format`]:
```py
>>> from datasets import Dataset
>>> data = [[1, 2],[3, 4]]
>>> ds = Dataset.from_dict({"data": data})
>>> ds = ds.with_format("torch")
>>> ds[0]
{'data': tensor([1, 2])}
>>> ds[:2]
{'data': tensor([[1, 2],
[3, 4]])}
```
<Tip>
A [`Dataset`] object is a wrapper of an Arrow table, which allows fast zero-copy reads from arrays in the dataset to PyTorch tensors.
</Tip>
To load the data as tensors on a GPU, specify the `device` argument:
```py
>>> import torch
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> ds = ds.with_format("torch", device=device)
>>> ds[0]
{'data': tensor([1, 2], device='cuda:0')}
```
## N-dimensional arrays
If your dataset consists of N-dimensional arrays, you will see that by default they are considered as nested lists.
In particular, a PyTorch formatted dataset outputs nested lists instead of a single tensor:
```py
>>> from datasets import Dataset
>>> data = [[[1, 2],[3, 4]],[[5, 6],[7, 8]]]
>>> ds = Dataset.from_dict({"data": data})
>>> ds = ds.with_format("torch")
>>> ds[0]
{'data': [tensor([1, 2]), tensor([3, 4])]}
```
To get a single tensor, you must explicitly use the [`Array`] feature type and specify the shape of your tensors:
```py
>>> from datasets import Dataset, Features, Array2D
>>> data = [[[1, 2],[3, 4]],[[5, 6],[7, 8]]]
>>> features = Features({"data": Array2D(shape=(2, 2), dtype='int32')})
>>> ds = Dataset.from_dict({"data": data}, features=features)
>>> ds = ds.with_format("torch")
>>> ds[0]
{'data': tensor([[1, 2],
[3, 4]])}
>>> ds[:2]
{'data': tensor([[[1, 2],
[3, 4]],
[[5, 6],
[7, 8]]])}
```
## Other feature types
[`ClassLabel`] data are properly converted to tensors:
```py
>>> from datasets import Dataset, Features, ClassLabel
>>> labels = [0, 0, 1]
>>> features = Features({"label": ClassLabel(names=["negative", "positive"])})
>>> ds = Dataset.from_dict({"label": labels}, features=features)
>>> ds = ds.with_format("torch")
>>> ds[:3]
{'label': tensor([0, 0, 1])}
```
String and binary objects are unchanged, since PyTorch only supports numbers.
The [`Image`] and [`Audio`] feature types are also supported.
<Tip>
To use the [`Image`] feature type, you'll need to install the `vision` extra as
`pip install datasets[vision]`.
</Tip>
```py
>>> from datasets import Dataset, Features, Audio, Image
>>> images = ["path/to/image.png"] * 10
>>> features = Features({"image": Image()})
>>> ds = Dataset.from_dict({"image": images}, features=features)
>>> ds = ds.with_format("torch")
>>> ds[0]["image"].shape
torch.Size([512, 512, 4])
>>> ds[0]
{'image': tensor([[[255, 215, 106, 255],
[255, 215, 106, 255],
...,
[255, 255, 255, 255],
[255, 255, 255, 255]]], dtype=torch.uint8)}
>>> ds[:2]["image"].shape
torch.Size([2, 512, 512, 4])
>>> ds[:2]
{'image': tensor([[[[255, 215, 106, 255],
[255, 215, 106, 255],
...,
[255, 255, 255, 255],
[255, 255, 255, 255]]]], dtype=torch.uint8)}
```
<Tip>
To use the [`Audio`] feature type, you'll need to install the `audio` extra as
`pip install datasets[audio]`.
</Tip>
```py
>>> from datasets import Dataset, Features, Audio, Image
>>> audio = ["path/to/audio.wav"] * 10
>>> features = Features({"audio": Audio()})
>>> ds = Dataset.from_dict({"audio": audio}, features=features)
>>> ds = ds.with_format("torch")
>>> ds[0]["audio"]["array"]
tensor([ 6.1035e-05, 1.5259e-05, 1.6785e-04, ..., -1.5259e-05,
-1.5259e-05, 1.5259e-05])
>>> ds[0]["audio"]["sampling_rate"]
tensor(44100)
```
## Data loading
Like `torch.utils.data.Dataset` objects, a [`Dataset`] can be passed directly to a PyTorch `DataLoader`:
```py
>>> import numpy as np
>>> from datasets import Dataset
>>> from torch.utils.data import DataLoader
>>> data = np.random.rand(16)
>>> label = np.random.randint(0, 2, size=16)
>>> ds = Dataset.from_dict({"data": data, "label": label}).with_format("torch")
>>> dataloader = DataLoader(ds, batch_size=4)
>>> for batch in dataloader:
... print(batch)
{'data': tensor([0.0047, 0.4979, 0.6726, 0.8105]), 'label': tensor([0, 1, 0, 1])}
{'data': tensor([0.4832, 0.2723, 0.4259, 0.2224]), 'label': tensor([0, 0, 0, 0])}
{'data': tensor([0.5837, 0.3444, 0.4658, 0.6417]), 'label': tensor([0, 1, 0, 0])}
{'data': tensor([0.7022, 0.1225, 0.7228, 0.8259]), 'label': tensor([1, 1, 1, 1])}
```
### Optimize data loading
There are several ways you can increase the speed at which your data is loaded, which can save you time, especially if you are working with large datasets.
PyTorch offers parallelized data loading, retrieving batches of indices instead of individual examples, and streaming to iterate over the dataset without downloading it on disk.
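For example, here is a minimal sketch of retrieving batches of indices at once with a `BatchSampler` (assuming `ds` is a map-style [`Dataset`] formatted for PyTorch as shown above):

```py
>>> from torch.utils.data import BatchSampler, RandomSampler, DataLoader
>>> # the sampler yields lists of indices, so the dataset is queried one batch at a time
>>> sampler = BatchSampler(RandomSampler(ds), batch_size=32, drop_last=False)
>>> dataloader = DataLoader(ds, sampler=sampler, batch_size=None)
```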
#### Use multiple Workers
You can parallelize data loading with the `num_workers` argument of a PyTorch `DataLoader` and get a higher throughput.
Under the hood, the `DataLoader` starts `num_workers` processes.
Each process reloads the dataset passed to the `DataLoader` and is used to query examples.
Reloading the dataset inside a worker doesn't fill up your RAM, since it simply memory-maps the dataset again from your disk.
```py
>>> import numpy as np
>>> from datasets import Dataset, load_from_disk
>>> from torch.utils.data import DataLoader
>>> data = np.random.rand(10_000)
>>> Dataset.from_dict({"data": data}).save_to_disk("my_dataset")
>>> ds = load_from_disk("my_dataset").with_format("torch")
>>> dataloader = DataLoader(ds, batch_size=32, num_workers=4)
```
### Stream data
Stream a dataset by loading it as an [`IterableDataset`]. This allows you to progressively iterate over a remote dataset without downloading it on disk, or over local data files.
Learn more about which type of dataset is best for your use case in the [choosing between a regular dataset or an iterable dataset](./about_mapstyle_vs_iterable) guide.
An iterable dataset from `datasets` inherits from `torch.utils.data.IterableDataset` so you can pass it to a `torch.utils.data.DataLoader`:
```py
>>> import numpy as np
>>> from datasets import Dataset, load_dataset
>>> from torch.utils.data import DataLoader
>>> data = np.random.rand(10_000)
>>> Dataset.from_dict({"data": data}).push_to_hub("<username>/my_dataset") # Upload to the Hugging Face Hub
>>> my_iterable_dataset = load_dataset("<username>/my_dataset", streaming=True, split="train")
>>> dataloader = DataLoader(my_iterable_dataset, batch_size=32)
```
If the dataset is split in several shards (i.e. if the dataset consists of multiple data files), then you can stream in parallel using `num_workers`:
```py
>>> my_iterable_dataset = load_dataset("deepmind/code_contests", streaming=True, split="train")
>>> my_iterable_dataset.n_shards
39
>>> dataloader = DataLoader(my_iterable_dataset, batch_size=32, num_workers=4)
```
In this case each worker is given a subset of the list of shards to stream from.
### Distributed
To split your dataset across your training nodes, you can use [`datasets.distributed.split_dataset_by_node`]:
```python
import os
from datasets.distributed import split_dataset_by_node
ds = split_dataset_by_node(ds, rank=int(os.environ["RANK"]), world_size=int(os.environ["WORLD_SIZE"]))
```
This works for both map-style datasets and iterable datasets.
The dataset is split for the node at rank `rank` in a pool of nodes of size `world_size`.
For map-style datasets:
Each node is assigned a chunk of data, e.g. rank 0 is given the first chunk of the dataset.
For iterable datasets:
If the dataset has a number of shards that is a factor of `world_size` (i.e. if `dataset.n_shards % world_size == 0`),
then the shards are evenly assigned across the nodes, which is the most optimized.
Otherwise, each node keeps 1 example out of `world_size`, skipping the other examples.
This can also be combined with a `torch.utils.data.DataLoader` if you want each node to use multiple workers to load the data.
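Here is a minimal sketch of that combination for a sharded iterable dataset (assuming `RANK` and `WORLD_SIZE` are set by your launcher, e.g. `torchrun`):

```python
import os
from datasets import load_dataset
from datasets.distributed import split_dataset_by_node
from torch.utils.data import DataLoader

ds = load_dataset("deepmind/code_contests", streaming=True, split="train")
ds = split_dataset_by_node(ds, rank=int(os.environ["RANK"]), world_size=int(os.environ["WORLD_SIZE"]))
dataloader = DataLoader(ds, batch_size=32, num_workers=4)  # each node streams its own subset of shards
```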
| datasets/docs/source/use_with_pytorch.mdx/0 | {
"file_path": "datasets/docs/source/use_with_pytorch.mdx",
"repo_id": "datasets",
"token_count": 3104
} | 69 |
# Metric Card for Code Eval
## Metric description
The CodeEval metric estimates the pass@k metric for code synthesis.
It implements the evaluation harness for the HumanEval problem solving dataset described in the paper ["Evaluating Large Language Models Trained on Code"](https://arxiv.org/abs/2107.03374).
## How to use
The Code Eval metric calculates how good predictions are given a set of references. Its arguments are:
`predictions`: a list of candidates to evaluate. Each candidate should be a list of strings with several code candidates to solve the problem.
`references`: a list with a test for each prediction. Each test should evaluate the correctness of a code candidate.
`k`: number of code candidates to consider in the evaluation. The default value is `[1, 10, 100]`.
`num_workers`: the number of workers used to evaluate the candidate programs (The default value is `4`).
`timeout`: The maximum time taken to produce a prediction before it is considered a "timeout". The default value is `3.0` (i.e. 3 seconds).
```python
from datasets import load_metric
code_eval = load_metric("code_eval")
test_cases = ["assert add(2,3)==5"]
candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
```
N.B.
This metric exists to run untrusted model-generated code. Users are strongly encouraged not to do so outside of a robust security sandbox. Before running this metric and once you've taken the necessary precautions, you will need to set the `HF_ALLOW_CODE_EVAL` environment variable. Use it at your own risk:
```python
import os
os.environ["HF_ALLOW_CODE_EVAL"] = "1"
```
## Output values
The Code Eval metric outputs two things:
`pass_at_k`: a dictionary with the pass rates for each k value defined in the arguments.
`results`: a dictionary with granular results of each unit test.
### Values from popular papers
The [original CODEX paper](https://arxiv.org/pdf/2107.03374.pdf) reported that the CODEX-12B model had a pass@k score of 28.8% at `k=1`, 46.8% at `k=10` and 72.3% at `k=100`. However, since the CODEX model is not open source, it is hard to verify these numbers.
## Examples
Full match at `k=1`:
```python
from datasets import load_metric
code_eval = load_metric("code_eval")
test_cases = ["assert add(2,3)==5"]
candidates = [["def add(a, b): return a+b"]]
pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1])
print(pass_at_k)
{'pass@1': 1.0}
```
No match at `k=1`:
```python
from datasets import load_metric
code_eval = load_metric("code_eval")
test_cases = ["assert add(2,3)==5"]
candidates = [["def add(a,b): return a*b"]]
pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1])
print(pass_at_k)
{'pass@1': 0.0}
```
Partial match at `k=1`, full match at `k=2`:
```python
from datasets import load_metric
code_eval = load_metric("code_eval")
test_cases = ["assert add(2,3)==5"]
candidates = [["def add(a, b): return a+b", "def add(a,b): return a*b"]]
pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
print(pass_at_k)
{'pass@1': 0.5, 'pass@2': 1.0}
```
## Limitations and bias
As per the warning included in the metric code itself:
> This program exists to execute untrusted model-generated code. Although it is highly unlikely that model-generated code will do something overtly malicious in response to this test suite, model-generated code may act destructively due to a lack of model capability or alignment. Users are strongly encouraged to sandbox this evaluation suite so that it does not perform destructive actions on their host or network. For more information on how OpenAI sandboxes its code, see the accompanying paper. Once you have read this disclaimer and taken appropriate precautions, uncomment the following line and proceed at your own risk:
More information about the limitations of the code can be found on the [Human Eval Github repository](https://github.com/openai/human-eval).
## Citation
```bibtex
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
```
## Further References
- [Human Eval Github repository](https://github.com/openai/human-eval)
- [OpenAI Codex website](https://openai.com/blog/openai-codex/)
| datasets/metrics/code_eval/README.md/0 | {
"file_path": "datasets/metrics/code_eval/README.md",
"repo_id": "datasets",
"token_count": 1698
} | 70 |
# Metric Card for FrugalScore
## Metric Description
FrugalScore is a reference-based metric for Natural Language Generation (NLG) model evaluation. It is based on a distillation approach that allows to learn a fixed, low cost version of any expensive NLG metric, while retaining most of its original performance.
The FrugalScore models are obtained by continuing the pretraining of small models on a synthetic dataset constructed using summarization, backtranslation and denoising models. During the training, the small models learn the internal mapping of the expensive metric, including any similarity function.
## How to use
When loading FrugalScore, you can indicate the model you wish to use to compute the score. The default model is `moussaKam/frugalscore_tiny_bert-base_bert-score`, and a full list of models can be found in the [Limitations and bias](#Limitations-and-bias) section.
```python
>>> from datasets import load_metric
>>> frugalscore = load_metric("frugalscore", "moussaKam/frugalscore_medium_bert-base_mover-score")
```
FrugalScore calculates how good the predictions are given some references, based on a set of scores.
The inputs it takes are:
`predictions`: a list of strings representing the predictions to score.
`references`: a list of string representing the references for each prediction.
Its optional arguments are:
`batch_size`: the batch size for predictions (default value is `32`).
`max_length`: the maximum sequence length (default value is `128`).
`device`: either "gpu" or "cpu" (default value is `None`).
```python
>>> results = frugalscore.compute(predictions=['hello there', 'huggingface'], references=['hello world', 'hugging face'], batch_size=16, max_length=64, device="gpu")
```
## Output values
The output of FrugalScore is a dictionary with the list of scores for each prediction-reference pair:
```python
{'scores': [0.6307541, 0.6449357]}
```
### Values from popular papers
The [original FrugalScore paper](https://arxiv.org/abs/2110.08559) reported that FrugalScore-Tiny retains 97.7/94.7% of the original performance compared to [BertScore](https://huggingface.co/metrics/bertscore) while running 54 times faster and having 84 times less parameters.
## Examples
Maximal values (exact match between `references` and `predictions`):
```python
>>> from datasets import load_metric
>>> frugalscore = load_metric("frugalscore")
>>> results = frugalscore.compute(predictions=['hello world'], references=['hello world'])
>>> print(results)
{'scores': [0.9891098]}
```
Partial values:
```python
>>> from datasets import load_metric
>>> frugalscore = load_metric("frugalscore")
>>> results = frugalscore.compute(predictions=['hello world'], references=['hugging face'])
>>> print(results)
{'scores': [0.42482382]}
```
## Limitations and bias
FrugalScore is based on [BertScore](https://huggingface.co/metrics/bertscore) and [MoverScore](https://arxiv.org/abs/1909.02622), and the models used are based on the original models used for these scores.
The full list of available models for FrugalScore is:
| FrugalScore | Student | Teacher | Method |
|----------------------------------------------------|-------------|----------------|------------|
| [moussaKam/frugalscore_tiny_bert-base_bert-score](https://huggingface.co/moussaKam/frugalscore_tiny_bert-base_bert-score) | BERT-tiny | BERT-Base | BERTScore |
| [moussaKam/frugalscore_small_bert-base_bert-score](https://huggingface.co/moussaKam/frugalscore_small_bert-base_bert-score) | BERT-small | BERT-Base | BERTScore |
| [moussaKam/frugalscore_medium_bert-base_bert-score](https://huggingface.co/moussaKam/frugalscore_medium_bert-base_bert-score) | BERT-medium | BERT-Base | BERTScore |
| [moussaKam/frugalscore_tiny_roberta_bert-score](https://huggingface.co/moussaKam/frugalscore_tiny_roberta_bert-score) | BERT-tiny | RoBERTa-Large | BERTScore |
| [moussaKam/frugalscore_small_roberta_bert-score](https://huggingface.co/moussaKam/frugalscore_small_roberta_bert-score) | BERT-small | RoBERTa-Large | BERTScore |
| [moussaKam/frugalscore_medium_roberta_bert-score](https://huggingface.co/moussaKam/frugalscore_medium_roberta_bert-score) | BERT-medium | RoBERTa-Large | BERTScore |
| [moussaKam/frugalscore_tiny_deberta_bert-score](https://huggingface.co/moussaKam/frugalscore_tiny_deberta_bert-score) | BERT-tiny | DeBERTa-XLarge | BERTScore |
| [moussaKam/frugalscore_small_deberta_bert-score](https://huggingface.co/moussaKam/frugalscore_small_deberta_bert-score) | BERT-small | DeBERTa-XLarge | BERTScore |
| [moussaKam/frugalscore_medium_deberta_bert-score](https://huggingface.co/moussaKam/frugalscore_medium_deberta_bert-score) | BERT-medium | DeBERTa-XLarge | BERTScore |
| [moussaKam/frugalscore_tiny_bert-base_mover-score](https://huggingface.co/moussaKam/frugalscore_tiny_bert-base_mover-score) | BERT-tiny | BERT-Base | MoverScore |
| [moussaKam/frugalscore_small_bert-base_mover-score](https://huggingface.co/moussaKam/frugalscore_small_bert-base_mover-score) | BERT-small | BERT-Base | MoverScore |
| [moussaKam/frugalscore_medium_bert-base_mover-score](https://huggingface.co/moussaKam/frugalscore_medium_bert-base_mover-score) | BERT-medium | BERT-Base | MoverScore |
The loading time varies with the size of the model picked: the `tiny` models load very quickly, whereas the `medium` ones can take several minutes, depending on your Internet connection.
## Citation
```bibtex
@article{eddine2021frugalscore,
title={FrugalScore: Learning Cheaper, Lighter and Faster Evaluation Metrics for Automatic Text Generation},
author={Eddine, Moussa Kamal and Shang, Guokan and Tixier, Antoine J-P and Vazirgiannis, Michalis},
journal={arXiv preprint arXiv:2110.08559},
year={2021}
}
```
## Further References
- [Original FrugalScore code](https://github.com/moussaKam/FrugalScore)
- [FrugalScore paper](https://arxiv.org/abs/2110.08559)
| datasets/metrics/frugalscore/README.md/0 | {
"file_path": "datasets/metrics/frugalscore/README.md",
"repo_id": "datasets",
"token_count": 2127
} | 71 |
# Metric Card for Mean IoU
## Metric Description
IoU (Intersection over Union) is the area of overlap between the predicted segmentation and the ground truth divided by the area of union between the predicted segmentation and the ground truth.
For binary (two classes) or multi-class segmentation, the *mean IoU* of the image is calculated by taking the IoU of each class and averaging them.
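To make the definition concrete, here is a minimal NumPy sketch of a per-class IoU computation; the `per_class_iou` helper is illustrative only and is not how the metric itself is implemented:
```python
import numpy as np

def per_class_iou(pred, gt, num_labels, ignore_index=255):
    """Illustrative per-class IoU: area of overlap divided by area of union, skipping ignored pixels."""
    mask = gt != ignore_index
    ious = []
    for label in range(num_labels):
        pred_l = (pred == label) & mask
        gt_l = (gt == label) & mask
        union = np.logical_or(pred_l, gt_l).sum()
        intersection = np.logical_and(pred_l, gt_l).sum()
        ious.append(intersection / union if union > 0 else np.nan)  # NaN for classes absent from both maps
    return ious

pred = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
gt = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
print(np.nanmean(per_class_iou(pred, gt, num_labels=10)))  # mean IoU over the classes that appear
```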
## How to Use
The Mean IoU metric takes two numeric arrays as input corresponding to the predicted and ground truth segmentations:
```python
>>> import numpy as np
>>> mean_iou = datasets.load_metric("mean_iou")
>>> predicted = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> ground_truth = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255)
```
### Inputs
**Mandatory inputs**
- `predictions` (`List[ndarray]`): List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
- `references` (`List[ndarray]`): List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
- `num_labels` (`int`): Number of classes (categories).
- `ignore_index` (`int`): Index that will be ignored during evaluation.
**Optional inputs**
- `nan_to_num` (`int`): If specified, NaN values will be replaced by the number defined by the user.
- `label_map` (`dict`): If specified, dictionary mapping old label indices to new label indices.
- `reduce_labels` (`bool`): Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. The default value is `False`.
### Output Values
The metric returns a dictionary with the following elements:
- `mean_iou` (`float`): Mean Intersection-over-Union (IoU averaged over all categories).
- `mean_accuracy` (`float`): Mean accuracy (averaged over all categories).
- `overall_accuracy` (`float`): Overall accuracy on all images.
- `per_category_accuracy` (`ndarray` of shape `(num_labels,)`): Per category accuracy.
- `per_category_iou` (`ndarray` of shape `(num_labels,)`): Per category IoU.
The values of all of the scores reported range from `0.0` (minimum) to `1.0` (maximum).
Output Example:
```python
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
```
#### Values from Popular Papers
The [leaderboard for the CityScapes dataset](https://paperswithcode.com/sota/semantic-segmentation-on-cityscapes) reports a mean IoU ranging from 64 to 84; that of [ADE20k](https://paperswithcode.com/sota/semantic-segmentation-on-ade20k) ranges from 30 to a peak of 59.9, indicating that the dataset is more difficult for current approaches (as of 2022).
### Examples
```python
>>> from datasets import load_metric
>>> import numpy as np
>>> mean_iou = load_metric("mean_iou")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predictions = [predicted_1, predicted_2, predicted_3]
>>> references = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predictions, references=references, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
```
## Limitations and Bias
Mean IoU is an average metric, so it will not show you where model predictions differ from the ground truth (i.e. if there are particular regions or classes that the model does poorly on). Further error analysis is needed to gather actionable insights that can be used to inform model improvements.
## Citation(s)
```bibtex
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"
```
## Further References
- [Wikipedia article - Jaccard Index](https://en.wikipedia.org/wiki/Jaccard_index)
| datasets/metrics/mean_iou/README.md/0 | {
"file_path": "datasets/metrics/mean_iou/README.md",
"repo_id": "datasets",
"token_count": 1803
} | 72 |
# Metric Card for ROUGE
## Metric Description
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for evaluating automatic summarization and machine translation software in natural language processing. The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metric is a wrapper around the [Google Research reimplementation of ROUGE](https://github.com/google-research/google-research/tree/master/rouge).
## How to Use
At minimum, this metric takes as input a list of predictions and a list of references:
```python
>>> rouge = datasets.load_metric('rouge')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions,
... references=references)
>>> print(list(results.keys()))
['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
```
### Inputs
- **predictions** (`list`): list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
- **references** (`list`): list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
- **rouge_types** (`list`): A list of rouge types to calculate. Defaults to `['rouge1', 'rouge2', 'rougeL', 'rougeLsum']`.
- Valid rouge types:
- `"rouge1"`: unigram (1-gram) based scoring
- `"rouge2"`: bigram (2-gram) based scoring
- `"rougeL"`: Longest common subsequence based scoring.
- `"rougeLSum"`: splits text using `"\n"`
- See [here](https://github.com/huggingface/datasets/issues/617) for more information
- **use_aggregator** (`boolean`): If True, returns aggregates. Defaults to `True`.
- **use_stemmer** (`boolean`): If `True`, uses Porter stemmer to strip word suffixes. Defaults to `False`.
### Output Values
The output is a dictionary with one entry for each rouge type in the input list `rouge_types`. If `use_aggregator=False`, each dictionary entry is a list of Score objects, with one score for each sentence. Each Score object includes the `precision`, `recall`, and `fmeasure`. E.g. if `rouge_types=['rouge1', 'rouge2']` and `use_aggregator=False`, the output is:
```python
{'rouge1': [Score(precision=1.0, recall=0.5, fmeasure=0.6666666666666666), Score(precision=1.0, recall=1.0, fmeasure=1.0)], 'rouge2': [Score(precision=0.0, recall=0.0, fmeasure=0.0), Score(precision=1.0, recall=1.0, fmeasure=1.0)]}
```
If `rouge_types=['rouge1', 'rouge2']` and `use_aggregator=True`, the output is of the following format:
```python
{'rouge1': AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0)), 'rouge2': AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))}
```
The `precision`, `recall`, and `fmeasure` values all have a range of 0 to 1.
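Since the aggregated output is a dictionary of `AggregateScore` objects rather than plain numbers, a common post-processing step is to keep only the mid F-measure per ROUGE type; the sketch below is illustrative only and reuses the access path shown above:
```python
>>> rouge = datasets.load_metric('rouge')
>>> results = rouge.compute(predictions=["hello there"], references=["hello there"])
>>> print({rouge_type: aggregate.mid.fmeasure for rouge_type, aggregate in results.items()})
{'rouge1': 1.0, 'rouge2': 1.0, 'rougeL': 1.0, 'rougeLsum': 1.0}
```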
#### Values from Popular Papers
### Examples
An example without aggregation:
```python
>>> rouge = datasets.load_metric('rouge')
>>> predictions = ["hello goodbye", "ankh morpork"]
>>> references = ["goodbye", "general kenobi"]
>>> results = rouge.compute(predictions=predictions,
... references=references)
>>> print(list(results.keys()))
['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
>>> print(results["rouge1"])
[Score(precision=0.5, recall=0.5, fmeasure=0.5), Score(precision=0.0, recall=0.0, fmeasure=0.0)]
```
The same example, but with aggregation:
```python
>>> rouge = datasets.load_metric('rouge')
>>> predictions = ["hello goodbye", "ankh morpork"]
>>> references = ["goodbye", "general kenobi"]
>>> results = rouge.compute(predictions=predictions,
... references=references,
... use_aggregator=True)
>>> print(list(results.keys()))
['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=0.0, recall=0.0, fmeasure=0.0), mid=Score(precision=0.25, recall=0.25, fmeasure=0.25), high=Score(precision=0.5, recall=0.5, fmeasure=0.5))
```
The same example, but only calculating `rouge_1`:
```python
>>> rouge = datasets.load_metric('rouge')
>>> predictions = ["hello goodbye", "ankh morpork"]
>>> references = ["goodbye", "general kenobi"]
>>> results = rouge.compute(predictions=predictions,
... references=references,
... rouge_types=['rouge1'],
... use_aggregator=True)
>>> print(list(results.keys()))
['rouge1']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=0.0, recall=0.0, fmeasure=0.0), mid=Score(precision=0.25, recall=0.25, fmeasure=0.25), high=Score(precision=0.5, recall=0.5, fmeasure=0.5))
```
## Limitations and Bias
See [Schluter (2017)](https://aclanthology.org/E17-2007/) for an in-depth discussion of many of ROUGE's limits.
## Citation
```bibtex
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
```
## Further References
- This metric is a wrapper around the [Google Research reimplementation of ROUGE](https://github.com/google-research/google-research/tree/master/rouge) | datasets/metrics/rouge/README.md/0 | {
"file_path": "datasets/metrics/rouge/README.md",
"repo_id": "datasets",
"token_count": 2244
} | 73 |
# Metric Card for SuperGLUE
## Metric description
This metric is used to compute the SuperGLUE evaluation metric associated to each of the subsets of the [SuperGLUE dataset](https://huggingface.co/datasets/super_glue).
SuperGLUE is a new benchmark styled after GLUE with a new set of more difficult language understanding tasks, improved resources, and a new public leaderboard.
## How to use
There are two steps: (1) loading the SuperGLUE metric relevant to the subset of the dataset being used for evaluation; and (2) calculating the metric.
1. **Loading the relevant SuperGLUE metric** : the subsets of SuperGLUE are the following: `boolq`, `cb`, `copa`, `multirc`, `record`, `rte`, `wic`, `wsc`, `wsc.fixed`, `axb`, `axg`.
More information about the different subsets of the SuperGLUE dataset can be found on the [SuperGLUE dataset page](https://huggingface.co/datasets/super_glue) and on the [official dataset website](https://super.gluebenchmark.com/).
2. **Calculating the metric**: the metric takes two inputs: one list with the predictions of the model to score and one list of reference labels. The structure of both inputs depends on the SuperGLUE subset being used (a sketch of the `record` format is shown after the lists below):
Format of `predictions`:
- for `record`: list of question-answer dictionaries with the following keys:
- `idx`: index of the question as specified by the dataset
- `prediction_text`: the predicted answer text
- for `multirc`: list of question-answer dictionaries with the following keys:
- `idx`: index of the question-answer pair as specified by the dataset
- `prediction`: the predicted answer label
- otherwise: list of predicted labels
Format of `references`:
- for `record`: list of question-answers dictionaries with the following keys:
- `idx`: index of the question as specified by the dataset
- `answers`: list of possible answers
- otherwise: list of reference labels
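As an illustration, here is a sketch of what a call for the `record` subset might look like; the exact `idx` keys follow the dataset's own indexing and are an assumption here, not taken from this card:
```python
from datasets import load_metric
super_glue_metric = load_metric('super_glue', 'record')
predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
results = super_glue_metric.compute(predictions=predictions, references=references)
```
For subsets that use plain labels, the call is simpler: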
```python
from datasets import load_metric
super_glue_metric = load_metric('super_glue', 'copa')
predictions = [0, 1]
references = [0, 1]
results = super_glue_metric.compute(predictions=predictions, references=references)
```
## Output values
The output of the metric depends on the SuperGLUE subset chosen, consisting of a dictionary that contains one or several of the following metrics:
`exact_match`: A given predicted string's exact match score is 1 if it is the exact same as its reference string, and is 0 otherwise. (See [Exact Match](https://huggingface.co/metrics/exact_match) for more information).
`f1`: the harmonic mean of the precision and recall (see [F1 score](https://huggingface.co/metrics/f1) for more information). Its range is 0-1 -- its lowest possible value is 0, if either the precision or the recall is 0, and its highest possible value is 1.0, which means perfect precision and recall.
`matthews_correlation`: a measure of the quality of binary and multiclass classifications (see [Matthews Correlation](https://huggingface.co/metrics/matthews_correlation) for more information). Its range of values is between -1 and +1, where a coefficient of +1 represents a perfect prediction, 0 an average random prediction and -1 an inverse prediction.
### Values from popular papers
The [original SuperGLUE paper](https://arxiv.org/pdf/1905.00537.pdf) reported average scores ranging from 47 to 71.5%, depending on the model used (with all evaluation values scaled by 100 to make computing the average possible).
For more recent model performance, see the [dataset leaderboard](https://super.gluebenchmark.com/leaderboard).
## Examples
Maximal values for the COPA subset (which outputs `accuracy`):
```python
from datasets import load_metric
super_glue_metric = load_metric('super_glue', 'copa') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
predictions = [0, 1]
references = [0, 1]
results = super_glue_metric.compute(predictions=predictions, references=references)
print(results)
{'accuracy': 1.0}
```
Minimal values for the MultiRC subset (which outputs `exact_match`, `f1_m` and `f1_a`):
```python
from datasets import load_metric
super_glue_metric = load_metric('super_glue', 'multirc')
predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
references = [1,0]
results = super_glue_metric.compute(predictions=predictions, references=references)
print(results)
{'exact_match': 0.0, 'f1_m': 0.0, 'f1_a': 0.0}
```
Partial match for the AX-b diagnostic subset (which outputs `matthews_correlation`):
```python
from datasets import load_metric
super_glue_metric = load_metric('super_glue', 'axb')
references = [0, 1]
predictions = [1,1]
results = super_glue_metric.compute(predictions=predictions, references=references)
print(results)
{'matthews_correlation': 0.0}
```
## Limitations and bias
This metric works only with datasets that have the same format as the [SuperGLUE dataset](https://huggingface.co/datasets/super_glue).
The dataset also includes Winogender, a subset of the dataset that is designed to measure gender bias in coreference resolution systems. However, as noted in the SuperGLUE paper, this subset has its limitations: *"It offers only positive predictive value: A poor bias score is clear evidence that a model exhibits gender bias, but a good score does not mean that the model is unbiased.[...] Also, Winogender does not cover all forms of social bias, or even all forms of gender. For instance, the version of the data used here offers no coverage of gender-neutral they or non-binary pronouns."*
## Citation
```bibtex
@article{wang2019superglue,
title={Super{GLUE}: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
```
## Further References
- [SuperGLUE benchmark homepage](https://super.gluebenchmark.com/)
| datasets/metrics/super_glue/README.md/0 | {
"file_path": "datasets/metrics/super_glue/README.md",
"repo_id": "datasets",
"token_count": 1767
} | 74 |
# Lint as: python3
"""HuggingFace/Datasets is an open library of datasets.
Note:
VERSION needs to be formatted following the MAJOR.MINOR.PATCH convention
(we need to follow this convention to be able to retrieve versioned scripts)
Simple check list for release from AllenNLP repo: https://github.com/allenai/allennlp/blob/master/setup.py
Steps to make a release:
0. Prerequisites:
- Dependencies:
- twine: `pip install twine`
- Create an account in (and join the 'datasets' project):
- PyPI: https://pypi.org/
- Test PyPI: https://test.pypi.org/
- Don't break `transformers`: run the `transformers` CI using the `main` branch and make sure it's green.
- In `transformers`, use `datasets @ git+https://github.com/huggingface/datasets@main#egg=datasets`
Add a step to install `datasets@main` after `save_cache` in .circleci/create_circleci_config.py:
```
steps.append({"run": {"name": "Install `datasets@main`", "command": 'pip uninstall datasets -y && pip install "datasets @ git+https://github.com/huggingface/datasets@main#egg=datasets"'}})
```
- and then run the CI
1. Create the release branch from main branch:
```
git checkout main
git pull upstream main
git checkout -b release-VERSION
```
2. Change the version to the release VERSION in:
- __init__.py
- setup.py
3. Commit these changes, push and create a Pull Request:
```
git add -u
git commit -m "Release: VERSION"
git push upstream release-VERSION
```
- Go to: https://github.com/huggingface/datasets/pull/new/release-VERSION
- Create pull request
4. From your local release branch, build both the sources and the wheel. Do not change anything in setup.py between
creating the wheel and the source distribution (obviously).
- First, delete any building directories that may exist from previous builds:
- build
- dist
- From the top level directory, build the wheel and the sources:
```
python setup.py bdist_wheel
python setup.py sdist
```
- You should now have a /dist directory with both .whl and .tar.gz source versions.
5. Check that everything looks correct by uploading the package to the test PyPI server:
```
twine upload dist/* -r testpypi
```
Check that you can install it in a virtualenv/notebook by running:
```
pip install huggingface-hub fsspec aiohttp pyarrow-hotfix
pip install -U tqdm
pip install -i https://testpypi.python.org/pypi datasets
```
6. Upload the final version to the actual PyPI:
```
twine upload dist/* -r pypi
```
7. Make the release on GitHub once everything is looking hunky-dory:
- Merge the release Pull Request
- Create a new release: https://github.com/huggingface/datasets/releases/new
- Choose a tag: Introduce the new VERSION as tag, that will be created when you publish the release
- Create new tag VERSION on publish
- Release title: Introduce the new VERSION as well
- Describe the release
- Use "Generate release notes" button for automatic generation
- Publish release
8. Set the dev version
- Create the dev-version branch from the main branch:
```
git checkout main
git pull upstream main
git branch -D dev-version
git checkout -b dev-version
```
- Change the version to X.X.X+1.dev0 (e.g. VERSION=1.18.3 -> 1.18.4.dev0) in:
- __init__.py
- setup.py
- Commit these changes, push and create a Pull Request:
```
git add -u
git commit -m "Set dev version"
git push upstream dev-version
```
- Go to: https://github.com/huggingface/datasets/pull/new/dev-version
- Create pull request
- Merge the dev version Pull Request
"""
from setuptools import find_packages, setup
REQUIRED_PKGS = [
# For file locking
"filelock",
# We use numpy>=1.17 to have np.random.Generator (Dataset shuffling)
"numpy>=1.17",
# Backend and serialization.
# Minimum 12.0.0 to be able to concatenate extension arrays
"pyarrow>=12.0.0",
# As long as we allow pyarrow < 14.0.1, to fix vulnerability CVE-2023-47248
"pyarrow-hotfix",
# For smart caching dataset processing
"dill>=0.3.0,<0.3.9", # tmp pin until dill has official support for determinism see https://github.com/uqfoundation/dill/issues/19
# For performance gains with apache arrow
"pandas",
# for downloading datasets over HTTPS
"requests>=2.19.0",
# progress bars in download and scripts
"tqdm>=4.62.1",
# for fast hashing
"xxhash",
# for better multiprocessing
"multiprocess",
# to save datasets locally or on any filesystem
# minimum 2023.1.0 to support protocol=kwargs in fsspec's `open`, `get_fs_token_paths`, etc.: see https://github.com/fsspec/filesystem_spec/pull/1143
"fsspec[http]>=2023.1.0,<=2024.2.0",
# for data streaming via http
"aiohttp",
# To get datasets from the Datasets Hub on huggingface.co
"huggingface-hub>=0.21.2",
# Utilities from PyPA to e.g., compare versions
"packaging",
# To parse YAML metadata from dataset cards
"pyyaml>=5.1",
]
AUDIO_REQUIRE = [
"soundfile>=0.12.1",
"librosa",
]
VISION_REQUIRE = [
"Pillow>=6.2.1",
]
BENCHMARKS_REQUIRE = [
"tensorflow==2.12.0",
"torch==2.0.1",
"transformers==4.30.1",
]
TESTS_REQUIRE = [
# test dependencies
"absl-py",
"joblib<1.3.0", # joblibspark doesn't support recent joblib versions
"joblibspark",
"pytest",
"pytest-datadir",
"pytest-xdist",
# optional dependencies
"apache-beam>=2.26.0; sys_platform != 'win32' and python_version<'3.10'", # doesn't support recent dill versions for recent python versions and on windows requires pyarrow<12.0.0
"elasticsearch<8.0.0", # 8.0 asks users to provide hosts or cloud_id when instantiating ElasticSearch()
"faiss-cpu>=1.6.4",
"jax>=0.3.14; sys_platform != 'win32'",
"jaxlib>=0.3.14; sys_platform != 'win32'",
"lz4",
"pyspark>=3.4", # https://issues.apache.org/jira/browse/SPARK-40991 fixed in 3.4.0
"py7zr",
"rarfile>=4.0",
"sqlalchemy",
"s3fs>=2021.11.1", # aligned with fsspec[http]>=2021.11.1; test only on python 3.7 for now
"tensorflow>=2.3,!=2.6.0,!=2.6.1; sys_platform != 'darwin' or platform_machine != 'arm64'",
"tensorflow-macos; sys_platform == 'darwin' and platform_machine == 'arm64'",
"tiktoken",
"torch>=2.0.0",
"soundfile>=0.12.1",
"transformers",
"typing-extensions>=4.6.1", # due to conflict between apache-beam and pydantic
"zstandard",
"polars[timezone]>=0.20.0",
]
METRICS_TESTS_REQUIRE = [
# metrics dependencies
"accelerate", # for frugalscore (calls transformers' Trainer)
"bert_score>=0.3.6",
"jiwer",
"langdetect",
"mauve-text",
"nltk",
"rouge_score",
"sacrebleu",
"sacremoses",
"scikit-learn",
"scipy",
"sentencepiece", # for bleurt
"seqeval",
"spacy>=3.0.0",
"tldextract",
# to speed up pip backtracking
"toml>=0.10.1",
"typer<0.5.0", # pinned to work with Spacy==3.4.3 on Windows: see https://github.com/tiangolo/typer/issues/427
"requests_file>=1.5.1",
"tldextract>=3.1.0",
"texttable>=1.6.3",
"Werkzeug>=1.0.1",
"six~=1.15.0",
]
TESTS_REQUIRE.extend(VISION_REQUIRE)
TESTS_REQUIRE.extend(AUDIO_REQUIRE)
QUALITY_REQUIRE = ["ruff>=0.3.0"]
DOCS_REQUIRE = [
# Might need to add doc-builder and some specific deps in the future
"s3fs",
# Following dependencies are required for the Python reference to be built properly
"transformers",
"torch",
"tensorflow>=2.2.0,!=2.6.0,!=2.6.1; sys_platform != 'darwin' or platform_machine != 'arm64'",
"tensorflow-macos; sys_platform == 'darwin' and platform_machine == 'arm64'",
]
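# Each key below is exposed as a pip extra, e.g. `pip install datasets[vision]` installs VISION_REQUIRE on top of the core dependencies.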
EXTRAS_REQUIRE = {
"audio": AUDIO_REQUIRE,
"vision": VISION_REQUIRE,
"apache-beam": ["apache-beam>=2.26.0"],
"tensorflow": [
"tensorflow>=2.2.0,!=2.6.0,!=2.6.1; sys_platform != 'darwin' or platform_machine != 'arm64'",
"tensorflow-macos; sys_platform == 'darwin' and platform_machine == 'arm64'",
],
"tensorflow_gpu": ["tensorflow-gpu>=2.2.0,!=2.6.0,!=2.6.1"],
"torch": ["torch"],
"jax": ["jax>=0.3.14", "jaxlib>=0.3.14"],
"s3": ["s3fs"],
"streaming": [], # for backward compatibility
"dev": TESTS_REQUIRE + QUALITY_REQUIRE + DOCS_REQUIRE,
"tests": TESTS_REQUIRE,
"metrics-tests": METRICS_TESTS_REQUIRE,
"quality": QUALITY_REQUIRE,
"benchmarks": BENCHMARKS_REQUIRE,
"docs": DOCS_REQUIRE,
}
setup(
name="datasets",
version="2.18.1.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
description="HuggingFace community-driven open-source library of datasets",
long_description=open("README.md", encoding="utf-8").read(),
long_description_content_type="text/markdown",
author="HuggingFace Inc.",
author_email="[email protected]",
url="https://github.com/huggingface/datasets",
download_url="https://github.com/huggingface/datasets/tags",
license="Apache 2.0",
package_dir={"": "src"},
packages=find_packages("src"),
package_data={
"datasets": ["py.typed"],
"datasets.utils.resources": ["*.json", "*.yaml", "*.tsv"],
},
entry_points={"console_scripts": ["datasets-cli=datasets.commands.datasets_cli:main"]},
python_requires=">=3.8.0",
install_requires=REQUIRED_PKGS,
extras_require=EXTRAS_REQUIRE,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="datasets machine learning datasets metrics",
zip_safe=False, # Required for mypy to find the py.typed file
)
| datasets/setup.py/0 | {
"file_path": "datasets/setup.py",
"repo_id": "datasets",
"token_count": 4180
} | 75 |
import contextlib
import copy
import fnmatch
import json
import math
import posixpath
import re
import warnings
from io import BytesIO
from pathlib import Path
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
import fsspec
import numpy as np
from fsspec.core import url_to_fs
from huggingface_hub import (
CommitInfo,
CommitOperationAdd,
CommitOperationDelete,
DatasetCard,
DatasetCardData,
HfApi,
)
from . import config
from .arrow_dataset import PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED, Dataset
from .features import Features
from .features.features import FeatureType
from .info import DatasetInfo, DatasetInfosDict
from .naming import _split_re
from .splits import NamedSplit, Split, SplitDict, SplitInfo
from .table import Table
from .tasks import TaskTemplate
from .utils import logging
from .utils.deprecation_utils import deprecated
from .utils.doc_utils import is_documented_by
from .utils.hub import list_files_info
from .utils.metadata import MetadataConfigs
from .utils.py_utils import asdict, glob_pattern_to_regex, string_to_dict
from .utils.typing import PathLike
logger = logging.get_logger(__name__)
class DatasetDict(dict):
"""A dictionary (dict of str: datasets.Dataset) with dataset transforms methods (map, filter, etc.)"""
def _check_values_type(self):
for dataset in self.values():
if not isinstance(dataset, Dataset):
raise TypeError(f"Values in `DatasetDict` should be of type `Dataset` but got type '{type(dataset)}'")
def _check_values_features(self):
items = list(self.items())
for item_a, item_b in zip(items[:-1], items[1:]):
if item_a[1].features != item_b[1].features:
raise ValueError(
f"All datasets in `DatasetDict` should have the same features but features for '{item_a[0]}' and '{item_b[0]}' don't match: {item_a[1].features} != {item_b[1].features}"
)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# Here `del` is used to del the pyarrow tables. This properly closes the files used for memory mapped tables
for dataset in self.values():
if hasattr(dataset, "_data"):
del dataset._data
if hasattr(dataset, "_indices"):
del dataset._indices
def __getitem__(self, k) -> Dataset:
if isinstance(k, (str, NamedSplit)) or len(self) == 0:
return super().__getitem__(k)
else:
available_suggested_splits = [
split for split in (Split.TRAIN, Split.TEST, Split.VALIDATION) if split in self
]
suggested_split = available_suggested_splits[0] if available_suggested_splits else list(self)[0]
raise KeyError(
f"Invalid key: {k}. Please first select a split. For example: "
f"`my_dataset_dictionary['{suggested_split}'][{k}]`. "
f"Available splits: {sorted(self)}"
)
@property
def data(self) -> Dict[str, Table]:
"""The Apache Arrow tables backing each split.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.data
```
"""
self._check_values_type()
return {k: dataset.data for k, dataset in self.items()}
@property
def cache_files(self) -> Dict[str, Dict]:
"""The cache files containing the Apache Arrow table backing each split.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.cache_files
{'test': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-test.arrow'}],
'train': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-train.arrow'}],
'validation': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-validation.arrow'}]}
```
"""
self._check_values_type()
return {k: dataset.cache_files for k, dataset in self.items()}
@property
def num_columns(self) -> Dict[str, int]:
"""Number of columns in each split of the dataset.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.num_columns
{'test': 2, 'train': 2, 'validation': 2}
```
"""
self._check_values_type()
return {k: dataset.num_columns for k, dataset in self.items()}
@property
def num_rows(self) -> Dict[str, int]:
"""Number of rows in each split of the dataset (same as :func:`datasets.Dataset.__len__`).
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.num_rows
{'test': 1066, 'train': 8530, 'validation': 1066}
```
"""
self._check_values_type()
return {k: dataset.num_rows for k, dataset in self.items()}
@property
def column_names(self) -> Dict[str, List[str]]:
"""Names of the columns in each split of the dataset.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.column_names
{'test': ['text', 'label'],
'train': ['text', 'label'],
'validation': ['text', 'label']}
```
"""
self._check_values_type()
return {k: dataset.column_names for k, dataset in self.items()}
@property
def shape(self) -> Dict[str, Tuple[int]]:
"""Shape of each split of the dataset (number of columns, number of rows).
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.shape
{'test': (1066, 2), 'train': (8530, 2), 'validation': (1066, 2)}
```
"""
self._check_values_type()
return {k: dataset.shape for k, dataset in self.items()}
def flatten(self, max_depth=16) -> "DatasetDict":
"""Flatten the Apache Arrow Table of each split (nested features are flatten).
Each column with a struct type is flattened into one column per struct field.
Other columns are left unchanged.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("squad")
>>> ds["train"].features
{'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None),
'context': Value(dtype='string', id=None),
'id': Value(dtype='string', id=None),
'question': Value(dtype='string', id=None),
'title': Value(dtype='string', id=None)}
>>> ds.flatten()
DatasetDict({
train: Dataset({
features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'],
num_rows: 87599
})
validation: Dataset({
features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'],
num_rows: 10570
})
})
```
"""
self._check_values_type()
return DatasetDict({k: dataset.flatten(max_depth=max_depth) for k, dataset in self.items()})
def unique(self, column: str) -> Dict[str, List]:
"""Return a list of the unique elements in a column for each split.
This is implemented in the low-level backend and is, as such, very fast.
Args:
column (`str`):
column name (list all the column names with [`~datasets.Dataset.column_names`])
Returns:
Dict[`str`, `list`]: Dictionary of unique elements in the given column.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.unique("label")
{'test': [1, 0], 'train': [1, 0], 'validation': [1, 0]}
```
"""
self._check_values_type()
return {k: dataset.unique(column) for k, dataset in self.items()}
def cleanup_cache_files(self) -> Dict[str, int]:
"""Clean up all cache files in the dataset cache directory, excepted the currently used cache file if there is one.
Be careful when running this command that no other process is currently using other cache files.
Return:
`Dict` with the number of removed files for each split
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.cleanup_cache_files()
{'test': 0, 'train': 0, 'validation': 0}
```
"""
self._check_values_type()
return {k: dataset.cleanup_cache_files() for k, dataset in self.items()}
def __repr__(self):
repr = "\n".join([f"{k}: {v}" for k, v in self.items()])
repr = re.sub(r"^", " " * 4, repr, 0, re.M)
return f"DatasetDict({{\n{repr}\n}})"
def cast(self, features: Features) -> "DatasetDict":
"""
Cast the dataset to a new set of features.
The transformation is applied to all the datasets of the dataset dictionary.
You can also change the feature types using [`Dataset.map`] with `features`, but `cast`
is in-place (doesn't copy the data to a new dataset) and is thus faster.
Args:
features ([`Features`]):
New features to cast the dataset to.
The name and order of the fields in the features must match the current column names.
The type of the data must also be convertible from one type to the other.
For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`~Dataset.map`] to update the Dataset.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds["train"].features
{'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
'text': Value(dtype='string', id=None)}
>>> new_features = ds["train"].features.copy()
>>> new_features['label'] = ClassLabel(names=['bad', 'good'])
>>> new_features['text'] = Value('large_string')
>>> ds = ds.cast(new_features)
>>> ds["train"].features
{'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
'text': Value(dtype='large_string', id=None)}
```
"""
self._check_values_type()
return DatasetDict({k: dataset.cast(features=features) for k, dataset in self.items()})
def cast_column(self, column: str, feature) -> "DatasetDict":
"""Cast column to feature for decoding.
Args:
column (`str`):
Column name.
feature ([`Feature`]):
Target feature.
Returns:
[`DatasetDict`]
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds["train"].features
{'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
'text': Value(dtype='string', id=None)}
>>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good']))
>>> ds["train"].features
{'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
'text': Value(dtype='string', id=None)}
```
"""
self._check_values_type()
return DatasetDict({k: dataset.cast_column(column=column, feature=feature) for k, dataset in self.items()})
def remove_columns(self, column_names: Union[str, List[str]]) -> "DatasetDict":
"""
Remove one or several column(s) from each split in the dataset
and the features associated to the column(s).
The transformation is applied to all the splits of the dataset dictionary.
You can also remove a column using [`Dataset.map`] with `remove_columns` but the present method
is in-place (doesn't copy the data to a new dataset) and is thus faster.
Args:
column_names (`Union[str, List[str]]`):
Name of the column(s) to remove.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.remove_columns("label")
DatasetDict({
train: Dataset({
features: ['text'],
num_rows: 8530
})
validation: Dataset({
features: ['text'],
num_rows: 1066
})
test: Dataset({
features: ['text'],
num_rows: 1066
})
})
```
"""
self._check_values_type()
return DatasetDict({k: dataset.remove_columns(column_names=column_names) for k, dataset in self.items()})
def rename_column(self, original_column_name: str, new_column_name: str) -> "DatasetDict":
"""
Rename a column in the dataset and move the features associated to the original column under the new column name.
The transformation is applied to all the datasets of the dataset dictionary.
You can also rename a column by using [`~Dataset.map`] to copy it under the new name and `remove_columns` to drop the original, but the present method:
- takes care of moving the original features under the new column name.
- doesn't copy the data to a new dataset and is thus much faster.
Args:
original_column_name (`str`):
Name of the column to rename.
new_column_name (`str`):
New name for the column.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.rename_column("label", "label_new")
DatasetDict({
train: Dataset({
features: ['text', 'label_new'],
num_rows: 8530
})
validation: Dataset({
features: ['text', 'label_new'],
num_rows: 1066
})
test: Dataset({
features: ['text', 'label_new'],
num_rows: 1066
})
})
```
"""
self._check_values_type()
return DatasetDict(
{
k: dataset.rename_column(original_column_name=original_column_name, new_column_name=new_column_name)
for k, dataset in self.items()
}
)
def rename_columns(self, column_mapping: Dict[str, str]) -> "DatasetDict":
"""
Rename several columns in the dataset, and move the features associated to the original columns under
the new column names.
The transformation is applied to all the datasets of the dataset dictionary.
Args:
column_mapping (`Dict[str, str]`):
A mapping of columns to rename to their new names.
Returns:
[`DatasetDict`]: A copy of the dataset with renamed columns.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.rename_columns({'text': 'text_new', 'label': 'label_new'})
DatasetDict({
train: Dataset({
features: ['text_new', 'label_new'],
num_rows: 8530
})
validation: Dataset({
features: ['text_new', 'label_new'],
num_rows: 1066
})
test: Dataset({
features: ['text_new', 'label_new'],
num_rows: 1066
})
})
```
"""
self._check_values_type()
return DatasetDict({k: dataset.rename_columns(column_mapping=column_mapping) for k, dataset in self.items()})
def select_columns(self, column_names: Union[str, List[str]]) -> "DatasetDict":
"""Select one or several column(s) from each split in the dataset and
the features associated to the column(s).
The transformation is applied to all the splits of the dataset
dictionary.
Args:
column_names (`Union[str, List[str]]`):
Name of the column(s) to keep.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.select_columns("text")
DatasetDict({
train: Dataset({
features: ['text'],
num_rows: 8530
})
validation: Dataset({
features: ['text'],
num_rows: 1066
})
test: Dataset({
features: ['text'],
num_rows: 1066
})
})
```
"""
self._check_values_type()
return DatasetDict({k: dataset.select_columns(column_names=column_names) for k, dataset in self.items()})
def class_encode_column(self, column: str, include_nulls: bool = False) -> "DatasetDict":
"""Casts the given column as [`~datasets.features.ClassLabel`] and updates the tables.
Args:
column (`str`):
The name of the column to cast.
include_nulls (`bool`, defaults to `False`):
Whether to include null values in the class labels. If `True`, the null values will be encoded as the `"None"` class label.
<Added version="1.14.2"/>
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("boolq")
>>> ds["train"].features
{'answer': Value(dtype='bool', id=None),
'passage': Value(dtype='string', id=None),
'question': Value(dtype='string', id=None)}
>>> ds = ds.class_encode_column("answer")
>>> ds["train"].features
{'answer': ClassLabel(num_classes=2, names=['False', 'True'], id=None),
'passage': Value(dtype='string', id=None),
'question': Value(dtype='string', id=None)}
```
"""
self._check_values_type()
return DatasetDict(
{k: dataset.class_encode_column(column=column, include_nulls=include_nulls) for k, dataset in self.items()}
)
@contextlib.contextmanager
def formatted_as(
self,
type: Optional[str] = None,
columns: Optional[List] = None,
output_all_columns: bool = False,
**format_kwargs,
):
"""To be used in a `with` statement. Set `__getitem__` return format (type and columns).
The transformation is applied to all the datasets of the dataset dictionary.
Args:
type (`str`, *optional*):
Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
`None` means `__getitem__` returns python objects (default).
columns (`List[str]`, *optional*):
Columns to format in the output.
`None` means `__getitem__` returns all columns (default).
output_all_columns (`bool`, defaults to False):
Keep un-formatted columns as well in the output (as python objects).
**format_kwargs (additional keyword arguments):
Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
"""
self._check_values_type()
old_format_type = {k: dataset._format_type for k, dataset in self.items()}
old_format_kwargs = {k: dataset._format_kwargs for k, dataset in self.items()}
old_format_columns = {k: dataset._format_columns for k, dataset in self.items()}
old_output_all_columns = {k: dataset._output_all_columns for k, dataset in self.items()}
try:
self.set_format(type, columns, output_all_columns, **format_kwargs)
yield
finally:
for k, dataset in self.items():
dataset.set_format(
old_format_type[k], old_format_columns[k], old_output_all_columns[k], **old_format_kwargs[k]
)
def set_format(
self,
type: Optional[str] = None,
columns: Optional[List] = None,
output_all_columns: bool = False,
**format_kwargs,
):
"""Set `__getitem__` return format (type and columns).
The format is set for every dataset in the dataset dictionary.
Args:
type (`str`, *optional*):
Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
`None` means `__getitem__` returns python objects (default).
columns (`List[str]`, *optional*):
Columns to format in the output.
`None` means `__getitem__` returns all columns (default).
output_all_columns (`bool`, defaults to False):
Keep un-formatted columns as well in the output (as python objects),
**format_kwargs (additional keyword arguments):
Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
It is possible to call `map` after calling `set_format`. Since `map` may add new columns, then the list of formatted columns
gets updated. In this case, if you apply `map` on a dataset to add a new column, then this column will be formatted:
`new formatted columns = (all columns - previously unformatted columns)`
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> ds = ds.map(lambda x: tokenizer(x["text"], truncation=True, padding=True), batched=True)
>>> ds.set_format(type="numpy", columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
>>> ds["train"].format
{'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
'format_kwargs': {},
'output_all_columns': False,
'type': 'numpy'}
```
"""
self._check_values_type()
for dataset in self.values():
dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs)
def reset_format(self):
"""Reset `__getitem__` return format to python objects and all columns.
The transformation is applied to all the datasets of the dataset dictionary.
Same as `self.set_format()`
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("rotten_tomatoes")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> ds = ds.map(lambda x: tokenizer(x["text"], truncation=True, padding=True), batched=True)
>>> ds.set_format(type="numpy", columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
>>> ds["train"].format
{'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
'format_kwargs': {},
'output_all_columns': False,
'type': 'numpy'}
>>> ds.reset_format()
>>> ds["train"].format
{'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
'format_kwargs': {},
'output_all_columns': False,
'type': None}
```
"""
self._check_values_type()
for dataset in self.values():
dataset.set_format()
def set_transform(
self,
transform: Optional[Callable],
columns: Optional[List] = None,
output_all_columns: bool = False,
):
"""Set ``__getitem__`` return format using this transform. The transform is applied on-the-fly on batches when ``__getitem__`` is called.
The transform is set for every dataset in the dataset dictionary
As :func:`datasets.Dataset.set_format`, this can be reset using :func:`datasets.Dataset.reset_format`
Args:
transform (`Callable`, optional): user-defined formatting transform, replaces the format defined by :func:`datasets.Dataset.set_format`
A formatting function is a callable that takes a batch (as a dict) as input and returns a batch.
This function is applied right before returning the objects in ``__getitem__``.
columns (`List[str]`, optional): columns to format in the output
If specified, then the input batch of the transform only contains those columns.
output_all_columns (`bool`, defaults to `False`): keep un-formatted columns as well in the output (as python objects)
If set to True, then the other un-formatted columns are kept with the output of the transform.
"""
self._check_values_type()
for dataset in self.values():
dataset.set_format("custom", columns=columns, output_all_columns=output_all_columns, transform=transform)
def with_format(
self,
type: Optional[str] = None,
columns: Optional[List] = None,
output_all_columns: bool = False,
**format_kwargs,
) -> "DatasetDict":
"""Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly.
The format `type` (for example "numpy") is used to format batches when using `__getitem__`.
The format is set for every dataset in the dataset dictionary.
It's also possible to use custom transforms for formatting using [`~datasets.Dataset.with_transform`].
Contrary to [`~datasets.DatasetDict.set_format`], `with_format` returns a new [`DatasetDict`] object with new [`Dataset`] objects.
Args:
type (`str`, *optional*):
Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
`None` means `__getitem__` returns python objects (default).
columns (`List[str]`, *optional*):
Columns to format in the output.
`None` means `__getitem__` returns all columns (default).
output_all_columns (`bool`, defaults to `False`):
Keep un-formatted columns as well in the output (as python objects).
**format_kwargs (additional keyword arguments):
Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("rotten_tomatoes")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True)
>>> ds["train"].format
{'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
'format_kwargs': {},
'output_all_columns': False,
'type': None}
>>> ds = ds.with_format(type='tensorflow', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
>>> ds["train"].format
{'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
'format_kwargs': {},
'output_all_columns': False,
'type': 'tensorflow'}
```
"""
dataset = copy.deepcopy(self)
dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs)
return dataset
def with_transform(
self,
transform: Optional[Callable],
columns: Optional[List] = None,
output_all_columns: bool = False,
) -> "DatasetDict":
"""Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called.
The transform is set for every dataset in the dataset dictionary
As [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`].
Contrary to [`~datasets.DatasetDict.set_transform`], `with_transform` returns a new [`DatasetDict`] object with new [`Dataset`] objects.
Args:
transform (`Callable`, *optional*):
User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`].
A formatting function is a callable that takes a batch (as a dict) as input and returns a batch.
This function is applied right before returning the objects in `__getitem__`.
columns (`List[str]`, *optional*):
Columns to format in the output.
If specified, then the input batch of the transform only contains those columns.
output_all_columns (`bool`, defaults to False):
Keep un-formatted columns as well in the output (as python objects).
If set to `True`, then the other un-formatted columns are kept with the output of the transform.
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("rotten_tomatoes")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> def encode(example):
... return tokenizer(example['text'], truncation=True, padding=True, return_tensors="pt")
>>> ds = ds.with_transform(encode)
>>> ds["train"][0]
{'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1]),
'input_ids': tensor([ 101, 1103, 2067, 1110, 17348, 1106, 1129, 1103, 6880, 1432,
112, 188, 1207, 107, 14255, 1389, 107, 1105, 1115, 1119,
112, 188, 1280, 1106, 1294, 170, 24194, 1256, 3407, 1190,
170, 11791, 5253, 188, 1732, 7200, 10947, 12606, 2895, 117,
179, 7766, 118, 172, 15554, 1181, 3498, 6961, 3263, 1137,
188, 1566, 7912, 14516, 6997, 119, 102]),
'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0])}
```
"""
dataset = copy.deepcopy(self)
dataset.set_transform(transform=transform, columns=columns, output_all_columns=output_all_columns)
return dataset
def map(
self,
function: Optional[Callable] = None,
with_indices: bool = False,
with_rank: bool = False,
input_columns: Optional[Union[str, List[str]]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
drop_last_batch: bool = False,
remove_columns: Optional[Union[str, List[str]]] = None,
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
cache_file_names: Optional[Dict[str, Optional[str]]] = None,
writer_batch_size: Optional[int] = 1000,
features: Optional[Features] = None,
disable_nullable: bool = False,
fn_kwargs: Optional[dict] = None,
num_proc: Optional[int] = None,
desc: Optional[str] = None,
) -> "DatasetDict":
"""Apply a function to all the elements in the table (individually or in batches)
and update the table (if the function does update examples).
The transformation is applied to all the datasets of the dataset dictionary.
Args:
function (`callable`): with one of the following signature:
- `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False`
- `function(example: Dict[str, Any], indices: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True`
- `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False`
- `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True`
For advanced usage, the function can also return a `pyarrow.Table`.
Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
with_indices (`bool`, defaults to `False`):
Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
with_rank (`bool`, defaults to `False`):
Provide process rank to `function`. Note that in this case the
signature of `function` should be `def function(example[, idx], rank): ...`.
input_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
The columns to be passed into `function` as
positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
batched (`bool`, defaults to `False`):
Provide batch of examples to `function`.
batch_size (`int`, *optional*, defaults to `1000`):
Number of examples per batch provided to `function` if `batched=True`.
If `batch_size <= 0` or `batch_size is None`, the full dataset is provided as a single batch to `function`.
drop_last_batch (`bool`, defaults to `False`):
Whether a last batch smaller than the batch_size should be
dropped instead of being processed by the function.
remove_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
Remove a selection of columns while doing the mapping.
Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
columns with names in `remove_columns`, these columns will be kept.
keep_in_memory (`bool`, defaults to `False`):
Keep the dataset in memory instead of writing it to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the current computation from `function`
can be identified, use it instead of recomputing.
cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`):
Provide the name of a path for the cache file. It is used to store the
results of the computation instead of the automatically generated cache file name.
You have to provide one `cache_file_name` per dataset in the dataset dictionary.
writer_batch_size (`int`, default `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
features (`[datasets.Features]`, *optional*, defaults to `None`):
Use a specific [`Features`] to store the cache file
instead of the automatically generated one.
disable_nullable (`bool`, defaults to `False`):
Disallow null values in the table.
fn_kwargs (`Dict`, *optional*, defaults to `None`):
Keyword arguments to be passed to `function`
num_proc (`int`, *optional*, defaults to `None`):
Number of processes for multiprocessing. By default it doesn't
use multiprocessing.
desc (`str`, *optional*, defaults to `None`):
Meaningful description to be displayed alongside the progress bar while mapping examples.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> def add_prefix(example):
... example["text"] = "Review: " + example["text"]
... return example
>>> ds = ds.map(add_prefix)
>>> ds["train"][0:3]["text"]
['Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .',
'Review: the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .',
'Review: effective but too-tepid biopic']
# process a batch of examples
>>> ds = ds.map(lambda example: tokenizer(example["text"]), batched=True)
# set number of processors
>>> ds = ds.map(add_prefix, num_proc=4)
```
"""
self._check_values_type()
if cache_file_names is None:
cache_file_names = {k: None for k in self}
return DatasetDict(
{
k: dataset.map(
function=function,
with_indices=with_indices,
with_rank=with_rank,
input_columns=input_columns,
batched=batched,
batch_size=batch_size,
drop_last_batch=drop_last_batch,
remove_columns=remove_columns,
keep_in_memory=keep_in_memory,
load_from_cache_file=load_from_cache_file,
cache_file_name=cache_file_names[k],
writer_batch_size=writer_batch_size,
features=features,
disable_nullable=disable_nullable,
fn_kwargs=fn_kwargs,
num_proc=num_proc,
desc=desc,
)
for k, dataset in self.items()
}
)
def filter(
self,
function: Optional[Callable] = None,
with_indices: bool = False,
with_rank: bool = False,
input_columns: Optional[Union[str, List[str]]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
cache_file_names: Optional[Dict[str, Optional[str]]] = None,
writer_batch_size: Optional[int] = 1000,
fn_kwargs: Optional[dict] = None,
num_proc: Optional[int] = None,
desc: Optional[str] = None,
) -> "DatasetDict":
"""Apply a filter function to all the elements in the table in batches
and update the table so that the dataset only includes examples according to the filter function.
The transformation is applied to all the datasets of the dataset dictionary.
Args:
function (`Callable`): Callable with one of the following signatures:
- `function(example: Dict[str, Any]) -> bool` if `batched=False` and `with_indices=False` and `with_rank=False`
- `function(example: Dict[str, Any], *extra_args) -> bool` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
- `function(batch: Dict[str, List]) -> List[bool]` if `batched=True` and `with_indices=False` and `with_rank=False`
- `function(batch: Dict[str, List], *extra_args) -> List[bool]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
If no function is provided, defaults to an always `True` function: `lambda x: True`.
with_indices (`bool`, defaults to `False`):
Provide example indices to `function`. Note that in this case the
signature of `function` should be `def function(example, idx[, rank]): ...`.
with_rank (`bool`, defaults to `False`):
Provide process rank to `function`. Note that in this case the
signature of `function` should be `def function(example[, idx], rank): ...`.
input_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
The columns to be passed into `function` as
positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
batched (`bool`, defaults to `False`):
Provide batch of examples to `function`.
batch_size (`int`, *optional*, defaults to `1000`):
Number of examples per batch provided to `function` if `batched=True`.
If `batch_size <= 0` or `batch_size is None`, provide the full dataset as a single batch to `function`.
keep_in_memory (`bool`, defaults to `False`):
Keep the dataset in memory instead of writing it to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the current computation from `function`
can be identified, use it instead of recomputing.
cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`):
Provide the name of a path for the cache file. It is used to store the
results of the computation instead of the automatically generated cache file name.
You have to provide one `cache_file_name` per dataset in the dataset dictionary.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
fn_kwargs (`Dict`, *optional*, defaults to `None`):
Keyword arguments to be passed to `function`
num_proc (`int`, *optional*, defaults to `None`):
Number of processes for multiprocessing. By default it doesn't
use multiprocessing.
desc (`str`, *optional*, defaults to `None`):
Meaningful description to be displayed alongside the progress bar while filtering examples.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.filter(lambda x: x["label"] == 1)
DatasetDict({
train: Dataset({
features: ['text', 'label'],
num_rows: 4265
})
validation: Dataset({
features: ['text', 'label'],
num_rows: 533
})
test: Dataset({
features: ['text', 'label'],
num_rows: 533
})
})
```
"""
self._check_values_type()
if cache_file_names is None:
cache_file_names = {k: None for k in self}
return DatasetDict(
{
k: dataset.filter(
function=function,
with_indices=with_indices,
with_rank=with_rank,
input_columns=input_columns,
batched=batched,
batch_size=batch_size,
keep_in_memory=keep_in_memory,
load_from_cache_file=load_from_cache_file,
cache_file_name=cache_file_names[k],
writer_batch_size=writer_batch_size,
fn_kwargs=fn_kwargs,
num_proc=num_proc,
desc=desc,
)
for k, dataset in self.items()
}
)
def flatten_indices(
self,
keep_in_memory: bool = False,
cache_file_names: Optional[Dict[str, Optional[str]]] = None,
writer_batch_size: Optional[int] = 1000,
features: Optional[Features] = None,
disable_nullable: bool = False,
num_proc: Optional[int] = None,
new_fingerprint: Optional[str] = None,
) -> "DatasetDict":
"""Create and cache a new Dataset by flattening the indices mapping.
Args:
keep_in_memory (`bool`, defaults to `False`):
Keep the dataset in memory instead of writing it to a cache file.
cache_file_names (`Dict[str, str]`, *optional*, default `None`):
Provide the name of a path for the cache file. It is used to store the
results of the computation instead of the automatically generated cache file name.
You have to provide one `cache_file_name` per dataset in the dataset dictionary.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
features (`Optional[datasets.Features]`, defaults to `None`):
Use a specific [`Features`] to store the cache file
instead of the automatically generated one.
disable_nullable (`bool`, defaults to `False`):
Disallow null values in the table.
num_proc (`int`, optional, default `None`):
Max number of processes when generating the cache. Already cached shards are loaded sequentially.
new_fingerprint (`str`, *optional*, defaults to `None`):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
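Example (illustrative sketch; the `filter` call is only there to create an indices mapping to flatten):
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds = ds.filter(lambda x: x["label"] == 1)
>>> ds = ds.flatten_indices()  # rewrite each split without its indices mapping
```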
"""
self._check_values_type()
if cache_file_names is None:
cache_file_names = {k: None for k in self}
return DatasetDict(
{
k: dataset.flatten_indices(
keep_in_memory=keep_in_memory,
cache_file_name=cache_file_names[k],
writer_batch_size=writer_batch_size,
features=features,
disable_nullable=disable_nullable,
num_proc=num_proc,
new_fingerprint=new_fingerprint,
)
for k, dataset in self.items()
}
)
def sort(
self,
column_names: Union[str, Sequence[str]],
reverse: Union[bool, Sequence[bool]] = False,
kind="deprecated",
null_placement: str = "at_end",
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
indices_cache_file_names: Optional[Dict[str, Optional[str]]] = None,
writer_batch_size: Optional[int] = 1000,
) -> "DatasetDict":
"""Create a new dataset sorted according to a single or multiple columns.
Args:
column_names (`Union[str, Sequence[str]]`):
Column name(s) to sort by.
reverse (`Union[bool, Sequence[bool]]`, defaults to `False`):
If `True`, sort by descending order rather than ascending. If a single bool is provided,
the value is applied to the sorting of all column names. Otherwise a list of bools with the
same length and order as column_names must be provided.
kind (`str`, *optional*):
Pandas algorithm for sorting selected in `{quicksort, mergesort, heapsort, stable}`.
The default is `quicksort`. Note that both `stable` and `mergesort` use timsort under the covers and, in general,
the actual implementation will vary with data type. The `mergesort` option is retained for backwards compatibility.
<Deprecated version="2.10.0">
`kind` was deprecated in version 2.10.0 and will be removed in 3.0.0.
</Deprecated>
null_placement (`str`, defaults to `at_end`):
Put `None` values at the beginning if `at_start` or `first`, or at the end if `at_end` or `last`.
keep_in_memory (`bool`, defaults to `False`):
Keep the sorted indices in memory instead of writing them to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the sorted indices
can be identified, use it instead of recomputing.
indices_cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`):
Provide the name of a path for the cache file. It is used to store the
indices mapping instead of the automatically generated cache file name.
You have to provide one `cache_file_name` per dataset in the dataset dictionary.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
A higher value gives smaller cache files, a lower value consumes less temporary memory.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset('rotten_tomatoes')
>>> ds['train']['label'][:10]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
>>> sorted_ds = ds.sort('label')
>>> sorted_ds['train']['label'][:10]
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
>>> another_sorted_ds = ds.sort(['label', 'text'], reverse=[True, False])
>>> another_sorted_ds['train']['label'][:10]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
```
"""
self._check_values_type()
if indices_cache_file_names is None:
indices_cache_file_names = {k: None for k in self}
return DatasetDict(
{
k: dataset.sort(
column_names=column_names,
reverse=reverse,
kind=kind,
null_placement=null_placement,
keep_in_memory=keep_in_memory,
load_from_cache_file=load_from_cache_file,
indices_cache_file_name=indices_cache_file_names[k],
writer_batch_size=writer_batch_size,
)
for k, dataset in self.items()
}
)
def shuffle(
self,
seeds: Optional[Union[int, Dict[str, Optional[int]]]] = None,
seed: Optional[int] = None,
generators: Optional[Dict[str, np.random.Generator]] = None,
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
indices_cache_file_names: Optional[Dict[str, Optional[str]]] = None,
writer_batch_size: Optional[int] = 1000,
) -> "DatasetDict":
"""Create a new Dataset where the rows are shuffled.
The transformation is applied to all the datasets of the dataset dictionary.
Currently shuffling uses numpy random generators.
You can either supply a NumPy BitGenerator to use, or a seed to initiate NumPy's default random generator (PCG64).
Args:
seeds (`Dict[str, int]` or `int`, *optional*):
A seed to initialize the default BitGenerator if `generator=None`.
If `None`, then fresh, unpredictable entropy will be pulled from the OS.
If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state.
You can provide one `seed` per dataset in the dataset dictionary.
seed (`int`, *optional*):
A seed to initialize the default BitGenerator if `generator=None`. Alias for seeds (a `ValueError` is raised if both are provided).
generators (`Dict[str, np.random.Generator]`, *optional*):
Numpy random Generator to use to compute the permutation of the dataset rows.
If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
You have to provide one `generator` per dataset in the dataset dictionary.
keep_in_memory (`bool`, defaults to `False`):
Keep the dataset in memory instead of writing it to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the current computation from `function`
can be identified, use it instead of recomputing.
indices_cache_file_names (`Dict[str, str]`, *optional*):
Provide the name of a path for the cache file. It is used to store the
indices mappings instead of the automatically generated cache file name.
You have to provide one `cache_file_name` per dataset in the dataset dictionary.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds["train"]["label"][:10]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
# set a seed
>>> shuffled_ds = ds.shuffle(seed=42)
>>> shuffled_ds["train"]["label"][:10]
[0, 1, 0, 1, 0, 0, 0, 0, 0, 0]
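# illustrative: a different seed per split can also be passed via `seeds`
>>> shuffled_ds = ds.shuffle(seeds={"train": 42, "validation": 123, "test": 7})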
```
"""
self._check_values_type()
if seed is not None and seeds is not None:
raise ValueError("Please specify seed or seeds, but not both")
seeds = seed if seed is not None else seeds
if seeds is None:
seeds = {k: None for k in self}
elif not isinstance(seeds, dict):
seeds = {k: seeds for k in self}
if generators is None:
generators = {k: None for k in self}
if indices_cache_file_names is None:
indices_cache_file_names = {k: None for k in self}
return DatasetDict(
{
k: dataset.shuffle(
seed=seeds[k],
generator=generators[k],
keep_in_memory=keep_in_memory,
load_from_cache_file=load_from_cache_file,
indices_cache_file_name=indices_cache_file_names[k],
writer_batch_size=writer_batch_size,
)
for k, dataset in self.items()
}
)
def save_to_disk(
self,
dataset_dict_path: PathLike,
fs="deprecated",
max_shard_size: Optional[Union[str, int]] = None,
num_shards: Optional[Dict[str, int]] = None,
num_proc: Optional[int] = None,
storage_options: Optional[dict] = None,
):
"""
Saves a dataset dict to a filesystem using `fsspec.spec.AbstractFileSystem`.
For [`Image`] and [`Audio`] data:
All the Image() and Audio() data are stored in the arrow files.
If you want to store paths or urls, please use the Value("string") type.
Args:
dataset_dict_path (`str`):
Path (e.g. `dataset/train`) or remote URI
(e.g. `s3://my-bucket/dataset/train`) of the dataset dict directory where the dataset dict will be
saved to.
fs (`fsspec.spec.AbstractFileSystem`, *optional*):
Instance of the remote filesystem where the dataset will be saved to.
<Deprecated version="2.8.0">
`fs` was deprecated in version 2.8.0 and will be removed in 3.0.0.
Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`
</Deprecated>
max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
The maximum size of the dataset shards to be saved to the filesystem. If expressed as a string, needs to be digits followed by a unit
(like `"50MB"`).
num_shards (`Dict[str, int]`, *optional*):
Number of shards to write. By default the number of shards depends on `max_shard_size` and `num_proc`.
You need to provide the number of shards for each dataset in the dataset dictionary.
Use a dictionary to define a different num_shards for each split.
<Added version="2.8.0"/>
num_proc (`int`, *optional*, default `None`):
Number of processes used when saving the dataset shards locally.
Multiprocessing is disabled by default.
<Added version="2.8.0"/>
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.8.0"/>
Example:
```python
>>> dataset_dict.save_to_disk("path/to/dataset/directory")
>>> dataset_dict.save_to_disk("path/to/dataset/directory", max_shard_size="1GB")
>>> dataset_dict.save_to_disk("path/to/dataset/directory", num_shards={"train": 1024, "test": 8})
```
"""
if fs != "deprecated":
warnings.warn(
"'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n"
"You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
FutureWarning,
)
storage_options = fs.storage_options
fs: fsspec.AbstractFileSystem
fs, _ = url_to_fs(dataset_dict_path, **(storage_options or {}))
if num_shards is None:
num_shards = {k: None for k in self}
elif not isinstance(num_shards, dict):
raise ValueError(
"Please provide one `num_shards` per dataset in the dataset dictionary, e.g. {{'train': 128, 'test': 4}}"
)
fs.makedirs(dataset_dict_path, exist_ok=True)
with fs.open(posixpath.join(dataset_dict_path, config.DATASETDICT_JSON_FILENAME), "w", encoding="utf-8") as f:
json.dump({"splits": list(self)}, f)
for k, dataset in self.items():
dataset.save_to_disk(
posixpath.join(dataset_dict_path, k),
num_shards=num_shards.get(k),
max_shard_size=max_shard_size,
num_proc=num_proc,
storage_options=storage_options,
)
@staticmethod
def load_from_disk(
dataset_dict_path: PathLike,
fs="deprecated",
keep_in_memory: Optional[bool] = None,
storage_options: Optional[dict] = None,
) -> "DatasetDict":
"""
Load a dataset that was previously saved using [`save_to_disk`] from a filesystem using `fsspec.spec.AbstractFileSystem`.
Args:
dataset_dict_path (`str`):
Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3//my-bucket/dataset/train"`)
of the dataset dict directory where the dataset dict will be loaded from.
fs (`fsspec.spec.AbstractFileSystem`, *optional*):
Instance of the remote filesystem where the dataset will be loaded from.
<Deprecated version="2.8.0">
`fs` was deprecated in version 2.8.0 and will be removed in 3.0.0.
Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`
</Deprecated>
keep_in_memory (`bool`, defaults to `None`):
Whether to copy the dataset in-memory. If `None`, the
dataset will not be copied in-memory unless explicitly enabled by setting
`datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the
[improve performance](../cache#improve-performance) section.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.8.0"/>
Returns:
[`DatasetDict`]
Example:
```py
>>> ds = load_from_disk('path/to/dataset/directory')
```
"""
if fs != "deprecated":
warnings.warn(
"'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n"
"You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
FutureWarning,
)
storage_options = fs.storage_options
fs: fsspec.AbstractFileSystem
fs, dataset_dict_path = url_to_fs(dataset_dict_path, **(storage_options or {}))
dataset_dict_json_path = posixpath.join(dataset_dict_path, config.DATASETDICT_JSON_FILENAME)
dataset_state_json_path = posixpath.join(dataset_dict_path, config.DATASET_STATE_JSON_FILENAME)
dataset_info_path = posixpath.join(dataset_dict_path, config.DATASET_INFO_FILENAME)
if not fs.isfile(dataset_dict_json_path):
if fs.isfile(dataset_info_path) and fs.isfile(dataset_state_json_path):
raise FileNotFoundError(
f"No such file: '{dataset_dict_json_path}'. Expected to load a `DatasetDict` object, but got a `Dataset`. Please use either `datasets.load_from_disk` or `Dataset.load_from_disk` instead."
)
raise FileNotFoundError(
f"No such file: '{dataset_dict_json_path}'. Expected to load a `DatasetDict` object, but provided path is not a `DatasetDict`."
)
with fs.open(dataset_dict_json_path, "r", encoding="utf-8") as f:
splits = json.load(f)["splits"]
dataset_dict = DatasetDict()
for k in splits:
dataset_dict_split_path = posixpath.join(fs.unstrip_protocol(dataset_dict_path), k)
dataset_dict[k] = Dataset.load_from_disk(
dataset_dict_split_path, keep_in_memory=keep_in_memory, storage_options=storage_options
)
return dataset_dict
@staticmethod
def from_csv(
path_or_paths: Dict[str, PathLike],
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
**kwargs,
) -> "DatasetDict":
"""Create [`DatasetDict`] from CSV file(s).
Args:
path_or_paths (`dict` of path-like):
Path(s) of the CSV file(s).
features ([`Features`], *optional*):
Dataset features.
cache_dir (str, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
**kwargs (additional keyword arguments):
Keyword arguments to be passed to [`pandas.read_csv`].
Returns:
[`DatasetDict`]
Example:
```py
>>> from datasets import DatasetDict
>>> ds = DatasetDict.from_csv({'train': 'path/to/dataset.csv'})
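# illustrative: one CSV file per split (paths are placeholders)
>>> ds = DatasetDict.from_csv({'train': 'path/to/train.csv', 'test': 'path/to/test.csv'})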
```
"""
# Dynamic import to avoid circular dependency
from .io.csv import CsvDatasetReader
return CsvDatasetReader(
path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs
).read()
@staticmethod
def from_json(
path_or_paths: Dict[str, PathLike],
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
**kwargs,
) -> "DatasetDict":
"""Create [`DatasetDict`] from JSON Lines file(s).
Args:
path_or_paths (`dict` of path-like):
Path(s) of the JSON Lines file(s).
features ([`Features`], *optional*):
Dataset features.
cache_dir (str, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
**kwargs (additional keyword arguments):
Keyword arguments to be passed to [`JsonConfig`].
Returns:
[`DatasetDict`]
Example:
```py
>>> from datasets import DatasetDict
>>> ds = DatasetDict.from_json({'train': 'path/to/dataset.json'})
```
"""
# Dynamic import to avoid circular dependency
from .io.json import JsonDatasetReader
return JsonDatasetReader(
path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs
).read()
@staticmethod
def from_parquet(
path_or_paths: Dict[str, PathLike],
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
columns: Optional[List[str]] = None,
**kwargs,
) -> "DatasetDict":
"""Create [`DatasetDict`] from Parquet file(s).
Args:
path_or_paths (`dict` of path-like):
Path(s) of the Parquet file(s).
features ([`Features`], *optional*):
Dataset features.
cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
columns (`List[str]`, *optional*):
If not `None`, only these columns will be read from the file.
A column name may be a prefix of a nested field, e.g. 'a' will select
'a.b', 'a.c', and 'a.d.e'.
**kwargs (additional keyword arguments):
Keyword arguments to be passed to [`ParquetConfig`].
Returns:
[`DatasetDict`]
Example:
```py
>>> from datasets import DatasetDict
>>> ds = DatasetDict.from_parquet({'train': 'path/to/dataset/parquet'})
```
"""
# Dynamic import to avoid circular dependency
from .io.parquet import ParquetDatasetReader
return ParquetDatasetReader(
path_or_paths,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
columns=columns,
**kwargs,
).read()
@staticmethod
def from_text(
path_or_paths: Dict[str, PathLike],
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
**kwargs,
) -> "DatasetDict":
"""Create [`DatasetDict`] from text file(s).
Args:
path_or_paths (`dict` of path-like):
Path(s) of the text file(s).
features ([`Features`], *optional*):
Dataset features.
cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
**kwargs (additional keyword arguments):
Keyword arguments to be passed to [`TextConfig`].
Returns:
[`DatasetDict`]
Example:
```py
>>> from datasets import DatasetDict
>>> ds = DatasetDict.from_text({'train': 'path/to/dataset.txt'})
```
"""
# Dynamic import to avoid circular dependency
from .io.text import TextDatasetReader
return TextDatasetReader(
path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs
).read()
@deprecated()
@is_documented_by(Dataset.prepare_for_task)
def prepare_for_task(self, task: Union[str, TaskTemplate], id: int = 0) -> "DatasetDict":
self._check_values_type()
return DatasetDict({k: dataset.prepare_for_task(task=task, id=id) for k, dataset in self.items()})
@is_documented_by(Dataset.align_labels_with_mapping)
def align_labels_with_mapping(self, label2id: Dict, label_column: str) -> "DatasetDict":
self._check_values_type()
return DatasetDict(
{
k: dataset.align_labels_with_mapping(label2id=label2id, label_column=label_column)
for k, dataset in self.items()
}
)
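# Illustrative sketch (not part of the original docstrings): `align_labels_with_mapping` reorders label ids
# to match a given `label2id` mapping, e.g. one taken from a hypothetical model config:
#   label2id = {"neg": 0, "pos": 1}
#   ds = ds.align_labels_with_mapping(label2id=label2id, label_column="label")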
def push_to_hub(
self,
repo_id,
config_name: str = "default",
set_default: Optional[bool] = None,
data_dir: Optional[str] = None,
commit_message: Optional[str] = None,
commit_description: Optional[str] = None,
private: Optional[bool] = False,
token: Optional[str] = None,
revision: Optional[str] = None,
branch="deprecated",
create_pr: Optional[bool] = False,
max_shard_size: Optional[Union[int, str]] = None,
num_shards: Optional[Dict[str, int]] = None,
embed_external_files: bool = True,
) -> CommitInfo:
"""Pushes the [`DatasetDict`] to the hub as a Parquet dataset.
The [`DatasetDict`] is pushed using HTTP requests and does not require git or git-lfs to be installed.
Each dataset split will be pushed independently. The pushed dataset will keep the original split names.
The resulting Parquet files are self-contained by default: if your dataset contains [`Image`] or [`Audio`]
data, the Parquet files will store the bytes of your images or audio files.
You can disable this by setting `embed_external_files` to False.
Args:
repo_id (`str`):
The ID of the repository to push to in the following format: `<user>/<dataset_name>` or
`<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace
of the logged-in user.
config_name (`str`):
Configuration name of a dataset. Defaults to "default".
set_default (`bool`, *optional*):
Whether to set this configuration as the default one. Otherwise, the default configuration is the one
named "default".
data_dir (`str`, *optional*):
Directory name that will contain the uploaded data files. Defaults to the `config_name` if different
from "default", else "data".
<Added version="2.17.0"/>
commit_message (`str`, *optional*):
Message to commit while pushing. Will default to `"Upload dataset"`.
commit_description (`str`, *optional*):
Description of the commit that will be created.
Additionally, description of the PR if a PR is created (`create_pr` is True).
<Added version="2.16.0"/>
private (`bool`, *optional*):
Whether the dataset repository should be set to private or not. Only affects repository creation:
a repository that already exists will not be affected by that parameter.
token (`str`, *optional*):
An optional authentication token for the Hugging Face Hub. If no token is passed, will default
to the token saved locally when logging in with `huggingface-cli login`. Will raise an error
if no token is passed and the user is not logged-in.
revision (`str`, *optional*):
Branch to push the uploaded files to. Defaults to the `"main"` branch.
<Added version="2.15.0"/>
branch (`str`, *optional*):
The git branch on which to push the dataset. This defaults to the default branch as specified
in your repository, which defaults to `"main"`.
<Deprecated version="2.15.0">
`branch` was deprecated in favor of `revision` in version 2.15.0 and will be removed in 3.0.0.
</Deprecated>
create_pr (`bool`, *optional*, defaults to `False`):
Whether to create a PR with the uploaded files or directly commit.
<Added version="2.15.0"/>
max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by a unit
(like `"500MB"` or `"1GB"`).
num_shards (`Dict[str, int]`, *optional*):
Number of shards to write. By default, the number of shards depends on `max_shard_size`.
Use a dictionary to define a different num_shards for each split.
<Added version="2.8.0"/>
embed_external_files (`bool`, defaults to `True`):
Whether to embed file bytes in the shards.
In particular, this will do the following before the push for the fields of type:
- [`Audio`] and [`Image`]: remove local path information and embed the file content in the Parquet files.
Return:
huggingface_hub.CommitInfo
Example:
```python
>>> dataset_dict.push_to_hub("<organization>/<dataset_id>")
>>> dataset_dict.push_to_hub("<organization>/<dataset_id>", private=True)
>>> dataset_dict.push_to_hub("<organization>/<dataset_id>", max_shard_size="1GB")
>>> dataset_dict.push_to_hub("<organization>/<dataset_id>", num_shards={"train": 1024, "test": 8})
```
If you want to add a new configuration (or subset) to a dataset (e.g. if the dataset has multiple tasks/versions/languages):
```python
>>> english_dataset.push_to_hub("<organization>/<dataset_id>", "en")
>>> french_dataset.push_to_hub("<organization>/<dataset_id>", "fr")
>>> # later
>>> english_dataset = load_dataset("<organization>/<dataset_id>", "en")
>>> french_dataset = load_dataset("<organization>/<dataset_id>", "fr")
```
"""
if num_shards is None:
num_shards = {k: None for k in self}
elif not isinstance(num_shards, dict):
raise ValueError(
"Please provide one `num_shards` per dataset in the dataset dictionary, e.g. {{'train': 128, 'test': 4}}"
)
if branch != "deprecated":
warnings.warn(
"'branch' was deprecated in favor of 'revision' in version 2.15.0 and will be removed in 3.0.0.\n"
f"You can remove this warning by passing 'revision={branch}' instead.",
FutureWarning,
)
revision = branch
self._check_values_type()
self._check_values_features()
total_uploaded_size = 0
total_dataset_nbytes = 0
info_to_dump: DatasetInfo = next(iter(self.values())).info.copy()
info_to_dump.config_name = config_name
info_to_dump.splits = SplitDict()
for split in self.keys():
if not re.match(_split_re, split):
raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
api = HfApi(endpoint=config.HF_ENDPOINT, token=token)
repo_url = api.create_repo(
repo_id,
token=token,
repo_type="dataset",
private=private,
exist_ok=True,
)
repo_id = repo_url.repo_id
if revision is not None:
api.create_branch(repo_id, branch=revision, token=token, repo_type="dataset", exist_ok=True)
if not data_dir:
data_dir = config_name if config_name != "default" else "data" # for backward compatibility
additions = []
for split in self.keys():
logger.info(f"Pushing split {split} to the Hub.")
# The split=key needs to be removed before merging
split_additions, uploaded_size, dataset_nbytes = self[split]._push_parquet_shards_to_hub(
repo_id,
data_dir=data_dir,
split=split,
token=token,
revision=revision,
create_pr=create_pr,
max_shard_size=max_shard_size,
num_shards=num_shards.get(split),
embed_external_files=embed_external_files,
)
additions += split_additions
total_uploaded_size += uploaded_size
total_dataset_nbytes += dataset_nbytes
info_to_dump.splits[split] = SplitInfo(str(split), num_bytes=dataset_nbytes, num_examples=len(self[split]))
info_to_dump.download_checksums = None
info_to_dump.download_size = total_uploaded_size
info_to_dump.dataset_size = total_dataset_nbytes
info_to_dump.size_in_bytes = total_uploaded_size + total_dataset_nbytes
# Check if the repo already has a README.md and/or a dataset_infos.json to update them with the new split info (size and pattern)
# and delete old split shards (if they exist)
repo_with_dataset_card, repo_with_dataset_infos = False, False
repo_splits = [] # use a list to keep the order of the splits
deletions = []
repo_files_to_add = [addition.path_in_repo for addition in additions]
for repo_file in list_files_info(api, repo_id=repo_id, revision=revision, repo_type="dataset", token=token):
if repo_file.rfilename == config.REPOCARD_FILENAME:
repo_with_dataset_card = True
elif repo_file.rfilename == config.DATASETDICT_INFOS_FILENAME:
repo_with_dataset_infos = True
elif (
repo_file.rfilename.startswith(tuple(f"{data_dir}/{split}-" for split in self.keys()))
and repo_file.rfilename not in repo_files_to_add
):
deletions.append(CommitOperationDelete(path_in_repo=repo_file.rfilename))
elif fnmatch.fnmatch(
repo_file.rfilename, PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED.replace("{split}", "*")
):
repo_split = string_to_dict(
repo_file.rfilename,
glob_pattern_to_regex(PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED),
)["split"]
if repo_split not in repo_splits:
repo_splits.append(repo_split)
# get the info from the README to update them
if repo_with_dataset_card:
dataset_card_path = api.hf_hub_download(
repo_id, config.REPOCARD_FILENAME, repo_type="dataset", revision=revision
)
dataset_card = DatasetCard.load(Path(dataset_card_path))
dataset_card_data = dataset_card.data
metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data)
# get the deprecated dataset_infos.json to update them
elif repo_with_dataset_infos:
dataset_card = None
dataset_card_data = DatasetCardData()
metadata_configs = MetadataConfigs()
else:
dataset_card = None
dataset_card_data = DatasetCardData()
metadata_configs = MetadataConfigs()
# create the metadata configs if it was uploaded with push_to_hub before metadata configs existed
if not metadata_configs and repo_splits:
default_metadata_configs_to_dump = {
"data_files": [{"split": split, "path": f"data/{split}-*"} for split in repo_splits]
}
MetadataConfigs({"default": default_metadata_configs_to_dump}).to_dataset_card_data(dataset_card_data)
metadata_config_to_dump = {
"data_files": [{"split": split, "path": f"{data_dir}/{split}-*"} for split in self.keys()],
}
if set_default and config_name != "default":
if metadata_configs:
default_config_name = metadata_configs.get_default_config_name()
if default_config_name == "default":
raise ValueError(
"There exists a configuration named 'default'. To set a different configuration as default, "
"rename the 'default' one first."
)
else:
_ = metadata_configs[default_config_name].pop("default")
metadata_config_to_dump["default"] = True
# push to the deprecated dataset_infos.json
if repo_with_dataset_infos:
dataset_infos_path = api.hf_hub_download(
repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type="dataset", revision=revision
)
with open(dataset_infos_path, encoding="utf-8") as f:
dataset_infos: dict = json.load(f)
dataset_infos[config_name] = asdict(info_to_dump)
buffer = BytesIO()
buffer.write(json.dumps(dataset_infos, indent=4).encode("utf-8"))
additions.append(
CommitOperationAdd(path_in_repo=config.DATASETDICT_INFOS_FILENAME, path_or_fileobj=buffer)
)
# push to README
DatasetInfosDict({config_name: info_to_dump}).to_dataset_card_data(dataset_card_data)
MetadataConfigs({config_name: metadata_config_to_dump}).to_dataset_card_data(dataset_card_data)
dataset_card = DatasetCard(f"---\n{dataset_card_data}\n---\n") if dataset_card is None else dataset_card
additions.append(
CommitOperationAdd(path_in_repo=config.REPOCARD_FILENAME, path_or_fileobj=str(dataset_card).encode())
)
commit_message = commit_message if commit_message is not None else "Upload dataset"
if len(additions) <= config.UPLOADS_MAX_NUMBER_PER_COMMIT:
commit_info = api.create_commit(
repo_id,
operations=additions + deletions,
commit_message=commit_message,
commit_description=commit_description,
token=token,
repo_type="dataset",
revision=revision,
create_pr=create_pr,
)
else:
logger.info(
f"Number of files to upload is larger than {config.UPLOADS_MAX_NUMBER_PER_COMMIT}. Splitting the push into multiple commits."
)
num_commits = math.ceil(len(additions) / config.UPLOADS_MAX_NUMBER_PER_COMMIT)
for i in range(0, num_commits):
operations = additions[
i * config.UPLOADS_MAX_NUMBER_PER_COMMIT : (i + 1) * config.UPLOADS_MAX_NUMBER_PER_COMMIT
] + (deletions if i == 0 else [])
commit_info = api.create_commit(
repo_id,
operations=operations,
commit_message=commit_message + f" (part {i:05d}-of-{num_commits:05d})",
commit_description=commit_description,
token=token,
repo_type="dataset",
revision=revision,
create_pr=create_pr,
)
logger.info(
f"Commit #{i+1} completed"
+ (f" (still {num_commits - i - 1} to go)" if num_commits - i - 1 else "")
+ "."
)
return commit_info
class IterableDatasetDict(dict):
def __repr__(self):
repr = "\n".join([f"{k}: {v}" for k, v in self.items()])
repr = re.sub(r"^", " " * 4, repr, 0, re.M)
return f"IterableDatasetDict({{\n{repr}\n}})"
def with_format(
self,
type: Optional[str] = None,
) -> "IterableDatasetDict":
"""
Return a dataset with the specified format.
This method only supports the "torch" format for now.
The format is set to all the datasets of the dataset dictionary.
Args:
type (`str`, *optional*, defaults to `None`):
If set to "torch", the returned dataset
will be a subclass of `torch.utils.data.IterableDataset` to be used in a `DataLoader`.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
>>> def encode(examples):
... return tokenizer(examples["text"], truncation=True, padding="max_length")
>>> ds = ds.map(encode, batched=True, remove_columns=["text"])
>>> ds = ds.with_format("torch")
```
"""
return IterableDatasetDict({k: dataset.with_format(type=type) for k, dataset in self.items()})
def map(
self,
function: Optional[Callable] = None,
with_indices: bool = False,
input_columns: Optional[Union[str, List[str]]] = None,
batched: bool = False,
batch_size: int = 1000,
drop_last_batch: bool = False,
remove_columns: Optional[Union[str, List[str]]] = None,
fn_kwargs: Optional[dict] = None,
) -> "IterableDatasetDict":
"""
Apply a function to all the examples in the iterable dataset (individually or in batches) and update them.
If your function returns a column that already exists, then it overwrites it.
The function is applied on-the-fly on the examples when iterating over the dataset.
The transformation is applied to all the datasets of the dataset dictionary.
You can specify whether the function should be batched or not with the `batched` parameter:
- If batched is `False`, then the function takes 1 example in and should return 1 example.
An example is a dictionary, e.g. `{"text": "Hello there !"}`.
- If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples.
A batch is a dictionary, e.g. a batch of 1 example is `{"text": ["Hello there !"]}`.
- If batched is `True` and `batch_size` is `n` > 1, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples.
Note that the last batch may have fewer than `n` examples.
A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`.
Args:
function (`Callable`, *optional*, defaults to `None`):
Function applied on-the-fly on the examples when you iterate on the dataset.
It must have one of the following signatures:
- `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False`
- `function(example: Dict[str, Any], idx: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True`
- `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False`
- `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True`
For advanced usage, the function can also return a `pyarrow.Table`.
Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
If no function is provided, default to identity function: `lambda x: x`.
with_indices (`bool`, defaults to `False`):
Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
input_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
The columns to be passed into `function`
as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
batched (`bool`, defaults to `False`):
Provide batch of examples to `function`.
batch_size (`int`, *optional*, defaults to `1000`):
Number of examples per batch provided to `function` if `batched=True`.
drop_last_batch (`bool`, defaults to `False`):
Whether a last batch smaller than the `batch_size` should be
dropped instead of being processed by the function.
remove_columns (`[List[str]]`, *optional*, defaults to `None`):
Remove a selection of columns while doing the mapping.
Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
columns with names in `remove_columns`, these columns will be kept.
fn_kwargs (`Dict`, *optional*, defaults to `None`):
Keyword arguments to be passed to `function`
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> def add_prefix(example):
... example["text"] = "Review: " + example["text"]
... return example
>>> ds = ds.map(add_prefix)
>>> next(iter(ds["train"]))
{'label': 1,
'text': 'Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
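# illustrative: batched mapping (assumes a `tokenizer` object was created beforehand, e.g. with transformers)
>>> ds = ds.map(lambda batch: tokenizer(batch["text"]), batched=True, batch_size=16)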
```
"""
return IterableDatasetDict(
{
k: dataset.map(
function=function,
with_indices=with_indices,
input_columns=input_columns,
batched=batched,
batch_size=batch_size,
drop_last_batch=drop_last_batch,
remove_columns=remove_columns,
fn_kwargs=fn_kwargs,
)
for k, dataset in self.items()
}
)
def filter(
self,
function: Optional[Callable] = None,
with_indices=False,
input_columns: Optional[Union[str, List[str]]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
fn_kwargs: Optional[dict] = None,
) -> "IterableDatasetDict":
"""Apply a filter function to all the elements so that the dataset only includes examples according to the filter function.
The filtering is done on-the-fly when iterating over the dataset.
The filtering is applied to all the datasets of the dataset dictionary.
Args:
function (`Callable`):
Callable with one of the following signatures:
- `function(example: Dict[str, Any]) -> bool` if `with_indices=False, batched=False`
- `function(example: Dict[str, Any], indices: int) -> bool` if `with_indices=True, batched=False`
- `function(example: Dict[str, List]) -> List[bool]` if `with_indices=False, batched=True`
- `function(example: Dict[str, List], indices: List[int]) -> List[bool]` if `with_indices=True, batched=True`
If no function is provided, defaults to an always True function: `lambda x: True`.
with_indices (`bool`, defaults to `False`):
Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
input_columns (`str` or `List[str]`, *optional*):
The columns to be passed into `function` as
positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
batched (`bool`, defaults to `False`):
Provide batch of examples to `function`
batch_size (`int`, *optional*, defaults to `1000`):
Number of examples per batch provided to `function` if `batched=True`.
fn_kwargs (`Dict`, *optional*, defaults to `None`):
Keyword arguments to be passed to `function`
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds = ds.filter(lambda x: x["label"] == 0)
>>> list(ds["train"].take(3))
[{'label': 0, 'text': 'simplistic , silly and tedious .'},
{'label': 0,
'text': "it's so laddish and juvenile , only teenage boys could possibly find it funny ."},
{'label': 0,
'text': 'exploitative and largely devoid of the depth or sophistication that would make watching such a graphic treatment of the crimes bearable .'}]
```
"""
return IterableDatasetDict(
{
k: dataset.filter(
function=function,
with_indices=with_indices,
input_columns=input_columns,
batched=batched,
batch_size=batch_size,
fn_kwargs=fn_kwargs,
)
for k, dataset in self.items()
}
)
def shuffle(
self, seed=None, generator: Optional[np.random.Generator] = None, buffer_size: int = 1000
) -> "IterableDatasetDict":
"""
Randomly shuffles the elements of this dataset.
The shuffling is applied to all the datasets of the dataset dictionary.
This dataset fills a buffer with `buffer_size` elements, then randomly samples elements from this buffer,
replacing the selected elements with new elements. For perfect shuffling, a buffer size greater than or
equal to the full size of the dataset is required.
For instance, if your dataset contains 10,000 elements but `buffer_size` is set to 1000, then `shuffle` will
initially select a random element from only the first 1000 elements in the buffer. Once an element is
selected, its space in the buffer is replaced by the next (i.e. 1,001-st) element,
maintaining the 1000 element buffer.
If the dataset is made of several shards, it also shuffles the order of the shards.
However if the order has been fixed by using [`~datasets.IterableDataset.skip`] or [`~datasets.IterableDataset.take`]
then the order of the shards is kept unchanged.
Args:
seed (`int`, *optional*, defaults to `None`):
Random seed that will be used to shuffle the dataset.
It is used to sample from the shuffle buffer and also to shuffle the data shards.
generator (`numpy.random.Generator`, *optional*):
Numpy random Generator to use to compute the permutation of the dataset rows.
If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
buffer_size (`int`, defaults to `1000`):
Size of the buffer.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> list(ds["train"].take(3))
[{'label': 1,
'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
{'label': 1,
'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
{'label': 1, 'text': 'effective but too-tepid biopic'}]
>>> ds = ds.shuffle(seed=42)
>>> list(ds["train"].take(3))
[{'label': 1,
'text': "a sports movie with action that's exciting on the field and a story you care about off it ."},
{'label': 1,
'text': 'at its best , the good girl is a refreshingly adult take on adultery . . .'},
{'label': 1,
'text': "sam jones became a very lucky filmmaker the day wilco got dropped from their record label , proving that one man's ruin may be another's fortune ."}]
```
"""
return IterableDatasetDict(
{
k: dataset.shuffle(seed=seed, generator=generator, buffer_size=buffer_size)
for k, dataset in self.items()
}
)
def rename_column(self, original_column_name: str, new_column_name: str) -> "IterableDatasetDict":
"""
Rename a column in the dataset, and move the features associated to the original column under the new column
name.
The renaming is applied to all the datasets of the dataset dictionary.
Args:
original_column_name (`str`):
Name of the column to rename.
new_column_name (`str`):
New name for the column.
Returns:
[`IterableDatasetDict`]: A copy of the dataset with a renamed column.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds = ds.rename_column("text", "movie_review")
>>> next(iter(ds["train"]))
{'label': 1,
'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
```
"""
return IterableDatasetDict(
{
k: dataset.rename_column(original_column_name=original_column_name, new_column_name=new_column_name)
for k, dataset in self.items()
}
)
def rename_columns(self, column_mapping: Dict[str, str]) -> "IterableDatasetDict":
"""
Rename several columns in the dataset, and move the features associated to the original columns under
the new column names.
The renaming is applied to all the datasets of the dataset dictionary.
Args:
column_mapping (`Dict[str, str]`):
A mapping of columns to rename to their new names.
Returns:
[`IterableDatasetDict`]: A copy of the dataset with renamed columns
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds = ds.rename_columns({"text": "movie_review", "label": "rating"})
>>> next(iter(ds["train"]))
{'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .',
'rating': 1}
```
"""
return IterableDatasetDict(
{k: dataset.rename_columns(column_mapping=column_mapping) for k, dataset in self.items()}
)
def remove_columns(self, column_names: Union[str, List[str]]) -> "IterableDatasetDict":
"""
Remove one or several column(s) in the dataset and the features associated to them.
The removal is done on-the-fly on the examples when iterating over the dataset.
The removal is applied to all the datasets of the dataset dictionary.
Args:
column_names (`Union[str, List[str]]`):
Name of the column(s) to remove.
Returns:
[`IterableDatasetDict`]: A copy of the dataset object without the columns to remove.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds = ds.remove_columns("label")
>>> next(iter(ds["train"]))
{'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
```
"""
return IterableDatasetDict({k: dataset.remove_columns(column_names) for k, dataset in self.items()})
def select_columns(self, column_names: Union[str, List[str]]) -> "IterableDatasetDict":
"""Select one or several column(s) in the dataset and the features
associated to them. The selection is done on-the-fly on the examples
when iterating over the dataset. The selection is applied to all the
datasets of the dataset dictionary.
Args:
column_names (`Union[str, List[str]]`):
Name of the column(s) to keep.
Returns:
[`IterableDatasetDict`]: A copy of the dataset object with only selected columns.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds = ds.select("text")
>>> next(iter(ds["train"]))
{'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
```
"""
return IterableDatasetDict({k: dataset.select_columns(column_names) for k, dataset in self.items()})
def cast_column(self, column: str, feature: FeatureType) -> "IterableDatasetDict":
"""Cast column to feature for decoding.
The type casting is applied to all the datasets of the dataset dictionary.
Args:
column (`str`):
Column name.
feature ([`Feature`]):
Target feature.
Returns:
[`IterableDatasetDict`]
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds["train"].features
{'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
'text': Value(dtype='string', id=None)}
>>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good']))
>>> ds["train"].features
{'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
'text': Value(dtype='string', id=None)}
```
"""
return IterableDatasetDict(
{k: dataset.cast_column(column=column, feature=feature) for k, dataset in self.items()}
)
def cast(
self,
features: Features,
) -> "IterableDatasetDict":
"""
Cast the dataset to a new set of features.
The type casting is applied to all the datasets of the dataset dictionary.
Args:
features (`Features`):
New features to cast the dataset to.
The name of the fields in the features must match the current column names.
The type of the data must also be convertible from one type to the other.
For non-trivial conversion, e.g. `string` <-> `ClassLabel`, you should use [`map`] to update the Dataset.
Returns:
[`IterableDatasetDict`]: A copy of the dataset with casted features.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds["train"].features
{'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
'text': Value(dtype='string', id=None)}
>>> new_features = ds["train"].features.copy()
>>> new_features['label'] = ClassLabel(names=['bad', 'good'])
>>> new_features['text'] = Value('large_string')
>>> ds = ds.cast(new_features)
>>> ds["train"].features
{'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
'text': Value(dtype='large_string', id=None)}
```
"""
return IterableDatasetDict({k: dataset.cast(features=features) for k, dataset in self.items()})
| datasets/src/datasets/dataset_dict.py/0 | {
"file_path": "datasets/src/datasets/dataset_dict.py",
"repo_id": "datasets",
"token_count": 47116
} | 76 |
import inspect
import os
import random
import shutil
import tempfile
import weakref
from functools import wraps
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import xxhash
from . import config
from .naming import INVALID_WINDOWS_CHARACTERS_IN_PATH
from .utils._dill import dumps
from .utils.deprecation_utils import deprecated
from .utils.logging import get_logger
if TYPE_CHECKING:
from .arrow_dataset import Dataset
logger = get_logger(__name__)
# Fingerprinting allows to have one deterministic fingerprint per dataset state.
# A dataset fingerprint is updated after each transform.
# Re-running the same transforms on a dataset in a different session results in the same fingerprint.
# This is possible thanks to a custom hashing function that works with most python objects.
# Fingerprinting is the main mechanism that enables caching.
# The caching mechanism allows to reload an existing cache file if it's already been computed.
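# A minimal illustration of the mechanism (hypothetical, for exposition only):
#
#     from datasets import load_dataset
#     ds = load_dataset("rotten_tomatoes", split="train")
#     ds2 = ds.map(lambda ex: {"len": len(ex["text"])})
#     # ds2._fingerprint is derived from ds._fingerprint plus a hash of the transform
#     # and its arguments, so re-running the same map can reload the cached Arrow file.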
#################
# Caching
#################
_CACHING_ENABLED = True
_TEMP_DIR_FOR_TEMP_CACHE_FILES: Optional["_TempCacheDir"] = None
_DATASETS_WITH_TABLE_IN_TEMP_DIR: Optional[weakref.WeakSet] = None
class _TempCacheDir:
"""
A temporary directory for storing cached Arrow files with a cleanup that frees references to the Arrow files
before deleting the directory itself to avoid permission errors on Windows.
"""
def __init__(self):
self.name = tempfile.mkdtemp(prefix=config.TEMP_CACHE_DIR_PREFIX)
self._finalizer = weakref.finalize(self, self._cleanup)
def _cleanup(self):
for dset in get_datasets_with_cache_file_in_temp_dir():
dset.__del__()
if os.path.exists(self.name):
try:
shutil.rmtree(self.name)
except Exception as e:
raise OSError(
f"An error occured while trying to delete temporary cache directory {self.name}. Please delete it manually."
) from e
def cleanup(self):
if self._finalizer.detach():
self._cleanup()
def maybe_register_dataset_for_temp_dir_deletion(dataset):
"""
This function registers the datasets that have cache files in _TEMP_DIR_FOR_TEMP_CACHE_FILES in order
to properly delete them before deleting the temporary directory.
The temporary directory _TEMP_DIR_FOR_TEMP_CACHE_FILES is used when caching is disabled.
"""
if _TEMP_DIR_FOR_TEMP_CACHE_FILES is None:
return
global _DATASETS_WITH_TABLE_IN_TEMP_DIR
if _DATASETS_WITH_TABLE_IN_TEMP_DIR is None:
_DATASETS_WITH_TABLE_IN_TEMP_DIR = weakref.WeakSet()
if any(
Path(_TEMP_DIR_FOR_TEMP_CACHE_FILES.name) in Path(cache_file["filename"]).parents
for cache_file in dataset.cache_files
):
_DATASETS_WITH_TABLE_IN_TEMP_DIR.add(dataset)
def get_datasets_with_cache_file_in_temp_dir():
return list(_DATASETS_WITH_TABLE_IN_TEMP_DIR) if _DATASETS_WITH_TABLE_IN_TEMP_DIR is not None else []
def enable_caching():
"""
When applying transforms on a dataset, the data are stored in cache files.
The caching mechanism allows to reload an existing cache file if it's already been computed.
Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated
after each transform.
If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets.
More precisely, if the caching is disabled:
- cache files are always recreated
- cache files are written to a temporary directory that is deleted when session closes
- cache files are named using a random hash instead of the dataset fingerprint
- use [`~datasets.Dataset.save_to_disk`] to save a transformed dataset or it will be deleted when session closes
- caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use
the `download_mode` parameter in [`~datasets.load_dataset`].
"""
global _CACHING_ENABLED
_CACHING_ENABLED = True
def disable_caching():
"""
When applying transforms on a dataset, the data are stored in cache files.
The caching mechanism allows to reload an existing cache file if it's already been computed.
Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated
after each transform.
If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets.
More precisely, if the caching is disabled:
- cache files are always recreated
- cache files are written to a temporary directory that is deleted when session closes
- cache files are named using a random hash instead of the dataset fingerprint
- use [`~datasets.Dataset.save_to_disk`] to save a transformed dataset or it will be deleted when session closes
- caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use
the `download_mode` parameter in [`~datasets.load_dataset`].
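Example (a minimal usage sketch):
```py
>>> from datasets import disable_caching, is_caching_enabled
>>> disable_caching()
>>> is_caching_enabled()
False
```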
"""
global _CACHING_ENABLED
_CACHING_ENABLED = False
@deprecated(
"Use datasets.enable_caching() or datasets.disable_caching() instead. This function will be removed in a future version of datasets."
)
def set_caching_enabled(boolean: bool):
"""
When applying transforms on a dataset, the data are stored in cache files.
The caching mechanism allows to reload an existing cache file if it's already been computed.
Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated
after each transform.
If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets.
More precisely, if the caching is disabled:
- cache files are always recreated
- cache files are written to a temporary directory that is deleted when session closes
- cache files are named using a random hash instead of the dataset fingerprint
- use :func:`datasets.Dataset.save_to_disk` to save a transformed dataset or it will be deleted when session closes
- caching doesn't affect :func:`datasets.load_dataset`. If you want to regenerate a dataset from scratch you should use
the ``download_mode`` parameter in :func:`datasets.load_dataset`.
"""
global _CACHING_ENABLED
_CACHING_ENABLED = bool(boolean)
def is_caching_enabled() -> bool:
"""
When applying transforms on a dataset, the data are stored in cache files.
The caching mechanism allows to reload an existing cache file if it's already been computed.
Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated
after each transform.
If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets.
More precisely, if the caching is disabled:
- cache files are always recreated
- cache files are written to a temporary directory that is deleted when session closes
- cache files are named using a random hash instead of the dataset fingerprint
- use [`~datasets.Dataset.save_to_disk`] to save a transformed dataset or it will be deleted when session closes
- caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use
the `download_mode` parameter in [`~datasets.load_dataset`].
"""
global _CACHING_ENABLED
return bool(_CACHING_ENABLED)
def get_temporary_cache_files_directory() -> str:
"""Return a directory that is deleted when session closes."""
global _TEMP_DIR_FOR_TEMP_CACHE_FILES
if _TEMP_DIR_FOR_TEMP_CACHE_FILES is None:
_TEMP_DIR_FOR_TEMP_CACHE_FILES = _TempCacheDir()
return _TEMP_DIR_FOR_TEMP_CACHE_FILES.name
#################
# Hashing
#################
@deprecated("Use `copyreg.pickle` to register a custom reducer.")
def hashregister(*types):
def proxy(func):
for t in types:
Hasher.dispatch[t] = func
return func
return proxy
class Hasher:
"""Hasher that accepts python objects as inputs."""
dispatch: Dict = {}
def __init__(self):
self.m = xxhash.xxh64()
@classmethod
def hash_bytes(cls, value: Union[bytes, List[bytes]]) -> str:
value = [value] if isinstance(value, bytes) else value
m = xxhash.xxh64()
for x in value:
m.update(x)
return m.hexdigest()
@classmethod
@deprecated("Use `Hasher.hash` instead.")
def hash_default(cls, value: Any) -> str:
return cls.hash(value)
@classmethod
def hash(cls, value: Any) -> str:
return cls.hash_bytes(dumps(value))
def update(self, value: Any) -> None:
header_for_update = f"=={type(value)}=="
value_for_update = self.hash(value)
self.m.update(header_for_update.encode("utf8"))
self.m.update(value_for_update.encode("utf-8"))
def hexdigest(self) -> str:
return self.m.hexdigest()
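# A short usage sketch of the Hasher (illustrative only):
#     h = Hasher()
#     h.update("shuffle")          # any picklable object can be fed in
#     h.update({"seed": 42})
#     deterministic_id = h.hexdigest()  # the same inputs always produce the same hex digest
# Hasher.hash(obj) is the one-shot equivalent for hashing a single object.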
#################
# Fingerprinting
#################
fingerprint_rng = random.Random()
# we show a warning only once when fingerprinting fails to avoid spam
fingerprint_warnings: Dict[str, bool] = {}
def generate_fingerprint(dataset: "Dataset") -> str:
state = dataset.__dict__
hasher = Hasher()
for key in sorted(state):
if key == "_fingerprint":
continue
hasher.update(key)
hasher.update(state[key])
# hash data files last modification timestamps as well
for cache_file in dataset.cache_files:
hasher.update(os.path.getmtime(cache_file["filename"]))
return hasher.hexdigest()
def generate_random_fingerprint(nbits: int = 64) -> str:
return f"{fingerprint_rng.getrandbits(nbits):0{nbits//4}x}"
def update_fingerprint(fingerprint, transform, transform_args):
global fingerprint_warnings
hasher = Hasher()
hasher.update(fingerprint)
try:
hasher.update(transform)
except: # noqa various errors might raise here from pickle or dill
if _CACHING_ENABLED:
if not fingerprint_warnings.get("update_fingerprint_transform_hash_failed", False):
logger.warning(
f"Transform {transform} couldn't be hashed properly, a random hash was used instead. "
"Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. "
"If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. "
"This warning is only showed once. Subsequent hashing failures won't be showed."
)
fingerprint_warnings["update_fingerprint_transform_hash_failed"] = True
else:
logger.info(f"Transform {transform} couldn't be hashed properly, a random hash was used instead.")
else:
logger.info(
f"Transform {transform} couldn't be hashed properly, a random hash was used instead. This doesn't affect caching since it's disabled."
)
return generate_random_fingerprint()
for key in sorted(transform_args):
hasher.update(key)
try:
hasher.update(transform_args[key])
except: # noqa various errors might raise here from pickle or dill
if _CACHING_ENABLED:
if not fingerprint_warnings.get("update_fingerprint_transform_hash_failed", False):
logger.warning(
f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead. "
"Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. "
"If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. "
"This warning is only showed once. Subsequent hashing failures won't be showed."
)
fingerprint_warnings["update_fingerprint_transform_hash_failed"] = True
else:
logger.info(
f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead."
)
else:
logger.info(
f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead. This doesn't affect caching since it's disabled."
)
return generate_random_fingerprint()
return hasher.hexdigest()
def validate_fingerprint(fingerprint: str, max_length=64):
"""
Make sure the fingerprint is a non-empty string that is not longer than max_length (64 by default),
so that the fingerprint can be used to name cache files without issues.
"""
if not isinstance(fingerprint, str) or not fingerprint:
raise ValueError(f"Invalid fingerprint '{fingerprint}': it should be a non-empty string.")
for invalid_char in INVALID_WINDOWS_CHARACTERS_IN_PATH:
if invalid_char in fingerprint:
raise ValueError(
f"Invalid fingerprint. Bad characters from black list '{INVALID_WINDOWS_CHARACTERS_IN_PATH}' found in '{fingerprint}'. "
f"They could create issues when creating cache files."
)
if len(fingerprint) > max_length:
raise ValueError(
f"Invalid fingerprint. Maximum lenth is {max_length} but '{fingerprint}' has length {len(fingerprint)}."
"It could create issues when creating cache files."
)
def format_transform_for_fingerprint(func: Callable, version: Optional[str] = None) -> str:
"""
Format a transform to the format that will be used to update the fingerprint.
"""
transform = f"{func.__module__}.{func.__qualname__}"
if version is not None:
transform += f"@{version}"
return transform
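# For example (illustrative), format_transform_for_fingerprint(Dataset.map, version="2.0.0")
# would return something like "datasets.arrow_dataset.Dataset.map@2.0.0".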
def format_kwargs_for_fingerprint(
func: Callable,
args: Tuple,
kwargs: Dict[str, Any],
use_kwargs: Optional[List[str]] = None,
ignore_kwargs: Optional[List[str]] = None,
randomized_function: bool = False,
) -> Dict[str, Any]:
"""
Format the kwargs of a transform to the format that will be used to update the fingerprint.
"""
kwargs_for_fingerprint = kwargs.copy()
if args:
params = [p.name for p in inspect.signature(func).parameters.values() if p != p.VAR_KEYWORD]
args = args[1:] # assume the first argument is the dataset
params = params[1:]
kwargs_for_fingerprint.update(zip(params, args))
else:
del kwargs_for_fingerprint[
next(iter(inspect.signature(func).parameters))
] # assume the first key is the dataset
# keep the right kwargs to be hashed to generate the fingerprint
if use_kwargs:
kwargs_for_fingerprint = {k: v for k, v in kwargs_for_fingerprint.items() if k in use_kwargs}
if ignore_kwargs:
kwargs_for_fingerprint = {k: v for k, v in kwargs_for_fingerprint.items() if k not in ignore_kwargs}
if randomized_function: # randomized functions have `seed` and `generator` parameters
if kwargs_for_fingerprint.get("seed") is None and kwargs_for_fingerprint.get("generator") is None:
_, seed, pos, *_ = np.random.get_state()
seed = seed[pos] if pos < 624 else seed[0]
kwargs_for_fingerprint["generator"] = np.random.default_rng(seed)
# remove kwargs that are the default values
default_values = {
p.name: p.default for p in inspect.signature(func).parameters.values() if p.default != inspect._empty
}
for default_varname, default_value in default_values.items():
if default_varname in kwargs_for_fingerprint and kwargs_for_fingerprint[default_varname] == default_value:
kwargs_for_fingerprint.pop(default_varname)
return kwargs_for_fingerprint
def fingerprint_transform(
inplace: bool,
use_kwargs: Optional[List[str]] = None,
ignore_kwargs: Optional[List[str]] = None,
fingerprint_names: Optional[List[str]] = None,
randomized_function: bool = False,
version: Optional[str] = None,
):
"""
Wrapper for dataset transforms to update the dataset fingerprint using ``update_fingerprint``
Args:
inplace (:obj:`bool`): If inplace is True, the fingerprint of the dataset is updated inplace.
Otherwise, a parameter "new_fingerprint" is passed to the wrapped method that should take care of
setting the fingerprint of the returned Dataset.
use_kwargs (:obj:`List[str]`, optional): optional white list of argument names to take into account
when updating the fingerprint. By default all the arguments are used.
ignore_kwargs (:obj:`List[str]`, optional): optional black list of argument names to ignore when
updating the fingerprint. Note that ignore_kwargs prevails over use_kwargs.
fingerprint_names (:obj:`List[str]`, optional, defaults to ["new_fingerprint"]):
If the dataset transform is not inplace and returns a DatasetDict, then it can require
several fingerprints (one per dataset in the DatasetDict). By specifying fingerprint_names,
one fingerprint named after each element of fingerprint_names is going to be passed.
randomized_function (:obj:`bool`, defaults to False): If the dataset transform is random and has
optional parameters "seed" and "generator", then you can set randomized_function to True.
This way, even if users set "seed" and "generator" to None, then the fingerprint is
going to be randomly generated depending on numpy's current state. In this case, the
generator is set to np.random.default_rng(np.random.get_state()[1][0]).
version (:obj:`str`, optional): version of the transform. The version is taken into account when
computing the fingerprint. If a dataset transform changes (or at least if the output data
that are cached change), then one should increase the version. If the version stays the
same, then old cached data that are not compatible with the new transform could be reused.
It should be in the format "MAJOR.MINOR.PATCH".
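Example (an illustrative sketch of how a transform is typically decorated; the method name
and parameters below are hypothetical, not the actual ``Dataset`` API)::
    @fingerprint_transform(inplace=False)
    def add_prefix(self, prefix: str, new_fingerprint: Optional[str] = None) -> "Dataset":
        # must accept ``new_fingerprint`` and set it on the returned dataset
        ...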
"""
if use_kwargs is not None and not isinstance(use_kwargs, list):
raise ValueError(f"use_kwargs is supposed to be a list, not {type(use_kwargs)}")
if ignore_kwargs is not None and not isinstance(ignore_kwargs, list):
raise ValueError(f"ignore_kwargs is supposed to be a list, not {type(use_kwargs)}")
if inplace and fingerprint_names:
raise ValueError("fingerprint_names are only used when inplace is False")
fingerprint_names = fingerprint_names if fingerprint_names is not None else ["new_fingerprint"]
def _fingerprint(func):
if not inplace and not all(name in func.__code__.co_varnames for name in fingerprint_names):
raise ValueError(f"function {func} is missing parameters {fingerprint_names} in signature")
if randomized_function: # randomized function have seed and generator parameters
if "seed" not in func.__code__.co_varnames:
raise ValueError(f"'seed' must be in {func}'s signature")
if "generator" not in func.__code__.co_varnames:
raise ValueError(f"'generator' must be in {func}'s signature")
# this call has to be outside the wrapper since __qualname__ changes in multiprocessing
transform = format_transform_for_fingerprint(func, version=version)
@wraps(func)
def wrapper(*args, **kwargs):
kwargs_for_fingerprint = format_kwargs_for_fingerprint(
func,
args,
kwargs,
use_kwargs=use_kwargs,
ignore_kwargs=ignore_kwargs,
randomized_function=randomized_function,
)
if args:
dataset: Dataset = args[0]
args = args[1:]
else:
dataset: Dataset = kwargs.pop(next(iter(inspect.signature(func).parameters)))
# compute new_fingerprint and add it to the args of not in-place transforms
if inplace:
new_fingerprint = update_fingerprint(dataset._fingerprint, transform, kwargs_for_fingerprint)
else:
for fingerprint_name in fingerprint_names: # transforms like `train_test_split` have several hashes
if kwargs.get(fingerprint_name) is None:
kwargs_for_fingerprint["fingerprint_name"] = fingerprint_name
kwargs[fingerprint_name] = update_fingerprint(
dataset._fingerprint, transform, kwargs_for_fingerprint
)
else:
validate_fingerprint(kwargs[fingerprint_name])
# Call actual function
out = func(dataset, *args, **kwargs)
# Update fingerprint of in-place transforms + update in-place history of transforms
if inplace: # update after calling func so that the fingerprint doesn't change if the function fails
dataset._fingerprint = new_fingerprint
return out
wrapper._decorator_name_ = "fingerprint"
return wrapper
return _fingerprint
| datasets/src/datasets/fingerprint.py/0 | {
"file_path": "datasets/src/datasets/fingerprint.py",
"repo_id": "datasets",
"token_count": 8037
} | 77 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
"""A dataset reader that reads from a Spark DataFrame.
When caching, cache materialization is parallelized over Spark; an NFS that is accessible to the driver must be
provided. Streaming is not currently supported.
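This reader is typically reached through `Dataset.from_spark`. A hedged sketch, assuming an
active `SparkSession` named `spark`:
```py
>>> df = spark.createDataFrame([("hello",), ("world",)], ["text"])
>>> ds = SparkDatasetReader(df, streaming=False).read()
```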
"""
def __init__(
self,
df: pyspark.sql.DataFrame,
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
streaming: bool = True,
cache_dir: str = None,
keep_in_memory: bool = False,
working_dir: str = None,
load_from_cache_file: bool = True,
file_format: str = "arrow",
**kwargs,
):
super().__init__(
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
**kwargs,
)
self._load_from_cache_file = load_from_cache_file
self._file_format = file_format
self.builder = Spark(
df=df,
features=features,
cache_dir=cache_dir,
working_dir=working_dir,
**kwargs,
)
def read(self):
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split)
download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=download_mode,
file_format=self._file_format,
)
return self.builder.as_dataset(split=self.split)
| datasets/src/datasets/io/spark.py/0 | {
"file_path": "datasets/src/datasets/io/spark.py",
"repo_id": "datasets",
"token_count": 787
} | 78 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
_PANDAS_READ_CSV_DEPRECATED_2_2_0_PARAMETERS = ["verbose"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
"""BuilderConfig for CSV."""
sep: str = ","
delimiter: Optional[str] = None
header: Optional[Union[int, List[int], str]] = "infer"
names: Optional[List[str]] = None
column_names: Optional[List[str]] = None
index_col: Optional[Union[int, str, List[int], List[str]]] = None
usecols: Optional[Union[List[int], List[str]]] = None
prefix: Optional[str] = None
mangle_dupe_cols: bool = True
engine: Optional[Literal["c", "python", "pyarrow"]] = None
converters: Dict[Union[int, str], Callable[[Any], Any]] = None
true_values: Optional[list] = None
false_values: Optional[list] = None
skipinitialspace: bool = False
skiprows: Optional[Union[int, List[int]]] = None
nrows: Optional[int] = None
na_values: Optional[Union[str, List[str]]] = None
keep_default_na: bool = True
na_filter: bool = True
verbose: bool = False
skip_blank_lines: bool = True
thousands: Optional[str] = None
decimal: str = "."
lineterminator: Optional[str] = None
quotechar: str = '"'
quoting: int = 0
escapechar: Optional[str] = None
comment: Optional[str] = None
encoding: Optional[str] = None
dialect: Optional[str] = None
error_bad_lines: bool = True
warn_bad_lines: bool = True
skipfooter: int = 0
doublequote: bool = True
memory_map: bool = False
float_precision: Optional[str] = None
chunksize: int = 10_000
features: Optional[datasets.Features] = None
encoding_errors: Optional[str] = "strict"
on_bad_lines: Literal["error", "warn", "skip"] = "error"
date_format: Optional[str] = None
def __post_init__(self):
if self.delimiter is not None:
self.sep = self.delimiter
if self.column_names is not None:
self.names = self.column_names
@property
def pd_read_csv_kwargs(self):
pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if datasets.config.PANDAS_VERSION.release < (1, 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.2 deprecated arguments
if datasets.config.PANDAS_VERSION.release >= (2, 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_DEPRECATED_2_2_0_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
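# The Csv builder below is normally reached through the public API rather than instantiated directly,
# e.g. (illustrative): load_dataset("csv", data_files="data.csv", sep=";")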
class Csv(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = CsvConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
data_files = dl_manager.download_and_extract(self.config.data_files)
if isinstance(data_files, (str, list, tuple)):
files = data_files
if isinstance(files, str):
files = [files]
files = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
files = [dl_manager.iter_files(file) for file in files]
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
schema = self.config.features.arrow_schema
if all(not require_storage_cast(feature) for feature in self.config.features.values()):
# cheaper cast
pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, schema)
return pa_table
def _generate_tables(self, files):
schema = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
dtype = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
try:
for batch_idx, df in enumerate(csv_file_reader):
pa_table = pa.Table.from_pandas(df)
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(pa_table)
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise
| datasets/src/datasets/packaged_modules/csv/csv.py/0 | {
"file_path": "datasets/src/datasets/packaged_modules/csv/csv.py",
"repo_id": "datasets",
"token_count": 4003
} | 79 |
import sys
from dataclasses import dataclass
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class SqlConfig(datasets.BuilderConfig):
"""BuilderConfig for SQL."""
sql: Union[str, "sqlalchemy.sql.Selectable"] = None
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] = None
index_col: Optional[Union[str, List[str]]] = None
coerce_float: bool = True
params: Optional[Union[List, Tuple, Dict]] = None
parse_dates: Optional[Union[List, Dict]] = None
columns: Optional[List[str]] = None
chunksize: Optional[int] = 10_000
features: Optional[datasets.Features] = None
def __post_init__(self):
if self.sql is None:
raise ValueError("sql must be specified")
if self.con is None:
raise ValueError("con must be specified")
def create_config_id(
self,
config_kwargs: dict,
custom_features: Optional[datasets.Features] = None,
) -> str:
config_kwargs = config_kwargs.copy()
# We need to stringify the Selectable object to make its hash deterministic
# The process of stringifying is explained here: http://docs.sqlalchemy.org/en/latest/faq/sqlexpressions.html
sql = config_kwargs["sql"]
if not isinstance(sql, str):
if datasets.config.SQLALCHEMY_AVAILABLE and "sqlalchemy" in sys.modules:
import sqlalchemy
if isinstance(sql, sqlalchemy.sql.Selectable):
engine = sqlalchemy.create_engine(config_kwargs["con"].split("://")[0] + "://")
sql_str = str(sql.compile(dialect=engine.dialect))
config_kwargs["sql"] = sql_str
else:
raise TypeError(
f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}"
)
else:
raise TypeError(
f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}"
)
con = config_kwargs["con"]
if not isinstance(con, str):
config_kwargs["con"] = id(con)
logger.info(
f"SQL connection 'con' of type {type(con)} couldn't be hashed properly. To enable hashing, specify 'con' as URI string instead."
)
return super().create_config_id(config_kwargs, custom_features=custom_features)
@property
def pd_read_sql_kwargs(self):
pd_read_sql_kwargs = {
"index_col": self.index_col,
"columns": self.columns,
"params": self.params,
"coerce_float": self.coerce_float,
"parse_dates": self.parse_dates,
}
return pd_read_sql_kwargs
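# The Sql builder below is normally reached through the public API rather than instantiated directly,
# e.g. (illustrative): Dataset.from_sql("SELECT * FROM my_table", con="sqlite:///my.db")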
class Sql(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = SqlConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={})]
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
schema = self.config.features.arrow_schema
if all(not require_storage_cast(feature) for feature in self.config.features.values()):
# cheaper cast
pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, schema)
return pa_table
def _generate_tables(self):
chunksize = self.config.chunksize
sql_reader = pd.read_sql(
self.config.sql, self.config.con, chunksize=chunksize, **self.config.pd_read_sql_kwargs
)
sql_reader = [sql_reader] if chunksize is None else sql_reader
for chunk_idx, df in enumerate(sql_reader):
pa_table = pa.Table.from_pandas(df)
yield chunk_idx, self._cast_table(pa_table)
| datasets/src/datasets/packaged_modules/sql/sql.py/0 | {
"file_path": "datasets/src/datasets/packaged_modules/sql/sql.py",
"repo_id": "datasets",
"token_count": 1963
} | 80 |
# deprecated, please use the `filelock` package instead
from filelock import ( # noqa: F401 # imported for backward compatibility TODO: remove in 3.0.0
BaseFileLock,
SoftFileLock,
Timeout,
UnixFileLock,
WindowsFileLock,
)
from ._filelock import FileLock # noqa: F401 # imported for backward compatibility. TODO: remove in 3.0.0
| datasets/src/datasets/utils/filelock.py/0 | {
"file_path": "datasets/src/datasets/utils/filelock.py",
"repo_id": "datasets",
"token_count": 115
} | 81 |
# Copyright 2022 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF-specific utils import."""
import os
import warnings
from functools import partial
from math import ceil
from uuid import uuid4
import numpy as np
import pyarrow as pa
from multiprocess import get_context
try:
from multiprocess.shared_memory import SharedMemory
except ImportError:
SharedMemory = None # Version checks should prevent this being called on older Python versions
from .. import config
def minimal_tf_collate_fn(features):
if isinstance(features, dict): # case batch_size=None: nothing to collate
return features
elif config.TF_AVAILABLE:
import tensorflow as tf
else:
raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
first = features[0]
batch = {}
for k, v in first.items():
if isinstance(v, np.ndarray):
batch[k] = np.stack([f[k] for f in features])
elif isinstance(v, tf.Tensor):
batch[k] = tf.stack([f[k] for f in features])
else:
batch[k] = np.array([f[k] for f in features])
return batch
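# Illustrative behaviour of the collator above (not executed here):
#     minimal_tf_collate_fn([{"x": np.array([1, 2])}, {"x": np.array([3, 4])}])
#     # -> {"x": array([[1, 2], [3, 4]])}, i.e. per-key stacking into a batch dimension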
def minimal_tf_collate_fn_with_renaming(features):
batch = minimal_tf_collate_fn(features)
if "label" in batch:
batch["labels"] = batch["label"]
del batch["label"]
return batch
def is_numeric_pa_type(pa_type):
if pa.types.is_list(pa_type):
return is_numeric_pa_type(pa_type.value_type)
return pa.types.is_integer(pa_type) or pa.types.is_floating(pa_type) or pa.types.is_decimal(pa_type)
def is_numeric_feature(feature):
from .. import ClassLabel, Sequence, Value
from ..features.features import _ArrayXD
if isinstance(feature, Sequence):
return is_numeric_feature(feature.feature)
elif isinstance(feature, list):
return is_numeric_feature(feature[0])
elif isinstance(feature, _ArrayXD):
return is_numeric_pa_type(feature().storage_dtype)
elif isinstance(feature, Value):
return is_numeric_pa_type(feature())
elif isinstance(feature, ClassLabel):
return True
else:
return False
def np_get_batch(
indices, dataset, cols_to_retain, collate_fn, collate_fn_args, columns_to_np_types, return_dict=False
):
if not isinstance(indices, np.ndarray):
indices = indices.numpy()
is_batched = True
# Optimization - if we're loading a sequential batch, do it with slicing instead of a list of indices
if isinstance(indices, np.integer):
batch = dataset[indices.item()]
is_batched = False
elif np.all(np.diff(indices) == 1):
batch = dataset[indices[0] : indices[-1] + 1]
elif isinstance(indices, np.ndarray):
batch = dataset[indices]
else:
raise RuntimeError("Unexpected type for indices: {}".format(type(indices)))
if cols_to_retain is not None:
batch = {
key: value
for key, value in batch.items()
if key in cols_to_retain or key in ("label", "label_ids", "labels")
}
if is_batched:
actual_size = len(list(batch.values())[0]) # Get the length of one of the arrays, assume all same
# Our collators expect a list of dicts, not a dict of lists/arrays, so we invert
batch = [{key: value[i] for key, value in batch.items()} for i in range(actual_size)]
batch = collate_fn(batch, **collate_fn_args)
if return_dict:
out_batch = {}
for col, cast_dtype in columns_to_np_types.items():
# In case the collate_fn returns something strange
array = np.array(batch[col])
array = array.astype(cast_dtype)
out_batch[col] = array
else:
out_batch = []
for col, cast_dtype in columns_to_np_types.items():
# In case the collate_fn returns something strange
array = np.array(batch[col])
array = array.astype(cast_dtype)
out_batch.append(array)
return out_batch
def dataset_to_tf(
dataset,
cols_to_retain,
collate_fn,
collate_fn_args,
columns_to_np_types,
output_signature,
shuffle,
batch_size,
drop_remainder,
):
"""Create a tf.data.Dataset from the underlying Dataset. This is a single-process method - the multiprocess
equivalent is multiprocess_dataset_to_tf.
Args:
dataset (`Dataset`): Dataset to wrap with tf.data.Dataset.
cols_to_retain (`List[str]`): Dataset column(s) to load in the
tf.data.Dataset. It is acceptable to include column names that are created by the `collate_fn` and
that do not exist in the original dataset.
collate_fn(`Callable`): A function or callable object (such as a `DataCollator`) that will collate
lists of samples into a batch.
collate_fn_args (`Dict`): A `dict` of keyword arguments to be passed to the
`collate_fn`. Can be empty.
columns_to_np_types (`Dict[str, np.dtype]`): A `dict` mapping column names to numpy dtypes.
output_signature (`Dict[str, tf.TensorSpec]`): A `dict` mapping column names to
`tf.TensorSpec` objects.
shuffle(`bool`): Shuffle the dataset order when loading. Recommended True for training, False for
validation/evaluation.
batch_size (`int`, default `None`): Size of batches to load from the dataset. Defaults to `None`, which implies that
the dataset won't be batched, but the returned dataset can be batched later with `tf_dataset.batch(batch_size)`.
drop_remainder(`bool`, default `None`): Drop the last incomplete batch when loading. If not provided,
defaults to the same setting as shuffle.
Returns:
`tf.data.Dataset`
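In practice this function is reached through `Dataset.to_tf_dataset`; a hedged sketch of that
public entry point (column names are hypothetical):
```py
>>> tf_ds = ds.to_tf_dataset(columns=["input_ids"], label_cols=["label"], batch_size=8, shuffle=True)
```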
"""
if config.TF_AVAILABLE:
import tensorflow as tf
else:
raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
# TODO Matt: When our minimum Python version is 3.8 or higher, we can delete all of this and move everything
# to the NumPy multiprocessing path.
if hasattr(tf, "random_index_shuffle"):
random_index_shuffle = tf.random_index_shuffle
elif hasattr(tf.random.experimental, "index_shuffle"):
random_index_shuffle = tf.random.experimental.index_shuffle
else:
if len(dataset) > 10_000_000:
warnings.warn(
"to_tf_dataset() can be memory-inefficient on versions of TensorFlow older than 2.9. "
"If you are iterating over a dataset with a very large number of samples, consider "
"upgrading to TF >= 2.9."
)
random_index_shuffle = None
getter_fn = partial(
np_get_batch,
dataset=dataset,
cols_to_retain=cols_to_retain,
collate_fn=collate_fn,
collate_fn_args=collate_fn_args,
columns_to_np_types=columns_to_np_types,
return_dict=False,
)
# This works because dictionaries always output in the same order
tout = [tf.dtypes.as_dtype(dtype) for dtype in columns_to_np_types.values()]
@tf.function(input_signature=[tf.TensorSpec(None, tf.int64)])
def fetch_function(indices):
output = tf.py_function(
getter_fn,
inp=[indices],
Tout=tout,
)
return {key: output[i] for i, key in enumerate(columns_to_np_types.keys())}
tf_dataset = tf.data.Dataset.range(len(dataset))
if shuffle and random_index_shuffle is not None:
base_seed = tf.fill((3,), value=tf.cast(-1, dtype=tf.int64))
def scan_random_index(state, index):
if tf.reduce_all(state == -1):
# This generates a new random seed once per epoch only,
# to ensure that we iterate over each sample exactly once per epoch
state = tf.random.uniform(shape=(3,), maxval=2**62, dtype=tf.int64)
shuffled_index = random_index_shuffle(index=index, seed=state, max_index=len(dataset) - 1)
return state, shuffled_index
tf_dataset = tf_dataset.scan(base_seed, scan_random_index)
elif shuffle:
tf_dataset = tf_dataset.shuffle(tf_dataset.cardinality())
if batch_size is not None:
tf_dataset = tf_dataset.batch(batch_size, drop_remainder=drop_remainder)
tf_dataset = tf_dataset.map(fetch_function)
if batch_size is not None:
def ensure_shapes(input_dict):
return {key: tf.ensure_shape(val, output_signature[key].shape) for key, val in input_dict.items()}
else:
# Ensure shape but remove batch dimension of output_signature[key].shape
def ensure_shapes(input_dict):
return {key: tf.ensure_shape(val, output_signature[key].shape[1:]) for key, val in input_dict.items()}
return tf_dataset.map(ensure_shapes)
class SharedMemoryContext:
# This is a context manager for creating shared memory that ensures cleanup happens even if a process is interrupted
# The process that creates shared memory is always the one responsible for unlinking it in the end
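# Illustrative use (hypothetical names):
#     with SharedMemoryContext() as shm_ctx:
#         arr = shm_ctx.get_array("my_block", shape=(4,), dtype=np.int64, create=True)
#         arr[:] = 0  # visible to any process that opens "my_block" with create=False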
def __init__(self):
self.created_shms = []
self.opened_shms = []
def get_shm(self, name, size, create):
shm = SharedMemory(size=int(size), name=name, create=create)
if create:
# We only unlink the ones we created in this context
self.created_shms.append(shm)
else:
# If we didn't create it, we only close it when done, we don't unlink it
self.opened_shms.append(shm)
return shm
def get_array(self, name, shape, dtype, create):
shm = self.get_shm(name=name, size=np.prod(shape) * np.dtype(dtype).itemsize, create=create)
return np.ndarray(shape, dtype=dtype, buffer=shm.buf)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
for shm in self.created_shms:
shm.close()
shm.unlink()
for shm in self.opened_shms:
shm.close()
class NumpyMultiprocessingGenerator:
def __init__(
self,
dataset,
cols_to_retain,
collate_fn,
collate_fn_args,
columns_to_np_types,
output_signature,
shuffle,
batch_size,
drop_remainder,
num_workers,
):
self.dataset = dataset
self.cols_to_retain = cols_to_retain
self.collate_fn = collate_fn
self.collate_fn_args = collate_fn_args
self.string_columns = [col for col, dtype in columns_to_np_types.items() if dtype in (np.unicode_, np.str_)]
# Strings will be converted to arrays of single unicode chars, so that we can have a constant itemsize
self.columns_to_np_types = {
col: dtype if col not in self.string_columns else np.dtype("U1")
for col, dtype in columns_to_np_types.items()
}
self.output_signature = output_signature
self.shuffle = shuffle
self.batch_size = batch_size
self.drop_remainder = drop_remainder
self.num_workers = num_workers
# Because strings are converted to characters, we need to add one extra dimension to the shape
self.columns_to_ranks = {
col: int(spec.shape.rank) if col not in self.string_columns else int(spec.shape.rank) + 1
for col, spec in output_signature.items()
}
def __iter__(self):
# Make sure we only spawn workers if they have work to do
num_workers = min(self.num_workers, int(ceil(len(self.dataset) / self.batch_size)))
# Do the shuffling in iter so that it's done at the start of each epoch
per_worker_batches, final_batch, final_batch_worker = self.distribute_batches(
self.dataset, self.batch_size, self.drop_remainder, num_workers, self.shuffle
)
ctx = get_context("spawn")
names = []
shape_arrays = []
workers = []
array_ready_events = [ctx.Event() for _ in range(num_workers)]
array_loaded_events = [ctx.Event() for _ in range(num_workers)]
base_args = {
"dataset": self.dataset,
"cols_to_retain": self.cols_to_retain,
"collate_fn": self.collate_fn,
"collate_fn_args": self.collate_fn_args,
"columns_to_np_types": self.columns_to_np_types,
"columns_to_ranks": self.columns_to_ranks,
"string_columns": self.string_columns,
}
with SharedMemoryContext() as shm_ctx:
for i in range(num_workers):
worker_random_id = str(uuid4())
worker_name = f"dw_{i}_{worker_random_id}"[:10]
names.append(worker_name)
worker_shape_arrays = {
col: shm_ctx.get_array(f"{worker_name}_{col}_shape", shape=(rank,), dtype=np.int64, create=True)
for col, rank in self.columns_to_ranks.items()
}
shape_arrays.append(worker_shape_arrays)
worker_indices = per_worker_batches[i]
if i == final_batch_worker and final_batch is not None:
final_batch_arg = final_batch
else:
final_batch_arg = None
worker_kwargs = {
"worker_name": worker_name,
"indices": worker_indices,
"extra_batch": final_batch_arg,
"array_ready_event": array_ready_events[i],
"array_loaded_event": array_loaded_events[i],
**base_args,
}
worker = ctx.Process(target=self.worker_loop, kwargs=worker_kwargs, daemon=True)
worker.start()
workers.append(worker)
end_signal_received = False
while not end_signal_received:
for i in range(num_workers):
if not array_ready_events[i].wait(timeout=60):
raise TimeoutError("Data loading worker timed out!")
array_ready_events[i].clear()
array_shapes = shape_arrays[i]
if any(np.any(shape < 0) for shape in array_shapes.values()):
# Child processes send negative array shapes to indicate
# that no more data is going to be sent
end_signal_received = True
break
# Matt: Because array shapes are variable we recreate the shared memory each iteration.
# I suspect repeatedly opening lots of shared memory is the bottleneck for the parent process.
# A future optimization, at the cost of some code complexity, could be to reuse shared memory
# between iterations, but this would require knowing in advance the maximum size, or having
# a system to only create a new memory block when a new maximum size is seen.
# Another potential optimization would be to figure out which memory copies are necessary,
# or whether we can yield objects straight out of shared memory.
with SharedMemoryContext() as batch_shm_ctx:
# This memory context only lasts long enough to copy everything out of the batch
arrays = {
col: batch_shm_ctx.get_array(
f"{names[i]}_{col}",
shape=shape,
dtype=self.columns_to_np_types[col],
create=False,
)
for col, shape in array_shapes.items()
}
# Copy everything out of shm because the memory
# will be unlinked by the child process at some point
arrays = {col: np.copy(arr) for col, arr in arrays.items()}
# Now we convert any unicode char arrays to strings
for string_col in self.string_columns:
arrays[string_col] = (
arrays[string_col].view(f"U{arrays[string_col].shape[-1]}").squeeze(-1)
)
yield arrays
array_loaded_events[i].set()
# Now we just do some cleanup
# Shared memory is cleaned up by the context manager, so we just make sure workers finish
for worker in workers:
worker.join()
def __call__(self):
return self
@staticmethod
def worker_loop(
dataset,
cols_to_retain,
collate_fn,
collate_fn_args,
columns_to_np_types,
columns_to_ranks,
string_columns,
indices,
extra_batch,
worker_name,
array_ready_event,
array_loaded_event,
):
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
if config.TF_AVAILABLE:
import tensorflow as tf
else:
raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
tf.config.set_visible_devices([], "GPU") # Make sure workers don't try to allocate GPU memory
def send_batch_to_parent(indices):
batch = np_get_batch(
indices=indices,
dataset=dataset,
cols_to_retain=cols_to_retain,
collate_fn=collate_fn,
collate_fn_args=collate_fn_args,
columns_to_np_types=columns_to_np_types,
return_dict=True,
)
# Now begins the fun part where we start shovelling shared memory at the parent process
out_arrays = {}
with SharedMemoryContext() as batch_shm_ctx:
# The batch shared memory context exists only as long as it takes for the parent process
# to read everything, after which it cleans everything up again
for col, cast_dtype in columns_to_np_types.items():
# Everything has to be np.array for this to work, even if the collate_fn is giving us tf.Tensor
array = batch[col]
if col in string_columns:
# We can't send unicode arrays over shared memory, so we convert to single chars ("U1")
# which have a fixed width of 4 bytes. The parent process will convert these back to strings.
array = array.view("U1").reshape(array.shape + (-1,))
shape_arrays[col][:] = array.shape
out_arrays[col] = batch_shm_ctx.get_array(
f"{worker_name}_{col}", shape=array.shape, dtype=cast_dtype, create=True
)
out_arrays[col][:] = array
array_ready_event.set()
array_loaded_event.wait()
array_loaded_event.clear()
with SharedMemoryContext() as shm_ctx:
shape_arrays = {
col: shm_ctx.get_array(f"{worker_name}_{col}_shape", shape=(rank,), dtype=np.int64, create=False)
for col, rank in columns_to_ranks.items()
}
for batch in indices:
send_batch_to_parent(batch)
if extra_batch is not None:
send_batch_to_parent(extra_batch)
# Now we send a batsignal to the parent process that we're done
for col, array in shape_arrays.items():
array[:] = -1
array_ready_event.set()
@staticmethod
def distribute_batches(dataset, batch_size, drop_remainder, num_workers, shuffle):
indices = np.arange(len(dataset))
if shuffle:
np.random.shuffle(indices)
num_samples = len(indices)
# We distribute the batches so that reading from the workers in round-robin order yields the exact
# order specified in indices. This is only important when shuffle is False, but we do it regardless.
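# Worked illustration (not executed; 10 samples, batch_size=3, num_workers=2, shuffle=False):
#   full batches -> [[0,1,2], [3,4,5], [6,7,8]], leftover incomplete batch -> [9]
#   worker 0 receives [[0,1,2], [6,7,8]], worker 1 receives [[3,4,5]] plus the extra batch [9],
#   so reading the workers round-robin yields samples 0..9 in their original order.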
incomplete_batch_cutoff = num_samples - (num_samples % batch_size)
indices, last_incomplete_batch = np.split(indices, [incomplete_batch_cutoff])
if drop_remainder or len(last_incomplete_batch) == 0:
last_incomplete_batch = None
indices = indices.reshape(-1, batch_size)
num_batches = len(indices)
final_batches_cutoff = num_batches - (num_batches % num_workers)
indices, final_batches = np.split(indices, [final_batches_cutoff])
indices = indices.reshape(-1, num_workers, batch_size)
per_worker_indices = np.split(indices, indices.shape[1], axis=1)
per_worker_indices = [np.squeeze(worker_indices, 1) for worker_indices in per_worker_indices]
# Distribute the final batches to the first workers
for i in range(len(final_batches)):
# len(final_batches) can be zero, and is always less than num_workers
per_worker_indices[i] = np.concatenate([per_worker_indices[i], final_batches[i].reshape(1, -1)], axis=0)
# Add the last incomplete batch to the next worker, which might be the first worker
if last_incomplete_batch is not None:
incomplete_batch_worker_idx = len(final_batches)
else:
incomplete_batch_worker_idx = None
return per_worker_indices, last_incomplete_batch, incomplete_batch_worker_idx
def multiprocess_dataset_to_tf(
dataset,
cols_to_retain,
collate_fn,
collate_fn_args,
columns_to_np_types,
output_signature,
shuffle,
batch_size,
drop_remainder,
num_workers,
):
"""Create a tf.data.Dataset from the underlying Dataset. This is a multi-process method - the single-process
equivalent is dataset_to_tf.
Args:
dataset (`Dataset`): Dataset to wrap with tf.data.Dataset.
cols_to_retain (`List[str]`): Dataset column(s) to load in the
tf.data.Dataset. It is acceptable to include column names that are created by the `collate_fn` and
that do not exist in the original dataset.
collate_fn(`Callable`): A function or callable object (such as a `DataCollator`) that will collate
lists of samples into a batch.
collate_fn_args (`Dict`): A `dict` of keyword arguments to be passed to the
`collate_fn`. Can be empty.
columns_to_np_types (`Dict[str, np.dtype]`): A `dict` mapping column names to numpy dtypes.
output_signature (`Dict[str, tf.TensorSpec]`): A `dict` mapping column names to
`tf.TensorSpec` objects.
shuffle(`bool`): Shuffle the dataset order when loading. Recommended True for training, False for
validation/evaluation.
batch_size (`int`, default `None`): Size of batches to load from the dataset. Defaults to `None`, which implies that
the dataset won't be batched, but the returned dataset can be batched later with `tf_dataset.batch(batch_size)`.
drop_remainder(`bool`, default `None`): Drop the last incomplete batch when loading. If not provided,
defaults to the same setting as shuffle.
num_workers (`int`): Number of workers to use for loading the dataset. Should be >= 1.
Returns:
`tf.data.Dataset`
"""
if config.TF_AVAILABLE:
import tensorflow as tf
else:
raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
data_generator = NumpyMultiprocessingGenerator(
dataset=dataset,
cols_to_retain=cols_to_retain,
collate_fn=collate_fn,
collate_fn_args=collate_fn_args,
columns_to_np_types=columns_to_np_types,
output_signature=output_signature,
shuffle=shuffle,
batch_size=batch_size,
drop_remainder=drop_remainder,
num_workers=num_workers,
)
tf_dataset = tf.data.Dataset.from_generator(data_generator, output_signature=output_signature)
if drop_remainder:
dataset_length = int(len(dataset) // batch_size)
else:
dataset_length = int(ceil(len(dataset) / batch_size))
return tf_dataset.apply(tf.data.experimental.assert_cardinality(dataset_length))
| datasets/src/datasets/utils/tf_utils.py/0 | {
"file_path": "datasets/src/datasets/utils/tf_utils.py",
"repo_id": "datasets",
"token_count": 11176
} | 82 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError(RuntimeError):
pass
def gen(shards: List[str]):
for shard in shards:
for i in range(NUM_ITEMS_PER_SHARD):
yield {"i": i, "shard": shard}
def main():
rank = int(os.environ["RANK"])
world_size = int(os.environ["WORLD_SIZE"])
parser = ArgumentParser()
parser.add_argument("--streaming", type=bool)
parser.add_argument("--local_rank", type=int)
parser.add_argument("--num_workers", type=int, default=0)
args = parser.parse_args()
streaming = args.streaming
num_workers = args.num_workers
gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
if not streaming:
ds = Dataset.from_list(list(ds))
ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)
full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
expected_local_size = full_size // world_size
expected_local_size += int(rank < (full_size % world_size))
local_size = sum(1 for _ in dataloader)
if local_size != expected_local_size:
raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
| datasets/tests/distributed_scripts/run_torch_distributed.py/0 | {
"file_path": "datasets/tests/distributed_scripts/run_torch_distributed.py",
"repo_id": "datasets",
"token_count": 617
} | 83 |
import os
import time
import uuid
from contextlib import contextmanager
from typing import Optional
import pytest
import requests
from huggingface_hub.hf_api import HfApi, RepositoryNotFoundError
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"
CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
)
@pytest.fixture
def ci_hub_config(monkeypatch):
monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)
@pytest.fixture
def set_ci_hub_access_token(ci_hub_config):
old_environ = dict(os.environ)
os.environ["HF_TOKEN"] = CI_HUB_USER_TOKEN
yield
os.environ.clear()
os.environ.update(old_environ)
@pytest.fixture(scope="session")
def hf_api():
return HfApi(endpoint=CI_HUB_ENDPOINT)
@pytest.fixture(scope="session")
def hf_token():
yield CI_HUB_USER_TOKEN
@pytest.fixture
def cleanup_repo(hf_api):
def _cleanup_repo(repo_id):
hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")
return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo):
@contextmanager
def _temporary_repo(repo_id: Optional[str] = None):
repo_id = repo_id or f"{CI_HUB_USER}/test-dataset-{uuid.uuid4().hex[:6]}-{int(time.time() * 10e3)}"
try:
yield repo_id
finally:
try:
cleanup_repo(repo_id)
except RepositoryNotFoundError:
pass
return _temporary_repo
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
repo_name = f"repo_txt_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(text_file),
path_in_repo="data/text_data.txt",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_csv_with_dir_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
repo_name = f"repo_zipped_img_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_image_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_img_data_
| datasets/tests/fixtures/hub.py/0 | {
"file_path": "datasets/tests/fixtures/hub.py",
"repo_id": "datasets",
"token_count": 2270
} | 84 |
import textwrap
import pyarrow as pa
import pytest
from datasets import Features, Value
from datasets.packaged_modules.json.json import Json
@pytest.fixture
def jsonl_file(tmp_path):
filename = tmp_path / "file.jsonl"
data = textwrap.dedent(
"""\
{"col_1": -1}
{"col_1": 1, "col_2": 2}
{"col_1": 10, "col_2": 20}
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def jsonl_file_utf16_encoded(tmp_path):
filename = tmp_path / "file_utf16_encoded.jsonl"
data = textwrap.dedent(
"""\
{"col_1": -1}
{"col_1": 1, "col_2": 2}
{"col_1": 10, "col_2": 20}
"""
)
with open(filename, "w", encoding="utf-16") as f:
f.write(data)
return str(filename)
@pytest.fixture
def json_file_with_list_of_dicts(tmp_path):
filename = tmp_path / "file_with_list_of_dicts.json"
data = textwrap.dedent(
"""\
[
{"col_1": -1},
{"col_1": 1, "col_2": 2},
{"col_1": 10, "col_2": 20}
]
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def json_file_with_list_of_strings(tmp_path):
filename = tmp_path / "file_with_list_of_strings.json"
data = textwrap.dedent(
"""\
[
"First text.",
"Second text.",
"Third text."
]
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def json_file_with_list_of_dicts_field(tmp_path):
filename = tmp_path / "file_with_list_of_dicts_field.json"
data = textwrap.dedent(
"""\
{
"field1": 1,
"field2": "aabb",
"field3": [
{"col_1": -1},
{"col_1": 1, "col_2": 2},
{"col_1": 10, "col_2": 20}
]
}
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.mark.parametrize(
"file_fixture, config_kwargs",
[
("jsonl_file", {}),
("jsonl_file_utf16_encoded", {"encoding": "utf-16"}),
("json_file_with_list_of_dicts", {}),
("json_file_with_list_of_dicts_field", {"field": "field3"}),
("json_file_with_list_of_strings", {}),
],
)
def test_json_generate_tables(file_fixture, config_kwargs, request):
json = Json(**config_kwargs)
generator = json._generate_tables([[request.getfixturevalue(file_fixture)]])
pa_table = pa.concat_tables([table for _, table in generator])
if file_fixture == "json_file_with_list_of_strings":
expected = {"text": ["First text.", "Second text.", "Third text."]}
else:
expected = {"col_1": [-1, 1, 10], "col_2": [None, 2, 20]}
assert pa_table.to_pydict() == expected
@pytest.mark.parametrize(
"file_fixture, config_kwargs",
[
(
"jsonl_file",
{"features": Features({"col_1": Value("int64"), "col_2": Value("int64"), "missing_col": Value("string")})},
),
(
"json_file_with_list_of_dicts",
{"features": Features({"col_1": Value("int64"), "col_2": Value("int64"), "missing_col": Value("string")})},
),
(
"json_file_with_list_of_dicts_field",
{
"field": "field3",
"features": Features(
{"col_1": Value("int64"), "col_2": Value("int64"), "missing_col": Value("string")}
),
},
),
],
)
def test_json_generate_tables_with_missing_features(file_fixture, config_kwargs, request):
json = Json(**config_kwargs)
generator = json._generate_tables([[request.getfixturevalue(file_fixture)]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.to_pydict() == {"col_1": [-1, 1, 10], "col_2": [None, 2, 20], "missing_col": [None, None, None]}
| datasets/tests/packaged_modules/test_json.py/0 | {
"file_path": "datasets/tests/packaged_modules/test_json.py",
"repo_id": "datasets",
"token_count": 2009
} | 85 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
Text data.
Second line of data."""
FILE_PATH = "file"
@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
data = bytes(FILE_CONTENT, "utf-8")
with zstd.open(path, "wb") as f:
f.write(data)
return path
@pytest.fixture
def tmpfs_file(tmpfs):
with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
f.write(FILE_CONTENT)
return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
input_path = input_paths[compression_format]
cache_dir = tmp_path / "cache"
download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
extracted_path = cached_path(input_path, download_config=download_config)
with open(extracted_path) as f:
extracted_file_content = f.read()
with open(text_file) as f:
expected_file_content = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
custom_cache_dir = "custom_cache"
custom_extracted_dir = "custom_extracted_dir"
custom_extracted_path = tmp_path / "custom_extracted_path"
if default_extracted:
expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
filename = xz_file
download_config = (
DownloadConfig(extract_compressed_file=True)
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
)
extracted_file_path = cached_path(filename, download_config=download_config)
assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
# input absolute path -> output absolute path
text_file_abs = str(Path(text_file).resolve())
assert os.path.samefile(cached_path(text_file_abs), text_file_abs)
# input relative path -> output absolute path
text_file = __file__
text_file_abs = str(Path(text_file).resolve())
text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
assert os.path.samefile(cached_path(text_file_rel), text_file_abs)
def test_cached_path_missing_local(tmp_path):
# absolute path
missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
with pytest.raises(FileNotFoundError):
cached_path(missing_file)
# relative path
missing_file = "./__missing_file__.txt"
with pytest.raises(FileNotFoundError):
cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
output_path = get_from_cache(f"tmp://{tmpfs_file}")
with open(output_path) as f:
output_file_content = f.read()
assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
with pytest.raises(OfflineModeIsEnabled):
cached_path("https://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
filename = tmp_path_factory.mktemp("data") / "file.html"
with pytest.raises(OfflineModeIsEnabled):
http_get("https://huggingface.co", temp_file=filename)
with pytest.raises(OfflineModeIsEnabled):
http_head("https://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
filename = tmp_path_factory.mktemp("data") / "file.html"
with pytest.raises(OfflineModeIsEnabled):
ftp_get("ftp://huggingface.co", temp_file=filename)
with pytest.raises(OfflineModeIsEnabled):
ftp_head("ftp://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
filename = tmp_path_factory.mktemp("data") / "file.html"
with pytest.raises(OfflineModeIsEnabled):
fsspec_get("s3://huggingface.co", temp_file=filename)
with pytest.raises(OfflineModeIsEnabled):
fsspec_head("s3://huggingface.co")
| datasets/tests/test_file_utils.py/0 | {
"file_path": "datasets/tests/test_file_utils.py",
"repo_id": "datasets",
"token_count": 2016
} | 86 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i): # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
with parallel_backend("spark"):
assert ParallelBackendConfig.backend_name == "spark"
lst = [1, 2, 3]
with pytest.raises(ValueError):
with parallel_backend("unsupported backend"):
map_nested(add_one, lst, num_proc=2)
with pytest.raises(ValueError):
with parallel_backend("unsupported backend"):
map_nested(add_one, lst, num_proc=-1)
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
s1 = [1, 2]
s2 = {"a": 1, "b": 2}
s3 = {"a": [1, 2], "b": [3, 4]}
s4 = {"a": {"1": 1}, "b": 2}
s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
expected_map_nested_s1 = [2, 3]
expected_map_nested_s2 = {"a": 2, "b": 3}
expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark"):
assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
| datasets/tests/test_parallel.py/0 | {
"file_path": "datasets/tests/test_parallel.py",
"repo_id": "datasets",
"token_count": 825
} | 87 |
- title: Unit 0. Welcome to the course
sections:
- local: unit0/introduction
title: Welcome to the course 🤗
- local: unit0/setup
title: Setup
- local: unit0/discord101
title: Discord 101
- title: Unit 1. Introduction to Deep Reinforcement Learning
sections:
- local: unit1/introduction
title: Introduction
- local: unit1/what-is-rl
title: What is Reinforcement Learning?
- local: unit1/rl-framework
title: The Reinforcement Learning Framework
- local: unit1/tasks
title: The type of tasks
- local: unit1/exp-exp-tradeoff
title: The Exploration/ Exploitation tradeoff
- local: unit1/two-methods
title: The two main approaches for solving RL problems
- local: unit1/deep-rl
title: The “Deep” in Deep Reinforcement Learning
- local: unit1/summary
title: Summary
- local: unit1/glossary
title: Glossary
- local: unit1/hands-on
title: Hands-on
- local: unit1/quiz
title: Quiz
- local: unit1/conclusion
title: Conclusion
- local: unit1/additional-readings
title: Additional Readings
- title: Bonus Unit 1. Introduction to Deep Reinforcement Learning with Huggy
sections:
- local: unitbonus1/introduction
title: Introduction
- local: unitbonus1/how-huggy-works
title: How Huggy works?
- local: unitbonus1/train
title: Train Huggy
- local: unitbonus1/play
title: Play with Huggy
- local: unitbonus1/conclusion
title: Conclusion
- title: Live 1. How the course works, Q&A, and playing with Huggy
sections:
- local: live1/live1
title: Live 1. How the course works, Q&A, and playing with Huggy 🐶
- title: Unit 2. Introduction to Q-Learning
sections:
- local: unit2/introduction
title: Introduction
- local: unit2/what-is-rl
title: What is RL? A short recap
- local: unit2/two-types-value-based-methods
title: The two types of value-based methods
- local: unit2/bellman-equation
title: The Bellman Equation, simplify our value estimation
- local: unit2/mc-vs-td
title: Monte Carlo vs Temporal Difference Learning
- local: unit2/mid-way-recap
title: Mid-way Recap
- local: unit2/mid-way-quiz
title: Mid-way Quiz
- local: unit2/q-learning
title: Introducing Q-Learning
- local: unit2/q-learning-example
title: A Q-Learning example
- local: unit2/q-learning-recap
title: Q-Learning Recap
- local: unit2/glossary
title: Glossary
- local: unit2/hands-on
title: Hands-on
- local: unit2/quiz2
title: Q-Learning Quiz
- local: unit2/conclusion
title: Conclusion
- local: unit2/additional-readings
title: Additional Readings
- title: Unit 3. Deep Q-Learning with Atari Games
sections:
- local: unit3/introduction
title: Introduction
- local: unit3/from-q-to-dqn
title: From Q-Learning to Deep Q-Learning
- local: unit3/deep-q-network
title: The Deep Q-Network (DQN)
- local: unit3/deep-q-algorithm
title: The Deep Q Algorithm
- local: unit3/glossary
title: Glossary
- local: unit3/hands-on
title: Hands-on
- local: unit3/quiz
title: Quiz
- local: unit3/conclusion
title: Conclusion
- local: unit3/additional-readings
title: Additional Readings
- title: Bonus Unit 2. Automatic Hyperparameter Tuning with Optuna
sections:
- local: unitbonus2/introduction
title: Introduction
- local: unitbonus2/optuna
title: Optuna
- local: unitbonus2/hands-on
title: Hands-on
- title: Unit 4. Policy Gradient with PyTorch
sections:
- local: unit4/introduction
title: Introduction
- local: unit4/what-are-policy-based-methods
title: What are the policy-based methods?
- local: unit4/advantages-disadvantages
title: The advantages and disadvantages of policy-gradient methods
- local: unit4/policy-gradient
title: Diving deeper into policy-gradient
- local: unit4/pg-theorem
title: (Optional) the Policy Gradient Theorem
- local: unit4/glossary
title: Glossary
- local: unit4/hands-on
title: Hands-on
- local: unit4/quiz
title: Quiz
- local: unit4/conclusion
title: Conclusion
- local: unit4/additional-readings
title: Additional Readings
- title: Unit 5. Introduction to Unity ML-Agents
sections:
- local: unit5/introduction
title: Introduction
- local: unit5/how-mlagents-works
title: How ML-Agents works?
- local: unit5/snowball-target
title: The SnowballTarget environment
- local: unit5/pyramids
title: The Pyramids environment
- local: unit5/curiosity
title: (Optional) What is curiosity in Deep Reinforcement Learning?
- local: unit5/hands-on
title: Hands-on
- local: unit5/bonus
title: Bonus. Learn to create your own environments with Unity and MLAgents
- local: unit5/quiz
title: Quiz
- local: unit5/conclusion
title: Conclusion
- title: Unit 6. Actor Critic methods with Robotics environments
sections:
- local: unit6/introduction
title: Introduction
- local: unit6/variance-problem
title: The Problem of Variance in Reinforce
- local: unit6/advantage-actor-critic
title: Advantage Actor Critic (A2C)
- local: unit6/hands-on
title: Advantage Actor Critic (A2C) using Robotics Simulations with Panda-Gym 🤖
- local: unit6/quiz
title: Quiz
- local: unit6/conclusion
title: Conclusion
- local: unit6/additional-readings
title: Additional Readings
- title: Unit 7. Introduction to Multi-Agents and AI vs AI
sections:
- local: unit7/introduction
title: Introduction
- local: unit7/introduction-to-marl
title: An introduction to Multi-Agents Reinforcement Learning (MARL)
- local: unit7/multi-agent-setting
title: Designing Multi-Agents systems
- local: unit7/self-play
title: Self-Play
- local: unit7/hands-on
title: Let's train our soccer team to beat your classmates' teams (AI vs. AI)
- local: unit7/quiz
title: Quiz
- local: unit7/conclusion
title: Conclusion
- local: unit7/additional-readings
title: Additional Readings
- title: Unit 8. Part 1 Proximal Policy Optimization (PPO)
sections:
- local: unit8/introduction
title: Introduction
- local: unit8/intuition-behind-ppo
title: The intuition behind PPO
- local: unit8/clipped-surrogate-objective
title: Introducing the Clipped Surrogate Objective Function
- local: unit8/visualize
title: Visualize the Clipped Surrogate Objective Function
- local: unit8/hands-on-cleanrl
title: PPO with CleanRL
- local: unit8/conclusion
title: Conclusion
- local: unit8/additional-readings
title: Additional Readings
- title: Unit 8. Part 2 Proximal Policy Optimization (PPO) with Doom
sections:
- local: unit8/introduction-sf
title: Introduction
- local: unit8/hands-on-sf
title: PPO with Sample Factory and Doom
- local: unit8/conclusion-sf
title: Conclusion
- title: Bonus Unit 3. Advanced Topics in Reinforcement Learning
sections:
- local: unitbonus3/introduction
title: Introduction
- local: unitbonus3/model-based
title: Model-Based Reinforcement Learning
- local: unitbonus3/offline-online
title: Offline vs. Online Reinforcement Learning
- local: unitbonus3/generalisation
title: Generalisation Reinforcement Learning
- local: unitbonus3/rlhf
title: Reinforcement Learning from Human Feedback
- local: unitbonus3/decision-transformers
title: Decision Transformers and Offline RL
- local: unitbonus3/language-models
title: Language models in RL
- local: unitbonus3/curriculum-learning
title: (Automatic) Curriculum Learning for RL
- local: unitbonus3/envs-to-try
title: Interesting environments to try
- local: unitbonus3/learning-agents
title: An introduction to Unreal Learning Agents
- local: unitbonus3/godotrl
title: An Introduction to Godot RL
- local: unitbonus3/student-works
title: Students projects
- local: unitbonus3/rl-documentation
title: Brief introduction to RL documentation
- title: Certification and congratulations
sections:
- local: communication/conclusion
title: Congratulations
- local: communication/certification
title: Get your certificate of completion
| deep-rl-class/units/en/_toctree.yml/0 | {
"file_path": "deep-rl-class/units/en/_toctree.yml",
"repo_id": "deep-rl-class",
"token_count": 2743
} | 88 |
# Summary [[summary]]
That was a lot of information! Let's summarize:
- Reinforcement Learning is a computational approach to learning from actions. We build an agent that learns from the environment **by interacting with it through trial and error** and receiving rewards (negative or positive) as feedback.
- The goal of any RL agent is to maximize its expected cumulative reward (also called expected return) because RL is based on the **reward hypothesis**, which states that **all goals can be described as the maximization of the expected cumulative reward.**
- The RL process is a loop that outputs a sequence of **state, action, reward and next state.**
- To calculate the expected cumulative reward (expected return), we discount the rewards: rewards that come sooner (at the beginning of the game) **are more likely to happen, since they are more predictable than rewards in the long-term future** (a short numerical sketch is shown after this summary).
- To solve an RL problem, you want to **find an optimal policy**. The policy is the “brain” of your agent, which will tell us **what action to take given a state.** The optimal policy is the one which **gives you the actions that maximize the expected return.**
- There are two ways to find your optimal policy:
1. By training your policy directly: **policy-based methods.**
2. By training a value function that tells us the expected return the agent will get at each state and use this function to define our policy: **value-based methods.**
- Finally, we speak about Deep RL because we introduce **deep neural networks to estimate the action to take (policy-based) or to estimate the value of a state (value-based)**, hence the name “deep”.
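As a small illustration of the discounting idea above (this example is not from the course itself; the `gamma` value and the reward list are made up):
```python
gamma = 0.99  # discount rate (made-up value for this example)
rewards = [1.0, 1.0, 1.0, 1.0]  # rewards collected at timesteps t, t+1, t+2, t+3
# Discounted return: rewards that come sooner are weighted more than later ones
discounted_return = sum(gamma**k * r for k, r in enumerate(rewards))
print(discounted_return)  # 1 + 0.99 + 0.99**2 + 0.99**3 ≈ 3.94
```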
| deep-rl-class/units/en/unit1/summary.mdx/0 | {
"file_path": "deep-rl-class/units/en/unit1/summary.mdx",
"repo_id": "deep-rl-class",
"token_count": 382
} | 89 |
# Second Quiz [[quiz2]]
The best way to learn and [to avoid the illusion of competence](https://www.coursera.org/lecture/learning-how-to-learn/illusions-of-competence-BuFzf) **is to test yourself.** This will help you to find **where you need to reinforce your knowledge**.
### Q1: What is Q-Learning?
<Question
choices={[
{
text: "The algorithm we use to train our Q-function",
explain: "",
correct: true
},
{
text: "A value function",
explain: "It's an action-value function since it determines the value of being at a particular state and taking a specific action at that state",
},
{
text: "An algorithm that determines the value of being at a particular state and taking a specific action at that state",
explain: "Q-function is the function that determines the value of being at a particular state and taking a specific action at that state.",
},
{
text: "A table",
explain: "Q-learning is not a Q-table. The Q-function is the algorithm that will feed the Q-table."
}
]}
/>
### Q2: What is a Q-table?
<Question
choices={[
{
text: "An algorithm we use in Q-Learning",
explain: "",
},
{
text: "Q-table is the internal memory of our agent",
explain: "",
correct: true
},
{
text: "In Q-table each cell corresponds a state value",
explain: "Each cell corresponds to a state-action value pair value. Not a state value.",
}
]}
/>
### Q3: Why do we have an optimal policy if we have an optimal Q-function Q*?
<details>
<summary>Solution</summary>
Because if we have an optimal Q-function, we have an optimal policy, since we know, for each state, the best action to take.
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/link-value-policy.jpg" alt="link value policy"/>
</details>
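In symbols (using this unit's notation), this is the standard greedy-policy relation: \\( \pi^{*}(s) = \arg\max_{a} Q^{*}(s, a) \\).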
### Q4: Can you explain what the Epsilon-Greedy Strategy is?
<details>
<summary>Solution</summary>
The Epsilon-Greedy Strategy is a policy that handles the exploration/exploitation trade-off.
The idea is that we define epsilon ɛ = 1.0:
- With *probability 1 - ɛ*: we do exploitation (i.e. our agent selects the action with the highest state-action pair value).
- With *probability ɛ*: we do exploration (trying a random action).
(A short code sketch of this strategy is shown right after this solution.)
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-4.jpg" alt="Epsilon Greedy"/>
</details>
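As an illustration only (not part of the original quiz), here is a minimal sketch of this strategy for a tabular agent; `q_table`, `state` and `n_actions` are hypothetical names introduced just for this example:
```python
import numpy as np

def epsilon_greedy_action(q_table, state, epsilon, n_actions):
    # With probability epsilon: explore (pick a random action)
    if np.random.random() < epsilon:
        return np.random.randint(n_actions)
    # With probability 1 - epsilon: exploit (pick the action with the highest Q-value)
    return int(np.argmax(q_table[state]))
```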
### Q5: How do we update the Q value of a state, action pair?
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-update-ex.jpg" alt="Q Update exercise"/>
<details>
<summary>Solution</summary>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-update-solution.jpg" alt="Q Update exercise"/>
</details>
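For reference, the classic Q-Learning update rule (which the solution applies) is: \\( Q(S_t, A_t) \leftarrow Q(S_t, A_t) + \alpha \left[ R_{t+1} + \gamma \max_{a} Q(S_{t+1}, a) - Q(S_t, A_t) \right] \\).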
### Q6: What's the difference between on-policy and off-policy?
<details>
<summary>Solution</summary>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/off-on-4.jpg" alt="On/off policy"/>
</details>
Congrats on finishing this quiz 🥳. If you missed some elements, take time to read the chapter again to reinforce (😏) your knowledge.
| deep-rl-class/units/en/unit2/quiz2.mdx/0 | {
"file_path": "deep-rl-class/units/en/unit2/quiz2.mdx",
"repo_id": "deep-rl-class",
"token_count": 1097
} | 90 |
# Hands on
<CourseFloatingBanner classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/deep-rl-class/blob/main/notebooks/unit4/unit4.ipynb"}
]}
askForHelpUrl="http://hf.co/join/discord" />
Now that we've studied the theory behind Reinforce, **you're ready to code your Reinforce agent with PyTorch**. And you'll test its robustness using CartPole-v1 and PixelCopter.
You'll then be able to iterate and improve this implementation for more advanced environments.
<figure class="image table text-center m-0 w-full">
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/envs.gif" alt="Environments"/>
</figure>
To validate this hands-on for the certification process, you need to push your trained models to the Hub and:
- Get a result of >= 350 for `CartPole-v1`
- Get a result of >= 5 for `PixelCopter`.
To find your result, go to the leaderboard and find your model; **the result = mean_reward - std of reward**. **If you don't see your model on the leaderboard, go to the bottom of the leaderboard page and click on the refresh button.**
For more information about the certification process, check this section 👉 https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process
And you can check your progress here 👉 https://huggingface.co/spaces/ThomasSimonini/Check-my-progress-Deep-RL-Course
**To start the hands-on, click on the Open In Colab button** 👉:
[](https://colab.research.google.com/github/huggingface/deep-rl-class/blob/master/notebooks/unit4/unit4.ipynb)
We strongly **recommend students use Google Colab for the hands-on exercises** instead of running them on their personal computers.
By using Google Colab, **you can focus on learning and experimenting without worrying about the technical aspects** of setting up your environments.
# Unit 4: Code your first Deep Reinforcement Learning Algorithm with PyTorch: Reinforce. And test its robustness 💪
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/thumbnail.png" alt="thumbnail"/>
In this notebook, you'll code your first Deep Reinforcement Learning algorithm from scratch: Reinforce (also called Monte Carlo Policy Gradient).
Reinforce is a *Policy-based method*: a Deep Reinforcement Learning algorithm that tries **to optimize the policy directly without using an action-value function**.
More precisely, Reinforce is a *Policy-gradient method*, a subclass of *Policy-based methods* that aims **to optimize the policy directly by estimating the weights of the optimal policy using gradient ascent**.
To test its robustness, we're going to train it in 2 different simple environments:
- Cartpole-v1
- PixelcopterEnv
⬇️ Here is an example of what **you will achieve at the end of this notebook.** ⬇️
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/envs.gif" alt="Environments"/>
### 🎮 Environments:
- [CartPole-v1](https://www.gymlibrary.dev/environments/classic_control/cart_pole/)
- [PixelCopter](https://pygame-learning-environment.readthedocs.io/en/latest/user/games/pixelcopter.html)
### 📚 RL-Library:
- Python
- PyTorch
We're constantly trying to improve our tutorials, so **if you find some issues in this notebook**, please [open an issue on the GitHub Repo](https://github.com/huggingface/deep-rl-class/issues).
## Objectives of this notebook 🏆
At the end of the notebook, you will:
- Be able to **code a Reinforce algorithm from scratch using PyTorch.**
- Be able to **test the robustness of your agent using simple environments.**
- Be able to **push your trained agent to the Hub** with a nice video replay and an evaluation score 🔥.
## Prerequisites 🏗️
Before diving into the notebook, you need to:
🔲 📚 [Study Policy Gradients by reading Unit 4](https://huggingface.co/deep-rl-course/unit4/introduction)
# Let's code the Reinforce algorithm from scratch 🔥
## Some advice 💡
It's better to run this colab in a copy on your Google Drive, so that **if it times out** you still have the saved notebook on your Google Drive and do not need to fill everything in from scratch.
To do that you can either do `Ctrl + S` or `File > Save a copy in Google Drive.`
## Set the GPU 💪
- To **accelerate the agent's training, we'll use a GPU**. To do that, go to `Runtime > Change Runtime type`
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/gpu-step1.jpg" alt="GPU Step 1">
- `Hardware Accelerator > GPU`
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/gpu-step2.jpg" alt="GPU Step 2">
## Create a virtual display 🖥
During the notebook, we'll need to generate a replay video. To do so, with colab, **we need to have a virtual screen to be able to render the environment** (and thus record the frames).
The following cell will install the libraries and create and run a virtual screen 🖥
```python
%%capture
!apt install python-opengl
!apt install ffmpeg
!apt install xvfb
!pip install pyvirtualdisplay
!pip install pyglet==1.5.1
```
```python
# Virtual display
from pyvirtualdisplay import Display
virtual_display = Display(visible=0, size=(1400, 900))
virtual_display.start()
```
## Install the dependencies 🔽
The first step is to install the dependencies. We'll install multiple ones:
- `gym`
- `gym-games`: Extra gym environments made with PyGame.
- `huggingface_hub`: The Hub works as a central place where anyone can share and explore models and datasets. It has versioning, metrics, visualizations, and other features that will allow you to easily collaborate with others.
You may be wondering why we install gym and not gymnasium, a more recent version of gym. **Because the gym-games we are using have not been updated for gymnasium yet**.
The differences you'll encounter here:
- In `gym` we don't have `terminated` and `truncated` but only `done`.
- In `gym`, `env.step()` returns `state, reward, done, info`
You can learn more about the differences between Gym and Gymnasium here 👉 https://gymnasium.farama.org/content/migration-guide/
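To make the difference concrete, here is a small illustrative sketch (not part of the original notebook) comparing the two APIs on CartPole:
```python
import gym

env = gym.make("CartPole-v1")

# gym API (used in this notebook): reset() returns the state,
# and step() returns a single `done` flag
state = env.reset()
state, reward, done, info = env.step(env.action_space.sample())

# gymnasium API (shown only for comparison, not used here):
# state, info = env.reset()
# state, reward, terminated, truncated, info = env.step(action)
# done = terminated or truncated
```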
You can see here all the Reinforce models available 👉 https://huggingface.co/models?other=reinforce
And you can find all the Deep Reinforcement Learning models here 👉 https://huggingface.co/models?pipeline_tag=reinforcement-learning
```bash
!pip install -r https://raw.githubusercontent.com/huggingface/deep-rl-class/main/notebooks/unit4/requirements-unit4.txt
```
## Import the packages 📦
In addition to importing the installed libraries, we also import:
- `imageio`: A library that will help us to generate a replay video
```python
import numpy as np
from collections import deque
import matplotlib.pyplot as plt
%matplotlib inline
# PyTorch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
# Gym
import gym
import gym_pygame
# Hugging Face Hub
from huggingface_hub import notebook_login # To log to our Hugging Face account to be able to upload models to the Hub.
import imageio
```
## Check if we have a GPU
- Let's check if we have a GPU
- If that's the case, you should see `cuda:0`
```python
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
```
```python
print(device)
```
We're now ready to implement our Reinforce algorithm 🔥
# First agent: Playing CartPole-v1 🤖
## Create the CartPole environment and understand how it works
### [The environment 🎮](https://www.gymlibrary.dev/environments/classic_control/cart_pole/)
### Why do we use a simple environment like CartPole-v1?
As explained in [Reinforcement Learning Tips and Tricks](https://stable-baselines3.readthedocs.io/en/master/guide/rl_tips.html), when you implement your agent from scratch, you need **to be sure that it works correctly and find bugs with easy environments before going deeper** as finding bugs will be much easier in simple environments.
> Try to have some โsign of lifeโ on toy problems
> Validate the implementation by making it run on harder and harder envs (you can compare results against the RL zoo). You usually need to run hyperparameter optimization for that step.
### The CartPole-v1 environment
> A pole is attached by an un-actuated joint to a cart, which moves along a frictionless track. The pendulum is placed upright on the cart and the goal is to balance the pole by applying forces in the left and right direction on the cart.
So, we start with CartPole-v1. The goal is to push the cart left or right **so that the pole stays in equilibrium.**
The episode ends if:
- The pole Angle is greater than ยฑ12ยฐ
- The Cart Position is greater than ยฑ2.4
- The episode length is greater than 500
We get a reward 💰 of +1 for every timestep that the pole stays in equilibrium.
```python
env_id = "CartPole-v1"
# Create the env
env = gym.make(env_id)
# Create the evaluation env
eval_env = gym.make(env_id)
# Get the state space and action space
s_size = env.observation_space.shape[0]
a_size = env.action_space.n
```
```python
print("_____OBSERVATION SPACE_____ \n")
print("The State Space is: ", s_size)
print("Sample observation", env.observation_space.sample()) # Get a random observation
```
```python
print("\n _____ACTION SPACE_____ \n")
print("The Action Space is: ", a_size)
print("Action Space Sample", env.action_space.sample()) # Take a random action
```
## Let's build the Reinforce Architecture
This implementation is based on three implementations:
- [PyTorch official Reinforcement Learning example](https://github.com/pytorch/examples/blob/main/reinforcement_learning/reinforce.py)
- [Udacity Reinforce](https://github.com/udacity/deep-reinforcement-learning/blob/master/reinforce/REINFORCE.ipynb)
- [Improvement of the integration by Chris1nexus](https://github.com/huggingface/deep-rl-class/pull/95)
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/reinforce.png" alt="Reinforce"/>
So we want:
- Two fully connected layers (fc1 and fc2).
- To use ReLU as the activation function of fc1
- To use Softmax to output a probability distribution over actions
```python
class Policy(nn.Module):
def __init__(self, s_size, a_size, h_size):
super(Policy, self).__init__()
# Create two fully connected layers
def forward(self, x):
# Define the forward pass
# state goes to fc1 then we apply ReLU activation function
# fc1 outputs goes to fc2
# We output the softmax
def act(self, state):
"""
Given a state, take action
"""
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
probs = self.forward(state).cpu()
m = Categorical(probs)
action = np.argmax(m)
return action.item(), m.log_prob(action)
```
### Solution
```python
class Policy(nn.Module):
def __init__(self, s_size, a_size, h_size):
super(Policy, self).__init__()
self.fc1 = nn.Linear(s_size, h_size)
self.fc2 = nn.Linear(h_size, a_size)
def forward(self, x):
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.softmax(x, dim=1)
def act(self, state):
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
probs = self.forward(state).cpu()
m = Categorical(probs)
action = np.argmax(m)
return action.item(), m.log_prob(action)
```
I made a mistake, can you guess where?
- To find out let's make a forward pass:
```python
debug_policy = Policy(s_size, a_size, 64).to(device)
debug_policy.act(env.reset())
```
- Here we see that the error says `ValueError: The value argument to log_prob must be a Tensor`
- It means that `action` in `m.log_prob(action)` must be a Tensor **but it's not.**
- Do you know why? Check the act function and try to see why it does not work.
Advice 💡: Something is wrong in this implementation. Remember that for the act function **we want to sample an action from the probability distribution over actions**.
### (Real) Solution
```python
class Policy(nn.Module):
def __init__(self, s_size, a_size, h_size):
super(Policy, self).__init__()
self.fc1 = nn.Linear(s_size, h_size)
self.fc2 = nn.Linear(h_size, a_size)
def forward(self, x):
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.softmax(x, dim=1)
def act(self, state):
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
probs = self.forward(state).cpu()
m = Categorical(probs)
action = m.sample()
return action.item(), m.log_prob(action)
```
By using CartPole, it was easier to debug since **we know that the bug comes from our integration and not from our simple environment**.
- Since **we want to sample an action from the probability distribution over actions**, we can't use `action = np.argmax(m)` since it will always output the action that has the highest probability.
- We need to replace this with `action = m.sample()` which will sample an action from the probability distribution P(.|s)
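To see the difference concretely, here is a small standalone example (made up for illustration) comparing the deterministic argmax over the probabilities with `sample()`:
```python
import torch
from torch.distributions import Categorical

probs = torch.tensor([[0.1, 0.2, 0.7]])  # made-up action probabilities
m = Categorical(probs)

# argmax is deterministic: it always returns action 2 here
print(torch.argmax(probs, dim=1))  # tensor([2])

# sample() is stochastic: it draws an action according to the probabilities,
# and log_prob then works because the action is a Tensor
action = m.sample()
print(action.item(), m.log_prob(action))
```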
### Let's build the Reinforce Training Algorithm
This is the Reinforce algorithm pseudocode:
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/pg_pseudocode.png" alt="Policy gradient pseudocode"/>
- When we calculate the return Gt (line 6), we see that we calculate the sum of discounted rewards **starting at timestep t**.
- Why? Because our policy should only **reinforce actions on the basis of their consequences**: rewards obtained before taking an action are useless (since they were not caused by the action); **only the ones that come after the action matter**.
- Before coding this you should read this section [don't let the past distract you](https://spinningup.openai.com/en/latest/spinningup/rl_intro3.html#don-t-let-the-past-distract-you) that explains why we use reward-to-go policy gradient.
We use an interesting technique coded by [Chris1nexus](https://github.com/Chris1nexus) to **compute the return at each timestep efficiently**. The comments explain the procedure. Also, don't hesitate [to check the PR explanation](https://github.com/huggingface/deep-rl-class/pull/95).
But overall, the idea is to **compute the return at each timestep efficiently**.
The second question you may ask is **why do we minimize the loss**? Didn't we talk about Gradient Ascent, not Gradient Descent earlier?
- We want to maximize our utility function $J(\theta)$, but in PyTorch and TensorFlow, it's better to **minimize an objective function.**
- So let's say we want to reinforce action 3 at a certain timestep. Before training, the probability P of this action is 0.25.
- So we want to modify \\( \theta \\) such that \\( \pi_\theta(a_3|s; \theta) > 0.25 \\)
- Because all P must sum to 1, maximizing \\( \pi_\theta(a_3|s; \theta) \\) will **minimize the probability of the other actions.**
- So we should tell PyTorch **to minimize \\( 1 - \pi_\theta(a_3|s; \theta) \\).**
- This loss function approaches 0 as \\( \pi_\theta(a_3|s; \theta) \\) nears 1.
- So we are encouraging the gradient to maximize \\( \pi_\theta(a_3|s; \theta) \\).
```python
def reinforce(policy, optimizer, n_training_episodes, max_t, gamma, print_every):
# Help us to calculate the score during the training
scores_deque = deque(maxlen=100)
scores = []
# Line 3 of pseudocode
for i_episode in range(1, n_training_episodes+1):
saved_log_probs = []
rewards = []
state = # TODO: reset the environment
# Line 4 of pseudocode
for t in range(max_t):
action, log_prob = # TODO get the action
saved_log_probs.append(log_prob)
state, reward, done, _ = # TODO: take an env step
rewards.append(reward)
if done:
break
scores_deque.append(sum(rewards))
scores.append(sum(rewards))
# Line 6 of pseudocode: calculate the return
returns = deque(maxlen=max_t)
n_steps = len(rewards)
# Compute the discounted returns at each timestep,
# as the sum of the gamma-discounted return at time t (G_t) + the reward at time t
# In O(N) time, where N is the number of time steps
# (this definition of the discounted return G_t follows the definition of this quantity
# shown at page 44 of Sutton&Barto 2017 2nd draft)
# G_t = r_(t+1) + r_(t+2) + ...
# Given this formulation, the returns at each timestep t can be computed
# by re-using the computed future returns G_(t+1) to compute the current return G_t
# G_t = r_(t+1) + gamma*G_(t+1)
# G_(t-1) = r_t + gamma* G_t
# (this follows a dynamic programming approach, with which we memorize solutions in order
# to avoid computing them multiple times)
# This is correct since the above is equivalent to (see also page 46 of Sutton&Barto 2017 2nd draft)
# G_(t-1) = r_t + gamma*r_(t+1) + gamma*gamma*r_(t+2) + ...
## Given the above, we calculate the returns at timestep t as:
# gamma[t] * return[t] + reward[t]
#
## We compute this starting from the last timestep to the first, in order
## to employ the formula presented above and avoid redundant computations that would be needed
## if we were to do it from first to last.
## Hence, the queue "returns" will hold the returns in chronological order, from t=0 to t=n_steps
## thanks to the appendleft() function which allows to append to the position 0 in constant time O(1)
## a normal python list would instead require O(N) to do this.
for t in range(n_steps)[::-1]:
disc_return_t = (returns[0] if len(returns)>0 else 0)
returns.appendleft( ) # TODO: complete here
## standardization of the returns is employed to make training more stable
eps = np.finfo(np.float32).eps.item()
## eps is the smallest representable float, which is
# added to the standard deviation of the returns to avoid numerical instabilities
returns = torch.tensor(returns)
returns = (returns - returns.mean()) / (returns.std() + eps)
# Line 7:
policy_loss = []
for log_prob, disc_return in zip(saved_log_probs, returns):
policy_loss.append(-log_prob * disc_return)
policy_loss = torch.cat(policy_loss).sum()
# Line 8: PyTorch prefers gradient descent
optimizer.zero_grad()
policy_loss.backward()
optimizer.step()
if i_episode % print_every == 0:
print('Episode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)))
return scores
```
#### Solution
```python
def reinforce(policy, optimizer, n_training_episodes, max_t, gamma, print_every):
# Help us to calculate the score during the training
scores_deque = deque(maxlen=100)
scores = []
# Line 3 of pseudocode
for i_episode in range(1, n_training_episodes + 1):
saved_log_probs = []
rewards = []
state = env.reset()
# Line 4 of pseudocode
for t in range(max_t):
action, log_prob = policy.act(state)
saved_log_probs.append(log_prob)
state, reward, done, _ = env.step(action)
rewards.append(reward)
if done:
break
scores_deque.append(sum(rewards))
scores.append(sum(rewards))
# Line 6 of pseudocode: calculate the return
returns = deque(maxlen=max_t)
n_steps = len(rewards)
# Compute the discounted returns at each timestep,
# as
# the sum of the gamma-discounted return at time t (G_t) + the reward at time t
#
# In O(N) time, where N is the number of time steps
# (this definition of the discounted return G_t follows the definition of this quantity
# shown at page 44 of Sutton&Barto 2017 2nd draft)
# G_t = r_(t+1) + r_(t+2) + ...
# Given this formulation, the returns at each timestep t can be computed
# by re-using the computed future returns G_(t+1) to compute the current return G_t
# G_t = r_(t+1) + gamma*G_(t+1)
# G_(t-1) = r_t + gamma* G_t
# (this follows a dynamic programming approach, with which we memorize solutions in order
# to avoid computing them multiple times)
# This is correct since the above is equivalent to (see also page 46 of Sutton&Barto 2017 2nd draft)
# G_(t-1) = r_t + gamma*r_(t+1) + gamma*gamma*r_(t+2) + ...
## Given the above, we calculate the returns at timestep t as:
# gamma[t] * return[t] + reward[t]
#
## We compute this starting from the last timestep to the first, in order
## to employ the formula presented above and avoid redundant computations that would be needed
## if we were to do it from first to last.
## Hence, the queue "returns" will hold the returns in chronological order, from t=0 to t=n_steps
## thanks to the appendleft() function which allows to append to the position 0 in constant time O(1)
## a normal python list would instead require O(N) to do this.
for t in range(n_steps)[::-1]:
disc_return_t = returns[0] if len(returns) > 0 else 0
returns.appendleft(gamma * disc_return_t + rewards[t])
## standardization of the returns is employed to make training more stable
eps = np.finfo(np.float32).eps.item()
## eps is the smallest representable float, which is
# added to the standard deviation of the returns to avoid numerical instabilities
returns = torch.tensor(returns)
returns = (returns - returns.mean()) / (returns.std() + eps)
# Line 7:
policy_loss = []
for log_prob, disc_return in zip(saved_log_probs, returns):
policy_loss.append(-log_prob * disc_return)
policy_loss = torch.cat(policy_loss).sum()
# Line 8: PyTorch prefers gradient descent
optimizer.zero_grad()
policy_loss.backward()
optimizer.step()
if i_episode % print_every == 0:
print("Episode {}\tAverage Score: {:.2f}".format(i_episode, np.mean(scores_deque)))
return scores
```
## Train it
- We're now ready to train our agent.
- But first, we define a variable containing all the training hyperparameters.
- You can change the training parameters (and should 😉)
```python
cartpole_hyperparameters = {
"h_size": 16,
"n_training_episodes": 1000,
"n_evaluation_episodes": 10,
"max_t": 1000,
"gamma": 1.0,
"lr": 1e-2,
"env_id": env_id,
"state_space": s_size,
"action_space": a_size,
}
```
```python
# Create policy and place it to the device
cartpole_policy = Policy(
cartpole_hyperparameters["state_space"],
cartpole_hyperparameters["action_space"],
cartpole_hyperparameters["h_size"],
).to(device)
cartpole_optimizer = optim.Adam(cartpole_policy.parameters(), lr=cartpole_hyperparameters["lr"])
```
```python
scores = reinforce(
cartpole_policy,
cartpole_optimizer,
cartpole_hyperparameters["n_training_episodes"],
cartpole_hyperparameters["max_t"],
cartpole_hyperparameters["gamma"],
100,
)
```
## Define the evaluation method 📝
- Here we define the evaluation method that we're going to use to test our Reinforce agent.
```python
def evaluate_agent(env, max_steps, n_eval_episodes, policy):
"""
Evaluate the agent for ``n_eval_episodes`` episodes and returns average reward and std of reward.
:param env: The evaluation environment
:param n_eval_episodes: Number of episode to evaluate the agent
:param policy: The Reinforce agent
"""
episode_rewards = []
for episode in range(n_eval_episodes):
state = env.reset()
step = 0
done = False
total_rewards_ep = 0
for step in range(max_steps):
action, _ = policy.act(state)
new_state, reward, done, info = env.step(action)
total_rewards_ep += reward
if done:
break
state = new_state
episode_rewards.append(total_rewards_ep)
mean_reward = np.mean(episode_rewards)
std_reward = np.std(episode_rewards)
return mean_reward, std_reward
```
## Evaluate our agent 📈
```python
evaluate_agent(
eval_env, cartpole_hyperparameters["max_t"], cartpole_hyperparameters["n_evaluation_episodes"], cartpole_policy
)
```
### Publish our trained model on the Hub 🔥
Now that we saw we got good results after the training, we can publish our trained model on the Hub 🤗 with one line of code.
Here's an example of a Model Card:
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/modelcard.png"/>
### Push to the Hub
#### Do not modify this code
```python
from huggingface_hub import HfApi, snapshot_download
from huggingface_hub.repocard import metadata_eval_result, metadata_save
from pathlib import Path
import datetime
import json
import imageio
import tempfile
import os
```
```python
def record_video(env, policy, out_directory, fps=30):
"""
Generate a replay video of the agent
:param env
:param Qtable: Qtable of our agent
:param out_directory
:param fps: how many frame per seconds (with taxi-v3 and frozenlake-v1 we use 1)
"""
images = []
done = False
state = env.reset()
img = env.render(mode="rgb_array")
images.append(img)
while not done:
# Take the action (index) that have the maximum expected future reward given that state
action, _ = policy.act(state)
state, reward, done, info = env.step(action) # We directly put next_state = state for recording logic
img = env.render(mode="rgb_array")
images.append(img)
imageio.mimsave(out_directory, [np.array(img) for i, img in enumerate(images)], fps=fps)
```
```python
def push_to_hub(repo_id,
model,
hyperparameters,
eval_env,
video_fps=30
):
"""
Evaluate, Generate a video and Upload a model to Hugging Face Hub.
This method does the complete pipeline:
- It evaluates the model
- It generates the model card
- It generates a replay video of the agent
- It pushes everything to the Hub
:param repo_id: repo_id: id of the model repository from the Hugging Face Hub
:param model: the pytorch model we want to save
:param hyperparameters: training hyperparameters
:param eval_env: evaluation environment
:param video_fps: how many frame per seconds to record our video replay
"""
_, repo_name = repo_id.split("/")
api = HfApi()
# Step 1: Create the repo
repo_url = api.create_repo(
repo_id=repo_id,
exist_ok=True,
)
with tempfile.TemporaryDirectory() as tmpdirname:
local_directory = Path(tmpdirname)
# Step 2: Save the model
torch.save(model, local_directory / "model.pt")
# Step 3: Save the hyperparameters to JSON
with open(local_directory / "hyperparameters.json", "w") as outfile:
json.dump(hyperparameters, outfile)
# Step 4: Evaluate the model and build JSON
mean_reward, std_reward = evaluate_agent(eval_env,
hyperparameters["max_t"],
hyperparameters["n_evaluation_episodes"],
model)
# Get datetime
eval_datetime = datetime.datetime.now()
eval_form_datetime = eval_datetime.isoformat()
evaluate_data = {
"env_id": hyperparameters["env_id"],
"mean_reward": mean_reward,
"n_evaluation_episodes": hyperparameters["n_evaluation_episodes"],
"eval_datetime": eval_form_datetime,
}
# Write a JSON file
with open(local_directory / "results.json", "w") as outfile:
json.dump(evaluate_data, outfile)
# Step 5: Create the model card
env_name = hyperparameters["env_id"]
metadata = {}
metadata["tags"] = [
env_name,
"reinforce",
"reinforcement-learning",
"custom-implementation",
"deep-rl-class"
]
# Add metrics
eval = metadata_eval_result(
model_pretty_name=repo_name,
task_pretty_name="reinforcement-learning",
task_id="reinforcement-learning",
metrics_pretty_name="mean_reward",
metrics_id="mean_reward",
metrics_value=f"{mean_reward:.2f} +/- {std_reward:.2f}",
dataset_pretty_name=env_name,
dataset_id=env_name,
)
# Merges both dictionaries
metadata = {**metadata, **eval}
model_card = f"""
# **Reinforce** Agent playing **{env_id}**
This is a trained model of a **Reinforce** agent playing **{env_id}** .
To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
"""
readme_path = local_directory / "README.md"
readme = ""
if readme_path.exists():
with readme_path.open("r", encoding="utf8") as f:
readme = f.read()
else:
readme = model_card
with readme_path.open("w", encoding="utf-8") as f:
f.write(readme)
# Save our metrics to Readme metadata
metadata_save(readme_path, metadata)
# Step 6: Record a video
video_path = local_directory / "replay.mp4"
record_video(env, model, video_path, video_fps)
# Step 7. Push everything to the Hub
api.upload_folder(
repo_id=repo_id,
folder_path=local_directory,
path_in_repo=".",
)
print(f"Your model is pushed to the Hub. You can view your model here: {repo_url}")
```
By using `push_to_hub`, **you evaluate, record a replay, generate a model card of your agent, and push it to the Hub**.
This way:
- You can **showcase your work** 🔥
- You can **visualize your agent playing** 👀
- You can **share an agent with the community that others can use** 💾
- You can **access a leaderboard 🏆 to see how well your agent is performing compared to your classmates** 👉 https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard
To be able to share your model with the community there are three more steps to follow:
1️⃣ (If it's not already done) create an account on HF ➡ https://huggingface.co/join
2️⃣ Sign in, and then store your authentication token from the Hugging Face website.
- Create a new token (https://huggingface.co/settings/tokens) **with the write role**
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/create-token.jpg" alt="Create HF Token">
```python
notebook_login()
```
If you don't want to use Google Colab or a Jupyter Notebook, you need to use this command instead: `huggingface-cli login` (or `login`)
3️⃣ We're now ready to push our trained agent to the 🤗 Hub 🔥 using the `push_to_hub()` function
```python
repo_id = "" # TODO Define your repo id {username/Reinforce-{model-id}}
push_to_hub(
repo_id,
cartpole_policy, # The model we want to save
cartpole_hyperparameters, # Hyperparameters
eval_env, # Evaluation environment
video_fps=30
)
```
Now that we've tested the robustness of our implementation, let's try a more complex environment: PixelCopter 🚁
## Second agent: PixelCopter 🚁
### Study the PixelCopter environment 👀
- [The Environment documentation](https://pygame-learning-environment.readthedocs.io/en/latest/user/games/pixelcopter.html)
```python
env_id = "Pixelcopter-PLE-v0"
env = gym.make(env_id)
eval_env = gym.make(env_id)
s_size = env.observation_space.shape[0]
a_size = env.action_space.n
```
```python
print("_____OBSERVATION SPACE_____ \n")
print("The State Space is: ", s_size)
print("Sample observation", env.observation_space.sample()) # Get a random observation
```
```python
print("\n _____ACTION SPACE_____ \n")
print("The Action Space is: ", a_size)
print("Action Space Sample", env.action_space.sample()) # Take a random action
```
The observation space (7) 👀:
- player y position
- player velocity
- player distance to floor
- player distance to ceiling
- next block x distance to player
- next blocks top y location
- next blocks bottom y location
The action space (2) 🎮:
- Up (press accelerator)
- Do nothing (don't press accelerator)
The reward function 💰:
- For each vertical block it passes, it gains a positive reward of +1. Each time a terminal state is reached it receives a negative reward of -1.
### Define the new Policy 🧠
- We need a deeper neural network since the environment is more complex
```python
class Policy(nn.Module):
def __init__(self, s_size, a_size, h_size):
super(Policy, self).__init__()
# Define the three layers here
def forward(self, x):
# Define the forward process here
return F.softmax(x, dim=1)
def act(self, state):
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
probs = self.forward(state).cpu()
m = Categorical(probs)
action = m.sample()
return action.item(), m.log_prob(action)
```
#### Solution
```python
class Policy(nn.Module):
def __init__(self, s_size, a_size, h_size):
super(Policy, self).__init__()
self.fc1 = nn.Linear(s_size, h_size)
self.fc2 = nn.Linear(h_size, h_size * 2)
self.fc3 = nn.Linear(h_size * 2, a_size)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return F.softmax(x, dim=1)
def act(self, state):
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
probs = self.forward(state).cpu()
m = Categorical(probs)
action = m.sample()
return action.item(), m.log_prob(action)
```
### Define the hyperparameters ⚙️
- Because this environment is more complex, we need a bigger network.
- Especially for the hidden size: we need more neurons.
```python
pixelcopter_hyperparameters = {
"h_size": 64,
"n_training_episodes": 50000,
"n_evaluation_episodes": 10,
"max_t": 10000,
"gamma": 0.99,
"lr": 1e-4,
"env_id": env_id,
"state_space": s_size,
"action_space": a_size,
}
```
### Train it
- We're now ready to train our agent 🔥.
```python
# Create policy and place it to the device
# torch.manual_seed(50)
pixelcopter_policy = Policy(
pixelcopter_hyperparameters["state_space"],
pixelcopter_hyperparameters["action_space"],
pixelcopter_hyperparameters["h_size"],
).to(device)
pixelcopter_optimizer = optim.Adam(pixelcopter_policy.parameters(), lr=pixelcopter_hyperparameters["lr"])
```
```python
scores = reinforce(
pixelcopter_policy,
pixelcopter_optimizer,
pixelcopter_hyperparameters["n_training_episodes"],
pixelcopter_hyperparameters["max_t"],
pixelcopter_hyperparameters["gamma"],
1000,
)
```
### Publish our trained model on the Hub 🔥
```python
repo_id = "" # TODO Define your repo id {username/Reinforce-{model-id}}
push_to_hub(
repo_id,
pixelcopter_policy, # The model we want to save
pixelcopter_hyperparameters, # Hyperparameters
eval_env, # Evaluation environment
video_fps=30
)
```
## Some additional challenges 🏆
The best way to learn **is to try things on your own**! As you saw, the current agent is not doing great. As a first suggestion, you can train for more steps. But also try to find better parameters.
In the [Leaderboard](https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard) you will find your agents. Can you get to the top?
Here are some ideas to climb up the leaderboard:
* Train more steps
* Try different hyperparameters by looking at what your classmates have done: https://huggingface.co/models?other=reinforce
* **Push your newly trained model** to the Hub
* **Improve the implementation for more complex environments** (for instance, try changing the network to a Convolutional Neural Network to handle frames as observations; see the sketch below)
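If you want to try the CNN idea, here is a minimal sketch of what a convolutional policy could look like. The layer sizes and the assumption that observations arrive as (channels, height, width) frames are illustrative choices, not course code:
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical


class CNNPolicy(nn.Module):
    """Policy that takes image frames (channels, height, width) instead of a state vector."""

    def __init__(self, n_channels, a_size):
        super().__init__()
        self.conv1 = nn.Conv2d(n_channels, 16, kernel_size=8, stride=4)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=4, stride=2)
        self.fc = nn.LazyLinear(a_size)  # infers the flattened size on the first forward pass

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = torch.flatten(x, start_dim=1)
        return F.softmax(self.fc(x), dim=1)

    def act(self, state):
        # state is a (channels, height, width) numpy array of pixels
        state = torch.from_numpy(state).float().unsqueeze(0)
        probs = self.forward(state)
        m = Categorical(probs)
        action = m.sample()
        return action.item(), m.log_prob(action)
```
The rest of the REINFORCE loop stays the same; only the observation preprocessing and the network change.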
________________________________________________________________________
**Congrats on finishing this unit**! There was a lot of information.
And congrats on finishing the tutorial. You've just coded your first Deep Reinforcement Learning agent from scratch using PyTorch and shared it on the Hub.
Don't hesitate to iterate on this unit **by improving the implementation for more complex environments** (for instance, by changing the network to a Convolutional Neural Network to handle frames as observations).
In the next unit, **we're going to learn more about Unity ML-Agents** by training agents in Unity environments. This way, you will be ready to participate in the **AI vs AI challenges where you'll train your agents
to compete against other agents in a snowball fight and a soccer game.**
Sound fun? See you next time!
Finally, we would love **to hear what you think of the course and how we can improve it**. If you have some feedback, please [fill out this form](https://forms.gle/BzKXWzLAGZESGNaE9).
See you in Unit 5!
### Keep Learning, stay awesome 🤗
| deep-rl-class/units/en/unit4/hands-on.mdx/0 | {
"file_path": "deep-rl-class/units/en/unit4/hands-on.mdx",
"repo_id": "deep-rl-class",
"token_count": 13495
} | 91 |
# Advantage Actor-Critic (A2C) [[advantage-actor-critic]]
## Reducing variance with Actor-Critic methods
The solution to reducing the variance of the Reinforce algorithm and training our agent faster and better is to use a combination of Policy-Based and Value-Based methods: *the Actor-Critic method*.
To understand the Actor-Critic, imagine you're playing a video game. You can play with a friend who will provide you with some feedback. You're the Actor and your friend is the Critic.
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/ac.jpg" alt="Actor Critic"/>
You don't know how to play at the beginning, **so you try some actions randomly**. The Critic observes your action and **provides feedback**.
Learning from this feedback, **you'll update your policy and be better at playing that game.**
On the other hand, your friend (Critic) will also update the way they provide feedback so it can be better next time.
This is the idea behind Actor-Critic. We learn two function approximations:
- *A policy* that **controls how our agent acts**: \\( \pi_{\theta}(s) \\)
- *A value function* to assist the policy update by measuring how good the action taken is: \\( \hat{q}_{w}(s,a) \\)
## The Actor-Critic Process
Now that we have seen the Actor Critic's big picture, let's dive deeper to understand how the Actor and Critic improve together during the training.
As we saw, with Actor-Critic methods, there are two function approximations (two neural networks):
- *Actor*, a **policy function** parameterized by theta: \\( \pi_{\theta}(s) \\)
- *Critic*, a **value function** parameterized by w: \\( \hat{q}_{w}(s,a) \\)
Let's see the training process to understand how the Actor and Critic are optimized:
- At each timestep, t, we get the current state \\( S_t\\) from the environment and **pass it as input through our Actor and Critic**.
- Our Policy takes the state and **outputs an action** \\( A_t \\).
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/step1.jpg" alt="Step 1 Actor Critic"/>
- The Critic also takes that action as input and, using \\( S_t\\) and \\( A_t \\), **computes the value of taking that action at that state: the Q-value**.
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/step2.jpg" alt="Step 2 Actor Critic"/>
- The action \\( A_t\\) performed in the environment outputs a new state \\( S_{t+1}\\) and a reward \\( R_{t+1} \\) .
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/step3.jpg" alt="Step 3 Actor Critic"/>
- The Actor updates its policy parameters using the Q value.
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/step4.jpg" alt="Step 4 Actor Critic"/>
- Thanks to its updated parameters, the Actor produces the next action \\( A_{t+1} \\) to take given the new state \\( S_{t+1} \\).
- The Critic then updates its value parameters.
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/step5.jpg" alt="Step 5 Actor Critic"/>
## Adding Advantage in Actor-Critic (A2C)
We can stabilize learning further by **using the Advantage function as Critic instead of the Action value function**.
The idea is that the Advantage function calculates the relative advantage of an action compared to the other actions possible at a state: **how taking that action at a state is better compared to the average value of the state**. It subtracts the mean value of the state from the state-action pair:
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/advantage1.jpg" alt="Advantage Function"/>
In other words, this function calculates **the extra reward we get if we take this action at that state compared to the mean reward we get at that state**.
The extra reward is what's beyond the expected value of that state.
- If A(s,a) > 0: our gradient is **pushed in that direction**.
- If A(s,a) < 0 (our action does worse than the average value of that state), **our gradient is pushed in the opposite direction**.
The problem with implementing this advantage function is that it requires two value functions, \\( Q(s,a)\\) and \\( V(s)\\). Fortunately, **we can use the TD error as a good estimator of the advantage function.**
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/advantage2.jpg" alt="Advantage Function"/>
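To make this concrete, here is a minimal PyTorch sketch of one Advantage Actor-Critic update that uses the TD error as the advantage estimate. The network sizes, learning rates, and state/action dimensions are illustrative assumptions, not a reference implementation:
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical

s_size, a_size, gamma = 4, 2, 0.99  # illustrative sizes and discount factor

# Actor: the policy pi_theta(s), a distribution over actions
actor = nn.Sequential(nn.Linear(s_size, 64), nn.ReLU(), nn.Linear(64, a_size))
# Critic: here a state-value estimate V_w(s), used to form the TD error
critic = nn.Sequential(nn.Linear(s_size, 64), nn.ReLU(), nn.Linear(64, 1))

actor_opt = torch.optim.Adam(actor.parameters(), lr=1e-3)
critic_opt = torch.optim.Adam(critic.parameters(), lr=1e-3)


def a2c_update(state, action, reward, next_state, done):
    state = torch.as_tensor(state, dtype=torch.float32)
    next_state = torch.as_tensor(next_state, dtype=torch.float32)

    value = critic(state)
    with torch.no_grad():
        # TD target: r + gamma * V(s_{t+1}), zero bootstrap if the episode ended
        td_target = reward + gamma * critic(next_state) * (1.0 - float(done))
    advantage = (td_target - value).detach()  # the TD error, used as the advantage estimate

    # Critic regresses towards the TD target
    critic_loss = F.mse_loss(value, td_target)
    critic_opt.zero_grad()
    critic_loss.backward()
    critic_opt.step()

    # Actor is pushed in the direction of actions with positive advantage
    log_prob = Categorical(logits=actor(state)).log_prob(torch.as_tensor(action))
    actor_loss = -(log_prob * advantage).mean()
    actor_opt.zero_grad()
    actor_loss.backward()
    actor_opt.step()
```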
| deep-rl-class/units/en/unit6/advantage-actor-critic.mdx/0 | {
"file_path": "deep-rl-class/units/en/unit6/advantage-actor-critic.mdx",
"repo_id": "deep-rl-class",
"token_count": 1384
} | 92 |
# Conclusion
That's all for today. Congrats on finishing this Unit and the tutorial!
Now that you've successfully trained your Doom agent, why not try deathmatch? Remember, that's a much more complex level than the one you've just trained on, **but it's a nice experiment and I advise you to try it.**
If you do it, don't hesitate to share your model in the `#rl-i-made-this` channel in our [discord server](https://www.hf.co/join/discord).
This concludes the last unit, but we are not finished yet! 🤗 The following **bonus unit includes some of the most interesting, advanced, and cutting-edge work in Deep Reinforcement Learning**.
See you next time!
## Keep Learning, Stay awesome 🤗
| deep-rl-class/units/en/unit8/conclusion-sf.mdx/0 | {
"file_path": "deep-rl-class/units/en/unit8/conclusion-sf.mdx",
"repo_id": "deep-rl-class",
"token_count": 188
} | 93 |
# (Automatic) Curriculum Learning for RL
While most of the RL methods seen in this course work well in practice, there are some cases where using them alone fails. This can happen, for instance, when:
- the task to learn is hard and requires an **incremental acquisition of skills** (for instance, when one wants to make a bipedal agent learn to go through hard obstacles, it must first learn to stand, then walk, then maybe jump...)
- there are variations in the environment (that affect the difficulty) and one wants its agent to be **robust** to them
<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/bipedal.gif" alt="Bipedal"/>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/movable_creepers.gif" alt="Movable creepers"/>
<figcaption> <a href="https://developmentalsystems.org/TeachMyAgent/">TeachMyAgent</a> </figcaption>
</figure>
In such cases, it becomes necessary to propose different tasks to our RL agent and organize them so that the agent progressively acquires skills. This approach is called **Curriculum Learning** and usually implies a hand-designed curriculum (or set of tasks organized in a specific order). In practice, one can, for instance, control the generation of the environment, the initial states, or use Self-Play and control the level of opponents proposed to the RL agent.
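As a toy illustration of a hand-designed curriculum, the logic can be as small as an ordered list of tasks and a promotion rule; the task names and the reward threshold below are made up:
```python
# Tasks ordered from easy to hard, with a simple promotion rule
curriculum = ["stand", "walk", "walk_with_obstacles", "jump_over_gaps"]
promotion_threshold = 200.0  # made-up mean-reward threshold
current_stage = 0


def select_task(recent_mean_reward):
    """Move on to the next task once the agent is good enough at the current one."""
    global current_stage
    if recent_mean_reward >= promotion_threshold and current_stage < len(curriculum) - 1:
        current_stage += 1
    return curriculum[current_stage]
```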
As designing such a curriculum is not always trivial, the field of **Automatic Curriculum Learning (ACL) proposes to design approaches that learn to create such an organization of tasks in order to maximize the RL agent's performance**. Portelas et al. proposed to define ACL as:
> ... a family of mechanisms that automatically adapt the distribution of training data by learning to adjust the selection of learning situations to the capabilities of RL agents.
>
As an example, OpenAI used **Domain Randomization** (they applied random variations on the environment) to make a robot hand solve Rubik's Cubes.
<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/dr.jpg" alt="Dr"/>
<figcaption> <a href="https://openai.com/blog/solving-rubiks-cube/">OpenAI - Solving Rubik's Cube with a Robot Hand</a></figcaption>
</figure>
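Domain Randomization itself amounts to resampling environment parameters for every episode. Here is a minimal sketch; the parameter names and ranges are invented for illustration and are not tied to any specific benchmark:
```python
import random


def sample_env_params():
    """Resample a few physical parameters at the start of each episode."""
    return {
        "gravity": random.uniform(8.0, 12.0),
        "friction": random.uniform(0.5, 1.5),
        "object_mass": random.uniform(0.1, 0.5),
    }


# for episode in range(n_episodes):
#     env.reset(options=sample_env_params())  # assuming the environment accepts these options
```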
Finally, you can play with the robustness of agents trained in the <a href="https://huggingface.co/spaces/flowers-team/Interactive_DeepRL_Demo">TeachMyAgent</a> benchmark by controlling environment variations or even drawing the terrain.
<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/demo.png" alt="Demo"/>
<figcaption> <a href="https://huggingface.co/spaces/flowers-team/Interactive_DeepRL_Demo">https://huggingface.co/spaces/flowers-team/Interactive_DeepRL_Demo</a></figcaption>
</figure>
## Further reading
For more information, we recommend that you check out the following resources:
### Overview of the field
- [Automatic Curriculum Learning For Deep RL: A Short Survey](https://arxiv.org/pdf/2003.04664.pdf)
- [Curriculum for Reinforcement Learning](https://lilianweng.github.io/posts/2020-01-29-curriculum-rl/)
### Recent methods
- [Evolving Curricula with Regret-Based Environment Design](https://arxiv.org/abs/2203.01302)
- [Curriculum Reinforcement Learning via Constrained Optimal Transport](https://proceedings.mlr.press/v162/klink22a.html)
- [Prioritized Level Replay](https://arxiv.org/abs/2010.03934)
## Author
This section was written by <a href="https://twitter.com/ClementRomac"> Clément Romac </a>
| deep-rl-class/units/en/unitbonus3/curriculum-learning.mdx/0 | {
"file_path": "deep-rl-class/units/en/unitbonus3/curriculum-learning.mdx",
"repo_id": "deep-rl-class",
"token_count": 1058
} | 94 |
import argparse
import sys
sys.path.append(".")
from base_classes import InpaintingBenchmark # noqa: E402
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--ckpt",
type=str,
default="runwayml/stable-diffusion-v1-5",
choices=[
"runwayml/stable-diffusion-v1-5",
"stabilityai/stable-diffusion-2-1",
"stabilityai/stable-diffusion-xl-base-1.0",
],
)
parser.add_argument("--batch_size", type=int, default=1)
parser.add_argument("--num_inference_steps", type=int, default=50)
parser.add_argument("--model_cpu_offload", action="store_true")
parser.add_argument("--run_compile", action="store_true")
args = parser.parse_args()
benchmark_pipe = InpaintingBenchmark(args)
benchmark_pipe.benchmark(args)
| diffusers/benchmarks/benchmark_sd_inpainting.py/0 | {
"file_path": "diffusers/benchmarks/benchmark_sd_inpainting.py",
"repo_id": "diffusers",
"token_count": 362
} | 95 |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
### Translating the Diffusers documentation into your language
As part of our mission to democratize machine learning, we'd love to make the Diffusers library available in many more languages! Follow the steps below if you want to help translate the documentation into your language.
**Open an issue**
To get started, navigate to the [Issues](https://github.com/huggingface/diffusers/issues) page of this repo and check if anyone else has opened an issue for your language. If not, open a new issue by selecting the "Translating a New Language?" option from the "New issue" button.
Once an issue exists, post a comment to indicate which chapters you'd like to work on, and we'll add your name to the list.
**Fork the repository**
First, you'll need to [fork the Diffusers repo](https://docs.github.com/en/get-started/quickstart/fork-a-repo). You can do this by clicking on the **Fork** button on the top-right corner of this repo's page.
Once you've forked the repo, you'll want to get the files on your local machine for editing. You can do that by cloning the fork with Git as follows:
```bash
git clone https://github.com/<YOUR-USERNAME>/diffusers.git
```
**Copy-paste the English version with a new language code**
The documentation files are all found in one main directory:
- [`docs/source`](https://github.com/huggingface/diffusers/tree/main/docs/source): All the documentation materials are organized here by language.
You'll only need to copy the files in the [`docs/source/en`](https://github.com/huggingface/diffusers/tree/main/docs/source/en) directory, so first navigate to your fork of the repo and run the following:
```bash
cd ~/path/to/diffusers/docs
cp -r source/en source/<LANG-ID>
```
Here, `<LANG-ID>` should be one of the ISO 639-1 or ISO 639-2 language codes -- see [here](https://www.loc.gov/standards/iso639-2/php/code_list.php) for a handy table.
**Start translating**
Now comes the fun part: translating the text!
The first thing we recommend is translating the part of the `_toctree.yml` file that corresponds to your doc chapter. This file is used to render the table of contents on the website.
> If the `_toctree.yml` file doesn't yet exist for your language, you can create one by copy-pasting from the English version and deleting the sections unrelated to your chapter. Just make sure it exists in the `docs/source/<LANG-ID>/` directory!
The fields you should add are `local` (with the name of the file containing the translation; e.g. `autoclass_tutorial`), and `title` (with the title of the doc in your language; e.g. `Load pretrained instances with an AutoClass`) -- as a reference, here is the `_toctree.yml` for [English](https://github.com/huggingface/diffusers/blob/main/docs/source/en/_toctree.yml):
```yaml
- sections:
- local: pipeline_tutorial # Do not change this! Use the same name for your .md file
title: Pipelines for inference # Translate this!
...
title: Tutorials # Translate this!
```
Once you have translated the `_toctree.yml` file, you can start translating the [MDX](https://mdxjs.com/) files associated with your docs chapter.
> If you'd like others to help you with the translation, you should [open an issue](https://github.com/huggingface/diffusers/issues) and tag @patrickvonplaten.
| diffusers/docs/TRANSLATING.md/0 | {
"file_path": "diffusers/docs/TRANSLATING.md",
"repo_id": "diffusers",
"token_count": 1100
} | 96 |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Tiny AutoEncoder
Tiny AutoEncoder for Stable Diffusion (TAESD) was introduced in [madebyollin/taesd](https://github.com/madebyollin/taesd) by Ollin Boer Bohan. It is a tiny distilled version of Stable Diffusion's VAE that can quickly decode the latents in a [`StableDiffusionPipeline`] or [`StableDiffusionXLPipeline`] almost instantly.
To use with Stable Diffusion v2.1:
```python
import torch
from diffusers import DiffusionPipeline, AutoencoderTiny
pipe = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1-base", torch_dtype=torch.float16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taesd", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(prompt, num_inference_steps=25).images[0]
image
```
To use with Stable Diffusion XL 1.0:
```python
import torch
from diffusers import DiffusionPipeline, AutoencoderTiny
pipe = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taesdxl", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(prompt, num_inference_steps=25).images[0]
image
```
## AutoencoderTiny
[[autodoc]] AutoencoderTiny
## AutoencoderTinyOutput
[[autodoc]] models.autoencoders.autoencoder_tiny.AutoencoderTinyOutput
| diffusers/docs/source/en/api/models/autoencoder_tiny.md/0 | {
"file_path": "diffusers/docs/source/en/api/models/autoencoder_tiny.md",
"repo_id": "diffusers",
"token_count": 670
} | 97 |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Outputs
All model outputs are subclasses of [`~utils.BaseOutput`], data structures containing all the information returned by the model. The outputs can also be used as tuples or dictionaries.
For example:
```python
from diffusers import DDIMPipeline
pipeline = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
outputs = pipeline()
```
The `outputs` object is a [`~pipelines.ImagePipelineOutput`] which means it has an image attribute.
You can access each attribute as you normally would or with a keyword lookup, and if that attribute is not returned by the model, you will get `None`:
```python
outputs.images
outputs["images"]
```
When considering the `outputs` object as a tuple, it only considers the attributes that don't have `None` values.
For instance, slicing it returns the tuple `(outputs.images,)`:
```python
outputs[:1]
```
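The same conversion can be done explicitly with the `to_tuple` method documented below, which keeps only the attributes that are not `None`:

```python
# Explicit conversion: attributes whose value is None are dropped
images = outputs.to_tuple()[0]
```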
<Tip>
To check a specific pipeline or model output, refer to its corresponding API documentation.
</Tip>
## BaseOutput
[[autodoc]] utils.BaseOutput
- to_tuple
## ImagePipelineOutput
[[autodoc]] pipelines.ImagePipelineOutput
## FlaxImagePipelineOutput
[[autodoc]] pipelines.pipeline_flax_utils.FlaxImagePipelineOutput
## AudioPipelineOutput
[[autodoc]] pipelines.AudioPipelineOutput
## ImageTextPipelineOutput
[[autodoc]] ImageTextPipelineOutput
| diffusers/docs/source/en/api/outputs.md/0 | {
"file_path": "diffusers/docs/source/en/api/outputs.md",
"repo_id": "diffusers",
"token_count": 554
} | 98 |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# DiT
[Scalable Diffusion Models with Transformers](https://huggingface.co/papers/2212.09748) (DiT) is by William Peebles and Saining Xie.
The abstract from the paper is:
*We explore a new class of diffusion models based on the transformer architecture. We train latent diffusion models of images, replacing the commonly-used U-Net backbone with a transformer that operates on latent patches. We analyze the scalability of our Diffusion Transformers (DiTs) through the lens of forward pass complexity as measured by Gflops. We find that DiTs with higher Gflops -- through increased transformer depth/width or increased number of input tokens -- consistently have lower FID. In addition to possessing good scalability properties, our largest DiT-XL/2 models outperform all prior diffusion models on the class-conditional ImageNet 512x512 and 256x256 benchmarks, achieving a state-of-the-art FID of 2.27 on the latter.*
The original codebase can be found at [facebookresearch/dit](https://github.com/facebookresearch/dit).
<Tip>
Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines.
</Tip>
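For a quick sanity check of the pipeline, here is a short usage sketch. The `facebook/DiT-XL-2-256` checkpoint id and the class label are assumptions based on the publicly released DiT weights rather than content from this page:

```python
import torch
from diffusers import DiTPipeline, DPMSolverMultistepScheduler

# Load a class-conditional DiT checkpoint (checkpoint id is an assumption)
pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")

# DiT is conditioned on ImageNet class labels rather than text prompts
class_ids = pipe.get_label_ids(["golden retriever"])

generator = torch.manual_seed(33)
image = pipe(class_labels=class_ids, num_inference_steps=25, generator=generator).images[0]
image.save("dit_sample.png")
```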
## DiTPipeline
[[autodoc]] DiTPipeline
- all
- __call__
## ImagePipelineOutput
[[autodoc]] pipelines.ImagePipelineOutput
| diffusers/docs/source/en/api/pipelines/dit.md/0 | {
"file_path": "diffusers/docs/source/en/api/pipelines/dit.md",
"repo_id": "diffusers",
"token_count": 532
} | 99 |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Stable Diffusion XL
Stable Diffusion XL (SDXL) was proposed in [SDXL: Improving Latent Diffusion Models for High-Resolution Image Synthesis](https://huggingface.co/papers/2307.01952) by Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach.
The abstract from the paper is:
*We present SDXL, a latent diffusion model for text-to-image synthesis. Compared to previous versions of Stable Diffusion, SDXL leverages a three times larger UNet backbone: The increase of model parameters is mainly due to more attention blocks and a larger cross-attention context as SDXL uses a second text encoder. We design multiple novel conditioning schemes and train SDXL on multiple aspect ratios. We also introduce a refinement model which is used to improve the visual fidelity of samples generated by SDXL using a post-hoc image-to-image technique. We demonstrate that SDXL shows drastically improved performance compared the previous versions of Stable Diffusion and achieves results competitive with those of black-box state-of-the-art image generators.*
## Tips
- Using SDXL with a DPM++ scheduler for less than 50 steps is known to produce [visual artifacts](https://github.com/huggingface/diffusers/issues/5433) because the solver becomes numerically unstable. To fix this issue, take a look at this [PR](https://github.com/huggingface/diffusers/pull/5541) which recommends for ODE/SDE solvers:
- set `use_karras_sigmas=True` or `lu_lambdas=True` to improve image quality
- set `euler_at_final=True` if you're using a solver with uniform step sizes (DPM++2M or DPM++2M SDE)
- Most SDXL checkpoints work best with an image size of 1024x1024. Image sizes of 768x768 and 512x512 are also supported, but the results aren't as good. Anything below 512x512 is not recommended and likely won't be for default checkpoints like [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0).
- SDXL can pass a different prompt for each of the text encoders it was trained on. We can even pass different parts of the same prompt to the text encoders (see the example after these tips).
- SDXL output images can be improved by making use of a refiner model in an image-to-image setting.
- SDXL offers `negative_original_size`, `negative_crops_coords_top_left`, and `negative_target_size` to negatively condition the model on image resolution and cropping parameters.
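Here is a brief sketch of the dual-prompt usage mentioned in the tips above; the split of subject and style between the two prompts is arbitrary and only meant as an illustration:

```python
import torch
from diffusers import StableDiffusionXLPipeline

pipeline = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16"
).to("cuda")

# prompt and prompt_2 are routed to the two different text encoders
image = pipeline(
    prompt="a photograph of an astronaut riding a horse",
    prompt_2="Van Gogh painting, thick brush strokes",
    num_inference_steps=30,
).images[0]
image.save("sdxl_two_prompts.png")
```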
<Tip>
To learn how to use SDXL for various tasks, how to optimize performance, and other usage examples, take a look at the [Stable Diffusion XL](../../../using-diffusers/sdxl) guide.
Check out the [Stability AI](https://huggingface.co/stabilityai) Hub organization for the official base and refiner model checkpoints!
</Tip>
## StableDiffusionXLPipeline
[[autodoc]] StableDiffusionXLPipeline
- all
- __call__
## StableDiffusionXLImg2ImgPipeline
[[autodoc]] StableDiffusionXLImg2ImgPipeline
- all
- __call__
## StableDiffusionXLInpaintPipeline
[[autodoc]] StableDiffusionXLInpaintPipeline
- all
- __call__
| diffusers/docs/source/en/api/pipelines/stable_diffusion/stable_diffusion_xl.md/0 | {
"file_path": "diffusers/docs/source/en/api/pipelines/stable_diffusion/stable_diffusion_xl.md",
"repo_id": "diffusers",
"token_count": 1005
} | 100 |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Installation
🤗 Diffusers is tested on Python 3.8+, PyTorch 1.7.0+, and Flax. Follow the installation instructions below for the deep learning library you are using:
- [PyTorch](https://pytorch.org/get-started/locally/) installation instructions
- [Flax](https://flax.readthedocs.io/en/latest/) installation instructions
## Install with pip
You should install 🤗 Diffusers in a [virtual environment](https://docs.python.org/3/library/venv.html).
If you're unfamiliar with Python virtual environments, take a look at this [guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).
A virtual environment makes it easier to manage different projects and avoid compatibility issues between dependencies.
Start by creating a virtual environment in your project directory:
```bash
python -m venv .env
```
Activate the virtual environment:
```bash
source .env/bin/activate
```
You should also install 🤗 Transformers because 🤗 Diffusers relies on its models:
<frameworkcontent>
<pt>
Note - PyTorch only supports Python 3.8 - 3.11 on Windows.
```bash
pip install diffusers["torch"] transformers
```
</pt>
<jax>
```bash
pip install diffusers["flax"] transformers
```
</jax>
</frameworkcontent>
## Install with conda
After activating your virtual environment, you can also install 🤗 Diffusers with `conda` (the package is maintained by the community):
```bash
conda install -c conda-forge diffusers
```
## Install from source
Before installing 🤗 Diffusers from source, make sure you have PyTorch and 🤗 Accelerate installed.
To install 🤗 Accelerate:
```bash
pip install accelerate
```
Then install 🤗 Diffusers from source:
```bash
pip install git+https://github.com/huggingface/diffusers
```
This command installs the bleeding edge `main` version rather than the latest `stable` version.
The `main` version is useful for staying up-to-date with the latest developments.
For instance, if a bug has been fixed since the last official release but a new release hasn't been rolled out yet.
However, this means the `main` version may not always be stable.
We strive to keep the `main` version operational, and most issues are usually resolved within a few hours or a day.
If you run into a problem, please open an [Issue](https://github.com/huggingface/diffusers/issues/new/choose) so we can fix it even sooner!
## Editable install
You will need an editable install if you'd like to:
* Use the `main` version of the source code.
* Contribute to 🤗 Diffusers and need to test changes in the code.
Clone the repository and install 🤗 Diffusers with the following commands:
```bash
git clone https://github.com/huggingface/diffusers.git
cd diffusers
```
<frameworkcontent>
<pt>
```bash
pip install -e ".[torch]"
```
</pt>
<jax>
```bash
pip install -e ".[flax]"
```
</jax>
</frameworkcontent>
These commands will link the folder you cloned the repository into with your Python library paths.
Python will now look inside the folder you cloned to in addition to the normal library paths.
For example, if your Python packages are typically installed in `~/anaconda3/envs/main/lib/python3.8/site-packages/`, Python will also search the `~/diffusers/` folder you cloned to.
<Tip warning={true}>
You must keep the `diffusers` folder if you want to keep using the library.
</Tip>
Now you can easily update your clone to the latest version of 🤗 Diffusers with the following command:
```bash
cd ~/diffusers/
git pull
```
Your Python environment will find the `main` version of 🤗 Diffusers on the next run.
## Cache
Model weights and files are downloaded from the Hub to a cache, which is usually your home directory. You can change the cache location by specifying the `HF_HOME` or `HUGGINGFACE_HUB_CACHE` environment variables or configuring the `cache_dir` parameter in methods like [`~DiffusionPipeline.from_pretrained`].
Cached files allow you to run 🤗 Diffusers offline. To prevent 🤗 Diffusers from connecting to the internet, set the `HF_HUB_OFFLINE` environment variable to `True` and 🤗 Diffusers will only load previously downloaded files in the cache.
```shell
export HF_HUB_OFFLINE=True
```
For more details about managing and cleaning the cache, take a look at the [caching](https://huggingface.co/docs/huggingface_hub/guides/manage-cache) guide.
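For example, a per-project cache can be selected directly in `from_pretrained` through `cache_dir`; the checkpoint id below is only illustrative:

```python
from diffusers import DiffusionPipeline

# Download (or reuse) files in a project-local cache instead of the default location
pipeline = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", cache_dir="./model_cache"
)
```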
## Telemetry logging
Our library gathers telemetry information during [`~DiffusionPipeline.from_pretrained`] requests.
The data gathered includes the version of 🤗 Diffusers and PyTorch/Flax, the requested model or pipeline class,
and the path to a pretrained checkpoint if it is hosted on the Hugging Face Hub.
This usage data helps us debug issues and prioritize new features.
Telemetry is only sent when loading models and pipelines from the Hub,
and it is not collected if you're loading local files.
We understand that not everyone wants to share additional information, and we respect your privacy.
You can disable telemetry collection by setting the `DISABLE_TELEMETRY` environment variable from your terminal:
On Linux/MacOS:
```bash
export DISABLE_TELEMETRY=YES
```
On Windows:
```bash
set DISABLE_TELEMETRY=YES
```
| diffusers/docs/source/en/installation.md/0 | {
"file_path": "diffusers/docs/source/en/installation.md",
"repo_id": "diffusers",
"token_count": 1585
} | 101 |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# ControlNet
[ControlNet](https://hf.co/papers/2302.05543) models are adapters trained on top of another pretrained model. It allows for a greater degree of control over image generation by conditioning the model with an additional input image. The input image can be a canny edge, depth map, human pose, and many more.
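For instance, a canny edge conditioning image can be produced from any input photo with OpenCV. This is a generic preprocessing sketch (the file names are placeholders), not part of the training script itself:

```py
import cv2
import numpy as np
from PIL import Image

image = np.array(Image.open("input.png").convert("RGB"))
edges = cv2.Canny(image, 100, 200)                  # low/high thresholds
canny = np.stack([edges, edges, edges], axis=-1)    # make it 3-channel again
Image.fromarray(canny).save("conditioning_image.png")
```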
If you're training on a GPU with limited vRAM, you should try enabling the `gradient_checkpointing`, `gradient_accumulation_steps`, and `mixed_precision` parameters in the training command. You can also reduce your memory footprint by using memory-efficient attention with [xFormers](../optimization/xformers). JAX/Flax training is also supported for efficient training on TPUs and GPUs, but it doesn't support gradient checkpointing or xFormers. You should have a GPU with >30GB of memory if you want to train faster with Flax.
This guide will explore the [train_controlnet.py](https://github.com/huggingface/diffusers/blob/main/examples/controlnet/train_controlnet.py) training script to help you become familiar with it, and how you can adapt it for your own use-case.
Before running the script, make sure you install the library from source:
```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```
Then navigate to the example folder containing the training script and install the required dependencies for the script you're using:
<hfoptions id="installation">
<hfoption id="PyTorch">
```bash
cd examples/controlnet
pip install -r requirements.txt
```
</hfoption>
<hfoption id="Flax">
If you have access to a TPU, the Flax training script runs even faster! Let's run the training script on the [Google Cloud TPU VM](https://cloud.google.com/tpu/docs/run-calculation-jax). Create a single TPU v4-8 VM and connect to it:
```bash
ZONE=us-central2-b
TPU_TYPE=v4-8
VM_NAME=hg_flax
gcloud alpha compute tpus tpu-vm create $VM_NAME \
--zone $ZONE \
--accelerator-type $TPU_TYPE \
--version tpu-vm-v4-base
gcloud alpha compute tpus tpu-vm ssh $VM_NAME --zone $ZONE
```
Install JAX 0.4.5:
```bash
pip install "jax[tpu]==0.4.5" -f https://storage.googleapis.com/jax-releases/libtpu_releases.html
```
Then install the required dependencies for the Flax script:
```bash
cd examples/controlnet
pip install -r requirements_flax.txt
```
</hfoption>
</hfoptions>
<Tip>
🤗 Accelerate is a library for helping you train on multiple GPUs/TPUs or with mixed-precision. It'll automatically configure your training setup based on your hardware and environment. Take a look at the 🤗 Accelerate [Quick tour](https://huggingface.co/docs/accelerate/quicktour) to learn more.
</Tip>
Initialize an 🤗 Accelerate environment:
```bash
accelerate config
```
To setup a default ๐ค Accelerate environment without choosing any configurations:
```bash
accelerate config default
```
Or if your environment doesn't support an interactive shell, like a notebook, you can use:
```py
from accelerate.utils import write_basic_config
write_basic_config()
```
Lastly, if you want to train a model on your own dataset, take a look at the [Create a dataset for training](create_dataset) guide to learn how to create a dataset that works with the training script.
<Tip>
The following sections highlight parts of the training script that are important for understanding how to modify it, but it doesn't cover every aspect of the script in detail. If you're interested in learning more, feel free to read through the [script](https://github.com/huggingface/diffusers/blob/main/examples/controlnet/train_controlnet.py) and let us know if you have any questions or concerns.
</Tip>
## Script parameters
The training script provides many parameters to help you customize your training run. All of the parameters and their descriptions are found in the [`parse_args()`](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/controlnet/train_controlnet.py#L231) function. This function provides default values for each parameter, such as the training batch size and learning rate, but you can also set your own values in the training command if you'd like.
For example, to speed up training with mixed precision using the fp16 format, add the `--mixed_precision` parameter to the training command:
```bash
accelerate launch train_controlnet.py \
--mixed_precision="fp16"
```
Many of the basic and important parameters are described in the [Text-to-image](text2image#script-parameters) training guide, so this guide just focuses on the relevant parameters for ControlNet:
- `--max_train_samples`: the number of training samples; this can be lowered for faster training, but if you want to stream really large datasets, you'll need to include this parameter and the `--streaming` parameter in your training command
- `--gradient_accumulation_steps`: number of update steps to accumulate before the backward pass; this allows you to train with a bigger batch size than your GPU memory can typically handle
### Min-SNR weighting
The [Min-SNR](https://huggingface.co/papers/2303.09556) weighting strategy can help with training by rebalancing the loss to achieve faster convergence. The training script supports predicting `epsilon` (noise) or `v_prediction`, but Min-SNR is compatible with both prediction types. This weighting strategy is only supported by PyTorch and is unavailable in the Flax training script.
Add the `--snr_gamma` parameter and set it to the recommended value of 5.0:
```bash
accelerate launch train_controlnet.py \
--snr_gamma=5.0
```
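Conceptually, Min-SNR clips the per-timestep loss weight at `snr_gamma`; here is a rough sketch of the weighting for epsilon prediction, based on a reading of the strategy rather than the exact code in the script:

```py
import torch

def min_snr_weights(snr, snr_gamma=5.0):
    """Per-sample loss weights: min(SNR, gamma) / SNR, so very high-SNR (easy)
    timesteps no longer dominate the loss."""
    return torch.clamp(snr, max=snr_gamma) / snr

# example: weights for a batch of signal-to-noise ratios
snr = torch.tensor([0.5, 1.0, 10.0, 100.0])
print(min_snr_weights(snr))  # tensor([1.0000, 1.0000, 0.5000, 0.0500])
```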
## Training script
As with the script parameters, a general walkthrough of the training script is provided in the [Text-to-image](text2image#training-script) training guide. Instead, this guide takes a look at the relevant parts of the ControlNet script.
The training script has a [`make_train_dataset`](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/controlnet/train_controlnet.py#L582) function for preprocessing the dataset with image transforms and caption tokenization. You'll see that in addition to the usual caption tokenization and image transforms, the script also includes transforms for the conditioning image.
<Tip>
If you're streaming a dataset on a TPU, performance may be bottlenecked by the 🤗 Datasets library which is not optimized for images. To ensure maximum throughput, you're encouraged to explore other dataset formats like [WebDataset](https://webdataset.github.io/webdataset/), [TorchData](https://github.com/pytorch/data), and [TensorFlow Datasets](https://www.tensorflow.org/datasets/tfless_tfds).
</Tip>
```py
conditioning_image_transforms = transforms.Compose(
[
transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR),
transforms.CenterCrop(args.resolution),
transforms.ToTensor(),
]
)
```
Within the [`main()`](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/controlnet/train_controlnet.py#L713) function, you'll find the code for loading the tokenizer, text encoder, scheduler and models. This is also where the ControlNet model is loaded either from existing weights or randomly initialized from a UNet:
```py
if args.controlnet_model_name_or_path:
logger.info("Loading existing controlnet weights")
controlnet = ControlNetModel.from_pretrained(args.controlnet_model_name_or_path)
else:
logger.info("Initializing controlnet weights from unet")
controlnet = ControlNetModel.from_unet(unet)
```
The [optimizer](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/controlnet/train_controlnet.py#L871) is set up to update the ControlNet parameters:
```py
params_to_optimize = controlnet.parameters()
optimizer = optimizer_class(
params_to_optimize,
lr=args.learning_rate,
betas=(args.adam_beta1, args.adam_beta2),
weight_decay=args.adam_weight_decay,
eps=args.adam_epsilon,
)
```
Finally, in the [training loop](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/controlnet/train_controlnet.py#L943), the conditioning text embeddings and image are passed to the down and mid-blocks of the ControlNet model:
```py
encoder_hidden_states = text_encoder(batch["input_ids"])[0]
controlnet_image = batch["conditioning_pixel_values"].to(dtype=weight_dtype)
down_block_res_samples, mid_block_res_sample = controlnet(
noisy_latents,
timesteps,
encoder_hidden_states=encoder_hidden_states,
controlnet_cond=controlnet_image,
return_dict=False,
)
```
If you want to learn more about how the training loop works, check out the [Understanding pipelines, models and schedulers](../using-diffusers/write_own_pipeline) tutorial which breaks down the basic pattern of the denoising process.
## Launch the script
Now you're ready to launch the training script! 🚀
This guide uses the [fusing/fill50k](https://huggingface.co/datasets/fusing/fill50k) dataset, but remember, you can create and use your own dataset if you want (see the [Create a dataset for training](create_dataset) guide).
Set the environment variable `MODEL_NAME` to a model id on the Hub or a path to a local model and `OUTPUT_DIR` to where you want to save the model.
Download the following images to condition your training with:
```bash
wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png
wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png
```
One more thing before you launch the script! Depending on the GPU you have, you may need to enable certain optimizations to train a ControlNet. The default configuration in this script requires ~38GB of vRAM. If you're training on more than one GPU, add the `--multi_gpu` parameter to the `accelerate launch` command.
<hfoptions id="gpu-select">
<hfoption id="16GB">
On a 16GB GPU, you can use bitsandbytes 8-bit optimizer and gradient checkpointing to optimize your training run. Install bitsandbytes:
```py
pip install bitsandbytes
```
Then, add the following parameter to your training command:
```bash
accelerate launch train_controlnet.py \
--gradient_checkpointing \
--use_8bit_adam \
```
</hfoption>
<hfoption id="12GB">
On a 12GB GPU, you'll need bitsandbytes 8-bit optimizer, gradient checkpointing, xFormers, and set the gradients to `None` instead of zero to reduce your memory-usage.
```bash
accelerate launch train_controlnet.py \
--use_8bit_adam \
--gradient_checkpointing \
--enable_xformers_memory_efficient_attention \
--set_grads_to_none \
```
</hfoption>
<hfoption id="8GB">
On a 8GB GPU, you'll need to use [DeepSpeed](https://www.deepspeed.ai/) to offload some of the tensors from the vRAM to either the CPU or NVME to allow training with less GPU memory.
Run the following command to configure your 🤗 Accelerate environment:
```bash
accelerate config
```
During configuration, confirm that you want to use DeepSpeed stage 2. Now it should be possible to train on under 8GB vRAM by combining DeepSpeed stage 2, fp16 mixed precision, and offloading the model parameters and the optimizer state to the CPU. The drawback is that this requires more system RAM (~25 GB). See the [DeepSpeed documentation](https://huggingface.co/docs/accelerate/usage_guides/deepspeed) for more configuration options. Your configuration file should look something like:
```bash
compute_environment: LOCAL_MACHINE
deepspeed_config:
gradient_accumulation_steps: 4
offload_optimizer_device: cpu
offload_param_device: cpu
zero3_init_flag: false
zero_stage: 2
distributed_type: DEEPSPEED
```
You should also change the default Adam optimizer to DeepSpeed's optimized version of Adam, [`deepspeed.ops.adam.DeepSpeedCPUAdam`](https://deepspeed.readthedocs.io/en/latest/optimizers.html#adam-cpu), for a substantial speedup. Enabling `DeepSpeedCPUAdam` requires your system's CUDA toolchain version to be the same as the one installed with PyTorch.
bitsandbytes 8-bit optimizers don't seem to be compatible with DeepSpeed at the moment.
That's it! You don't need to add any additional parameters to your training command.
</hfoption>
</hfoptions>
<hfoptions id="training-inference">
<hfoption id="PyTorch">
```bash
export MODEL_DIR="runwayml/stable-diffusion-v1-5"
export OUTPUT_DIR="path/to/save/model"
accelerate launch train_controlnet.py \
--pretrained_model_name_or_path=$MODEL_DIR \
--output_dir=$OUTPUT_DIR \
--dataset_name=fusing/fill50k \
--resolution=512 \
--learning_rate=1e-5 \
--validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
--validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
--train_batch_size=1 \
--gradient_accumulation_steps=4 \
--push_to_hub
```
</hfoption>
<hfoption id="Flax">
With Flax, you can [profile your code](https://jax.readthedocs.io/en/latest/profiling.html) by adding the `--profile_steps==5` parameter to your training command. Install the Tensorboard profile plugin:
```bash
pip install tensorflow tensorboard-plugin-profile
tensorboard --logdir runs/fill-circle-100steps-20230411_165612/
```
Then you can inspect the profile at [http://localhost:6006/#profile](http://localhost:6006/#profile).
<Tip warning={true}>
If you run into version conflicts with the plugin, try uninstalling and reinstalling all versions of TensorFlow and Tensorboard. The debugging functionality of the profile plugin is still experimental, and not all views are fully functional. The `trace_viewer` cuts off events after 1M, which can result in all your device traces getting lost if, for example, you profile the compilation step by accident.
</Tip>
```bash
python3 train_controlnet_flax.py \
--pretrained_model_name_or_path=$MODEL_DIR \
--output_dir=$OUTPUT_DIR \
--dataset_name=fusing/fill50k \
--resolution=512 \
--learning_rate=1e-5 \
--validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
--validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
--validation_steps=1000 \
--train_batch_size=2 \
--revision="non-ema" \
--from_pt \
--report_to="wandb" \
--tracker_project_name=$HUB_MODEL_ID \
--num_train_epochs=11 \
--push_to_hub \
--hub_model_id=$HUB_MODEL_ID
```
</hfoption>
</hfoptions>
Once training is complete, you can use your newly trained model for inference!
```py
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
from diffusers.utils import load_image
import torch
controlnet = ControlNetModel.from_pretrained("path/to/controlnet", torch_dtype=torch.float16)
pipeline = StableDiffusionControlNetPipeline.from_pretrained(
"path/to/base/model", controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")
control_image = load_image("./conditioning_image_1.png")
prompt = "pale golden rod circle with old lace background"
generator = torch.manual_seed(0)
image = pipeline(prompt, num_inference_steps=20, generator=generator, image=control_image).images[0]
image.save("./output.png")
```
## Stable Diffusion XL
Stable Diffusion XL (SDXL) is a powerful text-to-image model that generates high-resolution images, and it adds a second text-encoder to its architecture. Use the [`train_controlnet_sdxl.py`](https://github.com/huggingface/diffusers/blob/main/examples/controlnet/train_controlnet_sdxl.py) script to train a ControlNet adapter for the SDXL model.
The SDXL training script is discussed in more detail in the [SDXL training](sdxl) guide.
## Next steps
Congratulations on training your own ControlNet! To learn more about how to use your new model, the following guides may be helpful:
- Learn how to [use a ControlNet](../using-diffusers/controlnet) for inference on a variety of tasks. | diffusers/docs/source/en/training/controlnet.md/0 | {
"file_path": "diffusers/docs/source/en/training/controlnet.md",
"repo_id": "diffusers",
"token_count": 4988
} | 102 |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Wuerstchen
The [Wuerstchen](https://hf.co/papers/2306.00637) model drastically reduces computational costs by compressing the latent space by 42x, without compromising image quality and accelerating inference. During training, Wuerstchen uses two models (VQGAN + autoencoder) to compress the latents, and then a third model (text-conditioned latent diffusion model) is conditioned on this highly compressed space to generate an image.
To fit the prior model into GPU memory and to speed up training, try enabling `gradient_accumulation_steps`, `gradient_checkpointing`, and `mixed_precision`.
This guide explores the [train_text_to_image_prior.py](https://github.com/huggingface/diffusers/blob/main/examples/wuerstchen/text_to_image/train_text_to_image_prior.py) script to help you become more familiar with it, and how you can adapt it for your own use-case.
Before running the script, make sure you install the library from source:
```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```
Then navigate to the example folder containing the training script and install the required dependencies for the script you're using:
```bash
cd examples/wuerstchen/text_to_image
pip install -r requirements.txt
```
<Tip>
🤗 Accelerate is a library for helping you train on multiple GPUs/TPUs or with mixed-precision. It'll automatically configure your training setup based on your hardware and environment. Take a look at the 🤗 Accelerate [Quick tour](https://huggingface.co/docs/accelerate/quicktour) to learn more.
</Tip>
Initialize an 🤗 Accelerate environment:
```bash
accelerate config
```
To setup a default ๐ค Accelerate environment without choosing any configurations:
```bash
accelerate config default
```
Or if your environment doesn't support an interactive shell, like a notebook, you can use:
```py
from accelerate.utils import write_basic_config
write_basic_config()
```
Lastly, if you want to train a model on your own dataset, take a look at the [Create a dataset for training](create_dataset) guide to learn how to create a dataset that works with the training script.
<Tip>
The following sections highlight parts of the training scripts that are important for understanding how to modify it, but it doesn't cover every aspect of the [script](https://github.com/huggingface/diffusers/blob/main/examples/wuerstchen/text_to_image/train_text_to_image_prior.py) in detail. If you're interested in learning more, feel free to read through the scripts and let us know if you have any questions or concerns.
</Tip>
## Script parameters
The training script provides many parameters to help you customize your training run. All of the parameters and their descriptions are found in the [`parse_args()`](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/wuerstchen/text_to_image/train_text_to_image_prior.py#L192) function. It provides default values for each parameter, such as the training batch size and learning rate, but you can also set your own values in the training command if you'd like.
For example, to speed up training with mixed precision using the fp16 format, add the `--mixed_precision` parameter to the training command:
```bash
accelerate launch train_text_to_image_prior.py \
--mixed_precision="fp16"
```
Most of the parameters are identical to the parameters in the [Text-to-image](text2image#script-parameters) training guide, so let's dive right into the Wuerstchen training script!
## Training script
The training script is also similar to the [Text-to-image](text2image#training-script) training guide, but it's been modified to support Wuerstchen. This guide focuses on the code that is unique to the Wuerstchen training script.
The [`main()`](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/wuerstchen/text_to_image/train_text_to_image_prior.py#L441) function starts by initializing the image encoder - an [EfficientNet](https://github.com/huggingface/diffusers/blob/main/examples/wuerstchen/text_to_image/modeling_efficient_net_encoder.py) - in addition to the usual scheduler and tokenizer.
```py
with ContextManagers(deepspeed_zero_init_disabled_context_manager()):
pretrained_checkpoint_file = hf_hub_download("dome272/wuerstchen", filename="model_v2_stage_b.pt")
state_dict = torch.load(pretrained_checkpoint_file, map_location="cpu")
image_encoder = EfficientNetEncoder()
image_encoder.load_state_dict(state_dict["effnet_state_dict"])
image_encoder.eval()
```
You'll also load the [`WuerstchenPrior`] model for optimization.
```py
prior = WuerstchenPrior.from_pretrained(args.pretrained_prior_model_name_or_path, subfolder="prior")
optimizer = optimizer_cls(
prior.parameters(),
lr=args.learning_rate,
betas=(args.adam_beta1, args.adam_beta2),
weight_decay=args.adam_weight_decay,
eps=args.adam_epsilon,
)
```
Next, you'll apply some [transforms](https://github.com/huggingface/diffusers/blob/65ef7a0c5c594b4f84092e328fbdd73183613b30/examples/wuerstchen/text_to_image/train_text_to_image_prior.py#L656) to the images and [tokenize](https://github.com/huggingface/diffusers/blob/65ef7a0c5c594b4f84092e328fbdd73183613b30/examples/wuerstchen/text_to_image/train_text_to_image_prior.py#L637) the captions:
```py
def preprocess_train(examples):
images = [image.convert("RGB") for image in examples[image_column]]
examples["effnet_pixel_values"] = [effnet_transforms(image) for image in images]
examples["text_input_ids"], examples["text_mask"] = tokenize_captions(examples)
return examples
```
Finally, the [training loop](https://github.com/huggingface/diffusers/blob/65ef7a0c5c594b4f84092e328fbdd73183613b30/examples/wuerstchen/text_to_image/train_text_to_image_prior.py#L656) handles compressing the images to latent space with the `EfficientNetEncoder`, adding noise to the latents, and predicting the noise residual with the [`WuerstchenPrior`] model.
```py
pred_noise = prior(noisy_latents, timesteps, prompt_embeds)
```
If you want to learn more about how the training loop works, check out the [Understanding pipelines, models and schedulers](../using-diffusers/write_own_pipeline) tutorial which breaks down the basic pattern of the denoising process.
## Launch the script
Once you've made all your changes or you're okay with the default configuration, you're ready to launch the training script! 🚀
Set the `DATASET_NAME` environment variable to the dataset name from the Hub. This guide uses the [Pokémon BLIP captions](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions) dataset, but you can create and train on your own datasets as well (see the [Create a dataset for training](create_dataset) guide).
<Tip>
To monitor training progress with Weights & Biases, add the `--report_to=wandb` parameter to the training command. You'll also need to add the `--validation_prompt` to the training command to keep track of results. This can be really useful for debugging the model and viewing intermediate results.
</Tip>
```bash
export DATASET_NAME="lambdalabs/pokemon-blip-captions"
accelerate launch train_text_to_image_prior.py \
--mixed_precision="fp16" \
--dataset_name=$DATASET_NAME \
--resolution=768 \
--train_batch_size=4 \
--gradient_accumulation_steps=4 \
--gradient_checkpointing \
--dataloader_num_workers=4 \
--max_train_steps=15000 \
--learning_rate=1e-05 \
--max_grad_norm=1 \
--checkpoints_total_limit=3 \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--validation_prompts="A robot pokemon, 4k photo" \
--report_to="wandb" \
--push_to_hub \
--output_dir="wuerstchen-prior-pokemon-model"
```
Once training is complete, you can use your newly trained model for inference!
```py
import torch
from diffusers import AutoPipelineForText2Image
from diffusers.pipelines.wuerstchen import DEFAULT_STAGE_C_TIMESTEPS
pipeline = AutoPipelineForText2Image.from_pretrained("path/to/saved/model", torch_dtype=torch.float16).to("cuda")
caption = "A cute bird pokemon holding a shield"
images = pipeline(
caption,
width=1024,
height=1536,
prior_timesteps=DEFAULT_STAGE_C_TIMESTEPS,
prior_guidance_scale=4.0,
num_images_per_prompt=2,
).images
```
## Next steps
Congratulations on training a Wuerstchen model! To learn more about how to use your new model, the following may be helpful:
- Take a look at the [Wuerstchen](../api/pipelines/wuerstchen#text-to-image-generation) API documentation to learn more about how to use the pipeline for text-to-image generation and its limitations.
| diffusers/docs/source/en/training/wuerstchen.md/0 | {
"file_path": "diffusers/docs/source/en/training/wuerstchen.md",
"repo_id": "diffusers",
"token_count": 2904
} | 103 |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Distilled Stable Diffusion inference
[[open-in-colab]]
Stable Diffusion inference can be a computationally intensive process because it must iteratively denoise the latents to generate an image. To reduce the computational burden, you can use a *distilled* version of the Stable Diffusion model from [Nota AI](https://huggingface.co/nota-ai). The distilled version of their Stable Diffusion model eliminates some of the residual and attention blocks from the UNet, reducing the model size by 51% and improving latency on CPU/GPU by 43%.
<Tip>
Read this [blog post](https://huggingface.co/blog/sd_distillation) to learn more about how knowledge distillation training works to produce a faster, smaller, and cheaper generative model.
</Tip>
Let's load the distilled Stable Diffusion model and compare it against the original Stable Diffusion model:
```py
from diffusers import StableDiffusionPipeline
import torch
distilled = StableDiffusionPipeline.from_pretrained(
"nota-ai/bk-sdm-small", torch_dtype=torch.float16, use_safetensors=True,
).to("cuda")
original = StableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16, use_safetensors=True,
).to("cuda")
```
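You can optionally sanity-check the size difference by counting the UNet parameters of each pipeline (an illustrative check; the exact counts depend on the checkpoints):
```py
original_params = sum(p.numel() for p in original.unet.parameters())
distilled_params = sum(p.numel() for p in distilled.unet.parameters())
print(f"original UNet: {original_params / 1e6:.0f}M parameters")
print(f"distilled UNet: {distilled_params / 1e6:.0f}M parameters")
```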
Given a prompt, get the inference time for the original model:
```py
import time
seed = 2023
generator = torch.manual_seed(seed)
NUM_ITERS_TO_RUN = 3
NUM_INFERENCE_STEPS = 25
NUM_IMAGES_PER_PROMPT = 4
prompt = "a golden vase with different flowers"
start = time.time_ns()
for _ in range(NUM_ITERS_TO_RUN):
images = original(
prompt,
num_inference_steps=NUM_INFERENCE_STEPS,
generator=generator,
num_images_per_prompt=NUM_IMAGES_PER_PROMPT
).images
end = time.time_ns()
original_sd = f"{(end - start) / 1e6:.1f}"
print(f"Execution time -- {original_sd} ms\n")
"Execution time -- 45781.5 ms"
```
Time the distilled model inference:
```py
start = time.time_ns()
for _ in range(NUM_ITERS_TO_RUN):
images = distilled(
prompt,
num_inference_steps=NUM_INFERENCE_STEPS,
generator=generator,
num_images_per_prompt=NUM_IMAGES_PER_PROMPT
).images
end = time.time_ns()
distilled_sd = f"{(end - start) / 1e6:.1f}"
print(f"Execution time -- {distilled_sd} ms\n")
"Execution time -- 29884.2 ms"
```
<div class="flex gap-4">
<div>
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/original_sd.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">original Stable Diffusion (45781.5 ms)</figcaption>
</div>
<div>
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/distilled_sd.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">distilled Stable Diffusion (29884.2 ms)</figcaption>
</div>
</div>
## Tiny AutoEncoder
To speed inference up even more, use a tiny distilled version of the [Stable Diffusion VAE](https://huggingface.co/sayakpaul/taesdxl-diffusers) to denoise the latents into images. Replace the VAE in the distilled Stable Diffusion model with the tiny VAE:
```py
from diffusers import AutoencoderTiny
distilled.vae = AutoencoderTiny.from_pretrained(
"sayakpaul/taesd-diffusers", torch_dtype=torch.float16, use_safetensors=True,
).to("cuda")
```
Time the distilled model and distilled VAE inference:
```py
start = time.time_ns()
for _ in range(NUM_ITERS_TO_RUN):
images = distilled(
prompt,
num_inference_steps=NUM_INFERENCE_STEPS,
generator=generator,
num_images_per_prompt=NUM_IMAGES_PER_PROMPT
).images
end = time.time_ns()
distilled_tiny_sd = f"{(end - start) / 1e6:.1f}"
print(f"Execution time -- {distilled_tiny_sd} ms\n")
"Execution time -- 27165.7 ms"
```
<div class="flex justify-center">
<div>
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/distilled_sd_vae.png" />
<figcaption class="mt-2 text-center text-sm text-gray-500">distilled Stable Diffusion + Tiny AutoEncoder (27165.7 ms)</figcaption>
</div>
</div>
| diffusers/docs/source/en/using-diffusers/distilled_sd.md/0 | {
"file_path": "diffusers/docs/source/en/using-diffusers/distilled_sd.md",
"repo_id": "diffusers",
"token_count": 1680
} | 104 |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Push files to the Hub
[[open-in-colab]]
🤗 Diffusers provides a [`~diffusers.utils.PushToHubMixin`] for uploading your model, scheduler, or pipeline to the Hub. It is an easy way to store your files on the Hub, and also allows you to share your work with others. Under the hood, the [`~diffusers.utils.PushToHubMixin`]:
1. creates a repository on the Hub
2. saves your model, scheduler, or pipeline files so they can be reloaded later
3. uploads the folder containing these files to the Hub
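These steps map roughly onto the following `huggingface_hub` operations. This is a simplified sketch of the idea, not the mixin's actual implementation, and the repository id is a placeholder:
```py
from huggingface_hub import create_repo, upload_folder

repo_id = "your-namespace/my-model"  # placeholder repository id
create_repo(repo_id, exist_ok=True)  # 1. create the repository on the Hub
# 2. save the files locally first, e.g. model.save_pretrained("my-model")
upload_folder(repo_id=repo_id, folder_path="my-model")  # 3. upload the folder with those files
```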
This guide will show you how to use the [`~diffusers.utils.PushToHubMixin`] to upload your files to the Hub.
You'll need to log in to your Hub account with your access [token](https://huggingface.co/settings/tokens) first:
```py
from huggingface_hub import notebook_login
notebook_login()
```
## Models
To push a model to the Hub, call [`~diffusers.utils.PushToHubMixin.push_to_hub`] and specify the repository id of the model to be stored on the Hub:
```py
from diffusers import ControlNetModel
controlnet = ControlNetModel(
block_out_channels=(32, 64),
layers_per_block=2,
in_channels=4,
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
cross_attention_dim=32,
conditioning_embedding_out_channels=(16, 32),
)
controlnet.push_to_hub("my-controlnet-model")
```
For models, you can also specify the [*variant*](loading#checkpoint-variants) of the weights to push to the Hub. For example, to push `fp16` weights:
```py
controlnet.push_to_hub("my-controlnet-model", variant="fp16")
```
The [`~diffusers.utils.PushToHubMixin.push_to_hub`] function saves the model's `config.json` file and the weights are automatically saved in the `safetensors` format.
Now you can reload the model from your repository on the Hub:
```py
model = ControlNetModel.from_pretrained("your-namespace/my-controlnet-model")
```
## Scheduler
To push a scheduler to the Hub, call [`~diffusers.utils.PushToHubMixin.push_to_hub`] and specify the repository id of the scheduler to be stored on the Hub:
```py
from diffusers import DDIMScheduler
scheduler = DDIMScheduler(
beta_start=0.00085,
beta_end=0.012,
beta_schedule="scaled_linear",
clip_sample=False,
set_alpha_to_one=False,
)
scheduler.push_to_hub("my-controlnet-scheduler")
```
The [`~diffusers.utils.PushToHubMixin.push_to_hub`] function saves the scheduler's `scheduler_config.json` file to the specified repository.
Now you can reload the scheduler from your repository on the Hub:
```py
scheduler = DDIMScheduler.from_pretrained("your-namespace/my-controlnet-scheduler")
```
## Pipeline
You can also push an entire pipeline with all its components to the Hub. For example, initialize the components of a [`StableDiffusionPipeline`] with the parameters you want:
```py
from diffusers import (
UNet2DConditionModel,
AutoencoderKL,
DDIMScheduler,
StableDiffusionPipeline,
)
from transformers import CLIPTextModel, CLIPTextConfig, CLIPTokenizer
unet = UNet2DConditionModel(
block_out_channels=(32, 64),
layers_per_block=2,
sample_size=32,
in_channels=4,
out_channels=4,
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
cross_attention_dim=32,
)
scheduler = DDIMScheduler(
beta_start=0.00085,
beta_end=0.012,
beta_schedule="scaled_linear",
clip_sample=False,
set_alpha_to_one=False,
)
vae = AutoencoderKL(
block_out_channels=[32, 64],
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=4,
)
text_encoder_config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=32,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
)
text_encoder = CLIPTextModel(text_encoder_config)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
```
Pass all of the components to the [`StableDiffusionPipeline`] and call [`~diffusers.utils.PushToHubMixin.push_to_hub`] to push the pipeline to the Hub:
```py
components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
pipeline = StableDiffusionPipeline(**components)
pipeline.push_to_hub("my-pipeline")
```
The [`~diffusers.utils.PushToHubMixin.push_to_hub`] function saves each component to a subfolder in the repository. Now you can reload the pipeline from your repository on the Hub:
```py
pipeline = StableDiffusionPipeline.from_pretrained("your-namespace/my-pipeline")
```
## Privacy
Set `private=True` in the [`~diffusers.utils.PushToHubMixin.push_to_hub`] function to keep your model, scheduler, or pipeline files private:
```py
controlnet.push_to_hub("my-controlnet-model-private", private=True)
```
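Loading files from a private repository later requires authentication. A minimal sketch, assuming you are already logged in (for example with `notebook_login` as shown earlier) and have access to the repository:
```py
from diffusers import ControlNetModel

# "your-namespace" is a placeholder for your Hub username or organization
model = ControlNetModel.from_pretrained("your-namespace/my-controlnet-model-private")
```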
Private repositories are only visible to you, and other users won't be able to clone the repository and your repository won't appear in search results. Even if a user has the URL to your private repository, they'll receive a `404 - Sorry, we can't find the page you are looking for`. You must be [logged in](https://huggingface.co/docs/huggingface_hub/quick-start#login) to load a model from a private repository. | diffusers/docs/source/en/using-diffusers/push_to_hub.md/0 | {
"file_path": "diffusers/docs/source/en/using-diffusers/push_to_hub.md",
"repo_id": "diffusers",
"token_count": 2083
} | 105 |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
<p align="center">
<br>
<img src="https://raw.githubusercontent.com/huggingface/diffusers/77aadfee6a891ab9fcfb780f87c693f7a5beeb8e/docs/source/imgs/diffusers_library.jpg" width="400"/>
<br>
</p>
# Diffusers
🤗 Diffusers is a library that provides state-of-the-art pretrained diffusion models for generating images, audio, and even 3D structures of molecules. Whether you're looking for a simple inference solution or want to train your own diffusion model, 🤗 Diffusers is a modular toolbox that supports both. Our library is designed with a focus on [usability over performance](conceptual/philosophy#usability-over-performance), [simple over easy](conceptual/philosophy#simple-over-easy), and [customizability over abstraction](conceptual/philosophy#tweakable-contributorfriendly-over-abstraction).
The library has three main components:
- State-of-the-art [diffusion pipelines](api/pipelines/overview) that can run inference with just a few lines of code. Diffusers contains many pipelines; for an exhaustive list of the available pipelines and the tasks they solve, take a look at the table in the pipeline [overview](https://huggingface.co/docs/diffusers/api/pipelines/overview).
- Interchangeable [noise schedulers](api/schedulers/overview) for balancing the trade-off between generation speed and quality.
- Pretrained [models](api/models) that can be used as building blocks and, combined with schedulers, allow you to build your own end-to-end diffusion systems.
<div class="mt-10">
<div class="w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-2 md:gap-y-4 md:gap-x-5">
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./tutorials/tutorial_overview"
><div class="w-full text-center bg-gradient-to-br from-blue-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Tutorials</div>
<p class="text-gray-700">Learn the basic skills you need to start generating outputs, building your own diffusion system, and training diffusion models. We recommend starting here if you're using 🤗 Diffusers for the first time!</p>
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./using-diffusers/loading_overview"
><div class="w-full text-center bg-gradient-to-br from-indigo-400 to-indigo-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">How-to guides</div>
<p class="text-gray-700">Practical guides that help you load pipelines, models, and schedulers. You'll also learn how to use pipelines for specific tasks, control how outputs are generated, optimize inference speed, and apply different training techniques.</p>
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./conceptual/philosophy"
><div class="w-full text-center bg-gradient-to-br from-pink-400 to-pink-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Conceptual guides</div>
<p class="text-gray-700">Understand why the library was designed the way it is, and learn more about the ethical guidelines and safety measures for using the library.</p>
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./api/models/overview"
><div class="w-full text-center bg-gradient-to-br from-purple-400 to-purple-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Reference</div>
<p class="text-gray-700">Technical descriptions of how 🤗 Diffusers classes and methods work.</p>
</a>
</div>
</div> | diffusers/docs/source/ja/index.md/0 | {
"file_path": "diffusers/docs/source/ja/index.md",
"repo_id": "diffusers",
"token_count": 2031
} | 106 |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# How to use OpenVINO for inference
🤗 [Optimum](https://github.com/huggingface/optimum-intel) provides Stable Diffusion pipelines compatible with OpenVINO.
You can now easily perform inference with OpenVINO Runtime on a variety of Intel processors (see the [full list](https://docs.openvino.ai/latest/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) of supported devices).
## Installation
Install 🤗 Optimum with the following command:
```
pip install optimum["openvino"]
```
## Stable Diffusion inference
To load an OpenVINO model and run inference with OpenVINO Runtime, you need to replace `StableDiffusionPipeline` with `OVStableDiffusionPipeline`. If you want to load a PyTorch model and convert it to the OpenVINO format on the fly, set `export=True`.
```python
from optimum.intel.openvino import OVStableDiffusionPipeline
model_id = "runwayml/stable-diffusion-v1-5"
pipe = OVStableDiffusionPipeline.from_pretrained(model_id, export=True)
prompt = "a photo of an astronaut riding a horse on mars"
images = pipe(prompt).images[0]
```
You can find more examples (such as static reshaping and model compilation) in the [Optimum documentation](https://huggingface.co/docs/optimum/intel/inference#export-and-inference-of-stable-diffusion-models).
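For example, statically reshaping the pipeline to fixed input shapes and recompiling it can speed up inference further. The sketch below is based on the Optimum-Intel API; double-check the exact method names (`reshape`, `compile`) and arguments against the Optimum documentation:
```python
# Statically reshape the pipeline to a fixed batch size and resolution, then recompile
batch_size, num_images, height, width = 1, 1, 512, 512
pipe.reshape(batch_size=batch_size, height=height, width=width, num_images_per_prompt=num_images)
pipe.compile()

image = pipe(prompt, height=height, width=width, num_images_per_prompt=num_images).images[0]
```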
| diffusers/docs/source/ko/optimization/open_vino.md/0 | {
"file_path": "diffusers/docs/source/ko/optimization/open_vino.md",
"repo_id": "diffusers",
"token_count": 920
} | 107 |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Text-to-image
<Tip warning={true}>
The text-to-image fine-tuning script is experimental. It's easy to overfit and run into issues like catastrophic forgetting. We recommend exploring different hyperparameters to get the best results on your dataset.
</Tip>
Text-to-image models like Stable Diffusion generate an image from a text prompt. This guide shows you how to fine-tune the [`CompVis/stable-diffusion-v1-4`](https://huggingface.co/CompVis/stable-diffusion-v1-4) model on your own dataset with PyTorch and Flax. All the training scripts for text-to-image fine-tuning used in this guide can be found in this [repository](https://github.com/huggingface/diffusers/tree/main/examples/text_to_image) if you're interested in taking a closer look.
Before running the scripts, make sure to install the library's training dependencies:
```bash
pip install git+https://github.com/huggingface/diffusers.git
pip install -U -r requirements.txt
```
And initialize an [🤗 Accelerate](https://github.com/huggingface/accelerate/) environment:
```bash
accelerate config
```
If you have already cloned the repository, you don't need to go through these steps. Instead, you can pass the path to your local checkout to the training script and it will be loaded from there.
### Hardware requirements
With `gradient_checkpointing` and `mixed_precision`, it should be possible to fine-tune the model on a single 24GB GPU. For a higher `batch_size` and faster training, it's better to use GPUs with more than 30GB of GPU memory. You can also use JAX/Flax for fine-tuning on TPUs or GPUs; see [below](#flax-jax-finetuning) for more details.
You can reduce memory usage even further by enabling memory-efficient attention with xFormers. Make sure [xFormers is installed](./optimization/xformers) and pass the `--enable_xformers_memory_efficient_attention` flag to the training script.
xFormers is not available for Flax.
## Upload the model to the Hub
Add the following argument to the training script to store the model on the Hub:
```bash
--push_to_hub
```
## Save and load checkpoints
It's a good idea to regularly save checkpoints in case anything happens during training. To save a checkpoint, pass the following argument to the training script:
```bash
--checkpointing_steps=500
```
The full training state is saved in a subfolder of `output_dir` every 500 steps. A checkpoint is named `checkpoint-` followed by the number of steps trained so far; for example, `checkpoint-1500` is a checkpoint saved after 1500 training steps.
To load a checkpoint and resume training, pass the `--resume_from_checkpoint` argument to the training script and specify the checkpoint you want to resume from. For example, the following argument resumes training from the checkpoint saved after 1500 training steps:
```bash
--resume_from_checkpoint="checkpoint-1500"
```
## Fine-tuning
<frameworkcontent>
<pt>
Launch the [PyTorch training script](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py) for a fine-tuning run on the [Pokémon BLIP captions](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions) dataset like this:
```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export dataset_name="lambdalabs/pokemon-blip-captions"
accelerate launch train_text_to_image.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--dataset_name=$dataset_name \
--use_ema \
--resolution=512 --center_crop --random_flip \
--train_batch_size=1 \
--gradient_accumulation_steps=4 \
--gradient_checkpointing \
--mixed_precision="fp16" \
--max_train_steps=15000 \
--learning_rate=1e-05 \
--max_grad_norm=1 \
--lr_scheduler="constant" --lr_warmup_steps=0 \
--output_dir="sd-pokemon-model"
```
To fine-tune on your own dataset, prepare the dataset according to the format required by 🤗 [Datasets](https://huggingface.co/docs/datasets/index). You can [upload your dataset to the Hub](https://huggingface.co/docs/datasets/image_dataset#upload-dataset-to-the-hub), or you can [prepare a local folder with your files](https://huggingface.co/docs/datasets/image_dataset#imagefolder).
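For instance, a local `imagefolder` dataset can pair each image with a caption through a `metadata.jsonl` file. The sketch below assumes a `text` caption column and a simple flat layout; pass `--image_column`/`--caption_column` to the script if your column names differ:
```python
# Assumed layout of path_to_your_dataset/ (illustrative):
#   metadata.jsonl  -> one JSON object per line, e.g. {"file_name": "0001.png", "text": "a caption"}
#   0001.png
#   0002.png
from datasets import load_dataset

dataset = load_dataset("imagefolder", data_dir="path_to_your_dataset", split="train")
print(dataset[0]["text"])  # the caption paired with the first image
```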
Modify the script if you want to use custom loading logic; we left pointers in the appropriate places in the code to help you. 🤗 The example script below shows how to fine-tune on a local dataset in `TRAIN_DIR` and where to save the model with `OUTPUT_DIR`:
```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export TRAIN_DIR="path_to_your_dataset"
export OUTPUT_DIR="path_to_save_model"
accelerate launch train_text_to_image.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--train_data_dir=$TRAIN_DIR \
--use_ema \
--resolution=512 --center_crop --random_flip \
--train_batch_size=1 \
--gradient_accumulation_steps=4 \
--gradient_checkpointing \
--mixed_precision="fp16" \
--max_train_steps=15000 \
--learning_rate=1e-05 \
--max_grad_norm=1 \
--lr_scheduler="constant" --lr_warmup_steps=0 \
--output_dir=${OUTPUT_DIR}
```
</pt>
<jax>
With Flax, it's possible to train a Stable Diffusion model faster on TPUs and GPUs thanks to [@duongna211](https://github.com/duongna21). This is very efficient on TPU hardware but works great on GPUs too. The Flax training script doesn't yet support features like gradient checkpointing or gradient accumulation, so you'll need a GPU with at least 30GB of memory or a TPU v3.
Before running the script, make sure you have the requirements installed:
```bash
pip install -U -r requirements_flax.txt
```
Then you can launch the [Flax training script](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_flax.py) like this:
```bash
export MODEL_NAME="runwayml/stable-diffusion-v1-5"
export dataset_name="lambdalabs/pokemon-blip-captions"
python train_text_to_image_flax.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--dataset_name=$dataset_name \
--resolution=512 --center_crop --random_flip \
--train_batch_size=1 \
--max_train_steps=15000 \
--learning_rate=1e-05 \
--max_grad_norm=1 \
--output_dir="sd-pokemon-model"
```
To fine-tune on your own dataset, prepare the dataset according to the format required by 🤗 [Datasets](https://huggingface.co/docs/datasets/index). You can [upload your dataset to the Hub](https://huggingface.co/docs/datasets/image_dataset#upload-dataset-to-the-hub), or you can [prepare a local folder with your files](https://huggingface.co/docs/datasets/image_dataset#imagefolder).
Modify the script if you want to use custom loading logic; we left pointers in the appropriate places in the code to help you. 🤗 The example script below shows how to fine-tune on a local dataset in `TRAIN_DIR`:
```bash
export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
export TRAIN_DIR="path_to_your_dataset"
python train_text_to_image_flax.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--train_data_dir=$TRAIN_DIR \
--resolution=512 --center_crop --random_flip \
--train_batch_size=1 \
--mixed_precision="fp16" \
--max_train_steps=15000 \
--learning_rate=1e-05 \
--max_grad_norm=1 \
--output_dir="sd-pokemon-model"
```
</jax>
</frameworkcontent>
## LoRA
To fine-tune text-to-image models, you can also use LoRA (Low-Rank Adaptation of Large Language Models), a fine-tuning technique that makes training large models more lightweight. For more details, take a look at the [LoRA training](lora#text-to-image) guide.
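After LoRA training, the lightweight LoRA weights are typically loaded on top of the base model at inference time. A rough sketch; the exact loading API and the output path depend on your diffusers version and on how the LoRA guide saves the weights:
```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")

# "path_to_lora_output" is a placeholder for the directory or Hub repo produced by LoRA training
pipe.load_lora_weights("path_to_lora_output")
image = pipe(prompt="yoda").images[0]
```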
## Inference
Load your fine-tuned model for inference by passing the model path or the model name on the Hub to the [`StableDiffusionPipeline`]:
<frameworkcontent>
<pt>
```python
import torch
from diffusers import StableDiffusionPipeline
model_path = "path_to_saved_model"
pipe = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=torch.float16)
pipe.to("cuda")
image = pipe(prompt="yoda").images[0]
image.save("yoda-pokemon.png")
```
</pt>
<jax>
```python
import jax
import numpy as np
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxStableDiffusionPipeline
model_path = "path_to_saved_model"
pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(model_path, dtype=jax.numpy.bfloat16)
prompt = "yoda pokemon"
prng_seed = jax.random.PRNGKey(0)
num_inference_steps = 50
num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = pipeline.prepare_inputs(prompt)
# shard inputs and rng
params = replicate(params)
prng_seed = jax.random.split(prng_seed, jax.device_count())
prompt_ids = shard(prompt_ids)
images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
images[0].save("yoda-pokemon.png")
```
</jax>
</frameworkcontent> | diffusers/docs/source/ko/training/text2image.md/0 | {
"file_path": "diffusers/docs/source/ko/training/text2image.md",
"repo_id": "diffusers",
"token_count": 6015
} | 108 |