hf_public_repos/accelerate/src/accelerate/commands/merge.py
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from accelerate.commands.utils import CustomArgumentParser
from accelerate.utils import merge_fsdp_weights


description = """Utility to merge the weights from multiple FSDP checkpoints into a single combined checkpoint.
Should be used if `SHARDED_STATE_DICT` was used for the model. Weights will be saved to `{output_path}`.

This is a CPU-bound process and requires enough RAM to load the entire model state dict."""


def merge_command(args):
    merge_fsdp_weights(
        args.checkpoint_directory, args.output_path, not args.unsafe_serialization, args.remove_checkpoint_dir
    )


def merge_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("merge-weights", description=description)
    else:
        parser = CustomArgumentParser(description=description)

    parser.add_argument("checkpoint_directory", type=str, help="A directory containing sharded weights saved by FSDP.")
    parser.add_argument(
        "output_path",
        type=str,
        help="The path to save the merged weights. Defaults to the current directory. ",
    )
    parser.add_argument(
        "--unsafe_serialization",
        action="store_false",
        default=False,
        help="Whether to save the merged weights as `.bin` rather than `.safetensors` (not recommended).",
    )
    parser.add_argument(
        "--remove_checkpoint_dir",
        action="store_true",
        help="Whether to remove the checkpoint directory after merging.",
        default=False,
    )

    if subparsers is not None:
        parser.set_defaults(func=merge_command)
    return parser


def main():
    parser = merge_command_parser()
    args = parser.parse_args()
    merge_command(args)


if __name__ == "__main__":
    main()
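A minimal usage sketch (not part of the file above): the parser and command can also be driven programmatically instead of via `accelerate merge-weights`; the checkpoint and output paths here are hypothetical placeholders.

from accelerate.commands.merge import merge_command, merge_command_parser

# Sketch: programmatic equivalent of
#   accelerate merge-weights outputs/pytorch_model_fsdp_0 outputs/merged
# (both paths are hypothetical examples).
parser = merge_command_parser()
args = parser.parse_args(["outputs/pytorch_model_fsdp_0", "outputs/merged"])
merge_command(args)  # loads all shards into RAM, then writes the combined checkpoint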
hf_public_repos/accelerate/src/accelerate/commands/launch.py
#!/usr/bin/env python # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import importlib import logging import os import subprocess import sys from pathlib import Path import psutil import torch from accelerate.commands.config import default_config_file, load_config_from_file from accelerate.commands.config.config_args import SageMakerConfig from accelerate.commands.config.config_utils import DYNAMO_BACKENDS from accelerate.commands.utils import CustomArgumentParser from accelerate.state import get_int_from_env from accelerate.utils import ( ComputeEnvironment, DistributedType, PrepareForLaunch, _filter_args, check_cuda_p2p_ib_support, convert_dict_to_env_variables, is_bf16_available, is_deepspeed_available, is_mlu_available, is_musa_available, is_npu_available, is_rich_available, is_sagemaker_available, is_torch_version, is_torch_xla_available, is_xpu_available, patch_environment, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, str_to_bool, ) from accelerate.utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS, TORCH_DYNAMO_MODES if is_rich_available(): from rich import get_console from rich.logging import RichHandler FORMAT = "%(message)s" logging.basicConfig(format=FORMAT, datefmt="[%X]", handlers=[RichHandler()]) logger = logging.getLogger(__name__) options_to_group = { "multi_gpu": "Distributed GPUs", "tpu": "TPU", "use_deepspeed": "DeepSpeed Arguments", "use_fsdp": "FSDP Arguments", "use_megatron_lm": "Megatron-LM Arguments", "fp8_backend": "FP8 Arguments", } def clean_option(option): "Finds all cases of - after the first two characters and changes them to _" if "fp8_backend" in option: option = "--fp8_backend" if option.startswith("--"): return option[2:].replace("-", "_") class CustomHelpFormatter(argparse.HelpFormatter): """ This is a custom help formatter that will hide all arguments that are not used in the command line when the help is called. This is useful for the case where the user is using a specific platform and only wants to see the arguments for that platform. 
""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.titles = [ "Hardware Selection Arguments", "Resource Selection Arguments", "Training Paradigm Arguments", "positional arguments", "optional arguments", ] def add_argument(self, action: argparse.Action): if "accelerate" in sys.argv[0] and "launch" in sys.argv[1:]: args = sys.argv[2:] else: args = sys.argv[1:] if len(args) > 1: args = list(map(clean_option, args)) used_platforms = [arg for arg in args if arg in options_to_group.keys()] used_titles = [options_to_group[o] for o in used_platforms] if action.container.title not in self.titles + used_titles: action.help = argparse.SUPPRESS elif action.container.title == "Hardware Selection Arguments": if set(action.option_strings).isdisjoint(set(args)): action.help = argparse.SUPPRESS else: action.help = action.help + " (currently selected)" elif action.container.title == "Training Paradigm Arguments": if set(action.option_strings).isdisjoint(set(args)): action.help = argparse.SUPPRESS else: action.help = action.help + " (currently selected)" action.option_strings = [s for s in action.option_strings if "-" not in s[2:]] super().add_argument(action) def end_section(self): if len(self._current_section.items) < 2: self._current_section.items = [] self._current_section.heading = "" super().end_section() def launch_command_parser(subparsers=None): description = "Launch a python script in a distributed scenario. Arguments can be passed in with either hyphens (`--num-processes=2`) or underscores (`--num_processes=2`)" if subparsers is not None: parser = subparsers.add_parser( "launch", description=description, add_help=False, allow_abbrev=False, formatter_class=CustomHelpFormatter ) else: parser = CustomArgumentParser( "Accelerate launch command", description=description, add_help=False, allow_abbrev=False, formatter_class=CustomHelpFormatter, ) parser.add_argument("-h", "--help", action="help", help="Show this help message and exit.") parser.add_argument( "--config_file", default=None, help="The config file to use for the default values in the launching script.", ) parser.add_argument( "--quiet", "-q", action="store_true", help="Silence subprocess errors from the launch stack trace and only show the relevant tracebacks. (Only applicable to DeepSpeed and single-process configurations)", ) # Hardware selection arguments hardware_args = parser.add_argument_group( "Hardware Selection Arguments", "Arguments for selecting the hardware to be used." ) hardware_args.add_argument( "--cpu", default=False, action="store_true", help="Whether or not to force the training on the CPU." ) hardware_args.add_argument( "--multi_gpu", default=False, action="store_true", help="Whether or not this should launch a distributed GPU training.", ) hardware_args.add_argument( "--tpu", default=False, action="store_true", help="Whether or not this should launch a TPU training." ) hardware_args.add_argument( "--ipex", default=False, action="store_true", help="Whether or not this should launch a Intel PyTorch Extension (IPEX) training.", ) # Resource selection arguments resource_args = parser.add_argument_group( "Resource Selection Arguments", "Arguments for fine-tuning how available hardware should be used." ) resource_args.add_argument( "--mixed_precision", type=str, choices=["no", "fp16", "bf16", "fp8"], help="Whether or not to use mixed precision training. " "Choose between FP16 and BF16 (bfloat16) training. 
" "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.", ) resource_args.add_argument( "--num_processes", type=int, default=None, help="The total number of processes to be launched in parallel." ) resource_args.add_argument( "--num_machines", type=int, default=None, help="The total number of machines used in this training." ) resource_args.add_argument( "--num_cpu_threads_per_process", type=int, default=None, help="The number of CPU threads per process. Can be tuned for optimal performance.", ) resource_args.add_argument( "--enable_cpu_affinity", default=False, action="store_true", help="Whether or not CPU affinity and balancing should be enabled. Currently only supported on NVIDIA hardware.", ) # Dynamo arguments resource_args.add_argument( "--dynamo_backend", type=str, choices=["no"] + [b.lower() for b in DYNAMO_BACKENDS], help="Choose a backend to optimize your training with dynamo, see more at " "https://github.com/pytorch/torchdynamo.", ) resource_args.add_argument( "--dynamo_mode", type=str, default="default", choices=TORCH_DYNAMO_MODES, help="Choose a mode to optimize your training with dynamo.", ) resource_args.add_argument( "--dynamo_use_fullgraph", default=False, action="store_true", help="Whether to use full graph mode for dynamo or it is ok to break model into several subgraphs", ) resource_args.add_argument( "--dynamo_use_dynamic", default=False, action="store_true", help="Whether to enable dynamic shape tracing.", ) # Training Paradigm arguments paradigm_args = parser.add_argument_group( "Training Paradigm Arguments", "Arguments for selecting which training paradigm to be used." ) paradigm_args.add_argument( "--use_deepspeed", default=False, action="store_true", help="Whether to use deepspeed.", ) paradigm_args.add_argument( "--use_fsdp", default=False, action="store_true", help="Whether to use fsdp.", ) paradigm_args.add_argument( "--use_megatron_lm", default=False, action="store_true", help="Whether to use Megatron-LM.", ) paradigm_args.add_argument( "--use_xpu", default=False, action="store_true", help="Whether to use IPEX plugin to speed up training on XPU specifically.", ) # distributed GPU training arguments distributed_args = parser.add_argument_group("Distributed GPUs", "Arguments related to distributed GPU training.") distributed_args.add_argument( "--gpu_ids", default=None, help="What GPUs (by id) should be used for training on this machine as a comma-seperated list", ) distributed_args.add_argument( "--same_network", default=False, action="store_true", help="Whether all machines used for multinode training exist on the same local network.", ) distributed_args.add_argument( "--machine_rank", type=int, default=None, help="The rank of the machine on which this script is launched." ) distributed_args.add_argument( "--main_process_ip", type=str, default=None, help="The IP address of the machine of rank 0." ) distributed_args.add_argument( "--main_process_port", type=int, default=None, help="The port to use to communicate with the machine of rank 0.", ) distributed_args.add_argument( "-t", "--tee", default="0", type=str, help="Tee std streams into a log file and also to console.", ) distributed_args.add_argument( "--log_dir", type=str, default=None, help=( "Base directory to use for log files when using torchrun/torch.distributed.run as launcher. " "Use with --tee to redirect std streams info log files." 
), ) distributed_args.add_argument( "--role", type=str, default="default", help="User-defined role for the workers.", ) # Rendezvous related arguments distributed_args.add_argument( "--rdzv_backend", type=str, default="static", help="The rendezvous method to use, such as 'static' (the default) or 'c10d'", ) distributed_args.add_argument( "--rdzv_conf", type=str, default="", help="Additional rendezvous configuration (<key1>=<value1>,<key2>=<value2>,...).", ) distributed_args.add_argument( "--max_restarts", type=int, default=0, help="Maximum number of worker group restarts before failing.", ) distributed_args.add_argument( "--monitor_interval", type=float, default=0.1, help="Interval, in seconds, to monitor the state of workers.", ) parser.add_argument( "-m", "--module", action="store_true", help="Change each process to interpret the launch script as a Python module, executing with the same behavior as 'python -m'.", ) parser.add_argument( "--no_python", action="store_true", help="Skip prepending the training script with 'python' - just execute it directly. Useful when the script is not a Python script.", ) # TPU arguments tpu_args = parser.add_argument_group("TPU", "Arguments related to TPU.") tpu_args.add_argument( "--tpu_cluster", action="store_true", dest="tpu_use_cluster", help="Whether to use a GCP TPU pod for training.", ) tpu_args.add_argument( "--no_tpu_cluster", action="store_false", dest="tpu_use_cluster", help="Should not be passed explicitly, this is for internal use only.", ) tpu_args.add_argument( "--tpu_use_sudo", action="store_true", help="Whether to use `sudo` when running the TPU training script in each pod.", ) tpu_args.add_argument( "--vm", type=str, action="append", help=( "List of single Compute VM instance names. " "If not provided we assume usage of instance groups. For TPU pods." ), ) tpu_args.add_argument( "--env", type=str, action="append", help="List of environment variables to set on the Compute VM instances. For TPU pods.", ) tpu_args.add_argument( "--main_training_function", type=str, default=None, help="The name of the main function to be executed in your script (only for TPU training).", ) tpu_args.add_argument( "--downcast_bf16", action="store_true", help="Whether when using bf16 precision on TPUs if both float and double tensors are cast to bfloat16 or if double tensors remain as float32.", ) # DeepSpeed arguments deepspeed_args = parser.add_argument_group("DeepSpeed Arguments", "Arguments related to DeepSpeed.") deepspeed_args.add_argument( "--deepspeed_config_file", default=None, type=str, help="DeepSpeed config file.", ) deepspeed_args.add_argument( "--zero_stage", default=None, type=int, help="DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). " "If unspecified, will default to `2`.", ) deepspeed_args.add_argument( "--offload_optimizer_device", default=None, type=str, help="Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed). " "If unspecified, will default to 'none'.", ) deepspeed_args.add_argument( "--offload_param_device", default=None, type=str, help="Decides where (none|cpu|nvme) to offload parameters (useful only when `use_deepspeed` flag is passed). " "If unspecified, will default to 'none'.", ) deepspeed_args.add_argument( "--offload_optimizer_nvme_path", default=None, type=str, help="Decides Nvme Path to offload optimizer states (useful only when `use_deepspeed` flag is passed). 
" "If unspecified, will default to 'none'.", ) deepspeed_args.add_argument( "--offload_param_nvme_path", default=None, type=str, help="Decides Nvme Path to offload parameters (useful only when `use_deepspeed` flag is passed). " "If unspecified, will default to 'none'.", ) deepspeed_args.add_argument( "--gradient_accumulation_steps", default=None, type=int, help="No of gradient_accumulation_steps used in your training script (useful only when `use_deepspeed` flag is passed). " "If unspecified, will default to `1`.", ) deepspeed_args.add_argument( "--gradient_clipping", default=None, type=float, help="gradient clipping value used in your training script (useful only when `use_deepspeed` flag is passed). " "If unspecified, will default to `1.0`.", ) deepspeed_args.add_argument( "--zero3_init_flag", default=None, type=str, help="Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. " "Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `true`.", ) deepspeed_args.add_argument( "--zero3_save_16bit_model", default=None, type=str, help="Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. " "Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `false`.", ) deepspeed_args.add_argument( "--deepspeed_hostfile", default=None, type=str, help="DeepSpeed hostfile for configuring multi-node compute resources.", ) deepspeed_args.add_argument( "--deepspeed_exclusion_filter", default=None, type=str, help="DeepSpeed exclusion filter string when using mutli-node setup.", ) deepspeed_args.add_argument( "--deepspeed_inclusion_filter", default=None, type=str, help="DeepSpeed inclusion filter string when using mutli-node setup.", ) deepspeed_args.add_argument( "--deepspeed_multinode_launcher", default=None, type=str, help="DeepSpeed multi-node launcher to use. If unspecified, will default to `pdsh`.", ) deepspeed_args.add_argument( "--deepspeed_moe_layer_cls_names", default=None, type=str, help="comma-separated list of transformer MoE layer class names (case-sensitive) to wrap ,e.g, `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock` ..." " (useful only when `use_deepspeed` flag is passed).", ) # fsdp arguments fsdp_args = parser.add_argument_group("FSDP Arguments", "Arguments related to Fully Shared Data Parallelism.") fsdp_args.add_argument( "--fsdp_offload_params", default="false", type=str, help="Decides Whether (true|false) to offload parameters and gradients to CPU. (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_min_num_params", type=int, default=1e8, help="FSDP's minimum number of parameters for Default Auto Wrapping. (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_sharding_strategy", type=str, default="FULL_SHARD", help="FSDP's Sharding Strategy. (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_auto_wrap_policy", type=str, default=None, help="FSDP's auto wrap policy. (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_transformer_layer_cls_to_wrap", default=None, type=str, help="Transformer layer class name (case-sensitive) to wrap ,e.g, `BertLayer`, `GPTJBlock`, `T5Block` .... " "(useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_backward_prefetch", default=None, type=str, help="FSDP's backward prefetch policy. 
(useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_state_dict_type", default=None, type=str, help="FSDP's state dict type. (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_forward_prefetch", default="false", type=str, help="If True, then FSDP explicitly prefetches the next upcoming " "all-gather while executing in the forward pass (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_use_orig_params", default="true", type=str, help="If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable paramteres." " (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_cpu_ram_efficient_loading", default="true", type=str, help="If True, only the first process loads the pretrained model checkoint while all other processes have empty weights. " "Only applicable for 🤗 Transformers. When using this, `--fsdp_sync_module_states` needs to True. " "(useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_sync_module_states", default="true", type=str, help="If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0." " (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_activation_checkpointing", default="false", type=str, help="Decides Whether (true|false) intermediate activations are freed during the forward pass, and a checkpoint is left as a placeholder. (useful only when `use_fsdp` flag is passed).", ) # megatron_lm args megatron_lm_args = parser.add_argument_group("Megatron-LM Arguments", "Arguments related to Megatron-LM.") megatron_lm_args.add_argument( "--megatron_lm_tp_degree", type=int, default=1, help="Megatron-LM's Tensor Parallelism (TP) degree. (useful only when `use_megatron_lm` flag is passed).", ) megatron_lm_args.add_argument( "--megatron_lm_pp_degree", type=int, default=1, help="Megatron-LM's Pipeline Parallelism (PP) degree. (useful only when `use_megatron_lm` flag is passed).", ) megatron_lm_args.add_argument( "--megatron_lm_num_micro_batches", type=int, default=None, help="Megatron-LM's number of micro batches when PP degree > 1. (useful only when `use_megatron_lm` flag is passed).", ) megatron_lm_args.add_argument( "--megatron_lm_sequence_parallelism", default=None, type=str, help="Decides Whether (true|false) to enable Sequence Parallelism when TP degree > 1. " "(useful only when `use_megatron_lm` flag is passed).", ) megatron_lm_args.add_argument( "--megatron_lm_recompute_activations", default=None, type=str, help="Decides Whether (true|false) to enable Selective Activation Recomputation. " "(useful only when `use_megatron_lm` flag is passed).", ) megatron_lm_args.add_argument( "--megatron_lm_use_distributed_optimizer", default=None, type=str, help="Decides Whether (true|false) to use distributed optimizer " "which shards optimizer state and gradients across Data Pralellel (DP) ranks. " "(useful only when `use_megatron_lm` flag is passed).", ) megatron_lm_args.add_argument( "--megatron_lm_gradient_clipping", default=1.0, type=float, help="Megatron-LM's gradient clipping value based on global L2 Norm (0 to disable). 
" "(useful only when `use_megatron_lm` flag is passed).", ) # FP8 arguments fp8_args = parser.add_argument_group( "FP8 Arguments", "Arguments related to FP8 training (requires `--mixed_precision=fp8`)" ) fp8_args.add_argument( "--fp8_backend", type=str, choices=["te", "msamp"], help="Choose a backend to train with FP8 (te: TransformerEngine, msamp: MS-AMP)", ) fp8_args.add_argument( "--fp8_use_autocast_during_eval", default=False, action="store_true", help="Whether to use FP8 autocast during eval mode (useful only when `--fp8_backend=te` is passed). Generally better metrics are found when this is not passed.", ) fp8_args.add_argument( "--fp8_margin", type=int, default=0, help="The margin to use for the gradient scaling (useful only when `--fp8_backend=te` is passed).", ) fp8_args.add_argument( "--fp8_interval", type=int, default=1, help="The interval to use for how often the scaling factor is recomputed (useful only when `--fp8_backend=te` is passed).", ) fp8_args.add_argument( "--fp8_format", type=str, default="E4M3", choices=["E4M3", "HYBRID"], help="The format to use for the FP8 recipe (useful only when `--fp8_backend=te` is passed).", ) fp8_args.add_argument( "--fp8_amax_history_len", type=int, default=1024, help="The length of the history to use for the scaling factor computation (useful only when `--fp8_backend=te` is passed).", ) fp8_args.add_argument( "--fp8_amax_compute_algo", type=str, default="most_recent", choices=["max", "most_recent"], help="The algorithm to use for the scaling factor computation. (useful only when `--fp8_backend=te` is passed).", ) fp8_args.add_argument( "--fp8_override_linear_precision", type=lambda x: tuple(map(str_to_bool, x.split(","))), default=(False, False, False), help="Whether or not to execute `fprop`, `dgrad`, and `wgrad` GEMMS in higher precision. Should be passed in a comma-seperated string of booleans (useful only when `--fp8_backend=te` is passed).", ) fp8_args.add_argument( "--fp8_opt_level", type=str, default="O2", choices=["O1", "O2"], help="What level of 8-bit collective communication should be used with MS-AMP (useful only when `--fp8_backend=msamp` is passed).", ) # AWS arguments aws_args = parser.add_argument_group("AWS Arguments", "Arguments related to AWS.") aws_args.add_argument( "--aws_access_key_id", type=str, default=None, help="The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job", ) aws_args.add_argument( "--aws_secret_access_key", type=str, default=None, help="The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job.", ) parser.add_argument( "--debug", action="store_true", help="Whether to print out the torch.distributed stack trace when something fails.", ) parser.add_argument( "training_script", type=str, help=( "The full path to the script to be launched in parallel, followed by all the arguments for the training " "script." ), ) # MPI arguments mpirun_args = parser.add_argument_group("MPI Arguments", "Arguments related to mpirun for Multi-CPU") mpirun_args.add_argument( "--mpirun_hostfile", type=str, default=None, help="Location for a hostfile for using Accelerate to launch a multi-CPU training job with mpirun. 
This will " "get passed to the MPI --hostfile or -f parameter, depending on which MPI program is installed.", ) mpirun_args.add_argument( "--mpirun_ccl", type=int, default=1, help="The number of oneCCL worker threads when using Accelerate to launch multi-CPU training with mpirun.", ) # Other arguments of the training scripts parser.add_argument("training_script_args", nargs=argparse.REMAINDER, help="Arguments of the training script.") if subparsers is not None: parser.set_defaults(func=launch_command) return parser def simple_launcher(args): cmd, current_env = prepare_simple_launcher_cmd_env(args) process = subprocess.Popen(cmd, env=current_env) process.wait() if process.returncode != 0: if not args.quiet: raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd) else: sys.exit(1) def multi_gpu_launcher(args): import torch.distributed.run as distrib_run current_env = prepare_multi_gpu_env(args) if not check_cuda_p2p_ib_support(): message = "Using RTX 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled." warn = False if "NCCL_P2P_DISABLE" not in current_env: current_env["NCCL_P2P_DISABLE"] = "1" warn = True if "NCCL_IB_DISABLE" not in current_env: current_env["NCCL_IB_DISABLE"] = "1" warn = True if warn: logger.warning(message) debug = getattr(args, "debug", False) args = _filter_args( args, distrib_run.get_args_parser(), ["--training_script", args.training_script, "--training_script_args", args.training_script_args], ) with patch_environment(**current_env): try: distrib_run.run(args) except Exception: if is_rich_available() and debug: console = get_console() console.print("\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]") console.print_exception(suppress=[__file__], show_locals=False) else: raise def deepspeed_launcher(args): import torch.distributed.run as distrib_run if not is_deepspeed_available(): raise ImportError("DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.") else: from deepspeed.launcher.runner import DEEPSPEED_ENVIRONMENT_NAME cmd, current_env = prepare_deepspeed_cmd_env(args) if not check_cuda_p2p_ib_support(): message = "Using RTX 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled." 
warn = False if "NCCL_P2P_DISABLE" not in current_env: current_env["NCCL_P2P_DISABLE"] = "1" warn = True if "NCCL_IB_DISABLE" not in current_env: current_env["NCCL_IB_DISABLE"] = "1" warn = True if warn: logger.warning(message) if args.num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]: with open(DEEPSPEED_ENVIRONMENT_NAME, "a") as f: valid_env_items = convert_dict_to_env_variables(current_env) if len(valid_env_items) > 1: f.writelines(valid_env_items) process = subprocess.Popen(cmd, env=current_env) process.wait() if process.returncode != 0: if not args.quiet: raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd) else: sys.exit(1) else: debug = getattr(args, "debug", False) args = _filter_args( args, distrib_run.get_args_parser(), ["--training_script", args.training_script, "--training_script_args", args.training_script_args], ) with patch_environment(**current_env): try: distrib_run.run(args) except Exception: if is_rich_available() and debug: console = get_console() console.print("\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]") console.print_exception(suppress=[__file__], show_locals=False) else: raise def tpu_launcher(args): import torch_xla.distributed.xla_multiprocessing as xmp if args.no_python: raise ValueError("--no_python cannot be used with TPU launcher") args, current_env = prepare_tpu(args, {}) if args.module: mod_name = args.training_script else: # Import training_script as a module script_path = Path(args.training_script) sys.path.append(str(script_path.parent.resolve())) mod_name = script_path.stem mod = importlib.import_module(mod_name) if not hasattr(mod, args.main_training_function): raise ValueError( f"Your training script should have a function named {args.main_training_function}, or you should pass a " "different value to `--main_training_function`." 
) # Patch sys.argv sys.argv = [mod.__file__] + args.training_script_args main_function = getattr(mod, args.main_training_function) with patch_environment(**current_env): xmp.spawn(PrepareForLaunch(main_function), args=(), nprocs=args.num_processes) def tpu_pod_launcher(args): from torch_xla.distributed import xla_dist current_env = {} args, current_env = prepare_tpu(args, current_env, True) debug = getattr(args, "debug", False) training_script = args.training_script training_script_args = args.training_script_args new_args = _filter_args( args, xla_dist.get_args_parser(), ["--tpu", args.tpu_name, "--positional", "", "--restart-tpuvm-pod-server"] ) if args.tpu_use_sudo: new_cmd = ["sudo"] else: new_cmd = [] new_cmd += [ "accelerate-launch", "--tpu", "--no_tpu_cluster", "--num_machines", "1", "--mixed_precision", "no", "--dynamo_backend", "no", "--num_processes", str(args.num_processes), "--main_training_function", str(args.main_training_function), training_script, ] + training_script_args new_args.positional = new_cmd bad_flags = "" for arg in vars(new_args): if arg.startswith("docker_"): value = getattr(new_args, arg) if value != "" and value is not None: bad_flags += f'{arg}="{value}"\n' if bad_flags != "": raise ValueError( f"Docker containers are not supported for TPU pod launcher currently, please remove the following flags:\n{bad_flags}" ) new_args.env = [f"{k}={v}" for k, v in current_env.items()] new_args.env.append("ACCELERATE_IN_TPU_POD=1") try: xla_dist.resolve_and_execute(new_args) except Exception: if is_rich_available() and debug: console = get_console() console.print("\n[bold red]Using --debug, `torch_xla.xla_dist` Stack Trace:[/bold red]") console.print_exception(suppress=[__file__], show_locals=False) else: raise def sagemaker_launcher(sagemaker_config: SageMakerConfig, args): if not is_sagemaker_available(): raise ImportError( "Please install sagemaker to be able to launch training on Amazon SageMaker with `pip install accelerate[sagemaker]`" ) if args.module or args.no_python: raise ValueError( "SageMaker requires a python training script file and cannot be used with --module or --no_python" ) from sagemaker.huggingface import HuggingFace args, sagemaker_inputs = prepare_sagemager_args_inputs(sagemaker_config, args) huggingface_estimator = HuggingFace(**args) huggingface_estimator.fit(inputs=sagemaker_inputs) print(f"You can find your model data at: {huggingface_estimator.model_data}") def _validate_launch_command(args): # Sanity checks if sum([args.multi_gpu, args.cpu, args.tpu, args.use_deepspeed, args.use_fsdp]) > 1: raise ValueError( "You can only use one of `--cpu`, `--multi_gpu`, `--tpu`, `--use_deepspeed`, `--use_fsdp` at a time." ) if args.multi_gpu and (args.num_processes is not None) and (args.num_processes < 2): raise ValueError("You need to use at least 2 processes to use `--multi_gpu`.") defaults = None warned = [] mp_from_config_flag = False # Get the default from the config file. 
if args.config_file is not None or os.path.isfile(default_config_file) and not args.cpu: defaults = load_config_from_file(args.config_file) if ( not args.multi_gpu and not args.tpu and not args.tpu_use_cluster and not args.use_deepspeed and not args.use_fsdp and not args.use_megatron_lm ): args.use_deepspeed = defaults.distributed_type == DistributedType.DEEPSPEED args.multi_gpu = ( True if defaults.distributed_type in ( DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_MLU, DistributedType.MULTI_MUSA, DistributedType.MULTI_XPU, ) else False ) args.tpu = defaults.distributed_type == DistributedType.XLA args.use_fsdp = defaults.distributed_type == DistributedType.FSDP args.use_megatron_lm = defaults.distributed_type == DistributedType.MEGATRON_LM args.tpu_use_cluster = defaults.tpu_use_cluster if args.tpu else False if args.gpu_ids is None: if defaults.gpu_ids is not None: args.gpu_ids = defaults.gpu_ids else: args.gpu_ids = "all" if args.multi_gpu and args.num_machines is None: args.num_machines = defaults.num_machines if len(args.gpu_ids.split(",")) < 2 and (args.gpu_ids != "all") and args.multi_gpu and args.num_machines <= 1: raise ValueError( "Less than two GPU ids were configured and tried to run on on multiple GPUs. " "Please ensure at least two are specified for `--gpu_ids`, or use `--gpu_ids='all'`." ) if defaults.compute_environment == ComputeEnvironment.LOCAL_MACHINE: # Update args with the defaults for name, attr in defaults.__dict__.items(): if isinstance(attr, dict): for k in defaults.deepspeed_config: setattr(args, k, defaults.deepspeed_config[k]) for k in defaults.fsdp_config: arg_to_set = k if "fsdp" not in arg_to_set: arg_to_set = "fsdp_" + arg_to_set setattr(args, arg_to_set, defaults.fsdp_config[k]) for k in defaults.megatron_lm_config: setattr(args, k, defaults.megatron_lm_config[k]) for k in defaults.dynamo_config: setattr(args, k, defaults.dynamo_config[k]) for k in defaults.ipex_config: setattr(args, k, defaults.ipex_config[k]) for k in defaults.mpirun_config: setattr(args, k, defaults.mpirun_config[k]) continue # Those args are handled separately if ( name not in ["compute_environment", "mixed_precision", "distributed_type"] and getattr(args, name, None) is None ): setattr(args, name, attr) if not args.debug: args.debug = defaults.debug if not args.mixed_precision: if defaults.mixed_precision is None: args.mixed_precision = "no" else: args.mixed_precision = defaults.mixed_precision mp_from_config_flag = True else: if args.use_cpu or (args.use_xpu and torch.xpu.is_available()): native_amp = is_torch_version(">=", "1.10") else: native_amp = is_bf16_available(True) if ( args.mixed_precision == "bf16" and not native_amp and not (args.tpu and is_torch_xla_available(check_is_tpu=True)) ): raise ValueError("bf16 mixed precision requires PyTorch >= 1.10 and a supported device.") # Silently set the default here if args.dynamo_backend is None: args.dynamo_backend = "no" if args.num_processes == -1: raise ValueError("You need to manually pass in `--num_processes` using this config yaml.") else: if args.num_processes is None: if args.use_xpu and is_xpu_available(): args.num_processes = torch.xpu.device_count() elif is_mlu_available(): args.num_processes = torch.mlu.device_count() elif is_musa_available(): args.num_processes = torch.musa.device_count() elif is_npu_available(): args.num_processes = torch.npu.device_count() else: args.num_processes = torch.cuda.device_count() warned.append(f"\t`--num_processes` was set to a value of 
`{args.num_processes}`") if args.debug is None: args.debug = False if ( not args.multi_gpu and args.num_processes > 1 and ( (args.use_xpu and is_xpu_available() and torch.xpu.device_count() > 1) or (is_mlu_available() and torch.mlu.device_count() > 1) or (is_musa_available() and torch.musa.device_count() > 1) or (is_npu_available() and torch.npu.device_count() > 1) or (torch.cuda.device_count() > 1) ) ): warned.append( "\t\tMore than one GPU was found, enabling multi-GPU training.\n" "\t\tIf this was unintended please pass in `--num_processes=1`." ) args.multi_gpu = True if args.num_machines is None: warned.append("\t`--num_machines` was set to a value of `1`") args.num_machines = 1 if args.mixed_precision is None: warned.append("\t`--mixed_precision` was set to a value of `'no'`") args.mixed_precision = "no" if not hasattr(args, "use_cpu"): args.use_cpu = args.cpu if args.dynamo_backend is None: warned.append("\t`--dynamo_backend` was set to a value of `'no'`") args.dynamo_backend = "no" if args.debug: logger.debug("Running script in debug mode, expect distributed operations to be slightly slower.") is_aws_env_disabled = defaults is None or ( defaults is not None and defaults.compute_environment != ComputeEnvironment.AMAZON_SAGEMAKER ) if is_aws_env_disabled and args.num_cpu_threads_per_process is None: args.num_cpu_threads_per_process = get_int_from_env(["OMP_NUM_THREADS"], 1) if args.use_cpu and args.num_processes >= 1 and get_int_from_env(["OMP_NUM_THREADS"], 0) == 0: local_size = get_int_from_env( ["MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"], max(int(args.num_processes / args.num_machines), 1), ) threads_per_process = int(psutil.cpu_count(logical=False) / local_size) if threads_per_process > 1: args.num_cpu_threads_per_process = threads_per_process warned.append( f"\t`--num_cpu_threads_per_process` was set to `{args.num_cpu_threads_per_process}` to improve out-of-box performance when training on CPUs" ) if any(warned): message = "The following values were not passed to `accelerate launch` and had defaults used instead:\n" message += "\n".join(warned) message += ( "\nTo avoid this warning pass in values for each of the problematic parameters or run `accelerate config`." ) logger.warning(message) return args, defaults, mp_from_config_flag def launch_command(args): args, defaults, mp_from_config_flag = _validate_launch_command(args) # Use the proper launcher if args.use_deepspeed and not args.cpu: args.deepspeed_fields_from_accelerate_config = list(defaults.deepspeed_config.keys()) if defaults else [] if mp_from_config_flag: args.deepspeed_fields_from_accelerate_config.append("mixed_precision") args.deepspeed_fields_from_accelerate_config = ",".join(args.deepspeed_fields_from_accelerate_config) deepspeed_launcher(args) elif args.use_fsdp and not args.cpu: multi_gpu_launcher(args) elif args.use_megatron_lm and not args.cpu: multi_gpu_launcher(args) elif args.multi_gpu and not args.cpu: multi_gpu_launcher(args) elif args.tpu and not args.cpu: if args.tpu_use_cluster: tpu_pod_launcher(args) else: tpu_launcher(args) elif defaults is not None and defaults.compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: sagemaker_launcher(defaults, args) else: simple_launcher(args) def main(): parser = launch_command_parser() args = parser.parse_args() launch_command(args) if __name__ == "__main__": main()
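A rough, hedged sketch of driving the entry points above from Python rather than the `accelerate launch` CLI; `train.py` and `--epochs` are hypothetical stand-ins for a user script and its own arguments.

from accelerate.commands.launch import launch_command, launch_command_parser

# Sketch: programmatic equivalent of
#   accelerate launch --num_processes 2 --mixed_precision bf16 train.py --epochs 1
parser = launch_command_parser()
args = parser.parse_args(
    ["--num_processes", "2", "--mixed_precision", "bf16", "train.py", "--epochs", "1"]
)
launch_command(args)  # validates/fills defaults, then dispatches to the matching launcher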
hf_public_repos/accelerate/src/accelerate/commands/env.py
#!/usr/bin/env python
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import subprocess

import numpy as np
import psutil
import torch

from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file

from ..utils import is_mlu_available, is_musa_available, is_npu_available, is_xpu_available


def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_mlu_available = is_mlu_available()
    pt_musa_available = is_musa_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    # if we can run which, get it
    command = None
    bash_location = "Not found"
    if os.name == "nt":
        command = ["where", "accelerate"]
    elif os.name == "posix":
        command = ["which", "accelerate"]
    if command is not None:
        bash_location = subprocess.check_output(command, text=True, stderr=subprocess.STDOUT).strip()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "`accelerate` bash location": bash_location,
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "PyTorch MLU available": str(pt_mlu_available),
        "PyTorch MUSA available": str(pt_musa_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()
    if pt_mlu_available:
        info["MLU type"] = torch.mlu.get_device_name()
    if pt_npu_available:
        info["CANN version"] = torch.version.cann

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
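For reference, a small sketch (not from the file above) of collecting the same report in-process instead of running `accelerate env`.

from accelerate.commands.env import env_command, env_command_parser

parser = env_command_parser()
args = parser.parse_args([])  # no --config_file: falls back to the default config, if any
info = env_command(args)      # prints the report and also returns the collected dict
print(info["`Accelerate` version"])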
hf_public_repos/accelerate/src/accelerate/commands/__init__.py
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
hf_public_repos/accelerate/src/accelerate/commands/test.py
#!/usr/bin/env python
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse

from accelerate.test_utils import execute_subprocess_async, path_in_accelerate_package


def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = path_in_accelerate_package("test_utils", "scripts", "test_script.py")

    if args.config_file is None:
        test_args = [script_name]
    else:
        test_args = f"--config_file={args.config_file} {script_name}".split()

    cmd = ["accelerate-launch"] + test_args
    result = execute_subprocess_async(cmd)
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
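A hedged sketch of invoking the test entry point from Python; the config path is a hypothetical placeholder.

from accelerate.commands.test import test_command, test_command_parser

# Sketch: equivalent of `accelerate test --config_file /path/to/default_config.yaml`.
parser = test_command_parser()
args = parser.parse_args(["--config_file", "/path/to/default_config.yaml"])
test_command(args)  # runs test_script.py through `accelerate-launch` as a subprocess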
hf_public_repos/accelerate/src/accelerate/commands/estimate.py
#!/usr/bin/env python # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from huggingface_hub import model_info from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError from accelerate import init_empty_weights from accelerate.commands.utils import CustomArgumentParser from accelerate.utils import ( calculate_maximum_sizes, convert_bytes, is_timm_available, is_transformers_available, ) if is_transformers_available(): import transformers from transformers import AutoConfig, AutoModel if is_timm_available(): import timm def verify_on_hub(repo: str, token: str = None): "Verifies that the model is on the hub and returns the model info." try: return model_info(repo, token=token) except (OSError, GatedRepoError): return "gated" except RepositoryNotFoundError: return "repo" def check_has_model(error): """ Checks what library spawned `error` when a model is not found """ if is_timm_available() and isinstance(error, RuntimeError) and "Unknown model" in error.args[0]: return "timm" elif ( is_transformers_available() and isinstance(error, OSError) and "does not appear to have a file named" in error.args[0] ): return "transformers" else: return "unknown" def create_empty_model(model_name: str, library_name: str, trust_remote_code: bool = False, access_token: str = None): """ Creates an empty model from its parent library on the `Hub` to calculate the overall memory consumption. Args: model_name (`str`): The model name on the Hub library_name (`str`): The library the model has an integration with, such as `transformers`. Will be used if `model_name` has no metadata on the Hub to determine the library. trust_remote_code (`bool`, `optional`, defaults to `False`): Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine. access_token (`str`, `optional`, defaults to `None`): The access token to use to access private or gated models on the Hub. (for use on the Gradio app) Returns: `torch.nn.Module`: The torch model that has been initialized on the `meta` device. """ model_info = verify_on_hub(model_name, access_token) # Simplified errors if model_info == "gated": raise GatedRepoError( f"Repo for model `{model_name}` is gated. You must be authenticated to access it. Please run `huggingface-cli login`." ) elif model_info == "repo": raise RepositoryNotFoundError( f"Repo for model `{model_name}` does not exist on the Hub. If you are trying to access a private repo," " make sure you are authenticated via `huggingface-cli login` and have access." 
) if library_name is None: library_name = getattr(model_info, "library_name", False) if not library_name: raise ValueError( f"Model `{model_name}` does not have any library metadata on the Hub, please manually pass in a `--library_name` to use (such as `transformers`)" ) if library_name == "transformers": if not is_transformers_available(): raise ImportError( f"To check `{model_name}`, `transformers` must be installed. Please install it via `pip install transformers`" ) print(f"Loading pretrained config for `{model_name}` from `transformers`...") if model_info.config is None: raise RuntimeError(f"Tried to load `{model_name}` with `transformers` but it does not have any metadata.") auto_map = model_info.config.get("auto_map", False) config = AutoConfig.from_pretrained(model_name, trust_remote_code=trust_remote_code, token=access_token) with init_empty_weights(): # remote code could specify a specific `AutoModel` class in the `auto_map` constructor = AutoModel if isinstance(auto_map, dict): value = None for key in auto_map.keys(): if key.startswith("AutoModelFor"): value = key break if value is not None: constructor = getattr(transformers, value) model = constructor.from_config(config, trust_remote_code=trust_remote_code) elif library_name == "timm": if not is_timm_available(): raise ImportError( f"To check `{model_name}`, `timm` must be installed. Please install it via `pip install timm`" ) print(f"Loading pretrained config for `{model_name}` from `timm`...") with init_empty_weights(): model = timm.create_model(model_name, pretrained=False) else: raise ValueError( f"Library `{library_name}` is not supported yet, please open an issue on GitHub for us to add support." ) return model def create_ascii_table(headers: list, rows: list, title: str): "Creates a pretty table from a list of rows, minimal version of `tabulate`." 
sep_char, in_between = "│", "─" column_widths = [] for i in range(len(headers)): column_values = [row[i] for row in rows] + [headers[i]] max_column_width = max(len(value) for value in column_values) column_widths.append(max_column_width) formats = [f"%{column_widths[i]}s" for i in range(len(rows[0]))] pattern = f"{sep_char}{sep_char.join(formats)}{sep_char}" diff = 0 def make_row(left_char, middle_char, right_char): return f"{left_char}{middle_char.join([in_between * n for n in column_widths])}{in_between * diff}{right_char}" separator = make_row("├", "┼", "┤") if len(title) > sum(column_widths): diff = abs(len(title) - len(separator)) column_widths[-1] += diff # Update with diff separator = make_row("├", "┼", "┤") initial_rows = [ make_row("┌", in_between, "┐"), f"{sep_char}{title.center(len(separator) - 2)}{sep_char}", make_row("├", "┬", "┤"), ] table = "\n".join(initial_rows) + "\n" column_widths[-1] += diff centered_line = [text.center(column_widths[i]) for i, text in enumerate(headers)] table += f"{pattern % tuple(centered_line)}\n{separator}\n" for i, line in enumerate(rows): centered_line = [t.center(column_widths[i]) for i, t in enumerate(line)] table += f"{pattern % tuple(centered_line)}\n" table += f'└{"┴".join([in_between * n for n in column_widths])}┘' return table def estimate_command_parser(subparsers=None): if subparsers is not None: parser = subparsers.add_parser("estimate-memory") else: parser = CustomArgumentParser(description="Model size estimator for fitting a model onto CUDA memory.") parser.add_argument("model_name", type=str, help="The model name on the Hugging Face Hub.") parser.add_argument( "--library_name", type=str, help="The library the model has an integration with, such as `transformers`, needed only if this information is not stored on the Hub.", choices=["timm", "transformers"], ) parser.add_argument( "--dtypes", type=str, nargs="+", default=["float32", "float16", "int8", "int4"], help="The dtypes to use for the model, must be one (or many) of `float32`, `float16`, `int8`, and `int4`", choices=["float32", "float16", "int8", "int4"], ) parser.add_argument( "--trust_remote_code", action="store_true", help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. This flag should only be used for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.""", default=False, ) if subparsers is not None: parser.set_defaults(func=estimate_command) return parser def estimate_training_usage(bytes: int, mixed_precision: str, msamp_config: str = None) -> dict: """ Given an amount of `bytes` and `mixed_precision`, calculates how much training memory is needed for a batch size of 1. Args: bytes (`int`): The size of the model being trained. mixed_precision (`str`): The mixed precision that would be ran. msamp_config (`str`): The msamp config to estimate the training memory for if `mixed_precision` is set to `"fp8"`. 
""" memory_sizes = {"model": -1, "optimizer": -1, "gradients": -1, "step": -1} fp32_size = bytes fp16_size = bytes // 2 if mixed_precision == "float32": memory_sizes["model"] = fp32_size memory_sizes["gradients"] = fp32_size memory_sizes["optimizer"] = fp32_size * 2 memory_sizes["step"] = fp32_size * 4 elif mixed_precision in ("float16", "bfloat16") or (mixed_precision == "fp8" and msamp_config is None): # With native `TransformersEngine`, there is no memory savings with FP8 # With mixed precision training, the model has weights stored # in FP16 and FP32 memory_sizes["model"] = fp32_size # 1.5 from weight gradient + computation (GEMM) memory_sizes["gradients"] = fp32_size + fp16_size # 2x from optimizer states memory_sizes["optimizer"] = fp32_size * 2 # Optimizer states memory_sizes["step"] = memory_sizes["optimizer"] return memory_sizes def gather_data(args): "Creates an empty model and gathers the data for the sizes" try: model = create_empty_model( args.model_name, library_name=args.library_name, trust_remote_code=args.trust_remote_code ) except (RuntimeError, OSError) as e: library = check_has_model(e) if library != "unknown": raise RuntimeError( f"Tried to load `{args.model_name}` with `{library}` but a possible model to load was not found inside the repo." ) raise e total_size, largest_layer = calculate_maximum_sizes(model) data = [] for dtype in args.dtypes: dtype_total_size = total_size dtype_largest_layer = largest_layer[0] dtype_training_size = estimate_training_usage(dtype_total_size, dtype) if dtype == "float16": dtype_total_size /= 2 dtype_largest_layer /= 2 elif dtype == "int8": dtype_total_size /= 4 dtype_largest_layer /= 4 elif dtype == "int4": dtype_total_size /= 8 dtype_largest_layer /= 8 data.append([dtype, dtype_largest_layer, dtype_total_size, dtype_training_size]) return data def estimate_command(args): data = gather_data(args) for row in data: for i, item in enumerate(row): if isinstance(item, (int, float)): row[i] = convert_bytes(item) elif isinstance(item, dict): training_usage = max(item.values()) row[i] = convert_bytes(training_usage) if training_usage != -1 else "N/A" headers = ["dtype", "Largest Layer", "Total Size", "Training using Adam"] title = f"Memory Usage for loading `{args.model_name}`" table = create_ascii_table(headers, data, title) print(table) def main(): parser = estimate_command_parser() args = parser.parse_args() estimate_command(args) if __name__ == "__main__": main()
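A usage sketch for the estimator above (assuming `transformers` is installed); `bert-base-cased` is just a familiar example id, and any Hub model with library metadata works.

from accelerate.commands.estimate import estimate_command, estimate_command_parser

# Sketch: equivalent of `accelerate estimate-memory bert-base-cased --dtypes float32 float16`.
parser = estimate_command_parser()
args = parser.parse_args(["bert-base-cased", "--dtypes", "float32", "float16"])
estimate_command(args)  # prints the ASCII table of per-dtype sizes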
hf_public_repos/accelerate/src/accelerate/commands/accelerate_cli.py
#!/usr/bin/env python
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.estimate import estimate_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.merge import merge_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
from accelerate.commands.utils import CustomArgumentParser


def main():
    parser = CustomArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    estimate_command_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    merge_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
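The dispatch in `main()` above hinges on each sub-command registering itself through `parser.set_defaults(func=...)`; a stripped-down sketch of that pattern, with a made-up `hello` sub-command, looks like this.

import argparse


def hello_command(args):
    print(f"Hello, {args.name}!")


# Same wiring accelerate_cli.py relies on: each sub-parser stores its handler in `func`.
parser = argparse.ArgumentParser("demo", usage="demo <command> [<args>]")
subparsers = parser.add_subparsers(help="demo command helpers")
hello_parser = subparsers.add_parser("hello")
hello_parser.add_argument("name")
hello_parser.set_defaults(func=hello_command)

args = parser.parse_args(["hello", "world"])
if not hasattr(args, "func"):
    parser.print_help()
else:
    args.func(args)  # prints "Hello, world!"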
6
0
hf_public_repos/accelerate/src/accelerate
hf_public_repos/accelerate/src/accelerate/commands/tpu.py
#!/usr/bin/env python # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import subprocess from packaging.version import Version, parse from accelerate.commands.config.config_args import default_config_file, load_config_from_file _description = "Run commands across TPU VMs for initial setup before running `accelerate launch`." def tpu_command_parser(subparsers=None): if subparsers is not None: parser = subparsers.add_parser("tpu-config", description=_description) else: parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description) # Core arguments config_args = parser.add_argument_group( "Config Arguments", "Arguments that can be configured through `accelerate config`." ) config_args.add_argument( "--config_file", type=str, default=None, help="Path to the config file to use for accelerate.", ) config_args.add_argument( "--tpu_name", default=None, help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.", ) config_args.add_argument( "--tpu_zone", default=None, help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.", ) pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.") pod_args.add_argument( "--use_alpha", action="store_true", help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.", ) pod_args.add_argument( "--command_file", default=None, help="The path to the file containing the commands to run on the pod on startup.", ) pod_args.add_argument( "--command", action="append", nargs="+", help="A command to run on the pod. Can be passed multiple times.", ) pod_args.add_argument( "--install_accelerate", action="store_true", help="Whether to install accelerate on the pod. Defaults to False.", ) pod_args.add_argument( "--accelerate_version", default="latest", help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.", ) pod_args.add_argument( "--debug", action="store_true", help="If set, will print the command that would be run instead of running it." ) if subparsers is not None: parser.set_defaults(func=tpu_command_launcher) return parser def tpu_command_launcher(args): defaults = None # Get the default from the config file if it exists. 
if args.config_file is not None or os.path.isfile(default_config_file): defaults = load_config_from_file(args.config_file) if not args.command_file and defaults.command_file is not None and not args.command: args.command_file = defaults.command_file if not args.command and defaults.commands is not None: args.command = defaults.commands if not args.tpu_name: args.tpu_name = defaults.tpu_name if not args.tpu_zone: args.tpu_zone = defaults.tpu_zone if args.accelerate_version == "dev": args.accelerate_version = "git+https://github.com/huggingface/accelerate.git" elif args.accelerate_version == "latest": args.accelerate_version = "accelerate -U" elif isinstance(parse(args.accelerate_version), Version): args.accelerate_version = f"accelerate=={args.accelerate_version}" if not args.command_file and not args.command: raise ValueError("You must specify either a command file or a command to run on the pod.") if args.command_file: with open(args.command_file) as f: args.command = [f.read().splitlines()] # To turn list of lists into list of strings if isinstance(args.command[0], list): args.command = [line for cmd in args.command for line in cmd] # Default to the shared folder and install accelerate new_cmd = ["cd /usr/share"] if args.install_accelerate: new_cmd += [f"pip install {args.accelerate_version}"] new_cmd += args.command args.command = "; ".join(new_cmd) # Then send it to gcloud # Eventually try to use google-api-core to do this instead of subprocess cmd = ["gcloud"] if args.use_alpha: cmd += ["alpha"] cmd += [ "compute", "tpus", "tpu-vm", "ssh", args.tpu_name, "--zone", args.tpu_zone, "--command", args.command, "--worker", "all", ] if args.debug: print(f"Running {' '.join(cmd)}") return subprocess.run(cmd) print("Successfully setup pod.") def main(): parser = tpu_command_parser() args = parser.parse_args() tpu_command_launcher(args)
7
0
hf_public_repos/accelerate/src/accelerate/commands
hf_public_repos/accelerate/src/accelerate/commands/config/config.py
#!/usr/bin/env python # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine" def get_user_input(): compute_environment = _ask_options( "In which compute environment are you running?", ["This machine", "AWS (Amazon SageMaker)"], _convert_compute_environment, ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: config = get_sagemaker_input() else: config = get_cluster_input() return config def config_command_parser(subparsers=None): if subparsers is not None: parser = subparsers.add_parser("config", description=description) else: parser = argparse.ArgumentParser("Accelerate config command", description=description) parser.add_argument( "--config_file", default=None, help=( "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " "with 'huggingface'." ), ) if subparsers is not None: parser.set_defaults(func=config_command) return parser def config_command(args): config = get_user_input() if args.config_file is not None: config_file = args.config_file else: if not os.path.isdir(cache_dir): os.makedirs(cache_dir) config_file = default_yaml_config_file if config_file.endswith(".json"): config.to_json_file(config_file) else: config.to_yaml_file(config_file) print(f"accelerate configuration saved at {config_file}") def main(): parser = config_command_parser() args = parser.parse_args() config_command(args) if __name__ == "__main__": main()
8
0
hf_public_repos/accelerate/src/accelerate/commands
hf_public_repos/accelerate/src/accelerate/commands/config/cluster.py
#!/usr/bin/env python # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from ...utils import ( ComputeEnvironment, DistributedType, is_deepspeed_available, is_fp8_available, is_mlu_available, is_mps_available, is_msamp_available, is_musa_available, is_npu_available, is_transformer_engine_available, is_transformers_available, is_xpu_available, ) from ...utils.constants import ( DEEPSPEED_MULTINODE_LAUNCHERS, FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, TORCH_DYNAMO_MODES, ) from .config_args import ClusterConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_distributed_mode, _convert_dynamo_backend, _convert_fp8_backend, _convert_mixed_precision, _convert_yes_no_to_bool, ) def get_cluster_input(): distributed_type = _ask_options( "Which type of machine are you using?", [ "No distributed training", "multi-CPU", "multi-XPU", "multi-GPU", "multi-NPU", "multi-MLU", "multi-MUSA", "TPU", ], _convert_distributed_mode, ) machine_rank = 0 num_machines = 1 num_processes = 1 gpu_ids = None main_process_ip = None main_process_port = None rdzv_backend = "static" same_network = True debug = False if distributed_type in [ DistributedType.MULTI_GPU, DistributedType.MULTI_MLU, DistributedType.MULTI_MUSA, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU, DistributedType.MULTI_CPU, ]: num_machines = _ask_field( "How many different machines will you use (use more than 1 for multi-node training)? [1]: ", int, default=1, ) if num_machines > 1: machine_rank = _ask_options( "What is the rank of this machine?", list(range(num_machines)), int, ) main_process_ip = _ask_field( "What is the IP address of the machine that will host the main process? ", ) main_process_port = _ask_field( "What is the port you will use to communicate with the main process? ", int, ) same_network = _ask_field( "Are all the machines on the same local network? Answer `no` if nodes are on the cloud and/or on different network hosts [YES/no]: ", _convert_yes_no_to_bool, default=True, error_message="Please enter yes or no.", ) if not same_network: rdzv_backend = _ask_field( "What rendezvous backend will you use? ('static', 'c10d', ...): ", default="static" ) debug = _ask_field( "Should distributed operations be checked while running for errors? This can avoid timeout issues but will be slower. [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if distributed_type == DistributedType.NO: use_cpu = _ask_field( "Do you want to run your training on CPU only (even if a GPU / Apple Silicon / Ascend NPU device is available)? [yes/NO]:", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) elif distributed_type == DistributedType.MULTI_CPU: use_cpu = True else: use_cpu = False ipex_config = {} mpirun_config = {} if use_cpu: ipex_config["ipex"] = _ask_field( "Do you want to use Intel PyTorch Extension (IPEX) to speed up training on CPU? 
[yes/NO]:", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if distributed_type == DistributedType.MULTI_CPU: use_mpirun = _ask_field( "Do you want accelerate to launch mpirun? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_mpirun: mpirun_hostfile = _ask_field( "Please enter the path to the hostfile to use with mpirun [~/hostfile]: ", str, default="~/hostfile", ) mpirun_config["mpirun_hostfile"] = os.path.expanduser(mpirun_hostfile.strip()) mpirun_config["mpirun_ccl"] = _ask_field("Enter the number of oneCCL worker threads [1]: ", default=1) if ( not use_cpu and is_xpu_available() and distributed_type not in [ DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_MLU, DistributedType.XLA, DistributedType.MULTI_MUSA, ] ): ipex_config["use_xpu"] = _ask_field( "Do you want to use XPU plugin to speed up training on XPU? [yes/NO]:", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) dynamo_config = {} use_dynamo = _ask_field( "Do you wish to optimize your script with torch dynamo?[yes/NO]:", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_dynamo: prefix = "dynamo_" dynamo_config[prefix + "backend"] = _ask_options( "Which dynamo backend would you like to use?", [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, ) use_custom_options = _ask_field( "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_custom_options: dynamo_config[prefix + "mode"] = _ask_options( "Which mode do you want to use?", TORCH_DYNAMO_MODES, lambda x: TORCH_DYNAMO_MODES[int(x)], default=0, ) dynamo_config[prefix + "use_fullgraph"] = _ask_field( "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) dynamo_config[prefix + "use_dynamic"] = _ask_field( "Do you want to enable dynamic shape tracing? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) use_mps = not use_cpu and is_mps_available() deepspeed_config = {} if ( distributed_type in [ DistributedType.MULTI_GPU, DistributedType.MULTI_XPU, DistributedType.MULTI_NPU, DistributedType.MULTI_MLU, DistributedType.MULTI_MUSA, DistributedType.NO, ] and not use_mps ): use_deepspeed = _ask_field( "Do you want to use DeepSpeed? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_deepspeed: distributed_type = DistributedType.DEEPSPEED assert ( is_deepspeed_available() ), "DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source" if distributed_type == DistributedType.DEEPSPEED: use_deepspeed_config = _ask_field( "Do you want to specify a json file to a DeepSpeed config? 
[yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_deepspeed_config: deepspeed_config["deepspeed_config_file"] = _ask_field( "Please enter the path to the json DeepSpeed config file: ", str, default="none", ) else: deepspeed_config["zero_stage"] = _ask_options( "What should be your DeepSpeed's ZeRO optimization stage?", [0, 1, 2, 3], int, default=2, ) deepspeed_devices = ["none", "cpu", "nvme"] if deepspeed_config["zero_stage"] >= 2: deepspeed_config["offload_optimizer_device"] = _ask_options( "Where to offload optimizer states?", deepspeed_devices, lambda x: deepspeed_devices[int(x)] ) deepspeed_config["offload_param_device"] = _ask_options( "Where to offload parameters?", deepspeed_devices, lambda x: deepspeed_devices[int(x)] ) if deepspeed_config["offload_param_device"] == "nvme": deepspeed_config["offload_param_nvme_path"] = _ask_field( "Nvme Path to offload parameters?", str, default="/nvme", ) if deepspeed_config["offload_optimizer_device"] == "nvme": deepspeed_config["offload_optimizer_nvme_path"] = _ask_field( "Nvme Path to offload optimizer states?", str, default="/nvme", ) deepspeed_config["gradient_accumulation_steps"] = _ask_field( "How many gradient accumulation steps you're passing in your script? [1]: ", int, default=1, ) use_gradient_clipping = _ask_field( "Do you want to use gradient clipping? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_gradient_clipping: deepspeed_config["gradient_clipping"] = _ask_field( "What is the gradient clipping value? [1.0]: ", float, default=1.0, ) if deepspeed_config["zero_stage"] == 3: deepspeed_config["zero3_save_16bit_model"] = _ask_field( "Do you want to save 16-bit model weights when using ZeRO Stage-3? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) deepspeed_config["zero3_init_flag"] = _ask_field( "Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if deepspeed_config["zero3_init_flag"]: if not is_transformers_available(): raise Exception( "When `zero3_init_flag` is set, it requires Transformers to be installed. " "Please run `pip3 install transformers`." ) use_moe = _ask_field( "Do you want to enable Mixture-of-Experts training (MoE)? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_moe: deepspeed_config["deepspeed_moe_layer_cls_names"] = _ask_field( "Specify the comma-separated list of transformers MoE layer class names (case-sensitive), e.g : " " `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock` ... : ", str, ) if num_machines > 1: launcher_query = "Which Type of launcher do you want to use?" deepspeed_config["deepspeed_multinode_launcher"] = _ask_options( launcher_query, DEEPSPEED_MULTINODE_LAUNCHERS, lambda x: DEEPSPEED_MULTINODE_LAUNCHERS[int(x)], ) if deepspeed_config["deepspeed_multinode_launcher"] != DEEPSPEED_MULTINODE_LAUNCHERS[1]: deepspeed_config["deepspeed_hostfile"] = _ask_field( "DeepSpeed configures multi-node compute resources with hostfile. " "Each row is of the format `hostname slots=[num_gpus]`, e.g., `localhost slots=2`; " "for more information please refer official [documentation]" "(https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node). 
" "Please specify the location of hostfile: ", str, ) is_exclusion_filter = _ask_field( "Do you want to specify exclusion filter string? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if is_exclusion_filter: deepspeed_config["deepspeed_exclusion_filter"] = _ask_field( "DeepSpeed exclusion filter string: ", str, ) is_inclusion_filter = _ask_field( "Do you want to specify inclusion filter string? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if is_inclusion_filter: deepspeed_config["deepspeed_inclusion_filter"] = _ask_field( "DeepSpeed inclusion filter string: ", str, ) fsdp_config = {} if distributed_type in [ DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_MLU, DistributedType.MULTI_MUSA, DistributedType.MULTI_XPU, ]: use_fsdp = _ask_field( "Do you want to use FullyShardedDataParallel? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_fsdp: distributed_type = DistributedType.FSDP if distributed_type == DistributedType.FSDP: sharding_strategy_query = "What should be your sharding strategy?" fsdp_config["fsdp_sharding_strategy"] = _ask_options( sharding_strategy_query, FSDP_SHARDING_STRATEGY, lambda x: FSDP_SHARDING_STRATEGY[int(x)], ) fsdp_config["fsdp_offload_params"] = _ask_field( "Do you want to offload parameters and gradients to CPU? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) fsdp_wrap_query = "What should be your auto wrap policy?" fsdp_config["fsdp_auto_wrap_policy"] = _ask_options( fsdp_wrap_query, FSDP_AUTO_WRAP_POLICY, lambda x: FSDP_AUTO_WRAP_POLICY[int(x)], ) if fsdp_config["fsdp_auto_wrap_policy"] == FSDP_AUTO_WRAP_POLICY[0]: use_no_split_modules = _ask_field( "Do you want to use the model's `_no_split_modules` to wrap. Only applicable for 🤗 Transformers [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if not use_no_split_modules: fsdp_config["fsdp_transformer_layer_cls_to_wrap"] = _ask_field( "Specify the comma-separated list of transformer layer class names (case-sensitive) to wrap ,e.g, :" "`BertLayer`, `GPTJBlock`, `T5Block`, `BertLayer,BertEmbeddings,BertSelfOutput` ...? : ", str, ) elif fsdp_config["fsdp_auto_wrap_policy"] == FSDP_AUTO_WRAP_POLICY[1]: fsdp_config["fsdp_min_num_params"] = _ask_field( "What should be your FSDP's minimum number of parameters for Default Auto Wrapping Policy? [1e8]: ", int, default=100000000, ) fsdp_backward_prefetch_query = "What should be your FSDP's backward prefetch policy?" fsdp_config["fsdp_backward_prefetch"] = _ask_options( fsdp_backward_prefetch_query, FSDP_BACKWARD_PREFETCH, lambda x: FSDP_BACKWARD_PREFETCH[int(x)], ) fsdp_state_dict_type_query = "What should be your FSDP's state dict type?" fsdp_config["fsdp_state_dict_type"] = _ask_options( fsdp_state_dict_type_query, FSDP_STATE_DICT_TYPE, lambda x: FSDP_STATE_DICT_TYPE[int(x)], default=2, ) fsdp_config["fsdp_forward_prefetch"] = _ask_field( "Do you want to enable FSDP's forward prefetch policy? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) fsdp_config["fsdp_use_orig_params"] = _ask_field( "Do you want to enable FSDP's `use_orig_params` feature? [YES/no]: ", _convert_yes_no_to_bool, default=True, error_message="Please enter yes or no.", ) fsdp_config["fsdp_cpu_ram_efficient_loading"] = _ask_field( "Do you want to enable CPU RAM efficient model loading? 
Only applicable for 🤗 Transformers models. [YES/no]: ", _convert_yes_no_to_bool, default=True, error_message="Please enter yes or no.", ) if fsdp_config["fsdp_cpu_ram_efficient_loading"]: fsdp_config["fsdp_sync_module_states"] = True else: fsdp_config["fsdp_sync_module_states"] = _ask_field( "Do you want each individually wrapped FSDP unit to broadcast module parameters from rank 0 at the start? [YES/no]: ", _convert_yes_no_to_bool, default=True, error_message="Please enter yes or no.", ) fsdp_config["fsdp_activation_checkpointing"] = _ask_field( "Do you want to enable FSDP activation checkpointing? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) megatron_lm_config = {} if distributed_type in [DistributedType.MULTI_GPU]: use_megatron_lm = _ask_field( "Do you want to use Megatron-LM ? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_megatron_lm: distributed_type = DistributedType.MEGATRON_LM if distributed_type == DistributedType.MEGATRON_LM: prefix = "megatron_lm_" megatron_lm_config[prefix + "tp_degree"] = _ask_field( "What is the Tensor Parallelism degree/size? [1]:", int, default=1, error_message="Please enter an integer.", ) if megatron_lm_config[prefix + "tp_degree"] > 1: megatron_lm_config[prefix + "sequence_parallelism"] = _ask_field( "Do you want to enable Sequence Parallelism? [YES/no]: ", _convert_yes_no_to_bool, default=True, error_message="Please enter yes or no.", ) megatron_lm_config[prefix + "pp_degree"] = _ask_field( "What is the Pipeline Parallelism degree/size? [1]:", int, default=1, error_message="Please enter an integer.", ) if megatron_lm_config[prefix + "pp_degree"] > 1: megatron_lm_config[prefix + "num_micro_batches"] = _ask_field( "What is the number of micro-batches? [1]:", int, default=1, error_message="Please enter an integer.", ) megatron_lm_config[prefix + "recompute_activations"] = _ask_field( "Do you want to enable selective activation recomputation? [YES/no]: ", _convert_yes_no_to_bool, default=True, error_message="Please enter yes or no.", ) megatron_lm_config[prefix + "use_distributed_optimizer"] = _ask_field( "Do you want to use distributed optimizer " "which shards optimizer state and gradients across data parallel ranks? [YES/no]: ", _convert_yes_no_to_bool, default=True, error_message="Please enter yes or no.", ) megatron_lm_config[prefix + "gradient_clipping"] = _ask_field( "What is the gradient clipping value based on global L2 Norm (0 to disable)? [1.0]: ", float, default=1.0, ) # TPU specific defaults tpu_commands = None tpu_command_file = None tpu_downcast_bf16 = "no" tpu_env = [] tpu_name = None tpu_vm = None tpu_zone = None tpu_use_sudo = False tpu_use_cluster = False if distributed_type in [ DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.MULTI_GPU, DistributedType.MULTI_MLU, DistributedType.MULTI_MUSA, DistributedType.MULTI_NPU, DistributedType.XLA, ]: machine_type = str(distributed_type).split(".")[1].replace("MULTI_", "") if machine_type == "TPU": machine_type += " cores" elif machine_type == "CPU": machine_type = "processes" else: machine_type += "(s)" num_processes = _ask_field( f"How many {machine_type} should be used for distributed training? [1]:", int, default=1, error_message="Please enter an integer.", ) elif distributed_type in [DistributedType.FSDP, DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]: num_processes = _ask_field( "How many GPU(s) should be used for distributed training? 
[1]:", int, default=1, error_message="Please enter an integer.", ) else: num_processes = 1 if (distributed_type == DistributedType.MULTI_GPU) and (num_machines == 1) and (num_processes == 1): raise ValueError( f"Specified distributed type {distributed_type} but only using 1 GPU on a single machine. Please select `No distributed training` for the type of machine you are using." ) if ( distributed_type in [ DistributedType.MULTI_GPU, DistributedType.MULTI_MLU, DistributedType.MULTI_MUSA, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU, DistributedType.NO, ] and not use_cpu and not use_mps ): if is_npu_available(): machine_type = "NPU(s)" elif is_mlu_available(): machine_type = "MLU(s)" elif is_musa_available(): machine_type = "MUSA(s)" else: machine_type = "GPU(s)" gpu_ids = _ask_field( f"What {machine_type} (by id) should be used for training on this machine as a comma-seperated list? [all]:", default="all", ) # CPU affinity is only supported on NVIDIA hardware for now enable_cpu_affinity = False if distributed_type in (DistributedType.NO, DistributedType.MULTI_GPU) and not use_cpu and not use_mps: enable_cpu_affinity = _ask_field( "Would you like to enable numa efficiency? (Currently only supported on NVIDIA hardware). [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) fp8_config = None if distributed_type == DistributedType.XLA: mixed_precision = "no" main_training_function = _ask_field( "What is the name of the function in your script that should be launched in all parallel scripts? [main]: ", default="main", ) tpu_use_cluster = _ask_field( "Are you using a TPU cluster? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if tpu_use_cluster: tpu_name = _ask_field( "What is the name of your TPU cluster? ", default=None, error_message="Please enter the name of your TPU cluster.", ) tpu_zone = _ask_field( "What is the zone of your TPU cluster? ", default=None, error_message="Please enter the zone of your TPU cluster.", ) tpu_use_sudo = _ask_field( "To run a python script in a TPU pod, should `sudo` be used? [yes/NO]: ", default=False, error_message="Please enter yes or no.", ) run_commands = _ask_field( "Do you have code you wish to run on startup in each pod? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if run_commands: use_command_file = _ask_field( "Is this code located in a bash script? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_command_file: tpu_command_file = _ask_field( "What is the path to your bash script? ", default=None, error_message="Please enter the path to your bash script.", ) tpu_command_file = os.path.abspath(tpu_command_file) else: print("Please enter each command seperately you wish to run on startup in each pod.") tpu_commands = [] another_command = True while another_command: tpu_commands.append( _ask_field( "Please enter a single command to be ran ", default=None, error_message="Please enter the commands you wish to run on startup in each pod as a single string.", ) ) another_command = _ask_field( "Do you wish to add another command? 
[yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) tpu_vm = _ask_field( "If not using an instance group, what are the names of the Compute VM instances to be used, seperated by a comma: ", default="", ).split(",") tpu_env = _ask_field( "What environment variables do you wish to set in each pod, seperated by a comma: ", default="", ).split(",") else: main_training_function = "main" if distributed_type == DistributedType.DEEPSPEED and use_deepspeed_config: mixed_precision = None else: mixed_precision = _ask_options( "Do you wish to use mixed precision?", ["no", "fp16", "bf16", "fp8"], _convert_mixed_precision, ) if mixed_precision == "fp8": if not is_fp8_available(): raise ValueError("FP8 (either Transformer Engine or MSAMP) is not installed on this machine.") fp8_config = {} fp8_config["backend"] = _ask_options( "Which FP8 backend do you want to use?", ["te", "msamp"], _convert_fp8_backend, ) if fp8_config["backend"] == "TE": if not is_transformer_engine_available(): raise ValueError("TransformersEngine was selected, but it is not installed on this machine.") fp8_config["use_autocast_during_eval"] = _ask_field( "Do you want to use FP8 autocast during eval mode? Generally better metrics are found when this is disabled [yes/NO]: ", _convert_yes_no_to_bool, default=False, ) fp8_config["margin"] = _ask_field( "What margin should be used for gradient scaling? [0]: ", int, default=0, ) fp8_config["interval"] = _ask_field( "What interval should be used for for how often the scaling factor is recomputed? [1]: ", int, default=1, ) fp8_config["fp8_format"] = _ask_options( "Which weight format should be used?", ["HYBRID", "E4M3"], lambda x: "HYBRID" if x == 0 else "E4M3", default=0, ) fp8_config["amax_history_length"] = _ask_field( "What length of history should be used for the amax scaling factor computation? [1024]: ", int, default=1024, ) fp8_config["amax_compute_algorithm"] = _ask_options( "Which algorithm should be used for the amax scaling factor computation?", ["max", "most_recent"], lambda x: "max" if x == 0 else "most_recent", default=0, ) fp8_config["override_linear_precision"] = _ask_field( "Do you want to to execute `fprop`, `dgrad`, and `wgrad` GEMMS in higher precision? [yes/NO]: ", _convert_yes_no_to_bool, default=False, ) if fp8_config["override_linear_precision"]: fprop = _ask_field( "Should `fprop` be executed in higher precision? [yes/NO]: ", _convert_yes_no_to_bool, default=False, ) dgrad = _ask_field( "Should `dgrad` be executed in higher precision? [yes/NO]: ", _convert_yes_no_to_bool, default=False, ) wgrad = _ask_field( "Should `wgrad` be executed in higher precision? [yes/NO]: ", _convert_yes_no_to_bool, default=False, ) fp8_config["override_linear_precision"] = (fprop, dgrad, wgrad) elif fp8_config["backend"] == "MSAMP": if not is_msamp_available(): raise ValueError("MSAMP was selected, but it is not installed on this machine.") fp8_config["optimization_level"] = _ask_options( "Which optimization level should be used?", ["O1", "O2"], lambda x: "O1" if x == 0 else "O2", default=1, ) if use_dynamo and mixed_precision == "no" and not use_cpu: print( "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." 
) if distributed_type == DistributedType.XLA and mixed_precision == "bf16": tpu_downcast_bf16 = _ask_field( "Should `torch.float` be cast as `bfloat16` and `torch.double` remain `float32` on TPUs?", default="no" ) return ClusterConfig( compute_environment=ComputeEnvironment.LOCAL_MACHINE, distributed_type=distributed_type, num_processes=num_processes, gpu_ids=gpu_ids, mixed_precision=mixed_precision, downcast_bf16=tpu_downcast_bf16, machine_rank=machine_rank, num_machines=num_machines, main_process_ip=main_process_ip, main_process_port=main_process_port, main_training_function=main_training_function, fp8_config=fp8_config, deepspeed_config=deepspeed_config, fsdp_config=fsdp_config, megatron_lm_config=megatron_lm_config, ipex_config=ipex_config, mpirun_config=mpirun_config, use_cpu=use_cpu, rdzv_backend=rdzv_backend, same_network=same_network, commands=tpu_commands, command_file=tpu_command_file, tpu_env=tpu_env, tpu_name=tpu_name, tpu_vm=tpu_vm, tpu_zone=tpu_zone, tpu_use_sudo=tpu_use_sudo, tpu_use_cluster=tpu_use_cluster, dynamo_config=dynamo_config, debug=debug, enable_cpu_affinity=enable_cpu_affinity, )
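All of the questions above funnel through a handful of helpers from `config_utils` (`_ask_field`, `_ask_options` and the converters). As a sketch of that pattern, here is how one more question could be asked; the `use_profiler` key and its wording are hypothetical and not an actual accelerate config option.

```python
# Hypothetical extra prompt, mirroring the `_ask_field` usage shown above.
from accelerate.commands.config.config_utils import _ask_field, _convert_yes_no_to_bool

use_profiler = _ask_field(
    "Do you want to enable the PyTorch profiler during training? [yes/NO]: ",
    _convert_yes_no_to_bool,
    default=False,
    error_message="Please enter yes or no.",
)
```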
9
0
hf_public_repos/accelerate/docker
hf_public_repos/accelerate/docker/accelerate-cpu/Dockerfile
# Builds CPU-only Docker image of PyTorch # Uses multi-staged approach to reduce size # Stage 1 FROM python:3.9-slim as compile-image ARG DEBIAN_FRONTEND=noninteractive RUN apt update RUN apt-get install -y --no-install-recommends \ build-essential \ git \ gcc # Setup virtual environment for Docker ENV VIRTUAL_ENV=/opt/venv RUN python3 -m venv ${VIRTUAL_ENV} # Make sure we use the virtualenv ENV PATH="${VIRTUAL_ENV}/bin:$PATH" WORKDIR /workspace # Install specific CPU torch wheel to save on space RUN python3 -m pip install --upgrade --no-cache-dir pip RUN python3 -m pip install --no-cache-dir \ jupyter \ git+https://github.com/huggingface/accelerate#egg=accelerate[testing,test_trackers] \ --extra-index-url https://download.pytorch.org/whl/cpu # Stage 2 FROM python:3.9-slim AS build-image COPY --from=compile-image /opt/venv /opt/venv RUN useradd -ms /bin/bash user USER user # Make sure we use the virtualenv ENV PATH="/opt/venv/bin:$PATH" CMD ["/bin/bash"]
0
0
hf_public_repos/accelerate
hf_public_repos/accelerate/.devcontainer/devcontainer.json
// File only needed for VSCode users to have proper Docker based interpreters { "name": "accelerate_dev_environment", "build": { // ACTION NEEDED: comment/uncomment the relevant line depending on whether you are in a CPU/GPU environment "dockerfile": "../docker/accelerate-cpu/Dockerfile" // "dockerfile": "../docker/accelerate-gpu/Dockerfile" }, "runArgs": [ // ACTION NEEDED: uncomment the next line if your local machine has GPUs available // "--gpus", "all", // Enable the docker container to access system resources "--ipc", "host" ], "remoteEnv": { "PYTHONPATH": "${containerEnv:PATH}:${containerWorkspaceFolder}" }, "customizations": { "vscode": { "extensions": [ // Ensure we have IntelliSense in VSCode when running inside container "ms-python.python" ] } }, "workspaceFolder": "/workspaces/accelerate", // Need git for VSCode to color code modifications. Only runs when building environment. "onCreateCommand": "apt-get update && apt-get install -y git && pip install -e '.[dev]'" }
1
0
hf_public_repos/accelerate
hf_public_repos/accelerate/benchmarks/README.md
# Benchmarks The folders below contain suites to test various functionalities in Accelerate. See each folder's README.md for more information.
2
0
hf_public_repos/accelerate/benchmarks
hf_public_repos/accelerate/benchmarks/big_model_inference/big_model_inference.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import time import torch import transformers from measures_util import end_measure, log_measures, start_measure from transformers import AutoConfig, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer from accelerate.utils import compute_module_sizes DEFAULT_MODELS = { "gpt-j-6b": {"is_causal": True, "model": "sgugger/sharded-gpt-j-6B", "tokenizer": "EleutherAI/gpt-j-6B"}, "gpt-neox": {"is_causal": True, "model": "EleutherAI/gpt-neox-20b"}, "opt": {"is_causal": True, "model": "facebook/opt-30b"}, "T0pp": {"is_causal": False, "model": "bigscience/T0pp", "model_revision": "sharded"}, } PROMPTS = [ "Hello, my name is", "Are unicorns real? Unicorns are", "For the first time in several years,", "My name is Julien and I am", "The goal of life is", "Whenever I'm sad, I like to", ] def parse_args(): parser = argparse.ArgumentParser(description="Run and time generations on a big model using Accelerate.") parser.add_argument("model_name", type=str, default=None, help="The name of the model to try.") parser.add_argument( "--tokenizer_name", type=str, default=None, help="The name of the tokenizer (if different from the model." ) parser.add_argument("--is_causal", type=bool, default=None, help="Whether or not the model is causal.") parser.add_argument( "--model_revision", type=str, default=None, help="The revision to use for the model checkpoint." 
) parser.add_argument("--torch_dtype", type=str, default=None, help="The dtype for the model.") parser.add_argument("--disk_offload", action="store_true") args = parser.parse_args() # Sanitize args if args.model_name in DEFAULT_MODELS: defaults = DEFAULT_MODELS[args.model_name] args.model_name = defaults["model"] if args.tokenizer_name is None: args.tokenizer_name = defaults.get("tokenizer", args.model_name) if args.is_causal is None: args.is_causal = defaults["is_causal"] if args.model_revision is None: args.model_revision = defaults.get("model_revision", "main") if args.is_causal is None: raise ValueError("Could not infer the default for `--is_causal`, pass either True or False for it.") if args.tokenizer_name is None: args.tokenizer_name = args.model_name if args.model_revision is None: args.model_revision = "main" return args def main(): transformers.utils.logging.set_verbosity_error() args = parse_args() if args.torch_dtype is None: config = AutoConfig.from_pretrained(args.model_name) torch_dtype = getattr(config, "torch_dtype", torch.float32) else: torch_dtype = getattr(torch, args.torch_dtype) model_cls = AutoModelForCausalLM if args.is_causal else AutoModelForSeq2SeqLM kwargs = { "torch_dtype": torch_dtype, "revision": args.model_revision, } if args.disk_offload: kwargs["offload_folder"] = "tmp_offload" kwargs["offload_state_dict"] = True start_measures = start_measure() model = model_cls.from_pretrained(args.model_name, device_map="auto", **kwargs) end_measures = end_measure(start_measures) log_measures(end_measures, "Model loading") module_sizes = compute_module_sizes(model) device_size = {v: 0 for v in model.hf_device_map.values()} for module, device in model.hf_device_map.items(): device_size[device] += module_sizes[module] message = "\n".join([f"- {device}: {size // 2**20}MiB" for device, size in device_size.items()]) print(f"\nTheoretical use:\n{message}") tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name) start_measures = start_measure() generation_times = [] gen_tokens = [] texts_outs = [] for prompt in PROMPTS: inputs = tokenizer(prompt, return_tensors="pt").to(0) tokens = inputs["input_ids"][0].tolist() before_generate = time.time() outputs = model.generate(inputs["input_ids"]) after_generate = time.time() outputs = outputs[0].tolist() num_gen_tokens = len(outputs) if outputs[: len(tokens)] != tokens else len(outputs) - len(tokens) generation_time = after_generate - before_generate text_out = tokenizer.decode(outputs, skip_special_tokens=True) texts_outs.append(text_out) generation_times.append(generation_time) gen_tokens.append(num_gen_tokens) print(f"Prompt: {prompt}\nGeneration {text_out}\nIn {generation_time:.2f}s for {num_gen_tokens} tokens\n") end_measures = end_measure(start_measures) log_measures(end_measures, "Model generation") generation_times_per_token = [gen / tok for gen, tok in zip(generation_times, gen_tokens)] avg_gen = sum(generation_times_per_token) / len(generation_times) print(f"Average time of generation per token: {avg_gen:.2f}s") print(f"First generation (avg time per token): {generation_times_per_token[0]:.2f}s") avg_gen = sum(generation_times_per_token[1:]) / (len(generation_times_per_token) - 1) print(f"Average time of generation per token (excluding the first): {avg_gen:.2f}s") if __name__ == "__main__": main()
3
0
hf_public_repos/accelerate/benchmarks
hf_public_repos/accelerate/benchmarks/big_model_inference/measures_util.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import threading import time import psutil import torch class PeakCPUMemory: def __init__(self): self.process = psutil.Process() self.peak_monitoring = False def peak_monitor(self): self.cpu_memory_peak = -1 while True: self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak) # can't sleep or will not catch the peak right (this comment is here on purpose) if not self.peak_monitoring: break def start(self): self.peak_monitoring = True self.thread = threading.Thread(target=self.peak_monitor) self.thread.daemon = True self.thread.start() def stop(self): self.peak_monitoring = False self.thread.join() return self.cpu_memory_peak cpu_peak_tracker = PeakCPUMemory() def start_measure(): # Time measures = {"time": time.time()} gc.collect() torch.cuda.empty_cache() # CPU mem measures["cpu"] = psutil.Process().memory_info().rss cpu_peak_tracker.start() # GPU mem for i in range(torch.cuda.device_count()): measures[str(i)] = torch.cuda.memory_allocated(i) torch.cuda.reset_peak_memory_stats() return measures def end_measure(start_measures): # Time measures = {"time": time.time() - start_measures["time"]} gc.collect() torch.cuda.empty_cache() # CPU mem measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20 measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20 # GPU mem for i in range(torch.cuda.device_count()): measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20 measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20 return measures def log_measures(measures, description): print(f"{description}:") print(f"- Time: {measures['time']:.2f}s") for i in range(torch.cuda.device_count()): print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB") peak = measures[f"{i}-peak"] print(f"- GPU {i} peak: {peak:.2f}MiB") print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB") print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
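A minimal usage sketch of the helpers above, assuming it is run from this benchmark folder (so `measures_util` is importable) on a CUDA-capable machine; the workload is a placeholder.

```python
# Minimal sketch: wrap an arbitrary workload with the measurement helpers above.
import torch

from measures_util import end_measure, log_measures, start_measure

measures = start_measure()
x = torch.randn(2048, 2048, device="cuda")  # placeholder workload
y = x @ x
torch.cuda.synchronize()
measures = end_measure(measures)
log_measures(measures, "Toy matmul")
```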
4
0
hf_public_repos/accelerate/benchmarks
hf_public_repos/accelerate/benchmarks/big_model_inference/README.md
# Big model inference benchmarks Running inference with Accelerate on big models. ## Setup These benchmarks use the `transformers` library: ```bash pip install transformers ``` To reproduce or test a new setup, run ```bash python big_model_inference.py model_name ``` This script supports `gpt-j-6b`, `gpt-neox`, `opt` (30B version) and `T0pp` out of the box, but you can specify any valid checkpoint for `model_name`. To force a different `torch_dtype` than the one in the config: `--torch_dtype xxx`. If you get an error linked to disk offload, you need to add the option `--disk_offload`. ## Results On a setup with two Titan RTXs (24GB of GPU RAM each) and 32GB of CPU RAM, we get the following benchmarks (T0pp does not run in float16, which is why it's not included). | Model | Model load time | Generation time | dtype | GPU 0 use | GPU 1 use | CPU use | Disk offload | |:-----:|:---------------:|:---------------:|:-----:|:---------:|:---------:|:-------:|:------------:| | GPT-J-6B | 8.7s | 0.05s per token | float16 | 11.7GB | 0GB | 0GB | no | | GPT-J-6B | 12.4s | 0.06s per token | float32 | 21.9GB | 1.5GB | 0GB | no | | GPT-Neo-X-20B | 30.9s | 0.08s per token | float16 | 21.5GB | 18GB | 0GB | no | | GPT-Neo-X-20B | 78.2s | 10.72s per token | float32 | 20.3GB | 22.7GB | 24.4GB | yes | | T0pp (11B) | 29.4s | 0.05s per token | float32 | 21.1GB | 21.3GB | 0GB | no | | OPT-30B | 34.5s | 2.37s per token | float16 | 20.7GB | 22.3GB | 14.1GB | no | | OPT-30B | 112.3s | 33.9s per token | float32 | 20.2GB | 21.2GB | 23.5GB | yes | Notes on the results: - using two GPUs instead of one does not slow down generation - using CPU offload slows things down a bit (see OPT-30B) - using disk offload slows things down a lot (prefetching is not implemented yet) You will also note that Accelerate does not use any more GPU and CPU RAM than necessary: - peak GPU memory is exactly the size of the model part put on a given GPU - peak CPU memory is either the size of the biggest checkpoint shard or the part of the model offloaded to CPU, whichever is bigger.
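For reference, the core loading pattern the benchmark script relies on, shown as a minimal sketch (the checkpoint name and dtype are just examples, and a multi-GPU machine is assumed):

```py
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# `device_map="auto"` asks Accelerate to split the checkpoint across the
# available GPUs, CPU RAM and, if needed, a disk offload folder.
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-30b", device_map="auto", torch_dtype=torch.float16
)
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-30b")
inputs = tokenizer("Hello, my name is", return_tensors="pt").to(0)
outputs = model.generate(inputs["input_ids"])
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```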
5
0
hf_public_repos/accelerate/benchmarks/fp8
hf_public_repos/accelerate/benchmarks/fp8/transformer_engine/fsdp.py
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script tests to ensure that `accelerate` performs at the same level as raw `TransformersEngine`. This particular script verifies this for FSDP training. """ from functools import partial import evaluate import torch import transformer_engine.common.recipe as te_recipe import transformer_engine.pytorch as te from fp8_utils import evaluate_model, get_named_parameters, get_training_utilities from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp import MixedPrecision from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from transformer_engine.common.recipe import DelayedScaling from transformers.models.bert import BertLayer from accelerate import Accelerator from accelerate import FullyShardedDataParallelPlugin as FSDPPlugin from accelerate.state import AcceleratorState from accelerate.utils import FP8RecipeKwargs, set_seed from accelerate.utils.transformer_engine import convert_model MODEL_NAME = "bert-base-cased" METRIC = evaluate.load("glue", "mrpc") FSDP_WRAP_POLICY = partial(transformer_auto_wrap_policy, transformer_layer_cls={BertLayer}) def train_baseline(): set_seed(42) model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME) accelerator = Accelerator() device = accelerator.device model.to(device) # Convert the model to TE old_named_params = get_named_parameters(model) with torch.no_grad(): convert_model(model) FP8_RECIPE_KWARGS = {"fp8_format": te_recipe.Format.HYBRID, "amax_history_len": 32, "amax_compute_algo": "max"} fp8_recipe = DelayedScaling(**FP8_RECIPE_KWARGS) new_named_params = get_named_parameters(model) # Convert the model to FSDP model = FSDP( model, use_orig_params=True, mixed_precision=MixedPrecision(param_dtype=torch.bfloat16, reduce_dtype=torch.float32), auto_wrap_policy=FSDP_WRAP_POLICY, ) mapping = {p: new_named_params[n] for n, p in old_named_params.items()} for param_group in optimizer.param_groups: param_group["params"] = [mapping[p] for p in param_group["params"]] base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.train() for _ in range(2): for batch in train_dataloader: with te.fp8_autocast(enabled=True, fp8_recipe=fp8_recipe): with torch.autocast(device_type="cuda", dtype=torch.bfloat16): batch = batch.to(device) outputs = model(**batch) loss = outputs.loss loss.backward() optimizer.step() optimizer.zero_grad() lr_scheduler.step() trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) assert ( trained_model_results["accuracy"] > base_model_results["accuracy"] ), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}' assert ( trained_model_results["f1"] > base_model_results["f1"] ), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}' return 
base_model_results, trained_model_results def train_integration(): FP8_RECIPE_KWARGS = {"fp8_format": "HYBRID", "amax_history_len": 32, "amax_compute_algo": "max"} kwargs_handlers = [FP8RecipeKwargs(backend="TE", **FP8_RECIPE_KWARGS)] AcceleratorState()._reset_state(True) fsdp_plugin = FSDPPlugin( auto_wrap_policy=FSDP_WRAP_POLICY, use_orig_params=True, mixed_precision_policy=MixedPrecision(param_dtype=torch.bfloat16, reduce_dtype=torch.float32), ) accelerator = Accelerator(mixed_precision="fp8", fsdp_plugin=fsdp_plugin, kwargs_handlers=kwargs_handlers) set_seed(42) model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities( MODEL_NAME, accelerator=accelerator ) model, optimizer = accelerator.prepare(model, optimizer) base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.train() for _ in range(2): for batch in train_dataloader: outputs = model(**batch) loss = outputs.loss accelerator.backward(loss) optimizer.step() optimizer.zero_grad() lr_scheduler.step() trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) assert ( trained_model_results["accuracy"] > base_model_results["accuracy"] ), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}' assert ( trained_model_results["f1"] > base_model_results["f1"] ), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}' return base_model_results, trained_model_results if __name__ == "__main__": baseline_not_trained, baseline_trained = train_baseline() accelerator_not_trained, accelerator_trained = train_integration() assert ( baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"] ), f'Accuracy should be the same for the baseline and accelerator: {baseline_not_trained["accuracy"]} == {accelerator_not_trained["accuracy"]}' assert ( baseline_not_trained["f1"] == accelerator_not_trained["f1"] ), f'F1 score should be the same for the baseline and accelerator: {baseline_not_trained["f1"]} == {accelerator_not_trained["f1"]}' assert ( baseline_trained["accuracy"] == accelerator_trained["accuracy"] ), f'Accuracy should be the same for the baseline and accelerator: {baseline_trained["accuracy"]} == {accelerator_trained["accuracy"]}' assert ( baseline_trained["f1"] == accelerator_trained["f1"] ), f'F1 score should be the same for the baseline and accelerator: {baseline_trained["f1"]} == {accelerator_trained["f1"]}' torch.distributed.destroy_process_group()
6
0
hf_public_repos/accelerate/benchmarks/fp8
hf_public_repos/accelerate/benchmarks/fp8/transformer_engine/Dockerfile
FROM nvcr.io/nvidia/pytorch:24.07-py3 RUN pip install transformers evaluate datasets RUN git clone https://github.com/huggingface/accelerate.git RUN cd accelerate && \ pip install -e . && \ cd benchmarks/fp8 CMD ["/bin/bash"]
7
0
hf_public_repos/accelerate/benchmarks/fp8
hf_public_repos/accelerate/benchmarks/fp8/transformer_engine/ddp.py
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script tests to ensure that `accelerate` performs at the same level as raw `TransformersEngine`. This particular script verifies this for DDP training. """ import evaluate import torch import transformer_engine.common.recipe as te_recipe import transformer_engine.pytorch as te from fp8_utils import evaluate_model, get_named_parameters, get_training_utilities from torch.nn.parallel import DistributedDataParallel as DDP from transformer_engine.common.recipe import DelayedScaling from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.utils import FP8RecipeKwargs, set_seed from accelerate.utils.transformer_engine import convert_model MODEL_NAME = "bert-base-cased" METRIC = evaluate.load("glue", "mrpc") def train_baseline(): set_seed(42) model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME) accelerator = Accelerator() device = accelerator.device model.to(device) # Convert the model to TE old_named_params = get_named_parameters(model) with torch.no_grad(): convert_model(model) FP8_RECIPE_KWARGS = {"fp8_format": te_recipe.Format.HYBRID, "amax_history_len": 32, "amax_compute_algo": "max"} fp8_recipe = DelayedScaling(**FP8_RECIPE_KWARGS) new_named_params = get_named_parameters(model) # Convert the model to DDP device_ids, output_device = [accelerator.local_process_index], accelerator.local_process_index model = DDP(model, device_ids=device_ids, output_device=output_device) mapping = {p: new_named_params[n] for n, p in old_named_params.items()} for param_group in optimizer.param_groups: param_group["params"] = [mapping[p] for p in param_group["params"]] base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.train() for _ in range(2): for batch in train_dataloader: with te.fp8_autocast(enabled=True, fp8_recipe=fp8_recipe): with torch.autocast(device_type="cuda", dtype=torch.bfloat16): batch = batch.to(device) outputs = model(**batch) loss = outputs.loss loss.backward() optimizer.step() optimizer.zero_grad() lr_scheduler.step() trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) assert ( trained_model_results["accuracy"] > base_model_results["accuracy"] ), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}' assert ( trained_model_results["f1"] > base_model_results["f1"] ), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}' return base_model_results, trained_model_results def train_integration(): FP8_RECIPE_KWARGS = {"fp8_format": "HYBRID", "amax_history_len": 32, "amax_compute_algo": "max"} kwargs_handlers = [FP8RecipeKwargs(backend="TE", **FP8_RECIPE_KWARGS)] AcceleratorState()._reset_state(True) accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs_handlers) set_seed(42) 
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities( MODEL_NAME, accelerator=accelerator ) model, optimizer = accelerator.prepare(model, optimizer) base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.train() for _ in range(2): for batch in train_dataloader: outputs = model(**batch) loss = outputs.loss accelerator.backward(loss) optimizer.step() optimizer.zero_grad() lr_scheduler.step() trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) assert ( trained_model_results["accuracy"] > base_model_results["accuracy"] ), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}' assert ( trained_model_results["f1"] > base_model_results["f1"] ), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}' return base_model_results, trained_model_results if __name__ == "__main__": baseline_not_trained, baseline_trained = train_baseline() accelerator_not_trained, accelerator_trained = train_integration() assert ( baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"] ), f'Accuracy should be the same for the baseline and accelerator: {baseline_not_trained["accuracy"]} == {accelerator_not_trained["accuracy"]}' assert ( baseline_not_trained["f1"] == accelerator_not_trained["f1"] ), f'F1 score should be the same for the baseline and accelerator: {baseline_not_trained["f1"]} == {accelerator_not_trained["f1"]}' assert ( baseline_trained["accuracy"] == accelerator_trained["accuracy"] ), f'Accuracy should be the same for the baseline and accelerator: {baseline_trained["accuracy"]} == {accelerator_trained["accuracy"]}' assert ( baseline_trained["f1"] == accelerator_trained["f1"] ), f'F1 score should be the same for the baseline and accelerator: {baseline_trained["f1"]} == {accelerator_trained["f1"]}' torch.distributed.destroy_process_group()
8
0
hf_public_repos/accelerate/benchmarks/fp8
hf_public_repos/accelerate/benchmarks/fp8/transformer_engine/fp8_utils.py
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch def get_dataloaders(model_name: str, batch_size: int = 16): from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(model_name) datasets = load_dataset("glue", "mrpc") def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") def collate_fn(examples): return tokenizer.pad( examples, padding="longest", pad_to_multiple_of=16, # Specific for FP8 return_tensors="pt", ) # Instantiate dataloaders. train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True ) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=16, drop_last=True, ) return train_dataloader, eval_dataloader def get_training_utilities(model_name: str, batch_size: int = 16, accelerator=None): """ Returns a tuple of: - Model - Optimizer - Train dataloader (prepared) - Eval dataloader (prepared) - LR Scheduler Suitable for training on the MRPC dataset """ from torch.optim import AdamW from transformers import AutoModelForSequenceClassification, get_linear_schedule_with_warmup from accelerate import Accelerator if accelerator is None: accelerator = Accelerator() model = AutoModelForSequenceClassification.from_pretrained(model_name) train_dataloader, eval_dataloader = get_dataloaders(model_name, batch_size) optimizer = AdamW(model.parameters(), lr=0.0001) lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=100, num_training_steps=len(train_dataloader) * 2, ) train_dataloader, eval_dataloader = accelerator.prepare(train_dataloader, eval_dataloader) return model, optimizer, train_dataloader, eval_dataloader, lr_scheduler def get_named_parameters(model): """ Same thing as `Accelerator.get_named_parameters` Returns a list of the named parameters of the model (extracted from parallel) """ from accelerate.utils import extract_model_from_parallel model = extract_model_from_parallel(model) return {n: p for n, p in model.named_parameters()} def evaluate_model(model, dataloader, metric, accelerator=None): "Turns model to .eval(), runs dataloader, calculates metric, then turns eval back on" model.eval() for step, batch in enumerate(dataloader): with torch.no_grad(): outputs 
= model(**batch) predictions = outputs.logits.argmax(dim=-1) references = batch["labels"] if accelerator is not None and accelerator.num_processes > 1: predictions, references = accelerator.gather_for_metrics((predictions, references)) metric.add_batch(predictions=predictions, references=references) return metric.compute()
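# A small, standalone sketch (guarded so it never runs on import) of why `collate_fn` above passes
# `pad_to_multiple_of=16`: TransformerEngine's FP8 kernels generally expect tensor dimensions
# divisible by 16, so "longest" padding alone is not enough. The sentences below are made up;
# downloading the tokenizer requires network access.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    demo_tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    features = [demo_tokenizer("a short sentence"), demo_tokenizer("another, slightly longer example sentence")]
    batch = demo_tokenizer.pad(features, padding="longest", pad_to_multiple_of=16, return_tensors="pt")
    # the sequence dimension is rounded up to the next multiple of 16
    print(batch["input_ids"].shape)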
9
0
hf_public_repos/blog
hf_public_repos/blog/notebooks/115_introducing_contrastive_search.ipynb
import torch from transformers import AutoTokenizer, GPT2LMHeadModel tokenizer = AutoTokenizer.from_pretrained('gpt2-large') input_ids = tokenizer('DeepMind Company is', return_tensors='pt').input_ids model = GPT2LMHeadModel.from_pretrained('gpt2-large') output = model.generate(input_ids, max_length=128) print("Output:\n" + 100 * '-') print(tokenizer.decode(output[0], skip_special_tokens=True)) print("" + 100 * '-')import torch from transformers import AutoTokenizer, GPT2LMHeadModel tokenizer = AutoTokenizer.from_pretrained('gpt2-large') input_ids = tokenizer('DeepMind Company is', return_tensors='pt').input_ids model = GPT2LMHeadModel.from_pretrained('gpt2-large') torch.manual_seed(0.) output = model.generate(input_ids, do_sample=True, max_length=128, top_p=0.95, top_k=0) print("Output:\n" + 100 * '-') print(tokenizer.decode(output[0], skip_special_tokens=True)) print("" + 100 * '-')import torch from transformers import GPT2Tokenizer, GPT2LMHeadModel model_name = 'gpt2-large' tokenizer = GPT2Tokenizer.from_pretrained(model_name) model = GPT2LMHeadModel.from_pretrained(model_name, pad_token_id=tokenizer.eos_token_id) model.eval() # prepare the prefix prefix_text = r'DeepMind Company is' inputs = tokenizer(prefix_text, return_tensors='pt').input_ids # generate the result with contrastive search output = model.generate(input_ids, penalty_alpha=0.6, top_k=4, max_length=512) print("Output:\n" + 100 * '-') print(tokenizer.decode(output[0], skip_special_tokens=True)) print("" + 100 * '-')# Load the language model and prepare the prefix text: import torch from transformers import AutoTokenizer, GPT2LMHeadModel tokenizer = AutoTokenizer.from_pretrained('gpt2-large') model = GPT2LMHeadModel.from_pretrained('gpt2-large') prefix_text = r"In a shocking finding, scientist discovered a herd of unicorns living in a remote, previously unexplored valley, in the Andes Mountains. Even more surprising to the researchers was the fact that the unicorns spoke perfect English." input_ids = tokenizer(prefix_text, return_tensors='pt').input_idsoutput = model.generate(input_ids, max_length=512) print("Output:\n" + 100 * '-') print(tokenizer.decode(output[0], skip_special_tokens=True)) print("" + 100 * '-')torch.manual_seed(0.) output = model.generate(input_ids, do_sample=True, max_length=512, top_p=0.95, top_k=0) print("Output:\n" + 100 * '-') print(tokenizer.decode(output[0], skip_special_tokens=True)) print("" + 100 * '-')output = model.generate(input_ids, max_length=512, penalty_alpha=0.6, top_k=4) print("Output:\n" + 100 * '-') print(tokenizer.decode(output[0], skip_special_tokens=True)) print("" + 100 * '-')# Load the language model and prepare the prefix text: import torch from transformers import AutoTokenizer, OPTForCausalLM model_name = r'facebook/opt-1.3b' tokenizer = AutoTokenizer.from_pretrained(model_name) model = OPTForCausalLM.from_pretrained(model_name) prefix_text = r"Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously." input_ids = tokenizer(prefix_text, return_tensors='pt').input_idsoutput = model.generate(input_ids, max_length=256) print("Output:\n" + 100 * '-') print(tokenizer.decode(output[0], skip_special_tokens=True)) print("" + 100 * '-')torch.manual_seed(0.) 
output = model.generate(input_ids, do_sample=True, max_length=256, top_p=0.95, top_k=0) print("Output:\n" + 100 * '-') print(tokenizer.decode(output[0], skip_special_tokens=True)) print("" + 100 * '-')output = model.generate(input_ids, max_length=256, penalty_alpha=0.6, top_k=6) print("Output:\n" + 100 * '-') print(tokenizer.decode(output[0], skip_special_tokens=True)) print("" + 100 * '-')import tensorflow as tf from transformers import AutoTokenizer, TFGPT2LMHeadModel tokenizer = AutoTokenizer.from_pretrained('gpt2-large') input_ids = tokenizer('DeepMind Company is', return_tensors='tf').input_ids model = TFGPT2LMHeadModel.from_pretrained('gpt2-large') output = model.generate(input_ids, max_length=128) print("Output:\n" + 100 * '-') print(tokenizer.decode(output[0], skip_special_tokens=True)) print("" + 100 * '-')import tensorflow as tf from transformers import AutoTokenizer, TFGPT2LMHeadModel tokenizer = AutoTokenizer.from_pretrained('gpt2-large') input_ids = tokenizer('DeepMind Company is', return_tensors='tf').input_ids model = TFGPT2LMHeadModel.from_pretrained('gpt2-large') tf.random.set_seed(0) output = model.generate(input_ids, do_sample=True, max_length=128, top_p=0.95, top_k=0) print("Output:\n" + 100 * '-') print(tokenizer.decode(output[0], skip_special_tokens=True)) print("" + 100 * '-')import tensorflow from transformers import GPT2Tokenizer, TFGPT2LMHeadModel model_name = 'gpt2-large' tokenizer = GPT2Tokenizer.from_pretrained(model_name) model = TFGPT2LMHeadModel.from_pretrained(model_name, pad_token_id=tokenizer.eos_token_id) # prepare the prefix prefix_text = r'DeepMind Company is' inputs = tokenizer(prefix_text, return_tensors='tf').input_ids # generate the result with contrastive search output = model.generate(input_ids, penalty_alpha=0.6, top_k=4, max_length=512) print("Output:\n" + 100 * '-') print(tokenizer.decode(output[0], skip_special_tokens=True)) print("" + 100 * '-')# Load the language model and prepare the prefix text: import tensorflow as tf from transformers import AutoTokenizer, TFGPT2LMHeadModel tokenizer = AutoTokenizer.from_pretrained('gpt2-large') model = TFGPT2LMHeadModel.from_pretrained('gpt2-large') prefix_text = r"In a shocking finding, scientist discovered a herd of unicorns living in a remote, previously unexplored valley, in the Andes Mountains. Even more surprising to the researchers was the fact that the unicorns spoke perfect English." input_ids = tokenizer(prefix_text, return_tensors='tf').input_idsoutput = model.generate(input_ids, max_length=512) print("Output:\n" + 100 * '-') print(tokenizer.decode(output[0], skip_special_tokens=True)) print("" + 100 * '-')tf.random.set_seed(0) output = model.generate(input_ids, do_sample=True, max_length=512, top_p=0.95, top_k=0) print("Output:\n" + 100 * '-') print(tokenizer.decode(output[0], skip_special_tokens=True)) print("" + 100 * '-')output = model.generate(input_ids, max_length=512, penalty_alpha=0.6, top_k=4) print("Output:\n" + 100 * '-') print(tokenizer.decode(output[0], skip_special_tokens=True)) print("" + 100 * '-')# Load the language model and prepare the prefix text: import tensorflow as tf from transformers import AutoTokenizer, TFOPTForCausalLM model_name = r'facebook/opt-1.3b' tokenizer = AutoTokenizer.from_pretrained(model_name) model = TFOPTForCausalLM.from_pretrained(model_name) prefix_text = r"Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously." 
input_ids = tokenizer(prefix_text, return_tensors='tf').input_idsoutput = model.generate(input_ids, max_length=256) print("Output:\n" + 100 * '-') print(tokenizer.decode(output[0], skip_special_tokens=True)) print("" + 100 * '-')tf.random.set_seed(0) output = model.generate(input_ids, do_sample=True, max_length=256, top_p=0.95, top_k=0) print("Output:\n" + 100 * '-') print(tokenizer.decode(output[0], skip_special_tokens=True)) print("" + 100 * '-')output = model.generate(input_ids, max_length=256, penalty_alpha=0.6, top_k=6) print("Output:\n" + 100 * '-') print(tokenizer.decode(output[0], skip_special_tokens=True)) print("" + 100 * '-')
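# A small, self-contained numerical sketch of the scoring rule behind the
# `generate(..., penalty_alpha=0.6, top_k=4)` calls above: contrastive search ranks each of the
# top-k candidates by (1 - alpha) * model_confidence - alpha * degeneration_penalty, where the
# penalty is the maximum cosine similarity between the candidate's hidden state and the hidden
# states of the previous context. All numbers below are made up purely for illustration.
import torch

alpha = 0.6
model_confidence = torch.tensor([0.50, 0.30, 0.20])                           # p(v | context) for 3 candidates
candidate_states = torch.nn.functional.normalize(torch.randn(3, 8), dim=-1)   # candidate hidden states
context_states = torch.nn.functional.normalize(torch.randn(5, 8), dim=-1)     # hidden states of previous tokens
degeneration_penalty = (candidate_states @ context_states.T).max(dim=-1).values
scores = (1 - alpha) * model_confidence - alpha * degeneration_penalty
print("selected candidate index:", scores.argmax().item())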
0
0
hf_public_repos/blog
hf_public_repos/blog/notebooks/08_warm_starting_encoder_decoder.ipynb
import datasets train_data = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train")train_data.info.descriptionimport pandas as pd from IPython.display import display, HTML from datasets import ClassLabel df = pd.DataFrame(train_data[:1]) del df["id"] for column, typ in train_data.features.items(): if isinstance(typ, ClassLabel): df[column] = df[column].transform(lambda i: typ.names[i]) display(HTML(df.to_html()))from transformers import BertTokenizerFast tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")# map article and summary len to dict as well as if sample is longer than 512 tokens def map_to_length(x): x["article_len"] = len(tokenizer(x["article"]).input_ids) x["article_longer_512"] = int(x["article_len"] > tokenizer.max_len) x["summary_len"] = len(tokenizer(x["highlights"]).input_ids) x["summary_longer_64"] = int(x["summary_len"] > 64) x["summary_longer_128"] = int(x["summary_len"] > 128) return xsample_size = 10000 data_stats = train_data.select(range(sample_size)).map(map_to_length, num_proc=4) def compute_and_print_stats(x): if len(x["article_len"]) == sample_size: print( "Article Mean: {}, %-Articles > 512:{}, Summary Mean:{}, %-Summary > 64:{}, %-Summary > 128:{}".format( sum(x["article_len"]) / sample_size, sum(x["article_longer_512"]) / sample_size, sum(x["summary_len"]) / sample_size, sum(x["summary_longer_64"]) / sample_size, sum(x["summary_longer_128"]) / sample_size, ) ) output = data_stats.map( compute_and_print_stats, batched=True, batch_size=-1, )encoder_max_length=512 decoder_max_length=128 def process_data_to_model_inputs(batch): # tokenize the inputs and labels inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=encoder_max_length) outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=decoder_max_length) batch["input_ids"] = inputs.input_ids batch["attention_mask"] = inputs.attention_mask batch["decoder_input_ids"] = outputs.input_ids batch["decoder_attention_mask"] = outputs.attention_mask batch["labels"] = outputs.input_ids.copy() # because BERT automatically shifts the labels, the labels correspond exactly to `decoder_input_ids`. 
# We have to make sure that the PAD token is ignored batch["labels"] = [[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]] return batchtrain_data = train_data.select(range(32))# batch_size = 16 batch_size=4 train_data = train_data.map( process_data_to_model_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights", "id"] )train_datatrain_data.set_format( type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )val_data = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:10%]")val_data = val_data.select(range(8))val_data = val_data.map( process_data_to_model_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights", "id"] )val_data.set_format( type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )from transformers import EncoderDecoderModelbert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-uncased", "bert-base-uncased")bert2bertbert2bert.save_pretrained("bert2bert")bert2bert = EncoderDecoderModel.from_pretrained("bert2bert")bert2bert.configshared_bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-cased", "bert-base-cased", tie_encoder_decoder=True) print(f"\n\nNum Params. Shared: {shared_bert2bert.num_parameters()}, Non-Shared: {bert2bert.num_parameters()}")# free memory del shared_bert2bertbert2bert.config.decoder_start_token_id = tokenizer.cls_token_id bert2bert.config.eos_token_id = tokenizer.sep_token_id bert2bert.config.pad_token_id = tokenizer.pad_token_id bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_sizebert2bert.config.max_length = 142 bert2bert.config.min_length = 56 bert2bert.config.no_repeat_ngram_size = 3 bert2bert.config.early_stopping = True bert2bert.config.length_penalty = 2.0 bert2bert.config.num_beams = 4from seq2seq_trainer import Seq2SeqTrainer from seq2seq_training_args import Seq2SeqTrainingArgumentstraining_args = Seq2SeqTrainingArguments( predict_with_generate=True, evaluation_strategy="steps", per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, fp16=True, output_dir="./", logging_steps=2, save_steps=10, eval_steps=4, # logging_steps=1000, # save_steps=500, # eval_steps=7500, # warmup_steps=2000, # save_total_limit=3, )rouge = datasets.load_metric("rouge")def compute_metrics(pred): labels_ids = pred.label_ids pred_ids = pred.predictions pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True) labels_ids[labels_ids == -100] = tokenizer.pad_token_id label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True) rouge_output = rouge.compute(predictions=pred_str, references=label_str, rouge_types=["rouge2"])["rouge2"].mid return { "rouge2_precision": round(rouge_output.precision, 4), "rouge2_recall": round(rouge_output.recall, 4), "rouge2_fmeasure": round(rouge_output.fmeasure, 4), }# instantiate trainer trainer = Seq2SeqTrainer( model=bert2bert, args=training_args, compute_metrics=compute_metrics, train_dataset=train_data, eval_dataset=val_data, ) trainer.train()dummy_bert2bert = EncoderDecoderModel.from_pretrained("./checkpoint-20")from transformers import BertTokenizer bert2bert = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert_cnn_daily_mail").to("cuda") tokenizer = BertTokenizer.from_pretrained("patrickvonplaten/bert2bert_cnn_daily_mail")test_data = datasets.load_dataset("cnn_dailymail", "3.0.0", 
split="test[:2%]")def generate_summary(batch): # cut off at BERT max length 512 inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512, return_tensors="pt") input_ids = inputs.input_ids.to("cuda") attention_mask = inputs.attention_mask.to("cuda") outputs = bert2bert.generate(input_ids, attention_mask=attention_mask) output_str = tokenizer.batch_decode(outputs, skip_special_tokens=True) batch["pred_summary"] = output_str return batchbatch_size = 16 # change to 64 for full evaluation results = test_data.map(generate_summary, batched=True, batch_size=batch_size, remove_columns=["article"])rouge.compute(predictions=results["pred_summary"], references=results["highlights"], rouge_types=["rouge2"])["rouge2"].mid
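# A tiny, self-contained example (made-up logits and labels) of why the preprocessing above
# replaces PAD label ids with -100: PyTorch's cross-entropy skips every position whose target
# equals `ignore_index` (-100 by default), so padded summary tokens never contribute to the
# seq2seq loss.
import torch
import torch.nn.functional as F

toy_logits = torch.randn(1, 4, 10)               # (batch, sequence_length, vocab_size)
toy_labels = torch.tensor([[3, 7, -100, -100]])  # the last two positions are padding
toy_loss = F.cross_entropy(toy_logits.view(-1, 10), toy_labels.view(-1), ignore_index=-100)
print(toy_loss)  # identical to the mean loss computed over the first two positions only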
1
0
hf_public_repos/blog
hf_public_repos/blog/notebooks/fl-with-flower.ipynb
from collections import OrderedDict import os import random import warnings import flwr as fl import torch from torch.utils.data import DataLoader from datasets import load_dataset from evaluate import load as load_metric from transformers import AutoTokenizer, DataCollatorWithPadding from transformers import AutoModelForSequenceClassification from transformers import AdamW from transformers import loggingwarnings.filterwarnings("ignore", category=UserWarning) warnings.filterwarnings("ignore", category=DeprecationWarning) logging.set_verbosity(logging.ERROR) os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' warnings.simplefilter('ignore') DEVICE = torch.device("cpu") CHECKPOINT = "albert-base-v2" # transformer model checkpoint NUM_CLIENTS = 2 NUM_ROUNDS = 3def load_data(): """Load IMDB data (training and eval)""" raw_datasets = load_dataset("imdb") raw_datasets = raw_datasets.shuffle(seed=42) # remove unnecessary data split del raw_datasets["unsupervised"] tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT) def tokenize_function(examples): return tokenizer(examples["text"], truncation=True) # Select 20 random samples to reduce the computation cost train_population = random.sample(range(len(raw_datasets["train"])), 20) test_population = random.sample(range(len(raw_datasets["test"])), 20) tokenized_datasets = raw_datasets.map(tokenize_function, batched=True) tokenized_datasets["train"] = tokenized_datasets["train"].select(train_population) tokenized_datasets["test"] = tokenized_datasets["test"].select(test_population) tokenized_datasets = tokenized_datasets.remove_columns("text") tokenized_datasets = tokenized_datasets.rename_column("label", "labels") data_collator = DataCollatorWithPadding(tokenizer=tokenizer) trainloader = DataLoader( tokenized_datasets["train"], shuffle=True, batch_size=32, collate_fn=data_collator, ) testloader = DataLoader( tokenized_datasets["test"], batch_size=32, collate_fn=data_collator ) return trainloader, testloaderdef train(net, trainloader, epochs): optimizer = AdamW(net.parameters(), lr=5e-5) net.train() for _ in range(epochs): for batch in trainloader: batch = {k: v.to(DEVICE) for k, v in batch.items()} outputs = net(**batch) loss = outputs.loss loss.backward() optimizer.step() optimizer.zero_grad() def test(net, testloader): metric = load_metric("accuracy") loss = 0 net.eval() for batch in testloader: batch = {k: v.to(DEVICE) for k, v in batch.items()} with torch.no_grad(): outputs = net(**batch) logits = outputs.logits loss += outputs.loss.item() predictions = torch.argmax(logits, dim=-1) metric.add_batch(predictions=predictions, references=batch["labels"]) loss /= len(testloader.dataset) accuracy = metric.compute()["accuracy"] return loss, accuracynet = AutoModelForSequenceClassification.from_pretrained( CHECKPOINT, num_labels=2 ).to(DEVICE)class IMDBClient(fl.client.NumPyClient): def __init__(self, net, trainloader, testloader): self.net = net self.trainloader = trainloader self.testloader = testloader def get_parameters(self, config): return [val.cpu().numpy() for _, val in self.net.state_dict().items()] def set_parameters(self, parameters): params_dict = zip(self.net.state_dict().keys(), parameters) state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict}) self.net.load_state_dict(state_dict, strict=True) def fit(self, parameters, config): self.set_parameters(parameters) print("Training Started...") train(self.net, self.trainloader, epochs=1) print("Training Finished.") return self.get_parameters(config={}), len(self.trainloader), {} def 
evaluate(self, parameters, config): self.set_parameters(parameters) loss, accuracy = test(self.net, self.testloader) return float(loss), len(self.testloader), {"accuracy": float(accuracy), "loss": float(loss)}trainloader, testloader = load_data() def client_fn(cid): return IMDBClient(net, trainloader, testloader)def weighted_average(metrics): accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] losses = [num_examples * m["loss"] for num_examples, m in metrics] examples = [num_examples for num_examples, _ in metrics] return {"accuracy": sum(accuracies) / sum(examples), "loss": sum(losses) / sum(examples)} strategy = fl.server.strategy.FedAvg( fraction_fit=1.0, fraction_evaluate=1.0, evaluate_metrics_aggregation_fn=weighted_average, ) fl.simulation.start_simulation( client_fn=client_fn, num_clients=NUM_CLIENTS, config=fl.server.ServerConfig(num_rounds=NUM_ROUNDS), strategy=strategy, client_resources={"num_cpus": 1, "num_gpus": 0}, ray_init_args={"log_to_driver": False, "num_cpus": 1, "num_gpus": 0} )
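# A tiny worked example (made-up client metrics) of the `weighted_average` aggregation used by the
# FedAvg strategy above: each client's accuracy and loss are weighted by the number of evaluation
# examples it reports before averaging.
toy_metrics = [(20, {"accuracy": 0.50, "loss": 0.70}), (40, {"accuracy": 0.80, "loss": 0.40})]
print(weighted_average(toy_metrics))
# accuracy = (20 * 0.50 + 40 * 0.80) / 60 = 0.70, loss = (20 * 0.70 + 40 * 0.40) / 60 = 0.50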
2
0
hf_public_repos/blog
hf_public_repos/blog/notebooks/56_fine_tune_segformer.ipynb
from huggingface_hub import notebook_login notebook_login()hf_username = "tobiasc"from segments import SegmentsClient from getpass import getpass api_key = getpass('Enter your API key: ') segments_client = SegmentsClient(api_key)dataset_name = "sidewalk-imagery"#@title `task_attributes = {...}` task_attributes = { "format_version": "0.1", "categories": [ { "name": "flat-road", "id": 1, "color": [ 216, 82, 24, 255 ], "attributes": [] }, { "name": "flat-sidewalk", "id": 2, "color": [ 255, 255, 0 ], "attributes": [] }, { "name": "flat-crosswalk", "id": 3, "color": [ 125, 46, 141, 255 ], "attributes": [] }, { "name": "flat-cyclinglane", "id": 4, "color": [ 118, 171, 47, 255 ], "attributes": [] }, { "name": "flat-parkingdriveway", "id": 5, "color": [ 161, 19, 46, 255 ], "attributes": [] }, { "name": "flat-railtrack", "id": 6, "color": [ 255, 0, 0, 255 ], "attributes": [] }, { "name": "flat-curb", "id": 7, "color": [ 0, 128, 128 ], "attributes": [] }, { "name": "human-person", "id": 8, "attributes": [ { "name": "is_crowd", "input_type": "checkbox", "default_value": False } ], "color": [ 190, 190, 0, 255 ] }, { "name": "human-rider", "id": 9, "attributes": [ { "name": "is_crowd", "input_type": "checkbox", "default_value": False } ], "color": [ 0, 255, 0, 255 ] }, { "name": "vehicle-car", "id": 10, "attributes": [ { "name": "is_crowd", "input_type": "checkbox", "default_value": False } ], "color": [ 0, 0, 255, 255 ] }, { "name": "vehicle-truck", "id": 11, "attributes": [ { "name": "is_crowd", "input_type": "checkbox", "default_value": False } ], "color": [ 170, 0, 255, 255 ] }, { "name": "vehicle-bus", "id": 12, "attributes": [ { "name": "is_crowd", "input_type": "checkbox", "default_value": False } ], "color": [ 84, 84, 0, 255 ] }, { "name": "vehicle-tramtrain", "id": 13, "attributes": [ { "name": "is_crowd", "input_type": "checkbox", "default_value": False } ], "color": [ 84, 170, 0, 255 ] }, { "name": "vehicle-motorcycle", "id": 14, "attributes": [ { "name": "is_crowd", "input_type": "checkbox", "default_value": False } ], "color": [ 84, 255, 0, 255 ] }, { "name": "vehicle-bicycle", "id": 15, "attributes": [ { "name": "is_crowd", "input_type": "checkbox", "default_value": False } ], "color": [ 170, 84, 0, 255 ] }, { "name": "vehicle-caravan", "id": 16, "attributes": [ { "name": "is_crowd", "input_type": "checkbox", "default_value": False } ], "color": [ 170, 170, 0, 255 ] }, { "name": "vehicle-cartrailer", "id": 17, "attributes": [ { "name": "is_crowd", "input_type": "checkbox", "default_value": False } ], "color": [ 170, 255, 0, 255 ] }, { "name": "construction-building", "id": 18, "color": [ 255, 84, 0, 255 ], "attributes": [] }, { "name": "construction-door", "id": 19, "color": [ 255, 170, 0, 255 ], "attributes": [] }, { "name": "construction-wall", "id": 20, "color": [ 255, 255, 0, 255 ], "attributes": [] }, { "name": "construction-fenceguardrail", "id": 21, "color": [ 33, 138, 200 ], "attributes": [] }, { "name": "construction-bridge", "id": 22, "color": [ 0, 170, 127, 255 ], "attributes": [] }, { "name": "construction-tunnel", "id": 23, "color": [ 0, 255, 127, 255 ], "attributes": [] }, { "name": "construction-stairs", "id": 24, "color": [ 84, 0, 127, 255 ], "attributes": [] }, { "name": "object-pole", "id": 25, "attributes": [ { "name": "is_crowd", "input_type": "checkbox", "default_value": False } ], "color": [ 84, 84, 127, 255 ] }, { "name": "object-trafficsign", "id": 26, "color": [ 84, 170, 127, 255 ], "attributes": [] }, { "name": "object-trafficlight", "id": 27, "color": [ 84, 255, 
127, 255 ], "attributes": [] }, { "name": "nature-vegetation", "id": 28, "color": [ 170, 0, 127, 255 ], "attributes": [] }, { "name": "nature-terrain", "id": 29, "color": [ 170, 84, 127, 255 ], "attributes": [] }, { "name": "sky", "id": 30, "color": [ 170, 170, 127, 255 ], "attributes": [] }, { "name": "void-ground", "id": 31, "color": [ 170, 255, 127, 255 ], "attributes": [] }, { "name": "void-dynamic", "id": 32, "color": [ 255, 0, 127, 255 ], "attributes": [] }, { "name": "void-static", "id": 33, "color": [ 255, 84, 127, 255 ], "attributes": [] }, { "name": "void-unclear", "id": 34, "color": [ 255, 170, 127, 255 ], "attributes": [] } ] }dataset_response = segments_client.add_dataset(dataset_name, task_attributes=task_attributes, category='street_scenery') dataset_identifier = f'{dataset_response.owner.username}/{dataset_name}'#@title `sample_attributes = [...]` sample_attributes = [ { "image": { "url": "https://segmentsai-prod.s3.eu-west-2.amazonaws.com/assets/tobias/72939ba9-8488-4dfe-81a2-1a299f2e1d95.jpg" } }, { "image": { "url": "https://segmentsai-prod.s3.eu-west-2.amazonaws.com/assets/tobias/6ef02d5d-e7e4-40f6-b65a-47dee4815e7a.jpg" } }, { "image": { "url": "https://segmentsai-prod.s3.eu-west-2.amazonaws.com/assets/tobias/46216c90-7af9-4e06-af28-4a0734a1e3a2.jpg" } }, { "image": { "url": "https://segmentsai-prod.s3.eu-west-2.amazonaws.com/assets/tobias/545a233e-4413-4b35-9e89-659be3550ddf.jpg" } }, { "image": { "url": "https://segmentsai-prod.s3.eu-west-2.amazonaws.com/assets/tobias/3c0ef45e-6be6-48f3-b3cd-eb283ca3cb34.jpg" } }, { "image": { "url": "https://segmentsai-prod.s3.eu-west-2.amazonaws.com/assets/tobias/8683e29d-3112-4dff-9a64-c699bc6e1457.jpg" } }, { "image": { "url": "https://segmentsai-prod.s3.eu-west-2.amazonaws.com/assets/tobias/83ffe351-68ea-4730-b49c-4e6945ab5c18.jpg" } }, { "image": { "url": "https://segmentsai-prod.s3.eu-west-2.amazonaws.com/assets/tobias/653e6961-d2fa-4c1f-b450-9615707372ed.jpg" } }, { "image": { "url": "https://segmentsai-prod.s3.eu-west-2.amazonaws.com/assets/tobias/5ee54f18-f528-40dd-83a6-92ac4771fe75.jpg" } }, { "image": { "url": "https://segmentsai-prod.s3.eu-west-2.amazonaws.com/assets/tobias/54b0f15a-271e-4b0c-962f-23bbf179c554.jpg" } }, ]for attributes in sample_attributes: name = attributes['image']['url'].split('/')[-1] segments_client.add_sample(dataset_identifier, name, attributes)release_name = "v0.1" segments_client.add_release(dataset_identifier, release_name)from segments.huggingface import release2dataset release = segments_client.get_release(dataset_identifier, release_name) hf_dataset = release2dataset(release)hf_dataset.featuresfrom segments.utils import get_semantic_bitmap def convert_segmentation_bitmap(example): return { "label.segmentation_bitmap": get_semantic_bitmap( example["label.segmentation_bitmap"], example["label.annotations"], ) } semantic_dataset = hf_dataset.map( convert_segmentation_bitmap, )semantic_dataset = semantic_dataset.rename_column('image', 'pixel_values') semantic_dataset = semantic_dataset.rename_column('label.segmentation_bitmap', 'label') semantic_dataset = semantic_dataset.remove_columns(['name', 'uuid', 'status', 'label.annotations'])semantic_dataset.featureshf_dataset_identifier = f"{hf_username}/{dataset_name}" semantic_dataset.push_to_hub(hf_dataset_identifier)hf_dataset_identifier = "segments/sidewalk-semantic"from datasets import load_dataset ds = load_dataset(hf_dataset_identifier)ds = ds.shuffle(seed=1) ds = ds["train"].train_test_split(test_size=0.2) train_ds = ds["train"] test_ds = 
ds["test"]import json from huggingface_hub import hf_hub_download filename = "id2label.json" id2label = json.load(open(hf_hub_download(repo_id=hf_dataset_identifier, filename=filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} label2id = {v: k for k, v in id2label.items()} num_labels = len(id2label) print("Id2label:", id2label)from torchvision.transforms import ColorJitter from transformers import ( SegformerImageProcessor, ) processor = SegformerImageProcessor() jitter = ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1) def train_transforms(example_batch): images = [jitter(x) for x in example_batch['pixel_values']] labels = [x for x in example_batch['label']] inputs = processor(images, labels) return inputs def val_transforms(example_batch): images = [x for x in example_batch['pixel_values']] labels = [x for x in example_batch['label']] inputs = processor(images, labels) return inputs # Set transforms train_ds.set_transform(train_transforms) test_ds.set_transform(val_transforms)from transformers import SegformerForSemanticSegmentation pretrained_model_name = "nvidia/mit-b0" model = SegformerForSemanticSegmentation.from_pretrained( pretrained_model_name, id2label=id2label, label2id=label2id )from transformers import TrainingArguments epochs = 50 lr = 0.00006 batch_size = 2 hub_model_id = "segformer-b0-finetuned-segments-sidewalk-oct-22" training_args = TrainingArguments( "segformer-b0-finetuned-segments-sidewalk-outputs", learning_rate=lr, num_train_epochs=epochs, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, save_total_limit=3, evaluation_strategy="steps", save_strategy="steps", save_steps=20, eval_steps=20, logging_steps=1, eval_accumulation_steps=5, load_best_model_at_end=True, push_to_hub=True, hub_model_id=hub_model_id, hub_strategy="end", )import torch from torch import nn import evaluate import multiprocessing metric = evaluate.load("mean_iou") def compute_metrics(eval_pred): with torch.no_grad(): logits, labels = eval_pred logits_tensor = torch.from_numpy(logits) # scale the logits to the size of the label logits_tensor = nn.functional.interpolate( logits_tensor, size=labels.shape[-2:], mode="bilinear", align_corners=False, ).argmax(dim=1) pred_labels = logits_tensor.detach().cpu().numpy() metrics = metric._compute( predictions=pred_labels, references=labels, num_labels=len(id2label), ignore_index=0, reduce_labels=processor.do_reduce_labels, ) # add per category metrics as individual key-value pairs per_category_accuracy = metrics.pop("per_category_accuracy").tolist() per_category_iou = metrics.pop("per_category_iou").tolist() metrics.update({f"accuracy_{id2label[i]}": v for i, v in enumerate(per_category_accuracy)}) metrics.update({f"iou_{id2label[i]}": v for i, v in enumerate(per_category_iou)}) return metricsfrom transformers import Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_ds, eval_dataset=test_ds, compute_metrics=compute_metrics, )trainer.train()kwargs = { "tags": ["vision", "image-segmentation"], "finetuned_from": pretrained_model_name, "dataset": hf_dataset_identifier, } processor.push_to_hub(hub_model_id) trainer.push_to_hub(**kwargs)from transformers import SegformerImageProcessor, SegformerForSemanticSegmentation processor = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512") model = SegformerForSemanticSegmentation.from_pretrained(f"{hf_username}/{hub_model_id}")image = test_ds[0]['pixel_values'] gt_seg = 
test_ds[0]['label'] imagefrom torch import nn inputs = processor(images=image, return_tensors="pt") outputs = model(**inputs) logits = outputs.logits # shape (batch_size, num_labels, height/4, width/4) # First, rescale logits to original image size upsampled_logits = nn.functional.interpolate( logits, size=image.size[::-1], # (height, width) mode='bilinear', align_corners=False ) # Second, apply argmax on the class dimension pred_seg = upsampled_logits.argmax(dim=1)[0]#@title `def sidewalk_palette()` def sidewalk_palette(): """Sidewalk palette that maps each class to RGB values.""" return [ [0, 0, 0], [216, 82, 24], [255, 255, 0], [125, 46, 141], [118, 171, 47], [161, 19, 46], [255, 0, 0], [0, 128, 128], [190, 190, 0], [0, 255, 0], [0, 0, 255], [170, 0, 255], [84, 84, 0], [84, 170, 0], [84, 255, 0], [170, 84, 0], [170, 170, 0], [170, 255, 0], [255, 84, 0], [255, 170, 0], [255, 255, 0], [33, 138, 200], [0, 170, 127], [0, 255, 127], [84, 0, 127], [84, 84, 127], [84, 170, 127], [84, 255, 127], [170, 0, 127], [170, 84, 127], [170, 170, 127], [170, 255, 127], [255, 0, 127], [255, 84, 127], [255, 170, 127], ]import numpy as np def get_seg_overlay(image, seg): color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8) # height, width, 3 palette = np.array(sidewalk_palette()) for label, color in enumerate(palette): color_seg[seg == label, :] = color # Show image + mask img = np.array(image) * 0.5 + color_seg * 0.5 img = img.astype(np.uint8) return imgimport matplotlib.pyplot as plt pred_img = get_seg_overlay(image, pred_seg) gt_img = get_seg_overlay(image, np.array(gt_seg)) f, axs = plt.subplots(1, 2) f.set_figheight(30) f.set_figwidth(50) axs[0].set_title("Prediction", {'fontsize': 40}) axs[0].imshow(pred_img) axs[1].set_title("Ground truth", {'fontsize': 40}) axs[1].imshow(gt_img)
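# A tiny worked example (toy 2x2 masks, made-up labels) of the `mean_iou` metric that the Trainer
# reports above, to make the per-category numbers concrete. The keyword arguments mirror the
# `compute_metrics` function above; `ignore_index=255` is an arbitrary value that simply does not
# occur in these toy masks.
import numpy as np
import evaluate

toy_metric = evaluate.load("mean_iou")
toy_pred = np.array([[1, 1], [2, 2]])
toy_gt = np.array([[1, 2], [2, 2]])
toy_results = toy_metric.compute(
    predictions=[toy_pred], references=[toy_gt], num_labels=3, ignore_index=255
)
print(toy_results["mean_iou"], toy_results["per_category_iou"])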
3
0
hf_public_repos/blog
hf_public_repos/blog/notebooks/18_fine_tune_xlsr_wav2vec2_asr.ipynb
from datasets import load_dataset, load_metric common_voice_train = load_dataset("common_voice", "tr", data_dir="./cv-corpus-6.1-2020-12-11", split="train+validation") common_voice_test = load_dataset("common_voice", "tr", data_dir="./cv-corpus-6.1-2020-12-11", split="test")common_voice_train = common_voice_train.remove_columns(["accent", "age", "client_id", "down_votes", "gender", "locale", "segment", "up_votes"]) common_voice_test = common_voice_test.remove_columns(["accent", "age", "client_id", "down_votes", "gender", "locale", "segment", "up_votes"])from datasets import ClassLabel import random import pandas as pd from IPython.display import display, HTML def show_random_elements(dataset, num_examples=10): assert num_examples <= len(dataset), "Can't pick more elements than there are in the dataset." picks = [] for _ in range(num_examples): pick = random.randint(0, len(dataset)-1) while pick in picks: pick = random.randint(0, len(dataset)-1) picks.append(pick) df = pd.DataFrame(dataset[picks]) display(HTML(df.to_html()))show_random_elements(common_voice_train.remove_columns(["path"]), num_examples=20)import re chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\%\‘\”\�]' def remove_special_characters(batch): batch["text"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower() + " " return batchcommon_voice_train = common_voice_train.map(remove_special_characters, remove_columns=["sentence"]) common_voice_test = common_voice_test.map(remove_special_characters, remove_columns=["sentence"])show_random_elements(common_voice_train.remove_columns(["path"]))def extract_all_chars(batch): all_text = " ".join(batch["text"]) vocab = list(set(all_text)) return {"vocab": [vocab], "all_text": [all_text]}vocab_train = common_voice_train.map(extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=common_voice_train.column_names) vocab_test = common_voice_train.map(extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=common_voice_test.column_names)vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))vocab_dict = {v: k for k, v in enumerate(vocab_list)} vocab_dictvocab_dict["|"] = vocab_dict[" "] del vocab_dict[" "]vocab_dict["[UNK]"] = len(vocab_dict) vocab_dict["[PAD]"] = len(vocab_dict) len(vocab_dict)import json with open('vocab.json', 'w') as vocab_file: json.dump(vocab_dict, vocab_file)from transformers import Wav2Vec2CTCTokenizer tokenizer = Wav2Vec2CTCTokenizer("./vocab.json", unk_token="[UNK]", pad_token="[PAD]", word_delimiter_token="|")from transformers import Wav2Vec2FeatureExtractor feature_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=True, return_attention_mask=True)from transformers import Wav2Vec2Processor processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)# from google.colab import drive # drive.mount('/content/gdrive/')# processor.save_pretrained("/content/gdrive/MyDrive/wav2vec2-large-xlsr-turkish-demo")common_voice_train[0]import torchaudio def speech_file_to_array_fn(batch): speech_array, sampling_rate = torchaudio.load(batch["path"]) batch["speech"] = speech_array[0].numpy() batch["sampling_rate"] = sampling_rate batch["target_text"] = batch["text"] return batchcommon_voice_train = common_voice_train.map(speech_file_to_array_fn, remove_columns=common_voice_train.column_names) common_voice_test = common_voice_test.map(speech_file_to_array_fn, remove_columns=common_voice_test.column_names)import librosa 
import numpy as np def resample(batch): batch["speech"] = librosa.resample(np.asarray(batch["speech"]), 48_000, 16_000) batch["sampling_rate"] = 16_000 return batchcommon_voice_train = common_voice_train.map(resample, num_proc=4) common_voice_test = common_voice_test.map(resample, num_proc=4)import IPython.display as ipd import numpy as np import random rand_int = random.randint(0, len(common_voice_train)) ipd.Audio(data=np.asarray(common_voice_train[rand_int]["speech"]), autoplay=True, rate=16000)rand_int = random.randint(0, len(common_voice_train)) print("Target text:", common_voice_train[rand_int]["target_text"]) print("Input array shape:", np.asarray(common_voice_train[rand_int]["speech"]).shape) print("Sampling rate:", common_voice_train[rand_int]["sampling_rate"])def prepare_dataset(batch): # check that all files have the correct sampling rate assert ( len(set(batch["sampling_rate"])) == 1 ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}." batch["input_values"] = processor(batch["speech"], sampling_rate=batch["sampling_rate"][0]).input_values with processor.as_target_processor(): batch["labels"] = processor(batch["target_text"]).input_ids return batchcommon_voice_train = common_voice_train.map(prepare_dataset, remove_columns=common_voice_train.column_names, batch_size=8, num_proc=4, batched=True) common_voice_test = common_voice_test.map(prepare_dataset, remove_columns=common_voice_test.column_names, batch_size=8, num_proc=4, batched=True)import torch from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union @dataclass class DataCollatorCTCWithPadding: """ Data collator that will dynamically pad the inputs received. Args: processor (:class:`~transformers.Wav2Vec2Processor`) The processor used for proccessing the data. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). max_length (:obj:`int`, `optional`): Maximum length of the ``input_values`` of the returned list and optionally padding length (see above). max_length_labels (:obj:`int`, `optional`): Maximum length of the ``labels`` returned list and optionally padding length (see above). pad_to_multiple_of (:obj:`int`, `optional`): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). 
""" processor: Wav2Vec2Processor padding: Union[bool, str] = True max_length: Optional[int] = None max_length_labels: Optional[int] = None pad_to_multiple_of: Optional[int] = None pad_to_multiple_of_labels: Optional[int] = None def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # split inputs and labels since they have to be of different lengths and need # different padding methods input_features = [{"input_values": feature["input_values"]} for feature in features] label_features = [{"input_ids": feature["labels"]} for feature in features] batch = self.processor.pad( input_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", ) with self.processor.as_target_processor(): labels_batch = self.processor.pad( label_features, padding=self.padding, max_length=self.max_length_labels, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors="pt", ) # replace padding with -100 to ignore loss correctly labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) batch["labels"] = labels return batchdata_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)wer_metric = load_metric("wer")def compute_metrics(pred): pred_logits = pred.predictions pred_ids = np.argmax(pred_logits, axis=-1) pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id pred_str = processor.batch_decode(pred_ids) # we do not want to group tokens when computing the metrics label_str = processor.batch_decode(pred.label_ids, group_tokens=False) wer = wer_metric.compute(predictions=pred_str, references=label_str) return {"wer": wer}from transformers import Wav2Vec2ForCTC model = Wav2Vec2ForCTC.from_pretrained( "facebook/wav2vec2-large-xlsr-53", attention_dropout=0.1, hidden_dropout=0.1, feat_proj_dropout=0.0, mask_time_prob=0.05, layerdrop=0.1, gradient_checkpointing=True, ctc_loss_reduction="mean", pad_token_id=processor.tokenizer.pad_token_id, vocab_size=len(processor.tokenizer) )model.freeze_feature_extractor()from transformers import TrainingArguments training_args = TrainingArguments( output_dir="/content/gdrive/MyDrive/wav2vec2-large-xlsr-turkish-demo", # output_dir="./wav2vec2-large-xlsr-turkish-demo", group_by_length=True, per_device_train_batch_size=16, gradient_accumulation_steps=2, evaluation_strategy="steps", num_train_epochs=30, fp16=True, save_steps=400, eval_steps=400, logging_steps=400, learning_rate=3e-4, warmup_steps=500, save_total_limit=2, )from transformers import Trainer trainer = Trainer( model=model, data_collator=data_collator, args=training_args, compute_metrics=compute_metrics, train_dataset=common_voice_train, eval_dataset=common_voice_test, tokenizer=processor.feature_extractor, )trainer.train()model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-turkish-demo").to("cuda") processor = Wav2Vec2Processor.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-turkish-demo")input_dict = processor(common_voice_test["input_values"][0], return_tensors="pt", padding=True) logits = model(input_dict.input_values.to("cuda")).logits pred_ids = torch.argmax(logits, dim=-1)[0][0]common_voice_test_transcription = load_dataset("common_voice", "tr", data_dir="./cv-corpus-6.1-2020-12-11", split="test")print("Prediction:") print(processor.decode(pred_ids[0])) print("\nReference:") print(common_voice_test_transcription["sentence"][0].lower())
4
0
hf_public_repos/blog
hf_public_repos/blog/notebooks/05_encoder_decoder.ipynb
from transformers import MarianMTModel, MarianTokenizer tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de") model = MarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de") # create ids of encoded input vectors input_ids = tokenizer("I want to buy a car", return_tensors="pt").input_ids # translate example output_ids = model.generate(input_ids)[0] # decode and print print(tokenizer.decode(output_ids))embeddings = model.get_input_embeddings() # create ids of encoded input vectors input_ids = tokenizer("I want to buy a car", return_tensors="pt").input_ids # pass input_ids to encoder encoder_hidden_states = model.base_model.encoder(input_ids, return_dict=True).last_hidden_state # change the input slightly and pass to encoder input_ids_perturbed = tokenizer("I want to buy a house", return_tensors="pt").input_ids encoder_hidden_states_perturbed = model.base_model.encoder(input_ids_perturbed, return_dict=True).last_hidden_state # compare shape and encoding of first vector print(f"Length of input embeddings {embeddings(input_ids).shape[1]}. Length of encoder_hidden_states {encoder_hidden_states.shape[1]}") # compare values of word embedding of "I" for input_ids and perturbed input_ids print("Is encoding for `I` equal to its perturbed version?: ", torch.allclose(encoder_hidden_states[0, 0], encoder_hidden_states_perturbed[0, 0], atol=1e-3))from transformers import MarianMTModel, MarianTokenizer import torch tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de") model = MarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de") embeddings = model.get_input_embeddings() # create token ids for encoder input input_ids = tokenizer("I want to buy a car", return_tensors="pt").input_ids # pass input token ids to encoder encoder_output_vectors = model.base_model.encoder(input_ids, return_dict=True).last_hidden_state # create token ids for decoder input decoder_input_ids = tokenizer("<pad> Ich will ein", return_tensors="pt", add_special_tokens=False).input_ids # pass decoder input ids and encoded input vectors to decoder decoder_output_vectors = model.base_model.decoder(decoder_input_ids, encoder_hidden_states=encoder_output_vectors).last_hidden_state # derive embeddings by multiplying decoder outputs with embedding weights lm_logits = torch.nn.functional.linear(decoder_output_vectors, embeddings.weight, bias=model.final_logits_bias) # change the decoder input slightly decoder_input_ids_perturbed = tokenizer("<pad> Ich will das", return_tensors="pt", add_special_tokens=False).input_ids decoder_output_vectors_perturbed = model.base_model.decoder(decoder_input_ids_perturbed, encoder_hidden_states=encoder_output_vectors).last_hidden_state lm_logits_perturbed = torch.nn.functional.linear(decoder_output_vectors_perturbed, embeddings.weight, bias=model.final_logits_bias) # compare shape and encoding of first vector print(f"Shape of decoder input vectors {embeddings(decoder_input_ids).shape}. 
Shape of decoder logits {lm_logits.shape}") # compare values of word embedding of "I" for input_ids and perturbed input_ids print("Is encoding for `Ich` equal to its perturbed version?: ", torch.allclose(lm_logits[0, 0], lm_logits_perturbed[0, 0], atol=1e-3))from transformers import MarianMTModel, MarianTokenizer import torch tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de") model = MarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de") # create ids of encoded input vectors input_ids = tokenizer("I want to buy a car", return_tensors="pt").input_ids # create BOS token decoder_input_ids = tokenizer("<pad>", add_special_tokens=False, return_tensors="pt").input_ids assert decoder_input_ids[0, 0].item() == model.config.decoder_start_token_id, "`decoder_input_ids` should correspond to `model.config.decoder_start_token_id`" # STEP 1 # pass input_ids to encoder and to decoder and pass BOS token to decoder to retrieve first logit outputs = model(input_ids, decoder_input_ids=decoder_input_ids, return_dict=True) # get encoded sequence encoded_sequence = (outputs.encoder_last_hidden_state,) # get logits lm_logits = outputs.logits # sample last token with highest prob next_decoder_input_ids = torch.argmax(lm_logits[:, -1:], axis=-1) # concat decoder_input_ids = torch.cat([decoder_input_ids, next_decoder_input_ids], axis=-1) # STEP 2 # reuse encoded_inputs and pass BOS + "Ich" to decoder to second logit lm_logits = model(None, encoder_outputs=encoded_sequence, decoder_input_ids=decoder_input_ids, return_dict=True).logits # sample last token with highest prob again next_decoder_input_ids = torch.argmax(lm_logits[:, -1:], axis=-1) # concat again decoder_input_ids = torch.cat([decoder_input_ids, next_decoder_input_ids], axis=-1) # STEP 3 lm_logits = model(None, encoder_outputs=encoded_sequence, decoder_input_ids=decoder_input_ids, return_dict=True).logits next_decoder_input_ids = torch.argmax(lm_logits[:, -1:], axis=-1) decoder_input_ids = torch.cat([decoder_input_ids, next_decoder_input_ids], axis=-1) # let's see what we have generated so far! print(f"Generated so far: {tokenizer.decode(decoder_input_ids[0], skip_special_tokens=True)}") # This can be written in a loop as well.
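# A minimal sketch of the same greedy decoding written as a loop, as the closing comment above
# suggests. It reuses `model`, `tokenizer` and the cached `encoded_sequence` from the cells above,
# and stops at EOS or after a fixed number of steps for brevity.
decoder_input_ids = tokenizer("<pad>", add_special_tokens=False, return_tensors="pt").input_ids
for _ in range(20):
    lm_logits = model(None, encoder_outputs=encoded_sequence, decoder_input_ids=decoder_input_ids, return_dict=True).logits
    next_decoder_input_ids = torch.argmax(lm_logits[:, -1:], axis=-1)
    decoder_input_ids = torch.cat([decoder_input_ids, next_decoder_input_ids], axis=-1)
    if next_decoder_input_ids.item() == model.config.eos_token_id:
        break
print(f"Generated with a loop: {tokenizer.decode(decoder_input_ids[0], skip_special_tokens=True)}")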
5
0
hf_public_repos/blog
hf_public_repos/blog/notebooks/graphml-classification.ipynb
from transformers.utils import is_cython_available print("Cython is installed:", is_cython_available())from huggingface_hub import notebook_login notebook_login()from datasets import load_dataset dataset = load_dataset("OGB/ogbg-molhiv")from datasets import load_metric metric = load_metric("accuracy")datasetprint(dataset["train"][0])dataset["train"].featuresimport networkx as nx import matplotlib.pyplot as plt# We want to plot the first train graph graph = dataset["train"][0] edges = graph["edge_index"] num_edges = len(edges[0]) num_nodes = graph["num_nodes"] # Conversion to networkx format G = nx.Graph() G.add_nodes_from(range(num_nodes)) G.add_edges_from([(edges[0][i], edges[1][i]) for i in range(num_edges)]) # Plot nx.draw(G) print("Label:", graph['y'])from transformers.models.graphormer.collating_graphormer import preprocess_item, GraphormerDataCollator dataset_processed = dataset.map(preprocess_item, batched=False)# split up training into training + validation train_ds = dataset_processed['train'] val_ds = dataset_processed['validation']print(train_ds[0].keys())from transformers import GraphormerForGraphClassification model_checkpoint = "clefourrier/graphormer-base-pcqm4mv2" # pre-trained model from which to fine-tune model = GraphormerForGraphClassification.from_pretrained( model_checkpoint, num_classes=2, ignore_mismatched_sizes = True, # provide this in case you're planning to fine-tune an already fine-tuned checkpoint ) from transformers import TrainingArguments, Trainer training_args = TrainingArguments( "graph-classification", logging_dir="graph-classification", per_device_train_batch_size=64, per_device_eval_batch_size=64, auto_find_batch_size=True, # batch size can be changed automatically to prevent OOMs gradient_accumulation_steps=10, dataloader_num_workers=4, num_train_epochs=20, evaluation_strategy="epoch", logging_strategy="epoch", push_to_hub=False, )trainer = Trainer( model=model, args=training_args, train_dataset=train_ds, eval_dataset=val_ds, data_collator=GraphormerDataCollator() )train_results = trainer.train() # rest is optional but nice to have trainer.save_model() trainer.log_metrics("train", train_results.metrics) trainer.save_metrics("train", train_results.metrics) trainer.save_state()trainer.push_to_hub()
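# A tiny, self-contained illustration (made-up 4-node graph) of the `edge_index` field plotted
# above: it is a COO-style pair of lists, where position i holds the source and destination node
# ids of edge i.
toy_edge_index = [[0, 1, 1, 2], [1, 0, 2, 3]]
toy_adjacency = {}
for src, dst in zip(*toy_edge_index):
    toy_adjacency.setdefault(src, []).append(dst)
print(toy_adjacency)  # {0: [1], 1: [0, 2], 2: [3]}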
6
0
hf_public_repos/blog
hf_public_repos/blog/notebooks/80_getting_started_with_embeddings.ipynb
model_id = "sentence-transformers/all-MiniLM-L6-v2" hf_token = "get your token in http://hf.co/settings/tokens"import requests from retry import retry api_url = f"https://api-inference.huggingface.co/pipeline/feature-extraction/{model_id}" headers = {"Authorization": f"Bearer {hf_token}"}@retry(tries=3, delay=10) def query(texts): response = requests.post(api_url, headers=headers, json={"inputs": texts}) result = response.json() if isinstance(result, list): return result elif list(result.keys())[0] == "error": raise RuntimeError( "The model is currently loading, please re-run the query." )texts = ["How do I get a replacement Medicare card?", "What is the monthly premium for Medicare Part B?", "How do I terminate my Medicare Part B (medical insurance)?", "How do I sign up for Medicare?", "Can I sign up for Medicare Part B if I am working and have health insurance through an employer?", "How do I sign up for Medicare Part B if I already have Part A?", "What are Medicare late enrollment penalties?", "What is Medicare and who can get it?", "How can I get help with my Medicare Part A and Part B premiums?", "What are the different parts of Medicare?", "Will my Medicare premiums be higher because of my higher income?", "What is TRICARE ?", "Should I sign up for Medicare Part B if I have Veterans’ Benefits?"] output = query(texts)import pandas as pd embeddings = pd.DataFrame(output)print(embeddings)# This is code required to install git-lfs however it already is installed in Colab instances. #!git lfs installembeddings.to_csv("embedded_faqs_medicare/embeddings.csv", index=False) print(embeddings.shape)%cd embedded_faqs_medicare/import torch from datasets import load_dataset faqs_embeddings = load_dataset('ITESM/embedded_faqs_medicare') dataset_embeddings = torch.from_numpy(faqs_embeddings["train"].to_pandas().to_numpy()).to(torch.float)question = ["How can Medicare help me?"] output = query(question)query_embeddings = torch.FloatTensor(output) print(f"The size of our embedded dataset is {dataset_embeddings.shape} and of our embedded query is {query_embeddings.shape}.")from sentence_transformers.util import semantic_search hits = semantic_search(query_embeddings, dataset_embeddings, top_k=5)[texts[hits[0][i]['corpus_id']] for i in range(len(hits[0]))]
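# A tiny, self-contained example (toy 2-dimensional vectors, made up) of what `semantic_search`
# does above: it scores every corpus embedding against the query embedding with cosine similarity
# and returns the `top_k` hits as dictionaries with `corpus_id` and `score`.
import torch
from sentence_transformers.util import semantic_search

toy_corpus = torch.tensor([[1.0, 0.0], [0.0, 1.0], [0.7, 0.7]])
toy_query = torch.tensor([[1.0, 0.1]])
print(semantic_search(toy_query, toy_corpus, top_k=2))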
7
0
hf_public_repos/blog
hf_public_repos/blog/notebooks/17_fine_tune_wav2vec2_for_english_asr.ipynb
from datasets import load_dataset, load_metric timit = load_dataset("timit_asr")timittimit = timit.remove_columns(["phonetic_detail", "word_detail", "dialect_region", "id", "sentence_type", "speaker_id"])from datasets import ClassLabel import random import pandas as pd from IPython.display import display, HTML def show_random_elements(dataset, num_examples=10): assert num_examples <= len(dataset), "Can't pick more elements than there are in the dataset." picks = [] for _ in range(num_examples): pick = random.randint(0, len(dataset)-1) while pick in picks: pick = random.randint(0, len(dataset)-1) picks.append(pick) df = pd.DataFrame(dataset[picks]) display(HTML(df.to_html()))show_random_elements(timit["train"].remove_columns(["file"]), num_examples=20)import re chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"]' def remove_special_characters(batch): batch["text"] = re.sub(chars_to_ignore_regex, '', batch["text"]).lower() return batchtimit = timit.map(remove_special_characters)show_random_elements(timit["train"].remove_columns(["file"]))def extract_all_chars(batch): all_text = " ".join(batch["text"]) vocab = list(set(all_text)) return {"vocab": [vocab], "all_text": [all_text]}vocabs = timit.map(extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=timit.column_names["train"])vocab_list = list(set(vocabs["train"]["vocab"][0]) | set(vocabs["test"]["vocab"][0]))vocab_dict = {v: k for k, v in enumerate(vocab_list)} vocab_dictvocab_dict["|"] = vocab_dict[" "] del vocab_dict[" "]vocab_dict["[UNK]"] = len(vocab_dict) vocab_dict["[PAD]"] = len(vocab_dict) len(vocab_dict)import json with open('vocab.json', 'w') as vocab_file: json.dump(vocab_dict, vocab_file)from transformers import Wav2Vec2CTCTokenizer tokenizer = Wav2Vec2CTCTokenizer("./vocab.json", unk_token="[UNK]", pad_token="[PAD]", word_delimiter_token="|")from transformers import Wav2Vec2FeatureExtractor feature_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=True, return_attention_mask=False)from transformers import Wav2Vec2Processor processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)# from google.colab import drive # drive.mount('/content/gdrive/')# processor.save_pretrained("/content/gdrive/MyDrive/wav2vec2-base-timit-demo")timit["train"][0]import soundfile as sf def speech_file_to_array_fn(batch): speech_array, sampling_rate = sf.read(batch["file"]) batch["speech"] = speech_array batch["sampling_rate"] = sampling_rate batch["target_text"] = batch["text"] return batchtimit = timit.map(speech_file_to_array_fn, remove_columns=timit.column_names["train"], num_proc=4)import IPython.display as ipd import numpy as np import random rand_int = random.randint(0, len(timit["train"])) ipd.Audio(data=np.asarray(timit["train"][rand_int]["speech"]), autoplay=True, rate=16000)rand_int = random.randint(0, len(timit["train"])) print("Target text:", timit["train"][rand_int]["target_text"]) print("Input array shape:", np.asarray(timit["train"][rand_int]["speech"]).shape) print("Sampling rate:", timit["train"][rand_int]["sampling_rate"])def prepare_dataset(batch): # check that all files have the correct sampling rate assert ( len(set(batch["sampling_rate"])) == 1 ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}." 
batch["input_values"] = processor(batch["speech"], sampling_rate=batch["sampling_rate"][0]).input_values with processor.as_target_processor(): batch["labels"] = processor(batch["target_text"]).input_ids return batchtimit_prepared = timit.map(prepare_dataset, remove_columns=timit.column_names["train"], batch_size=8, num_proc=4, batched=True)import torch from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union @dataclass class DataCollatorCTCWithPadding: """ Data collator that will dynamically pad the inputs received. Args: processor (:class:`~transformers.Wav2Vec2Processor`) The processor used for proccessing the data. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). max_length (:obj:`int`, `optional`): Maximum length of the ``input_values`` of the returned list and optionally padding length (see above). max_length_labels (:obj:`int`, `optional`): Maximum length of the ``labels`` returned list and optionally padding length (see above). pad_to_multiple_of (:obj:`int`, `optional`): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). 
""" processor: Wav2Vec2Processor padding: Union[bool, str] = True max_length: Optional[int] = None max_length_labels: Optional[int] = None pad_to_multiple_of: Optional[int] = None pad_to_multiple_of_labels: Optional[int] = None def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # split inputs and labels since they have to be of different lengths and need # different padding methods input_features = [{"input_values": feature["input_values"]} for feature in features] label_features = [{"input_ids": feature["labels"]} for feature in features] batch = self.processor.pad( input_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", ) with self.processor.as_target_processor(): labels_batch = self.processor.pad( label_features, padding=self.padding, max_length=self.max_length_labels, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors="pt", ) # replace padding with -100 to ignore loss correctly labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) batch["labels"] = labels return batchdata_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)wer_metric = load_metric("wer")def compute_metrics(pred): pred_logits = pred.predictions pred_ids = np.argmax(pred_logits, axis=-1) pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id pred_str = processor.batch_decode(pred_ids) # we do not want to group tokens when computing the metrics label_str = processor.batch_decode(pred.label_ids, group_tokens=False) wer = wer_metric.compute(predictions=pred_str, references=label_str) return {"wer": wer}from transformers import Wav2Vec2ForCTC model = Wav2Vec2ForCTC.from_pretrained( "facebook/wav2vec2-base", gradient_checkpointing=True, ctc_loss_reduction="mean", pad_token_id=processor.tokenizer.pad_token_id, )model.freeze_feature_extractor()from transformers import TrainingArguments training_args = TrainingArguments( # output_dir="/content/gdrive/MyDrive/wav2vec2-base-timit-demo", output_dir="./wav2vec2-base-timit-demo", group_by_length=True, per_device_train_batch_size=32, evaluation_strategy="steps", num_train_epochs=30, fp16=True, save_steps=500, eval_steps=500, logging_steps=500, learning_rate=1e-4, weight_decay=0.005, warmup_steps=1000, save_total_limit=2, )from transformers import Trainer trainer = Trainer( model=model, data_collator=data_collator, args=training_args, compute_metrics=compute_metrics, train_dataset=timit_prepared["train"], eval_dataset=timit_prepared["test"], tokenizer=processor.feature_extractor, )trainer.train()processor = Wav2Vec2Processor.from_pretrained("patrickvonplaten/wav2vec2-base-timit-demo")model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-timit-demo")def map_to_result(batch): model.to("cuda") input_values = processor( batch["speech"], sampling_rate=batch["sampling_rate"], return_tensors="pt" ).input_values.to("cuda") with torch.no_grad(): logits = model(input_values).logits pred_ids = torch.argmax(logits, dim=-1) batch["pred_str"] = processor.batch_decode(pred_ids)[0] return batchresults = timit["test"].map(map_to_result)print("Test WER: {:.3f}".format(wer_metric.compute(predictions=results["pred_str"], references=results["target_text"])))show_random_elements(results.remove_columns(["speech", "sampling_rate"]))model.to("cuda") input_values = processor(timit["test"][0]["speech"], sampling_rate=timit["test"][0]["sampling_rate"], 
return_tensors="pt").input_values.to("cuda") with torch.no_grad(): logits = model(input_values).logits pred_ids = torch.argmax(logits, dim=-1) # convert ids to tokens " ".join(processor.tokenizer.convert_ids_to_tokens(pred_ids[0].tolist()))
8
0
hf_public_repos/blog
hf_public_repos/blog/notebooks/01_how_to_train.ipynb
#@title %%html <div style="background-color: pink;"> Notebook written in collaboration with <a href="https://github.com/aditya-malte">Aditya Malte</a>. <br> The Notebook is on GitHub, so contributions are more than welcome. </div> <br> <div style="background-color: yellow;"> Aditya wrote another notebook with a slightly different use case and methodology, please check it out. <br> <a target="_blank" href="https://gist.github.com/aditya-malte/2d4f896f471be9c38eb4d723a710768b"> https://gist.github.com/aditya-malte/2d4f896f471be9c38eb4d723a710768b </a> </div> # in this notebook we'll only get one of the files (the Oscar one) for the sake of simplicity and performance !wget -c https://cdn-datasets.huggingface.co/EsperBERTo/data/oscar.eo.txt# We won't need TensorFlow here !pip uninstall -y tensorflow # Install `transformers` from master !pip install git+https://github.com/huggingface/transformers !pip list | grep -E 'transformers|tokenizers' # transformers version at notebook update --- 2.11.0 # tokenizers version at notebook update --- 0.8.0rc1%%time from pathlib import Path from tokenizers import ByteLevelBPETokenizer paths = [str(x) for x in Path(".").glob("**/*.txt")] # Initialize a tokenizer tokenizer = ByteLevelBPETokenizer() # Customize training tokenizer.train(files=paths, vocab_size=52_000, min_frequency=2, special_tokens=[ "<s>", "<pad>", "</s>", "<unk>", "<mask>", ])from tokenizers.implementations import ByteLevelBPETokenizer from tokenizers.processors import BertProcessing tokenizer = ByteLevelBPETokenizer( "./EsperBERTo/vocab.json", "./EsperBERTo/merges.txt", )tokenizer._tokenizer.post_processor = BertProcessing( ("</s>", tokenizer.token_to_id("</s>")), ("<s>", tokenizer.token_to_id("<s>")), ) tokenizer.enable_truncation(max_length=512)tokenizer.encode("Mi estas Julien.")tokenizer.encode("Mi estas Julien.").tokens# Check that we have a GPU !nvidia-smi# Check that PyTorch sees it import torch torch.cuda.is_available()from transformers import RobertaConfig config = RobertaConfig( vocab_size=52_000, max_position_embeddings=514, num_attention_heads=12, num_hidden_layers=6, type_vocab_size=1, )from transformers import RobertaTokenizerFast tokenizer = RobertaTokenizerFast.from_pretrained("./EsperBERTo", max_len=512)from transformers import RobertaForMaskedLM model = RobertaForMaskedLM(config=config)model.num_parameters() # => 84 million parameters%%time from transformers import LineByLineTextDataset dataset = LineByLineTextDataset( tokenizer=tokenizer, file_path="./oscar.eo.txt", block_size=128, )from transformers import DataCollatorForLanguageModeling data_collator = DataCollatorForLanguageModeling( tokenizer=tokenizer, mlm=True, mlm_probability=0.15 )from transformers import Trainer, TrainingArguments training_args = TrainingArguments( output_dir="./EsperBERTo", overwrite_output_dir=True, num_train_epochs=1, per_gpu_train_batch_size=64, save_steps=10_000, save_total_limit=2, prediction_loss_only=True, ) trainer = Trainer( model=model, args=training_args, data_collator=data_collator, train_dataset=dataset, )%%time trainer.train()trainer.save_model("./EsperBERTo")from transformers import pipeline fill_mask = pipeline( "fill-mask", model="./EsperBERTo", tokenizer="./EsperBERTo" )# The sun <mask>. # => fill_mask("La suno <mask>.")fill_mask("Jen la komenco de bela <mask>.") # This is the beginning of a beautiful <mask>. # =>
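Note: the cells above reload the tokenizer from `./EsperBERTo/vocab.json` and `./EsperBERTo/merges.txt` right after training it, which assumes the trained tokenizer files were first written to disk. A minimal sketch of that save step (to be run just after `tokenizer.train(...)`, while `tokenizer` is still the `ByteLevelBPETokenizer` instance):

import os

# Write vocab.json and merges.txt into ./EsperBERTo so the later cells can reload them
os.makedirs("EsperBERTo", exist_ok=True)
tokenizer.save_model("EsperBERTo")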
9
0
hf_public_repos/blog
hf_public_repos/blog/zh/graphml-classification.md
--- title: "使用 Transformers 进行图分类" thumbnail: /blog/assets/125_intro-to-graphml/thumbnail_classification.png authors: - user: clefourrier translators: - user: MatrixYao --- # 使用 Transformers 进行图分类 <div class="blog-metadata"> <small>Published April 14, 2023.</small> <a target="_blank" class="btn no-underline text-sm mb-5 font-sans" href="https://github.com/huggingface/blog/blob/main/graphml-classification.md"> Update on GitHub </a> </div> <div class="author-card"> <a href="/clefourrier"> <img class="avatar avatar-user" src="https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/1644340617257-noauth.png?w=200&h=200&f=face" title="Gravatar"> <div class="bfc"> <code>clefourrier</code> <span class="fullname">Clémentine Fourrier</span> </div> </a> </div> 在之前的[博文](https://huggingface.co/blog/intro-graphml)中,我们探讨了图机器学习的一些理论知识。这一篇我们将探索如何使用 Transformers 库进行图分类。(你也可以从[此处](https://github.com/huggingface/blog/blob/main/notebooks/graphml-classification.ipynb)下载演示 notebook,跟着一起做!) 目前,Transformers 中唯一可用的图 transformer 模型是微软的 [Graphormer](https://arxiv.org/abs/2106.05234),因此本文的例子将会基于该模型。我们期待看到大家会使用并集成哪些其他模型进 🤗。 ## 软件 要学习本教程,需要安装 `datasets` 和 `transformers`(版本号 >= 4.27.2),你可以使用 `pip install -U datasets transformers` 来安装。 ## 数据 你可以使用自己的图数据集,也可以使用 [Hub 上已有的数据集](https://huggingface.co/datasets?task_categories=task_categories:graph-ml&sort=downloads)。本文我们主要使用已有的数据集,你也可以随时[添加你的数据集](https://huggingface.co/docs/datasets/upload_dataset)到 Hugging Face! ### 数据加载 从 Hub 加载图数据集非常简单。这里,我们加载 OGB 库中的 `ogbg-mohiv` 数据集(该数据集是斯坦福 [开放图基准(Open Graph Benchmark,OGB)](https://ogb.stanford.edu/) 的一部分): ```python from datasets import load_dataset # There is only one split on the hub dataset = load_dataset("OGB/ogbg-molhiv") dataset = dataset.shuffle(seed=0) ``` 这个数据集含三个拆分,`train`、`validation` 和 `test`,所有这些拆分每一行都表示一个图,每个图包含 5 个数据列( `edge_index`、`edge_attr`、`y`、`num_nodes`、`node_feat` ),你可以通过执行 `print(dataset)` 来查看。 如果你还安装了其他图处理库,你还可以用这些库把图可视化出来,并进一步检查数据集。例如,使用 PyGeometric 和 matplotlib: ```python import networkx as nx import matplotlib.pyplot as plt # We want to plot the first train graph graph = dataset["train"][0] edges = graph["edge_index"] num_edges = len(edges[0]) num_nodes = graph["num_nodes"] # Conversion to networkx format G = nx.Graph() G.add_nodes_from(range(num_nodes)) G.add_edges_from([(edges[0][i], edges[1][i]) for i in range(num_edges)]) # Plot nx.draw(G) ``` ### 格式 在 Hub 上,图数据集主要存储为图列表形式(使用 `jsonl` 格式)。 单个图表示为一个字典,以下是我们图分类数据集的理想格式: - `edge_index` 包含图上每条边对应的节点 ID,存储为包含两个`节点列表`的列表(即由一个源节点列表和一个目的节点列表组成的列表)。 - **类型**:2个整数列表的列表。 - **示例**:包含四个节点(0、1、2 和 3)且连接为 1->2、1->3 和 3->1 的图将具有 `edge_index = [[1, 1, 3]、[2、3、1]]`。你可能会注意到此处不存在节点 0,因为在本数据中它与其他节点无边连接。这就是下一个属性很重要的原因。 - `num_nodes` 表示图中可用节点的数目(默认情况下,假定节点按顺序编号)。 - **类型**:整数 - **示例**:在上例中,`num_nodes = 4`。 - `y` 每个图的预测标签(可以是类、属性值或是不同任务的多个二分类标签)。 - **Type**:整数列表(用于多分类)、浮点数(用于回归)或 0/1 列表(用于二元多任务分类) - **示例**:我们可以预测图规模(小 = 0,中 = 1,大 = 2)。本例中,`y = [0]`。 - `node_feat` 包含图中每个节点的可用特征(如果存在),按节点 ID 排序。 - **类型**:整数列表的列表(可选) - **例子**:如上例中的节点可以有一些类型特征(就像分子图中的节点是不同的原子,不同的原子有不同的类型一样)。打比方,本例中 `node_feat = [[1], [0], [1], [1]]`。 - `edge_attr` 包含图中每条边的可用属性(如果存在),按 `edge_index` 排序。 - **类型**:整数列表的列表(可选) - **例子**:仍使用上例,边也可以有类型(如分子中的键),如 edge_attr = [[0], [1], [1]]`。 ### 预处理 图 transformer 框架通常需要根据数据集进行特定的预处理,以生成有助于目标学习任务(在我们的案例中为分类)的特征和属性。 在这里,我们使用 `Graphormer` 的默认预处理,它生成进度/出度信息、节点间的最短路径以及模型感兴趣的其他属性。 ```python from transformers.models.graphormer.collating_graphormer import preprocess_item, GraphormerDataCollator dataset_processed = dataset.map(preprocess_item, 
batched=False) ``` 我们也可以在 `DataCollat​​or` 的参数中动态进行预处理(通过将 `on_the_fly_processing` 设置为 True)。但并非所有数据集都像 `ogbg-molhiv` 那样小,对于大图,动态预处理成本太高,因此需要预先进行预处理,并存储预处理后的数据供后续训练实验使用。 ## 模型 ### 模型加载 这里,我们加载一个已有的预训练模型及其 checkpoint 并在我们的下游任务上对其进行微调,该任务是一个二分类任务(因此 `num_classes = 2` )。我们还可以在回归任务 (`num_classes = 1`) 或多任务分类上微调我们的模型。 ```python from transformers import GraphormerForGraphClassification model = GraphormerForGraphClassification.from_pretrained( "clefourrier/pcqm4mv2_graphormer_base", num_classes=2, # num_classes for the downstream task ignore_mismatched_sizes=True, ) ``` 我们来看下细节。 在代码中调用 `from_pretrained` 方法来下载并缓存模型权重。由于类的数量(用于预测)取决于数据集,我们将新的 `num_classes` 和`ignore_mismatched_sizes` 与 `model_checkpoint` 一起传给该函数。这会触发函数创建一个自定义的、特定于该下游任务的分类头,这个头与原模型中的解码器头很可能是不同的。 我们也可以创建一个新的随机初始化的模型来从头开始训练,此时,我们既可以复用给定检查点的超参配置,也可以自己手动选择超参配置。 ### 训练或微调 为了简化模型训练,我们使用 `Trainer`。我们需要定义训练相关的配置以及评估指标来实例化 `Trainer`。我们主要使用 `TrainingArguments`类,这是一个包含所有配置项的类,用于定制训练配置。我们要给它一个文件夹名称,用于保存模型的 checkpoint。 ```python from transformers import TrainingArguments, Trainer training_args = TrainingArguments( "graph-classification", logging_dir="graph-classification", per_device_train_batch_size=64, per_device_eval_batch_size=64, auto_find_batch_size=True, # batch size can be changed automatically to prevent OOMs gradient_accumulation_steps=10, dataloader_num_workers=4, #1, num_train_epochs=20, evaluation_strategy="epoch", logging_strategy="epoch", push_to_hub=False, ) ``` 对于图数据集,调整 batch size 和梯度累积步数来保证有效 batch size 够大同时又要避免内存不足,这件事尤为重要。 最后一个参数 `push_to_hub` 允许 `Trainer` 在训练期间定期将模型推送到 Hub,这个通常由保存步长来决定。 ```python trainer = Trainer( model=model, args=training_args, train_dataset=dataset_processed["train"], eval_dataset=dataset_processed["validation"], data_collator=GraphormerDataCollator(), ) ``` 在用于图分类的 `Trainer` 中,对给定的图数据集使用正确的数据整理器(data collator)很重要,这个数据整理器会将图转换为用于训练的 batch 数据。 ```python train_results = trainer.train() trainer.push_to_hub() ``` 训练完后,可以使用 `push_to_hub` 将模型与所有其他训练相关信息一起保存到 hub。 由于此模型比较大,因此在 CPU (Intel Core i7) 上训练/微调 20 个 epoch 大约需要一天时间。想要更快点的话,你可以使用强大的 GPU 和并行化方法,你只需在 Colab notebook 中或直接在你选择的其他集群上启动代码即可。 ## 结束语 现在你已经知道如何使用 `transformers` 来训练图分类模型,我们希望你尝试在 Hub 上分享你最喜欢的图 transformer 模型的 checkpoints、模型以及数据集,以供社区的其他人使用!
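附注: 训练完成后，如果想在测试拆分上快速检查模型效果，可以直接复用上面创建的 `trainer`。下面是一段示意代码（非原文内容，假设 `dataset_processed` 中含有 `test` 拆分，并沿用上文的 `trainer` 对象）:

```python
import numpy as np

# 用上文的 trainer 在测试集上推理（示意代码）
output = trainer.predict(dataset_processed["test"])
logits = output.predictions[0] if isinstance(output.predictions, tuple) else output.predictions
preds = np.argmax(logits, axis=-1).reshape(-1)
labels = np.asarray(output.label_ids).reshape(-1)
print("Test accuracy:", (preds == labels).mean())
```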
0
0
hf_public_repos/blog
hf_public_repos/blog/zh/agents.md
--- title: "授权调用:介绍 Transformers 智能体 2.0 " thumbnail: /blog/assets/agents/thumbnail.png authors: - user: m-ric - user: lysandre - user: pcuenq translators: - user: innovation64 - user: zhongdongy proofreader: true --- # 授权调用: 介绍 Transformers 智能体 2.0 ## 简要概述 我们推出了 Transformers 智能体 2.0! ⇒ 🎁 在现有智能体类型的基础上,我们新增了两种能够 **根据历史观察解决复杂任务的智能体**。 ⇒ 💡 我们致力于让代码 **清晰、模块化,并确保最终提示和工具等通用属性透明化**。 ⇒ 🤝 我们加入了 **分享功能**,以促进社区智能体的发展。 ⇒ 💪 **全新的智能体框架性能卓越**,使得 Llama-3-70B-Instruct 智能体在 GAIA 排行榜上超越了基于 GPT-4 的智能体! 🚀 快去体验,力争在 GAIA 排行榜上名列前茅! ## 目录 - [什么是智能体?](#什么是智能体) - [Transformers 智能体的方法](#Transformers-智能体的方法) - [主要元素](#主要元素) - [示例用例](# 示例用例) - [自我修正的检索增强生成](#自我修正的检索增强生成) - [使用简单的多智能体设置 🤝 进行高效的网页浏览](#使用简单的多智能体设置进行高效的网页浏览) - [测试我们的智能体](#测试我们的智能体) - [对大型语言模型引擎进行基准测试](#对大型语言模型引擎进行基准测试) - [使用多模态智能体刷 GAIA 排行榜](#使用多模态智能体刷-GAIA-排行榜) - [总结](#总结) ## 什么是智能体? 大型语言模型 (LLMs) 能够处理广泛的任务,但它们通常在逻辑、计算和搜索等特定任务上表现不佳。当在这些它们表现不好的领域被提示时,它们经常无法生成正确的答案。 克服这种弱点的一种方法就是创建一个 **智能体**,它只是一个由 LLM 驱动的程序。智能体通过 **工具** 获得能力,帮助它执行动作。当智能体需要特定技能来解决特定问题时,它会依赖于工具箱中的适当工具。 因此,在解决问题时,如果智能体需要特定技能,它可以直接依赖于工具箱中的适当工具。 实验上,智能体框架通常表现非常好,在多个基准测试上达到了 SOTA。例如,看看 [HumanEval 的最上面的提交](https://paperswithcode.com/sota/code-generation-on-humaneval): 它们就是智能体系统。 ## Transformers 智能体方法 构建智能体的过程很复杂,需要高水平的清晰度和模块化设计。一年前,我们发布了 Transformers 智能体,现在我们正加倍努力实现我们的核心设计目标。 我们的框架力求实现: - **简化以提升清晰度:** 我们将抽象减少到最低限度。简单的错误日志和可访问的属性让你轻松检查系统发生的情况,从而获得更多的清晰度。 - **模块化设计:** 我们更愿意提供构建模块,而不是一个完整、复杂的特性集。这样你可以自由选择最适合你的项目的构建模块。 - 例如,由于任何智能体系统只是由 LLM 引擎驱动的载体,我们决定在概念上分离这两者,使你可以用任何底层 LLM 创建任何类型的智能体。 此外,我们还提供 **分享功能**,让你能在前人的基础上继续构建! ### 主要元素 - `Tool` (工具): 这是一个类,允许你使用工具或实现一个新的工具。它主要由一个可调用的前向`method` 组成,执行工具动作,以及一些必要的属性: `name` (名称) 、`descriptions` (描述) 、`inputs` (输入) 和`output_type` (输出类型)。这些属性用于动态生成工具的使用手册,并将其插入到 LLM 的提示中。 - `Toolbox` (工具箱): 这是一组工具,作为资源提供给智能体,用于解决特定任务。出于性能考虑,工具箱中的工具已经实例化并准备好使用。这是因为某些工具需要时间来初始化,所以通常更好的是重用现有的工具箱,只更换一个工具,而不是在每次智能体初始化时从头开始构建一组工具。 - `CodeAgent` (代码智能体): 一个非常简单的智能体,其动作作为单个 Python 代码块生成。它将无法对先前的观察进行迭代。 - `ReactAgent` (反应智能体): ReAct 智能体遵循一个循环: 思考 ⇒ 行动 ⇒ 观察直到解决任务。我们提出了两种 ReActAgent 类: - `ReactCodeAgent` (反应代码智能体) 将其动作作为 Python 代码块生成。 - `ReactJsonAgent` (反应 JSON 智能体) 将其动作作为 JSON 代码块生成。 查看 [文档](https://huggingface.co/docs/transformers/en/main_classes/agent) 了解如何使用每个组件! 智能体在底层是如何工作的? 本质上,智能体的作用是“允许 LLM 使用工具”。智能体有一个关键的 `agent.run()` 方法,该方法: - 在一个 **特定提示** 中向你的 LLM 提供关于工具使用的信息。这样,LLM 可以选择运行工具来解决任务。 - **解析** 来自 LLM 输出的工具调用 (可以通过代码、JSON 格式或任何其他格式)。 - **执行** 调用。 - 如果智能体被设计为对先前的输出进行迭代,它会 **保留** 先前的工具调用和观察的记忆。这个记忆可以根据你希望它持续的时间长短而变得更加或更少细致。 <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/agents/agent_single_multistep.png" alt="graph of agent workflows" width=90%> </p> 关于智能体的更多一般背景信息,你可以阅读 Lilian Weng 的 [这篇优秀博客](https://lilianweng.github.io/posts/2023-06-23-agent/),或者阅读我们之前的博客,关于使用 LangChain 构建 [智能体](https://huggingface.co/blog/open-source-llms-as-agents)。 要深入了解我们的包,请查看 [智能体文档](https://huggingface.co/docs/transformers/en/transformers_agents)。 ## 示例用例 为了获得此功能的早期访问权限,请首先从其 `main` 分支安装 `transformers` : ``` pip install "git+https://github.com/huggingface/transformers.git#egg=transformers[agents]" ``` 智能体 2.0 将在 v4.41.0 版本中发布,预计将于五月中旬上线。 ### 自我修正的检索增强生成 快速定义: 检索增强生成 (RAG) 是“使用 LLM 回答用户查询,但基于从知识库检索到的信息来回答”。与使用普通或微调的 LLM 相比,它有许多优点: 举几个例子,它允许基于真实事实来回答问题,减少虚构,它允许向 LLM 提供特定领域的知识,并且可以细粒度地控制对知识库信息的访问。 假设我们想要执行 RAG,并且某些参数必须动态生成。例如,根据用户查询,我们可能想要将搜索限制在知识库的特定子集,或者我们可能想要调整检索到的文档数量。难题是: 如何根据用户查询动态调整这些参数? 嗯,我们可以通过让我们的智能体访问这些参数来实现! 
让我们设置这个系统。 安装以下依赖项: ``` pip install langchain sentence-transformers faiss-cpu ``` 我们首先加载一个想要在其上执行 RAG 的知识库: 这个数据集是许多 `huggingface` 包的文档页面汇编,以 markdown 格式存储。 ```python import datasets knowledge_base = datasets.load_dataset("m-ric/huggingface_doc", split="train") ``` 现在我们通过处理数据集并将其存储到向量数据库中来准备知识库,以便检索器使用。我们将使用 LangChain,因为它具有用于向量数据库的优秀工具: ```python from langchain.docstore.document import Document from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.vectorstores import FAISS from langchain_community.embeddings import HuggingFaceEmbeddings source_docs = [ Document( page_content=doc["text"], metadata={"source": doc["source"].split("/")[1]} ) for doc in knowledge_base ] docs_processed = RecursiveCharacterTextSplitter(chunk_size=500).split_documents(source_docs)[:1000] embedding_model = HuggingFaceEmbeddings("thenlper/gte-small") vectordb = FAISS.from_documents( documents=docs_processed, embedding=embedding_model ) ``` 现在我们已经准备好了数据库,让我们构建一个基于它回答用户查询的 RAG 系统! 我们希望我们的系统根据查询仅从最相关的信息来源中选择。 我们的文档页面来自以下来源: ```python >>> all_sources = list(set([doc.metadata["source"] for doc in docs_processed])) >>> print(all_sources) ['blog', 'optimum', 'datasets-server', 'datasets', 'transformers', 'course', 'gradio', 'diffusers', 'evaluate', 'deep-rl-class', 'peft', 'hf-endpoints-documentation', 'pytorch-image-models', 'hub-docs'] ``` 我们如何根据用户查询选择相关的来源? 👉 让我们构建一个 RAG 系统作为智能体,它将自由选择其信息来源! 我们创建一个检索工具,智能体可以使用它选择的参数来调用: ```python import json from transformers.agents import Tool from langchain_core.vectorstores import VectorStore class RetrieverTool(Tool): name = "retriever" description = "Retrieves some documents from the knowledge base that have the closest embeddings to the input query." inputs = { "query": { "type": "text", "description": "The query to perform. This should be semantically close to your target documents. Use the affirmative form rather than a question.", }, "source": { "type": "text", "description": "" }, } output_type = "text" def __init__(self, vectordb: VectorStore, all_sources: str, **kwargs): super().__init__(**kwargs) self.vectordb = vectordb self.inputs["source"]["description"] = ( f"The source of the documents to search, as a str representation of a list. Possible values in the list are: {all_sources}. If this argument is not provided, all sources will be searched." ) def forward(self, query: str, source: str = None) -> str: assert isinstance(query, str), "Your search query must be a string" if source: if isinstance(source, str) and "[" not in str(source): # if the source is not representing a list source = [source] source = json.loads(str(source).replace("'", '"')) docs = self.vectordb.similarity_search(query, filter=({"source": source} if source else None), k=3) if len(docs) == 0: return "No documents found with this filtering. Try removing the source filter." return "Retrieved documents:\n\n" + "\n===Document===\n".join( [doc.page_content for doc in docs] ) ``` 现在创建一个利用这个工具的智能体就很简单了! 
智能体在初始化时需要以下参数: - _`tools`_ : 智能体将能够调用的工具列表。 - _`llm_engine`_ : 驱动智能体的 LLM。 我们的 `llm_engine` 必须是一个可调用的对象,它接受一个 [messages](https://huggingface.co/docs/transformers/main/chat_templating) 列表作为输入并返回文本。它还需要接受一个 `stop_sequences` 参数,指示何时停止生成。为了方便起见,我们直接使用包中提供的 `HfEngine` 类来获取一个调用我们的 [Inference API](https://huggingface.co/docs/api-inference/en/index) 的 LLM 引擎。 ```python from transformers.agents import HfEngine, ReactJsonAgent llm_engine = HfEngine("meta-llama/Meta-Llama-3-70B-Instruct") agent = ReactJsonAgent( tools=[RetrieverTool(vectordb, all_sources)], llm_engine=llm_engine ) agent_output = agent.run("Please show me a LORA finetuning script") print("Final output:") print(agent_output) ``` 由于我们将智能体初始化为 `ReactJsonAgent` ,它已经自动获得了一个默认的系统提示,告诉 LLM 引擎逐步处理并生成工具调用作为 JSON 代码块 (根据需要,你可以替换此提示模板)。 然后,当它的 `.run()` 方法被启动时,智能体会负责调用 LLM 引擎,解析工具调用的 JSON 代码块并执行这些工具调用,所有这些都在一个循环中进行,只有在提供最终答案时才会结束。 我们得到以下输出: ``` Calling tool: retriever with arguments: {'query': 'LORA finetuning script', 'source': "['transformers', 'datasets-server', 'datasets']"} Calling tool: retriever with arguments: {'query': 'LORA finetuning script'} Calling tool: retriever with arguments: {'query': 'LORA finetuning script example', 'source': "['transformers', 'datasets-server', 'datasets']"} Calling tool: retriever with arguments: {'query': 'LORA finetuning script example'} Calling tool: final_answer with arguments: {'answer': 'Here is an example of a LORA finetuning script: https://github.com/huggingface/diffusers/blob/dd9a5caf61f04d11c0fa9f3947b69ab0010c9a0f/examples/text_to_image/train_text_to_image_lora.py#L371'} Final output: Here is an example of a LORA finetuning script: https://github.com/huggingface/diffusers/blob/dd9a5caf61f04d11c0fa9f3947b69ab0010c9a0f/examples/text_to_image/train_text_to_image_lora.py#L371 ``` 我们可以看到自我修正的实际效果: 智能体最初尝试限制来源,但由于缺乏相应的文档,它最终没有限制任何来源。 我们可以通过检查第二步的日志中的 llm 输出来验证这一点: `print(agent.logs[2]['llm_output'])` 。 ``` Thought: I'll try to retrieve some documents related to LORA finetuning scripts from the entire knowledge base, without any source filtering. Action: { "action": "retriever", "action_input": {"query": "LORA finetuning script"} } ``` ### 使用简单的多智能体设置 🤝 进行高效的网页浏览 在这个例子中,我们想要构建一个智能体并在 GAIA 基准测试上对其进行测试 ([Mialon et al. 2023](https://huggingface.co/papers/2311.12983))。GAIA 是一个非常困难的基准测试,大多数问题需要使用不同的工具进行多个步骤的推理。一个特别困难的要求是拥有一个强大的网络浏览器,能够导航到具有特定约束条件的页面: 使用网站的内部导航发现页面,按时间选择特定的文章 … 网页浏览需要深入到子页面并滚动大量不必要的文本标记,这对于解决更高级别的任务是不必要的。我们将网页浏览的子任务分配给一个专业的网页浏览智能体。我们为其提供了一些浏览网页的工具和一个特定的提示 (查看仓库以找到特定的实现)。 定义这些工具超出了本文的范围: 但是你可以在 [仓库](https://github.com/aymeric-roucher/agent_reasoning_benchmark) 中找到特定的实现。 ```python from transformers.agents import ReactJsonAgent, HfEngine WEB_TOOLS = [ SearchInformationTool(), NavigationalSearchTool(), VisitTool(), DownloadTool(), PageUpTool(), PageDownTool(), FinderTool(), FindNextTool(), ] websurfer_llm_engine = HfEngine( model="CohereForAI/c4ai-command-r-plus" ) # 我们选择 Command-R+ 因为它具有很高的上下文长度 websurfer_agent = ReactJsonAgent( tools=WEB_TOOLS, llm_engine=websurfer_llm_engine, ) ``` 为了允许更高层次的任务解决智能体调用这个智能体,我们可以简单地将其封装在另一个工具中: ```python class SearchTool(Tool): name = "ask_search_agent" description = "A search agent that will browse the internet to answer a question. Use it to gather informations, not for problem-solving." inputs = { "question": { "description": "Your question, as a natural language sentence. 
You are talking to an agent, so provide them with as much context as possible.", "type": "text", } } output_type = "text" def forward(self, question: str) -> str: return websurfer_agent.run(question) ``` 然后我们使用这个搜索工具初始化任务解决智能体: ```python from transformers.agents import ReactCodeAgent llm_engine = HfEngine(model="meta-llama/Meta-Llama-3-70B-Instruct") react_agent_hf = ReactCodeAgent( tools=[SearchTool()], llm_engine=llm_engine, ) ``` 让我们做这个任务: > _使用 Marisa Alviar-Agnew 和 Henry Agnew 根据 CK-12 许可在 LibreText 的《初级化学》材料中提供的密度数据,编译日期为 2023 年 8 月 21 日。_ > _我有一加仑的蜂蜜和一加仑的蛋黄酱,温度为 25 摄氏度。我每次从一加仑蜂蜜中取出一杯蜂蜜。我要取出多少次一杯蜂蜜,才能使蜂蜜的重量低于蛋黄酱?假设容器本身的重量相同。_ ``` Thought: I will use the 'ask_search_agent' tool to find the density of honey and mayonnaise at 25C. ==== Agent is executing the code below: density_honey = ask_search_agent(question="What is the density of honey at 25C?") print("Density of honey:", density_honey) density_mayo = ask_search_agent(question="What is the density of mayonnaise at 25C?") print("Density of mayo:", density_mayo) === Observation: Density of honey: The density of honey is around 1.38-1.45kg/L at 20C. Although I couldn't find information specific to 25C, minor temperature differences are unlikely to affect the density that much, so it's likely to remain within this range. Density of mayo: The density of mayonnaise at 25°C is 0.910 g/cm³. ===== New step ===== Thought: I will convert the density of mayonnaise from g/cm³ to kg/L and then calculate the initial weights of the honey and mayonnaise in a gallon. After that, I will calculate the weight of honey after removing one cup at a time until it weighs less than the mayonnaise. ==== Agent is executing the code below: density_honey = 1.42 # taking the average of the range density_mayo = 0.910 # converting g/cm³ to kg/L density_mayo = density_mayo * 1000 / 1000 # conversion gallon_to_liters = 3.785 # conversion factor initial_honey_weight = density_honey * gallon_to_liters initial_mayo_weight = density_mayo * gallon_to_liters cup_to_liters = 0.236 # conversion factor removed_honey_weight = cup_to_liters * density_honey === Observation: ===== New step ===== Thought: Now that I have the initial weights of honey and mayonnaise, I'll try to calculate the number of cups to remove from the honey to make it weigh less than the mayonnaise using a simple arithmetic operation. ==== Agent is executing the code below: cups_removed = int((initial_honey_weight - initial_mayo_weight) / removed_honey_weight) + 1 print("Cups removed:", cups_removed) final_answer(cups_removed) === >>> Final answer: 6 ``` ✅ 答案是 **正确的**! ## 测试我们的智能体 让我们使用智能体框架进行一些基准测试,看看不同模型的表现! 
以下实验的所有代码都可以在 [这里](https://github.com/aymeric-roucher/agent_reasoning_benchmark) 找到。 ### 基准测试大型语言模型引擎 `agents_reasoning_benchmark` 是一个小型但强大的推理测试,用于评估智能体性能。这个基准测试已经在 [我们之前的博客](https://huggingface.co/blog/open-source-llms-as-agents) 中使用并进行了更详细的解释。 这个想法是,你为智能体使用的工具选择可以极大地改变某些任务的性能。因此,这个基准测试限制了使用的工具集为一个计算器和一个非常基础的搜索工具。我们从几个数据集中挑选了问题,这些问题只能使用这两个工具来解决: - **来自 [HotpotQA](https://huggingface.co/datasets/hotpot_qa) 的 30 个问题** ([Yang et al., 2018](https://huggingface.co/papers/1809.09600)),用于测试搜索工具的使用。 - **来自 [GSM8K](https://huggingface.co/datasets/gsm8k) 的 40 个问题** ([Cobbe et al., 2021](https://huggingface.co/papers/2110.14168)),用于测试计算器工具的使用。 - **来自 [GAIA](https://huggingface.co/datasets/gaia-benchmark/GAIA) 的 20 个问题** ([Mialon et al., 2023](https://huggingface.co/papers/2311.12983)),用于测试使用这两个工具解决困难问题的能力。 在这里,我们尝试了三种不同的引擎: [Mixtral-8x7B](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1), [Llama-3-70B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct),和 [GPT-4 Turbo](https://platform.openai.com/docs/models)。 <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/agents/aggregate_score.png" alt="benchmark of agent performances" width=90%> </p> 结果在上方显示 - 为了提高精度,我们显示了两轮完整运行的平均值。我们还测试了 [Command-R+](https://huggingface.co/CohereForAI/c4ai-command-r-plus) 和 [Mixtral-8x22B](https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1),但由于清晰度原因,这里没有显示。 ⇒ **Llama-3-70B-Instruct 在开源模型中领先: 它与 GPT-4 相当,尤其在与 `ReactCodeAgent` 的结合中表现出色,得益于 Llama 3 强大的编码性能!** 💡 比较基于 JSON 和基于代码的 React 智能体很有趣: 对于像 Mixtral-8x7B 这样较弱的 LLM 引擎,基于代码的智能体不如基于 JSON 的智能体表现好,因为 LLM 引擎经常无法生成好的代码。但随着更强大的模型作为引擎,基于代码的版本表现尤为出色: 在我们的经验中,基于代码的版本甚至在使用 Llama-3-70B-Instruct 时超越了基于 JSON 的版本。因此,我们使用基于代码的版本进行下一个挑战: 在完整的 GAIA 基准测试上进行测试。 ### 使用多模态智能体刷 GAIA 排行榜 [GAIA](https://huggingface.co/datasets/gaia-benchmark/GAIA) ([Mialon et al., 2023](https://huggingface.co/papers/2311.12983)) 是一个非常困难的基准测试: 在上面的 `agent_reasoning_benchmark` 中可以看到,即使我们挑选了可以使用两种基本工具解决的任务,模型也几乎没有达到 50% 的表现。 现在我们想要在完整的测试集上获得分数,不再挑选问题。因此,我们需要覆盖所有模态,这导致我们使用这些特定的工具: - `SearchTool` : 如上所述的网页浏览器。 - `TextInspectorTool` : 将文档作为文本文件打开并返回其内容。 - `SpeechToTextTool` : 将音频文件转录为文本。我们使用基于 [distil-whisper](https://huggingface.co/distil-whisper/distil-large-v3) 的默认工具。 - `VisualQATool` : 分析图像的视觉内容。对于这些,我们使用全新的 [Idefics2-8b-chatty](https://huggingface.co/HuggingFaceM4/idefics2-8b-chatty)! 我们首先初始化这些工具 (更多细节,请检查 [仓库](https://github.com/aymeric-roucher/agent_reasoning_benchmark) 中的代码)。 然后我们初始化我们的智能体: ```python from transformers.agents import ReactCodeAgent, HfEngine TASK_SOLVING_TOOLBOX = [ SearchTool(), VisualQATool(), SpeechToTextTool(), TextInspectorTool(), ] react_agent_hf = ReactCodeAgent( tools=TASK_SOLVING_TOOLBOX, llm_engine=HfEngine(model="meta-llama/Meta-Llama-3-70B-Instruct"), memory_verbose=True, ) ``` 在完成 165 个问题所需的一段时间后,我们提交了我们的结果到 [GAIA 排行榜](https://huggingface.co/spaces/gaia-benchmark/leaderboard),然后…… 🥁🥁🥁 <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/agents/leaderboard.png" alt="GAIA leaderboard" width=90%> </p> ⇒ 我们的智能体排名第四: 它超过了许多基于 GPT-4 的智能体,现在已成为开源类别中的领先竞争者! ## 总结 在接下来的几个月里,我们将继续改进这个包。我们已经在我们开发路线图中确定了几个令人兴奋的路径: - 更多的智能体共享选项: 目前你可以从 Hub 推送或加载工具,我们将实现推送/加载智能体。 - 更好的工具,特别是用于图像处理。 - 长期记忆管理。 - 多智能体协作。 👉 **去尝试一下 Transformers 智能体!** 我们期待着收到你的反馈和你的想法。 让我们一起用更多的开源模型刷排行榜登顶! 🚀
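附: 如果你想快速上手，下面是一个把普通 Python 函数封装成自定义工具并交给智能体调用的最小示例。仅为示意: 工具接口沿用上文 `RetrieverTool` / `SearchTool` 的写法，模型名称只是一个示例选择，且需要你能访问 Inference API:

```python
from transformers.agents import HfEngine, ReactCodeAgent, Tool

class WordLengthTool(Tool):
    name = "word_length"
    description = "Returns the number of characters in a single word."
    inputs = {
        "word": {
            "type": "text",
            "description": "The word to measure.",
        }
    }
    output_type = "text"

    def forward(self, word: str) -> str:
        # 工具的真正逻辑：这里只是统计字符数
        return str(len(word))

llm_engine = HfEngine("meta-llama/Meta-Llama-3-70B-Instruct")
agent = ReactCodeAgent(tools=[WordLengthTool()], llm_engine=llm_engine)

print(agent.run("How many characters are there in the word 'Strawberry'?"))
```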
1
0
hf_public_repos/blog
hf_public_repos/blog/zh/intel-sapphire-rapids-inference.md
--- title: "CPU 推理 | 使用英特尔 Sapphire Rapids 加速 PyTorch Transformers" thumbnail: /blog/assets/129_intel_sapphire_rapids_inference/01.png authors: - user: juliensimon translators: - user: MatrixYao - user: zhongdongy proofreader: true --- # CPU 推理 | 使用英特尔 Sapphire Rapids 加速 PyTorch Transformers 在 [最近的一篇文章](https://huggingface.co/blog/zh/intel-sapphire-rapids) 中,我们介绍了代号为 [Sapphire Rapids](https://en.wikipedia.org/wiki/Sapphire_Rapids) 的第四代英特尔至强 CPU 及其新的先进矩阵扩展 ([AMX](https://en.wikipedia.org/wiki/Advanced_Matrix_Extensions)) 指令集。通过使用 Amazon EC2 上的 Sapphire Rapids 服务器集群并结合相应的英特尔优化库,如 [英特尔 PyTorch 扩展](https://github.com/intel/intel-extension-for-pytorch) (IPEX),我们展示了如何使用 CPU 进行高效的分布式大规模训练,与上一代至强 (Ice Lake) 相比,Sapphire Rapids 实现了 8 倍的加速,取得了近线性的扩展比。 本文我们将重点关注推理。使用基于 PyTorch 的 Hugging Face transformers 模型,我们首先在 Ice Lake 服务器上分别测量它们在长、短两种文本序列上的性能。然后,我们在 Sapphire Rapids 服务器和最新版本的 Hugging Face Optimum Intel 上执行相同的测试,并比较两代 CPU 的性能。这里,[Optimum Intel](https://github.com/huggingface/optimum-intel) 是一个专用于英特尔平台的硬件加速开源库。 让我们开始吧! ## 为什么你应该考虑使用 CPU 推理 在决定使用 CPU 还是 GPU 进行深度学习推理时需要考虑多个因素。最重要的当然是模型的大小。一般来说,较大的模型能更多地受益于 GPU 提供的强大算力,而较小的模型可以在 CPU 上高效运行。 另一个需要考虑的因素是模型和推理任务本身的并行度。GPU 为大规模并行处理而设计,因此它们可能对那些可以高度并行化的任务更高效。而另一方面,如果模型或推理任务并没有特别高的并行度,CPU 可能是更有效的选择。 成本也是一个需要考虑的重要因素。GPU 可能很昂贵,而使用 CPU 可能是一种性价比更高的选择,尤其是在业务应用并不需要极低延迟的情况下。此外,如果你需要能够轻松扩缩推理实例的数量,或者如果你需要能够在各种平台上进行推理,使用 CPU 可能是更灵活的选择。 现在,让我们开始配置我们的测试服务器。 ## 配置我们的测试服务器 和上一篇文章一样,我们将使用 Amazon EC2 实例: * 一个基于 Ice Lake 架构 `c6i.16xlarge` 实例, * 一个基于 Sapphire Rapids 架构的 `r7iz.16xlarge-metal` 实例。你可以在 [AWS 网站](https://aws.amazon.com/ec2/instance-types/r7iz/)上获取有关新 r7iz 系列的更多信息。 两个实例都有 32 个物理核 (因此有 64 个 vCPU)。我们将用相同的方式来设置它们: * 基于 Linux 5.15.0 内核的 Ubuntu 22.04 (`ami-0574da719dca65348`), * PyTorch 1.13 与 IPEX (Intel Extension for PyTorch) 1.13, * Transformers 4.25.1. 唯一的区别是在 r7iz 实例上我们多装一个 Optimum Intel 库。 以下是设置步骤。像往常一样,我们建议使用虚拟环境来保证环境纯净。 ``` sudo apt-get update # Add libtcmalloc for extra performance sudo apt install libgoogle-perftools-dev -y export LD_PRELOAD="/usr/lib/x86_64-linux-gnu/libtcmalloc.so" sudo apt-get install python3-pip -y pip install pip --upgrade export PATH=/home/ubuntu/.local/bin:$PATH pip install virtualenv virtualenv inference_env source inference_env/bin/activate pip3 install torch==1.13.0 -f https://download.pytorch.org/whl/cpu pip3 install intel_extension_for_pytorch==1.13.0 -f https://developer.intel.com/ipex-whl-stable-cpu pip3 install transformers # Only needed on the r7iz instance pip3 install optimum[intel] ``` 在两个实例上完成上述步骤后,我们就可以开始运行测试了。 ## 对流行的 NLP 模型进行基准测试 在这个例子中,我们将在文本分类任务上对几个 NLP 模型进行基准测试: [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased), [bert-base-uncased](https://huggingface.co/bert-base-uncased), [roberta-base](https://huggingface.co/roberta-base) 和 roberta-base。你可以在 Github 上找到 [完整脚本](https://gist.github.com/juliensimon/7ae1c8d12e8a27516e1392a3c73ac1cc)。当然,你也可以用你自己的模型随意尝试! ``` models = ["distilbert-base-uncased", "bert-base-uncased", "roberta-base"] ``` 我们使用序列长度分别为 16 和 128 的两种句子来测试,同时我们也将在这两种句子上分别测量单句推理和批量推理的平均预测延迟和 p99 预测延迟。该测试方案模拟了真实场景,因此可以较好地近似在真实场景中的预期加速比。 ``` sentence_short = "This is a really nice pair of shoes, I am completely satisfied with my purchase" sentence_short_array = [sentence_short] * 8 sentence_long = "These Adidas Lite Racer shoes hit a nice sweet spot for comfort shoes. Despite being a little snug in the toe box, these are very comfortable to wear and provide nice support while wearing. 
I would stop short of saying they are good running shoes or cross-trainers because they simply lack the ankle and arch support most would desire in those type of shoes and the treads wear fairly quickly, but they are definitely comfortable. I actually walked around Disney World all day in these without issue if that is any reference. Bottom line, I use these as the shoes they are best; versatile, inexpensive, and comfortable, without expecting the performance of a high-end athletic sneaker or expecting the comfort of my favorite pair of slippers." sentence_long_array = [sentence_long] * 8 ``` 基准测试功能非常简单。在几次预热迭代后,我们使用 pipeline API 运行 1000 次预测,把预测时间存下来,并计算它们的均值和 p99 值。 ``` import time import numpy as np def benchmark(pipeline, data, iterations=1000): # Warmup for i in range(100): result = pipeline(data) times = [] for i in range(iterations): tick = time.time() result = pipeline(data) tock = time.time() times.append(tock - tick) return "{:.2f}".format(np.mean(times) * 1000), "{:.2f}".format( np.percentile(times, 99) * 1000 ) ``` 在 c6i (Ice Lake) 实例上,我们只使用普通的 Transformers pipeline。 ``` from transformers import pipeline for model in models: print(f"Benchmarking {model}") pipe = pipeline("sentiment-analysis", model=model) result = benchmark(pipe, sentence_short) print(f"Transformers pipeline, short sentence: {result}") result = benchmark(pipe, sentence_long) print(f"Transformers pipeline, long sentence: {result}") result = benchmark(pipe, sentence_short_array) print(f"Transformers pipeline, short sentence array: {result}") result = benchmark(pipe, sentence_long_array) print(f"Transformers pipeline, long sentence array: {result}") ``` 在 r7iz (Sapphire Rapids) 实例上,我们同时使用普通 pipeline 和 Optimum pipeline。在 Optimum pipeline 中,我们启用 `bfloat16` 模式以利用到 AMX 指令,并将 `jit` 设置为 `True` 以使用即时编译进一步优化模型。 ``` import torch from optimum.intel import inference_mode with inference_mode(pipe, dtype=torch.bfloat16, jit=True) as opt_pipe: result = benchmark(opt_pipe, sentence_short) print(f"Optimum pipeline, short sentence: {result}") result = benchmark(opt_pipe, sentence_long) print(f"Optimum pipeline, long sentence: {result}") result = benchmark(opt_pipe, sentence_short_array) print(f"Optimum pipeline, short sentence array: {result}") result = benchmark(opt_pipe, sentence_long_array) print(f"Optimum pipeline, long sentence array: {result}") ``` 为简洁起见,我们先看下 [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) 的 p99 结果。你可以在文章末尾找到所有测例的完整结果。 <kbd> <img src="../assets/129_intel_sapphire_rapids_inference/01.png"> </kbd> 如上图所示,与上一代至强 CPU 相比,Sapphire Rapids 上单个句子的预测延迟加速了 **60-65%**。也就是说,由于结合了英特尔 Sapphire Rapids 平台以及 Hugging Face Optimum 的优化,你只需对代码进行很少改动就可将预测速度提高 3 倍。 这让我们即使在长文本序列上也可以达到 **个位数的预测延迟**。在 Sapphire Rapids 之前,这样的性能只有通过 GPU 才能实现。 ## 结论 第四代英特尔至强 CPU 提供了出色的推理性能,尤其是在与 Hugging Face Optimum 结合使用时。这是深度学习在更易得和更具成本效益的道路上的又一个进步,我们期待与英特尔的朋友们在这条道路上继续合作。 以下是一些可帮助你入门的其他资源: * [Intel IPEX](https://github.com/intel/intel-extension-for-pytorch) GitHub 仓库 * [Hugging Face Optimum](https://github.com/huggingface/optimum) GitHub 仓库 如果你有任何问题或反馈,我们很乐意在 [Hugging Face 论坛](https://discuss.huggingface.co/) 上与你交流。 感谢阅读! ## 附录: 完整结果 <kbd> <img src="../assets/129_intel_sapphire_rapids_inference/02.png"> </kbd> *基准测试软件环境:Ubuntu 22.04 with libtcmalloc, Linux 5.15.0 patched for Intel AMX support, PyTorch 1.13 with Intel Extension for PyTorch, Transformers 4.25.1, Optimum 1.6.1, Optimum Intel 1.7.0.dev0*
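附: 如果想先确认你的实例是否真的暴露了 AMX 指令（`bfloat16` 的加速正是依赖它），可以在 Linux 上简单检查 `/proc/cpuinfo` 中的 flags。下面是一段示意代码（非原文内容，假设运行在 Linux 上）:

```python
# 检查 /proc/cpuinfo 是否包含 AMX 相关标志（amx_tile / amx_bf16 / amx_int8）
# 仅为示意：没有 AMX 时 bfloat16 推理通常仍可运行，只是得不到相应的加速
def has_amx_bf16(cpuinfo_path="/proc/cpuinfo"):
    try:
        with open(cpuinfo_path) as f:
            return "amx_bf16" in f.read()
    except OSError:
        return False

print("AMX bf16 support:", has_amx_bf16())
```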
2
0
hf_public_repos/blog
hf_public_repos/blog/zh/vision_language_pretraining.md
--- title: 深入了解视觉语言模型 thumbnail: /blog//assets/128_vision_language_pretraining/thumbnail.png authors: - user: adirik - user: sayakpaul translators: - user: MatrixYao --- # 深入了解视觉语言模型 人类学习本质上是多模态 (multi-modal) 的,因为联合利用多种感官有助于我们更好地理解和分析新信息。理所当然地,多模态学习的最新进展即是从这一人类学习过程的有效性中汲取灵感,创建可以利用图像、视频、文本、音频、肢体语言、面部表情和生理信号等各种模态信息来处理和链接信息的模型。 自 2021 年以来,我们看到大家对结合视觉和语言模态的模型 (也称为联合视觉语言模型) 的兴趣越来越浓,一个例子就是 [OpenAI 的 CLIP](https://openai.com/blog/clip/)。联合视觉语言模型在非常具有挑战性的任务中表现出了让人眼前一亮的能力,诸如图像标题生成、文本引导图像生成、文本引导图像操作以及视觉问答等。这个领域在不断发展,其零样本泛化能力也在不断改进,从而产生了各种实际应用。 本文,我们将介绍联合视觉语言模型,重点关注它们的训练方式。我们还将展示如何利用 🤗 Transformers 对该领域的最新进展进行实验。 ## 目录 1. [简介](#简介) 2. [学习策略](#学习策略) 1. [对比学习](#1-对比学习) 2. [PrefixLM](#2-prefixlm) 3. [基于交叉注意力的多模态融合](#3-多模态融合与交叉注意力) 4. [掩膜语言建模及图文匹配](#4-掩膜语言建模及图文匹配) 5. [无训练](#5-无训练) 3. [数据集](#数据集) 4. [在 🤗 Transformers 中支持视觉语言模型](#在-🤗-transformers-中支持视觉语言模型) 5. [新兴研究领域](#新兴研究领域) 6. [结论](#结论) ## 简介 将模型称为 “视觉语言” 模型是什么意思?一个结合了视觉和语言模态的模型?但这到底是什么意思呢? 有助于定义此类模型的一个特性是它们处理图像 (视觉) 和自然语言文本 (语言) 的能力。而这个过程体现在输入、输出以及要求这些模型执行的任务上。 以零样本图像分类任务为例。我们将传给模型如下一张图像和一些候选提示 (prompt),以获得与输入图像最匹配的提示。 <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/128_vision_language_pretraining/example1.png" alt="drawing"><br> <em>小动物图片 <a href=https://www.istockphoto.com/photos/dog-cat-love>出处</a></em> </p> 为了预测类似的东西,模型需要理解输入图像和文本提示。它将使用单独或融合的视觉和语言编码器来达到理解的目的。 输入和输出可以有多种形式。下面仅举几例: - 用自然语言文本来检索图像。 - 短语关联 (Phrase grounding),即在输入图像中检测出文本中提到的短语 (例如: 一个 **年轻人** 挥动 **球拍**)。 - 视觉问答,即在输入图像中找到自然语言问题的答案。 - 为给定图像生成标题。该任务还有一个形式就是条件文本生成,此时输入变成了两个,即自然语言提示和图像。 - 从包含图像和文本模态的社交媒体内容中检测仇恨言论。 ## 学习策略 视觉语言模型通常由 3 个关键元素组成: 图像编码器、文本编码器以及融合两个编码器的信息的策略。这些关键元素紧密耦合在一起,因为损失函数是围绕模型架构和学习策略设计的。虽然视觉语言模型研究算不上是一个新的研究领域,但此类模型的设计随着时间的变迁发生了巨大变化。早期的研究采用手工设计的图像描述子、预训练词向量或基于频率的 TF-IDF 特征,而最新的研究主要采用 [Transformer](https://arxiv.org/abs/1706.03762) 架构的图像和文本编码器来单独或联合学习图像和文本特征。我们使用战略性的预训练目标来训练这些模型,从而使之可用于各种下游任务。 在本节中,我们将讨论视觉语言模型的一些典型预训练目标和策略,这些模型已被证明有良好的迁移性能。我们还将讨论其他有趣的东西,它们要么特定于某些预训练目标,要么可以用作预训练的通用组件。 我们将在预训练目标中涵盖以下主题: - **对比学习**: 以对比方式将图像和文本对齐到联合特征空间 - **PrefixLM**: 通过将图像视作语言模型的前缀来联合学习图像和文本嵌入 - **基于交叉注意力的多模态融合**: 将视觉信息融合到具有交叉注意力机制的语言模型的各层中 - **MLM / ITM**: 使用掩码语言建模 (Masked-Language Modeling,MLM) 和图像文本匹配 (Image-Text Matching,ITM) 目标将图像的各部分与文本对齐 - **无训练**: 通过迭代优化来利用独立视觉和语言模型 请注意,本节并未详尽陈述所有方法,还有各种其他方法以及混合策略,例如 [Unified-IO](https://arxiv.org/abs/2206.08916)。如需更全面地了解多模态模型,请参阅 [此项工作](https://arxiv.org/abs/2210.09263)。 ### 1) 对比学习 <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/128_vision_language_pretraining/contrastive_learning.png" alt="Contrastive Learning"><br> <em>对比预训练和零样本图像分类有关内容参见 <a href=https://openai.com/blog/clip>这里</a></em> </p> 对比学习是视觉模型常用的预训练目标,也已被证明同时是视觉语言模型的高效预训练目标。近期的工作如 [CLIP](https://arxiv.org/abs/2103.00020)、[CLOOB](https://arxiv.org/abs/2110.11316)、[ALIGN](https://arxiv.org/abs/2102.05918) 和 [DeCLIP](https://arxiv.org/abs/2110.05208) 在 {图像,标题} 对组成的大型数据集上,通过使用对比损失函数联合训练文本编码器和图像编码器,从而桥接视觉和语言两个模态。对比学习旨在将输入图像和文本映射到相同的特征空间,使得图像 - 文本对的嵌入之间的距离在两者匹配时最小化,而在不匹配时最大化。 CLIP 仅采用文本和图像嵌入之间的余弦距离作为距离度量。而 ALIGN 和 DeCLIP 等模型则设计了自己的距离度量,这些距离在设计时考虑了数据集是有噪声的。 另一项工作 [LiT](https://arxiv.org/abs/2111.07991) 引入了一种冻结图像编码器而仅使用 CLIP 预训练目标来微调文本编码器的简单方法。作者将这个想法解释为 一种教文本编码器更好地读懂图像编码器生成的图像嵌入的方法。这种方法已被证明是有效的,并且比 CLIP 的样本效率更高。[FLAVA](https://arxiv.org/abs/2112.04482) 等其他工作将对比学习和其他预训练策略相结合来对齐视觉和语言嵌入。 ### 2) PrefixLM <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/128_vision_language_pretraining/prefixlm.png" 
alt="PrefixLM"><br> <em>展示 PrefixLM 预训练策略的图片 (<a ahref=https://ai.googleblog.com/2021/10/simvlm-simple-visual-language-model-pre.html>出处<a>)</em> </p> 另一种训练视觉语言模型的方法是使用 PrefixLM 目标。[SimVLM](https://arxiv.org/abs/2108.10904) 和 [VirTex](https://arxiv.org/abs/2006.06666v3) 等模型使用该预训练目标并使用一个统一的由 transformer 编码器和 transformer 解码器组成的多模态架构,有点类似于自回归语言模型。 让我们拆解一下,看看它是如何工作的。具有前缀目标的语言模型在给定输入文本作为前缀的情况下预测下一个词。例如,给定序列 “一个男人站在墙角”,我们可以使用” 一个男人站在” 作为前缀并训练模型以预测下一个词: 可以是 “墙角” 或另一个合理的补全词。 Visual transformers (ViT) 通过将每个图像划分为多个块 (patch) 并将这些块按顺序输入给模型,从而将相同的前缀概念应用于图像。利用这个想法,SimVLM 实现了这样一种架构,将图像块序列和前缀文本序列串接起来作为最终的前缀,输入给编码器,然后由解码器来预测该文本序列的接续文本。上图描述了该思想。SimVLM 模型首先在前缀中没有图像块的文本数据集上进行预训练,然后在对齐的图像文本数据集上进行预训练。这些模型用于图生文 / 图像标题生成和 VQA 任务。 利用统一的多模态架构将视觉信息融合到语言模型 (Language Model,LM) 中,最终生成的模型在图像引导类任务中显示出令人印象深刻的能力。然而,仅使用 PrefixLM 策略的模型在应用领域上可能会受到限制,因为它们主要为图像标题生成或视觉问答这两个下游任务而设计。例如,给定一组包含人的图像,我们通过图像的描述来查询符合描述的图像 (例如,“一群人站在一起微笑着站在建筑物前”) 或使用以下视觉推理问题来查询: “有多少人穿着红色 T 恤?” 图像。另一方面,学习多模态表示或采用混合方法的模型可以适用于各种其他下游任务,例如目标检测和图像分割。 #### 冻结 PrefixLM <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/128_vision_language_pretraining/frozen_prefixlm.png" alt="Frozen PrefixLM"><br> <em>冻结 PrefixLM 预训练策略 (<a href=https://lilianweng.github.io/posts/2022-06-09-vlm>图片出处</a>)</em> </p> 虽然将视觉信息融合到语言模型中非常有效,但能够使用预训练语言模型 (LM) 而无需微调会更有效。因此,视觉语言模型的另一个预训练目标是学习与冻结语言模型对齐的图像嵌入。 [Frozen](https://arxiv.org/abs/2106.13884)、[MAPL](https://arxiv.org/abs/2210.07179) 和 [ClipCap](https://arxiv.org/abs/2111.09734) 使用了冻结 PrefixLM 预训练目标。它们在训练时仅更新图像编码器的参数以生成图像嵌入,这些图像嵌入可以用作预训练的冻结语言模型的前缀,其方式与上面讨论的 PrefixLM 目标类似。Frozen 和 ClipCap 都在对齐的图像文本 (标题) 数据集上进行训练,目的是在给定图像嵌入和前缀文本的情况下生成标题中的下一个词。 最后,[Flamingo](https://arxiv.org/abs/2204.14198) 索性把预训练视觉编码器和语言模型都冻结了,并在一系列广泛的开放式视觉和语言任务上刷新了少样本学习的最高水平。Flamingo 通过在预训练的冻结视觉模型之上添加一个感知器重采样器 (Perceiver Resampler) 模块并在冻结的预训练 LM 层之间插入新的交叉注意层以根据视觉数据调节 LM 来达到这个性能。 冻结 PrefixLM 预训练目标的一个很好的优势是它可以使用有限的对齐图像文本数据进行训练,这对于那些没有对齐多模态数据集的领域特别有用。 ### 3) 多模态融合与交叉注意力 <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/128_vision_language_pretraining/cross_attention_fusing.png" alt="Cross Attention Fusing" width=500><br> <em> 使用交叉注意力机制将视觉信息直接融合到语言模型中 (<a href=https://www.semanticscholar.org/paper/VisualGPT%3A-Data-efficient-Adaptation-of-Pretrained-Chen-Guo/616e0ed02ca024a8c1d4b86167f7486ea92a13d9>图片出处</a>)</em> </p> 将预训练语言模型用于多模态任务的另一种方法是使用交叉注意机制将视觉信息直接融合到语言模型解码器的层中,而不是使用图像作为语言模型的附加前缀。[VisualGPT](https://arxiv.org/abs/2102.10407)、[VC-GPT](https://arxiv.org/abs/2201.12723) 和 [Flamingo](https://arxiv.org/abs/2204.14198) 使用此预训练策略并在图像标题任务和视觉问答任务上进行训练。此类模型的主要目标是在把视觉信息融入文本生成能力时在这两者间取得高效的平衡,这在没有大型多模态数据集的情况下非常重要。 VisualGPT 等模型使用视觉编码器来生成图像嵌入,并将视觉嵌入提供给预训练语言解码器模块的交叉注意层,以生成合理的标题。最近的一项工作 [FIBER](http://arxiv.org/abs/2206.07643) 将具有门控机制的交叉注意力层插入到视觉和语言的主干模型中,以实现更高效的多模态融合,并使能各种其他下游任务,如图文互搜、开放域 (open-vocabulary) 目标检测等。 ### 4) 掩膜语言建模及图文匹配 另一派视觉语言模型把掩码语言建模 (MLM) 和图文匹配 (ITM) 目标组合起来使用,将图像的特定部分与文本对齐,并使能各种下游任务,例如视觉问答、视觉常识推理、文搜图以及文本引导的目标检测。遵循这种预训练设置的模型包括 [VisualBERT](https://arxiv.org/abs/1908.03557)、[FLAVA](https://arxiv.org/abs/2112.04482)、[ViLBERT](https://arxiv.org/abs/1908.02265)、[LXMERT](https://arxiv.org/abs/1908.07490) 和 [BridgeTower](https://arxiv.org/abs/2206.08657)。 <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/128_vision_language_pretraining/mlm_itm.png" alt="MLM / ITM"><br> <em> 将图像与文本按部分相应对齐 (<a href=https://arxiv.org/abs/1908.02265>图片出处</a>)</em> </p> 让我们解释一下 MLM 和 ITM 目标。给定一个部分遮盖的标题,MLM 
的目标是根据相应的图像预测遮盖的单词。请注意,MLM 目标需要使用带有边界框的标注丰富的多模态数据集,或者使用目标检测模型为部分输入文本生成候选目标区域。 对于 ITM 目标,给定图像和标题对,任务是预测标题是否与图像匹配。负样本通常是从数据集中随机抽取的。MLM 和 ITM 目标通常在多模态模型的预训练期间结合使用。例如,VisualBERT 提出了一种类似 BERT 的架构,它使用预训练的目标检测模型 [Faster-RCNN](https://arxiv.org/abs/1506.01497) 来检测目标。VisualBERT 在预训练期间结合了 MLM 和 ITM 目标,通过自注意力机制隐式对齐输入文本的元素和相应输入图像中的区域。 另一项工作 FLAVA 由一个图像编码器、一个文本编码器和一个多模态编码器组成,用于融合和对齐图像和文本表示以进行多模态推理,所有这些都基于 transformers。为了实现这一点,FLAVA 使用了多种预训练目标: MLM、ITM,以及 掩膜图像建模 (Masked-Image Modeling,MIM) 和对比学习。 ### 5) 无训练 最后,各种优化策略旨在使用预训练的图像和文本模型来桥接图像和文本表示,或者使预训练的多模态模型能够在无需额外训练的情况下适应新的下游任务。 例如,[MaGiC](https://arxiv.org/abs/2205.02655) 提出通过预训练的自回归语言模型进行迭代优化,为输入图像生成标题。为此,MaGiC 使用生成的词的 CLIP 嵌入和输入图像的 CLIP 嵌入来计算基于 CLIP 的 “魔法分数 (magic score) ”。 <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/128_vision_language_pretraining/asif.png" alt="ASIF" width=500><br> <em> 用预训练的冻结的单模态图像和文本编码器创建一个相似性搜索空间 (<a href=https://luca.moschella.dev/publication/norelli-asif-2022>图片出处</a>)</em> </p> [ASIF](https://arxiv.org/abs/2210.01738) 提出了一种简单的方法,可以使用相对较小的多模态数据集将预训练的单模态图像和文本模型转换为多模态模型来用于图像标题生成,无需附加训练。ASIF 背后的关键直觉是相似图像的标题也彼此相似。因此,我们可以通过使用小型数据集里的真实多模态对的来构建一个相对表示空间,然后在该空间执行基于相似性的搜索。 ## 数据集 视觉语言模型通常根据预训练目标在结构各异的大型图像和文本数据集上进行训练。在对它们进行预训练后,再使用特定于任务的数据集进一步针对各种下游任务进行微调。本节概述了一些用于训练和评估视觉语言模型的流行的预训练和下游数据集。 ### 预训练数据集 一般来讲,我们从网上收集大量的多模态数据并将它们组织成图像 / 视频 - 文本对数据集。这些数据集中的文本数据可以是人工生成的标题、自动生成的标题、图像元数据或简单的目标类别标签。此类大型数据集有 [PMD](https://huggingface.co/datasets/facebook/pmd) 和 [LAION-5B](https://laion.ai/blog/laion-5b/) 等。PMD 数据集结合了多个较小的数据集,例如 [Flickr30K](https://www.kaggle.com/datasets/hsankesara/flickr-image-dataset)、[COCO](https://cocodataset.org/) 和 [Conceptual Captions](https://ai.google.com/research/ConceptualCaptions/) 数据集。COCO 检测和图像标题 (>330K 图像) 数据集分别由图像实例和其所含目标的文本标签及描述对组成。Conceptual Captions (> 3.3M images) 和 Flickr30K (> 31K images) 数据集中的图像以及它们的对应的用自然语言描述图像的标题都是从网上爬取的。 即使是那些人工生成标题的图像文本数据集 (例如 Flickr30K) 也存在固有的噪声,因为用户并不总是为其图像编写描述性或反应图像内容的标题。为了克服这个问题,LAION-5B 等数据集利用 CLIP 或其他预训练的多模态模型来过滤噪声数据并创建高质量的多模态数据集。此外,一些视觉语言模型,如 ALIGN,提出了进一步的预处理步骤并创建了自己的高质量数据集。还有些视觉语言数据集包含了视频和文本双模态,例如 [LSVTD](https://davar-lab.github.io/dataset/lsvtd.html) 和 [WebVid](https://github.com/m-bain/webvid) 数据集,虽然它们规模较小。 ### 下游数据集 预训练视觉语言模型通常还会针对各种下游任务进行训练,例如视觉问答、文本引导目标检测、文本引导图像修复、多模态分类以及各种独立的 NLP 和计算机视觉任务。 针对问答类下游任务进行微调的模型,例如 [ViLT](https://arxiv.org/abs/2102.03334) 和 [GLIP](https://arxiv.org/abs/2112.03857),一般使用 [VQA](https://visualqa.org/) (视觉问答) 、[VQA v2](https://visualqa.org/)、[NLVR2](https://lil.nlp.cornell.edu/nlvr/)、[OKVQA](https://okvqa.allenai.org/)、[TextVQA](https://huggingface.co/datasets/textvqa)、[TextCaps](https://textvqa.org/textcaps/) 和 [VizWiz](https://vizwiz.org/) 数据集。这些数据集的图像通常都配有多个开放式问题和答案。此外,VizWiz 和 TextCaps 等数据集也可用于图像分割和目标定位这些下游任务。其他一些有趣的多模态下游数据集有,用于多模态分类的 [Hateful Memes](https://huggingface.co/datasets/limjiayi/hateful_memes_expanded),用于视觉蕴含预测的 [SNLI-VE](https://github.com/necla-ml/SNLI-VE) for visual entailment prediction, and [Winoground](https://huggingface.co/datasets/facebook/winoground),以及用于视觉语言组合推理的 Winoground。 请注意,视觉语言模型也可用于各种经典的 NLP 和计算机视觉任务,例如文本或图像分类。此时,通常使用单模态数据集如 [SST2](https://huggingface.co/datasets/sst2)、[ImageNet-1k](https://huggingface.co/datasets/imagenet-1k) 来完成此类下游任务。此外,[COCO](https://cocodataset.org/) 和 [Conceptual Captions](https://ai.google.com/research/ConceptualCaptions/) 等数据集也常用于预训练模型以及标题生成等下游任务。 ## 在 🤗 Transformers 中支持视觉语言模型 使用 Hugging Face Transformers,你可以轻松下载、运行和微调各种预训练视觉语言模型,或者混合搭配预训练视觉模型和预训练语言模型来搭建你自己的模型。🤗 Transformers 支持的一些视觉语言模型有: * 
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip) * [FLAVA](https://huggingface.co/docs/transformers/main/en/model_doc/flava) * [GIT](https://huggingface.co/docs/transformers/main/en/model_doc/git) * [BridgeTower](https://huggingface.co/docs/transformers/main/en/model_doc/bridgetower) * [GroupViT](https://huggingface.co/docs/transformers/v4.25.1/en/model_doc/groupvit) * [BLIP](https://huggingface.co/docs/transformers/main/en/model_doc/blip) * [OWL-ViT](https://huggingface.co/docs/transformers/main/en/model_doc/owlvit) * [CLIPSeg](https://huggingface.co/docs/transformers/main/en/model_doc/clipseg) * [X-CLIP](https://huggingface.co/docs/transformers/main/en/model_doc/xclip) * [VisualBERT](https://huggingface.co/docs/transformers/main/en/model_doc/visual_bert) * [ViLT](https://huggingface.co/docs/transformers/main/en/model_doc/vilt) * [LiT](https://huggingface.co/docs/transformers/main/en/model_doc/vision-text-dual-encoder) (an instance of the `VisionTextDualEncoder`) * [TrOCR](https://huggingface.co/docs/transformers/main/en/model_doc/trocr) (an instance of the `VisionEncoderDecoderModel`) * [`VisionTextDualEncoder`](https://huggingface.co/docs/transformers/main/en/model_doc/vision-text-dual-encoder) * [`VisionEncoderDecoderModel`](https://huggingface.co/docs/transformers/main/en/model_doc/vision-encoder-decoder) 这里 CLIP、FLAVA、BridgeTower、BLIP、LiT 和 `VisionEncoderDecoder` 等模型会生成联合图像 - 文本嵌入,可用之于零样本图像分类等下游任务,而其他模型则针对有趣的下游任务进行训练。此外,FLAVA 是基于单模态和多模态两个预训练目标训练的,因此可用于单模态视觉或语言任务以及多模态任务。 例如,OWL-ViT [使能](https://huggingface.co/spaces/adirik/OWL-ViT) 了零样本 - 文本引导目标检测和单样本 - 图像引导目标检测任务,CLIPSeg 和 GroupViT [使能](https://huggingface.co/spaces/nielsr/CLIPSeg) 了文本和图像引导的图像分割任务,VisualBERT、GIT 和 ViLT [使能](https://huggingface.co/spaces/nielsr/vilt-vqa) 了视觉问答以及其他各种任务。X-CLIP 是一种使用视频和文本模态进行训练的多模态模型,它能够 [使能](https://huggingface.co/spaces/fcakyon/zero-shot-video-classification) 类似于 CLIP 的零样本图像分类的视频分类任务。 与其他模型不同,`VisionEncoderDecoderModel` 是一个标准化的模型,可用于初始化任意图像转文本模型,这类模型可以使用任何预训练的基于 Transformer 的视觉模型作为编码器 (例如 ViT、BEiT、DeiT、Swin) 以及任何预训练的语言模型作为解码器 (例如 RoBERTa、GPT2、BERT、DistilBERT)。事实上,TrOCR 是这个标准类的一个实例。 让我们继续试验其中的一些模型。我们将使用 [ViLT](https://huggingface.co/docs/transformers/model_doc/vilt) 进行视觉问答,使用 [CLIPSeg](https://huggingface.co/docs/transformers/model_doc/clipseg) 进行零样本图像分割。首先,我们要安装 🤗Transformers: pip install transformers。 ### 基于 ViLT 的 VQA 让我们从 ViLT 开始,下载一个在 VQA 数据集上预训练的模型。我们可以简单地初始化相应的模型类然后调用 `from_pretrained ()` 方法来下载想要的 checkpoint。 ```py from transformers import ViltProcessor, ViltForQuestionAnswering model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa") ``` 接下来,我们随便下载一张有两只猫的图像,并对该图像和我们的查询问题进行预处理,将它们转换为模型期望的输入格式。为此,我们可以方便地使用相应的预处理器类 (`ViltProcessor`) 并使用相应 checkpoint 的预处理配置对其进行初始化。 ```py import requests from PIL import Image processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa") # download an input image url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) text = "How many cats are there?" 
# prepare inputs inputs = processor(image, text, return_tensors="pt") ``` 最后,我们可以使用预处理后的图像和问题作为输入进行推理,并打印出预测答案。但是,要牢记的重要一点是确保你的文本输入与训练时所用的问题模板相似。你可以参考 [论文和数据集](https://arxiv.org/abs/2102.03334) 来了解如何生成这些问题。 ```py import torch # forward pass with torch.no_grad(): outputs = model(**inputs) logits = outputs.logits idx = logits.argmax(-1).item() print("Predicted answer:", model.config.id2label[idx]) ``` 直截了当,对吧?让我们用 CLIPSeg 做另一个演示,看看我们如何用几行代码执行零样本图像分割。 ### 使用 CLIPSeg 做零样本图像分割 我们将从初始化 `CLIPSegForImageSegmentation` 及其相应的预处理类开始,并加载我们的预训练模型。 ```py from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined") model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined") ``` 接下来,我们将使用相同的输入图像,并用描述待分割目标的文本来查询模型。与其他预处理器类似,`CLIPSegProcessor` 将输入转换为模型期望的格式。由于我们要分割多个目标,我们分别对每个描述文本都使用相同的输入图像。 ```py from PIL import Image import requests url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) texts = ["a cat", "a remote", "a blanket"] inputs = processor(text=texts, images=[image] * len(texts), padding=True, return_tensors="pt") ``` 与 ViLT 类似,重要的是要参考 [原作](https://arxiv.org/abs/2112.10003),看看他们用什么样的文本提示来训练模型,以便在推理时获得最佳性能。虽然 CLIPSeg 在简单的对象描述 (例如 “汽车”) 上进行训练的,但其 CLIP 主干是在设计好的文本模板 (例如 “汽车图像”、“汽车照片”) 上预训练的,并在随后的训练中冻结。输入经过预处理后,我们可以执行推理以获得每个文本查询的二值分割图。 ```py import torch with torch.no_grad(): outputs = model(**inputs) logits = outputs.logits print(logits.shape) >>> torch.Size([3, 352, 352]) ``` 让我们可视化一下结果,看看 CLIPSeg 的表现如何 (代码改编自 [这篇文章](https://huggingface.co/blog/clipseg-zero-shot))。 ```py import matplotlib.pyplot as plt logits = logits.unsqueeze(1) _, ax = plt.subplots(1, len(texts) + 1, figsize=(3*(len(texts) + 1), 12)) [a.axis('off') for a in ax.flatten()] ax[0].imshow(image) [ax[i+1].imshow(torch.sigmoid(logits[i][0])) for i in range(len(texts))]; [ax[i+1].text(0, -15, prompt) for i, prompt in enumerate(texts)] ``` <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/128_vision_language_pretraining/clipseg_result.png" alt="CLIPSeg 结果"> </p> 太棒了,不是吗? 
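顺带一提，前文介绍的对比学习模型（如 CLIP）同样只需几行代码就能完成零样本图像分类。下面是一段示意代码（非本文原有示例，复用上面下载的 `image`，checkpoint 以 `openai/clip-vit-base-patch32` 为例）:

```python
from transformers import CLIPProcessor, CLIPModel

clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

# 复用上文下载的 image，给出若干候选文本提示
candidate_labels = ["a photo of two cats", "a photo of a dog", "a photo of a car"]
inputs = clip_processor(text=candidate_labels, images=image, return_tensors="pt", padding=True)

outputs = clip_model(**inputs)
probs = outputs.logits_per_image.softmax(dim=1)  # 图像与每条文本的匹配概率
print(dict(zip(candidate_labels, probs[0].tolist())))
```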
视觉语言模型支持大量有用且有趣的用例,并不仅限于 VQA 和零样本分割。我们鼓励你尝试将本节中提到的模型用于不同的应用。有关示例代码,请参阅模型的相应文档。 ## 新兴研究领域 伴随着视觉语言模型的巨大进步,我们看到了新的下游任务和应用领域的出现,例如医学和机器人技术。例如,视觉语言模型越来越多地被用于医疗,产生了诸如 [Clinical-BERT](https://ojs.aaai.org/index.php/AAAI/article/view/20204) 之类的工作来根据放射照片来进行医学诊断和报告生成,以及 [MedFuseNet](https://www.nature.com/articles/s41598-021-98390-1) 来用于医学领域的视觉问答。 我们还看到大量将联合视觉语言表示应用于各种领域的工作,如用于图像处理 (例如,[StyleCLIP](https://arxiv.org/abs/2103.17249)、[StyleMC](https://arxiv.org/abs/2112.08493),[DiffusionCLIP](https://arxiv.org/abs/2110.02711))、基于文本的视频检索 (例如,[X-CLIP](https://arxiv.org/abs/2207.07285)) 、基于文本的操作 (例如,[Text2Live](https://arxiv.org/abs/2204.02491) 以及 基于文本的 3D 形状和纹理操作 (例如,[AvatarCLIP](https://arxiv.org/abs/2205.08535),[CLIP-NeRF](https://arxiv.org/abs/2112.05139), [Latent3D](https://arxiv.org/abs/2202.06079), [CLIPFace](https://arxiv.org/abs/2212.01406), [Text2Mesh](https://arxiv.org/abs/2112.03221))。在类似的工作中,[MVT](https://arxiv.org/abs/2204.02174) 提出了一种联合 3D 场景 - 文本表示模型,可用于各种下游任务,例如 3D 场景补全。 虽然机器人研究尚未大规模利用视觉语言模型,但我们看到 [CLIPort](https://arxiv.org/abs/2109.12098) 等工作利用联合视觉语言表示进行端到端模仿学习,并宣称比之前的 SOTA 有了很大的改进。我们还看到,大型语言模型越来越多地被用于机器人任务,例如常识推理、导航和任务规划。例如,[ProgPrompt](https://arxiv.org/abs/2209.11302) 提出了一个使用大语言模型 (Large Language Model,LLM) 生成情境机器人任务计划的框架。同样,[SayCan](https://say-can.github.io/assets/palm_saycan.pdf) 使用 LLM 根据给定的环境及环境中物体的视觉描述,选择最合理的动作。尽管这些进展令人印象深刻,但由于目标检测数据集的限制,机器人研究仍然局限在有限的环境和目标集中。随着 [OWL-ViT](https://arxiv.org/abs/2205.06230) 和 [GLIP](https://arxiv.org/abs/2112.03857) 等开放集目标检测模型的出现,我们可以期待多模态模型与机器人导航、推理、操作和任务规划框架的集成会更紧密。 ## 结论 近年来,多模态模型取得了令人难以置信的进步,视觉语言模型在性能、用例以及应用的多样性方面取得了显著的飞跃。在这篇文章中,我们讨论了视觉语言模型的最新进展,可用的多模态数据集以及我们可以使用哪些预训练策略来训练和微调此类模型。我们还展示了如何将这些模型集成到 🤗 Transformers 中,以及如何使用它们通过几行代码来执行各种任务。 我们将继续集成最具影响力的计算机视觉和多模态模型,并希望收到你的回音。要了解多模态研究的最新消息,欢迎在 Twitter 上关注我们: [@adirik](https://twitter.com/https://twitter.com/alaradirik), [@NielsRogge](https://twitter.com/NielsRogge), [@apsdehal](https://twitter.com/apsdehal), [@a_e_roberts](https://twitter.com/a_e_roberts), [@RisingSayak](https://mobile.twitter.com/a_e_roberts), 和 [@huggingface](https://twitter.com/huggingface). *致谢: 我们感谢 Amanpreet Singh 和 Amy Roberts 的严格审查。此外,还要感谢 Niels Rogge、Younes Belkada 和 Suraj Patil,以及 Hugging Face 的许多其他人,他们为促进基于 Transformers 的多模态模型的使用奠定了基础。*
3
0
hf_public_repos/blog
hf_public_repos/blog/zh/_tags.yml
- value: analysis label: 分析和解读 - value: community label: 社区动态 - value: guide label: 教程 - value: open-source-collab label: 开源协作 - value: partnerships label: 合作伙伴 - value: research label: 科研相关 - value: nlp label: NLP - value: audio label: Audio - value: cv label: CV - value: rl label: RL - value: ethics label: AI 伦理 - value: diffusion label: 扩散模型 - value: game-dev label: 游戏开发
4
0
hf_public_repos/blog
hf_public_repos/blog/zh/leaderboard-bigcodebench.md
--- title: "BigCodeBench: 继 HumanEval 之后的新一代代码生成基准测试" thumbnail: /blog/assets/leaderboards-on-the-hub/thumbnail_bigcode.png authors: - user: terryyz guest: true org: bigcode - user: ganler guest: true org: bigcode - user: SivilTaram guest: true org: bigcode - user: huybery guest: true org: bigcode - user: Muennighoff guest: true org: bigcode - user: dpfried guest: true org: bigcode - user: harmdevries guest: true org: bigcode - user: lvwerra org: bigcode - user: clefourrier translators: - user: terryyz --- # BigCodeBench: 继 HumanEval 之后的新一代代码生成基准测试 [HumanEval](https://github.com/openai/human-eval) 是一个用于评估大型语言模型 (LLMs) 在代码生成任务中的参考基准测试,因为它使得对紧凑的函数级代码片段的评估变得容易。然而,关于其在评估 LLMs 编程能力方面的有效性越来越多的担忧,主要问题是HumanEval 中的任务太简单,可能不能代表真实世界的编程任务。相比于 HumanEval 中的算法导向任务,真实世界的软件开发通常涉及多样的库和函数调用。此外,LLMs 在 HumanEval 上的表现还受[污染和过拟合问题](https://arxiv.org/abs/2403.07974)的影响,这使得其在评估LLMs的泛化能力方面不够可靠。 虽然已经有一些努力来解决这些问题,但它们要么是特定领域的、确定性的,要么是以大模型代理为中心的(抱歉, [DS-1000](https://github.com/HKUNLP/DS-1000)、[ODEX](https://github.com/zorazrw/odex) 和 [SWE-bench](https://github.com/princeton-nlp/SWE-bench) 💔)。我们觉得社区仍然缺乏一个可以广泛评估 LLMs 编程能力的易用基准测试,这正是我们关注的重点。 我们很高兴宣布 BigCodeBench 的发布,它可以在没有污染的情况下评估LLMs解决实际和具有挑战性的编程任务的能力。具体来说,BigCodeBench 包含1140个函数级任务,挑战 LLMs 遵循指令并将来自139个库的多个函数调用作为工具进行组合。为了严格评估 LLMs,每个编程任务包含5.6个测试用例,平均分支覆盖率为99%。 准备好深入了解 BigCodeBench 了吗?让我们开始吧!🚀 ## BigCodeBench 中的任务是什么样的?🕵️‍♂️ <img src="https://github.com/bigcode-bench/bigcode-bench.github.io/blob/main/asset/tease.svg?raw=true" alt="task" style="display: block; margin-left: auto; margin-right: auto;"> BigCodeBench 为每个任务提供了复杂的、面向用户的指令,包括清晰的功能描述、输入/输出格式、错误处理和已验证的交互示例。我们避免逐步的任务指令,相信有能力的 LLMs 应该能够从用户的角度以开放的方式理解和解决任务。我们通过测试用例验证特定功能。 ```python # 我们用一些测试用例来详细说明上述任务: # 设置需求 import unittest from unittest.mock import patch import http.client import ssl import socket # 开始测试 class TestCases(unittest.TestCase): # 模拟成功连接并评估响应内容 @patch('http.client.HTTPSConnection') def test_response_content(self, mock_conn): """ 测试响应内容。 """ mock_conn.return_value.getresponse.return_value.read.return_value = b'Expected Content' result = task_func('www.example.com', 443, '/content/path') self.assertEqual(result, 'Expected Content') # 模拟连接失败并评估错误处理 @patch('socket.create_connection') @patch('http.client.HTTPSConnection') def test_ssl_handshake_error_handling(self, mock_conn, mock_socket): """ 测试 SSL 握手错误的处理。 """ mock_socket.side_effect = ssl.SSLError('SSL handshake failed') with self.assertRaises(ssl.SSLError): task_func('badssl.com', 443, '/test/path') # 更多测试用例... 
``` BigCodeBench 中的任务利用了来自流行库的多样化函数调用。我们不限制 LLMs 可以使用的函数调用,期望它们选择适当的函数并灵活组合以解决任务。测试用例设计为测试框架,以在运行时检查预期的程序行为。 为了评估 LLMs 的表现,我们使用贪婪解码的 Pass@1,测量通过精心设计的测试用例生成的第一个代码片段正确解决任务的百分比。这个方法与[HumanEval](https://github.com/openai/human-eval)和[MBPP](https://github.com/google-research/google-research/tree/master/mbpp)等基准测试保持一致。我们通过在 Pass@1 评估期间添加缺失的设置(例如导入语句,全局常量)来解决 LLMs 跳过长代码提示的倾向,这被称为校准的 Pass@1。 <img src="https://github.com/bigcode-bench/bigcode-bench.github.io/blob/main/asset/depth-breadth.svg?raw=true" alt="comparison" style="display: block; margin-left: auto; margin-right: auto; width: 50%;"> 为了更好地理解实现的复杂性和工具使用的多样性,我们将 BigCodeBench 中的任务与代表性基准测试的任务进行了比较,包括[APPS](https://github.com/hendrycks/apps)、[DS-1000](https://github.com/HKUNLP/DS-1000)、[ODEX](https://github.com/zorazrw/odex)、[APIBench](https://github.com/ShishirPatil/gorilla/tree/main/data/apibench)、[MBPP](https://github.com/google-research/google-research/tree/master/mbpp)、[NumpyEval](https://github.com/microsoft/PyCodeGPT/tree/main/cert/pandas-numpy-eval)、[PandasEval](https://github.com/microsoft/PyCodeGPT/tree/main/cert/pandas-numpy-eval)、[HumanEval](https://github.com/openai/human-eval) 和 [TorchDataEval](https://github.com/microsoft/PyCodeGPT/tree/main/apicoder/private-eval)。我们发现 BigCodeBench 需要更复杂的推理和问题解决技能来实现全面的功能。 <img src="https://github.com/bigcode-bench/bigcode-bench.github.io/blob/main/asset/bigcodebench_prompt.svg?raw=true" alt="prompt" style="display: block; margin-left: auto; margin-right: auto; width: 70%;"> 如任务图所示,主要目标场景是代码完成(记为`BigCodeBench-Complete`),LLMs 需要根据文档字符串中的详细指令完成函数的实现。然而,考虑到下游应用程序如多轮对话,用户可能会以更对话化和不那么冗长的方式描述需求。这就是指令调整的 LLMs 有用的地方,因为它们经过训练可以遵循自然语言指令并相应地生成代码片段。为了测试模型是否真的能理解人类意图并将其转化为代码,我们创建了`BigCodeBench-Instruct`,这是 BigCodeBench 的一个更具挑战性的变体,旨在评估指令调整的LLMs。 ## 这些任务来自哪里?🤔 <img src="https://github.com/bigcode-bench/bigcode-bench.github.io/blob/main/asset/construct_pipeline.svg?raw=true" alt="png" style="display: block; margin-left: auto; margin-right: auto;"> 我们通过系统的“人类-LLM 协作过程”来保证 BigCodeBench 中任务的质量。我们以[ODEX](https://github.com/zorazrw/odex)作为“种子数据集”,其中包含了来自 Stack Overflow 的简短但现实的人工意图和相应的 Python 一行代码。我们使用 GPT-4 将这些一行代码扩展为全面的函数级任务。 接下来,20位拥有超过5年 Python 编程经验的志愿专家在基于执行的沙箱中指导 GPT-4。他们不断指示 GPT-4 完善生成的任务并添加测试用例。然后在本地环境中检查这些任务和测试用例,在其他 LLMs 上进行预评估,并由另外7位人类专家交叉检查以确保其质量。 为了确保整体质量,我们抽样了任务让11位人类专家解决,平均人类表现为97%。 ## LLMs 在 BigCodeBench 上的表现如何?📊 我们在[Hugging Face Space](https://huggingface.co/spaces/bigcode/bigcodebench-leaderboard)和[GitHub Pages](https://bigcode-bench.github.io/)上托管 BigCodeBench 排行榜。以下是 Hugging Face 排行榜的示例。 <script type="module" src="https://gradio.s3-us-west-2.amazonaws.com/4.36.1/gradio.js" ></script> <gradio-app theme_mode="light" space="bigcode/bigcodebench-leaderboard"></gradio-app> 有趣的是,我们观察到像 GPT-4 这样的指令调整LLMs在`BigCodeBench-Complete`的长提示中会省略必要的导入语句,导致由于缺少模块和常量而导致的任务失败。这种行为被称为“模型懒惰”,在[社区](https://community.openai.com/t/why-i-think-gpt-is-now-lazy/534332)中有讨论。 <u> 与人类表现相比,LLMs 在`BigCodeBench-Complete`上的表现显著低于人类表现,在`BigCodeBench-Instruct`上的表现甚至更低。</u>最佳模型(GPT-4o)在`BigCodeBench-Complete`上的校准 Pass@1 为61.1%,在`BigCodeBench-Instruct`上的校准Pass@1 为51.1%。此外,封闭式 LLMs 和开放式 LLMs 之间的表现差距显著。 虽然 Pass@1 是评估整体表现的好指标,但它不足以直接比较模型。受到[Chatbot Arena](https://lmsys.org/blog/2023-05-03-arena/)的启发,我们使用 Elo 评分来对`BigCodeBench-Complete`上的模型进行排名。该方法最初用于国际象棋,根据玩家的比赛表现进行排名。我们将其适应于编程任务,将每个任务视为一场比赛,每个模型视为一个玩家。Elo 评分更新基于比赛结果和预期,使用任务级校准 Pass@1(0%或100%),排除平局。我们从初始 Elo 评分1000开始,使用最大似然估计和500次自举来获得最终分数。<u>我们发现 GPT-4o 远远领先于其他模型,DeepSeekCoder-V2 位居第二梯队。</u> 为了帮助社区了解每个任务上的模型表现,我们跟踪解决率,通过校准 Pass@1 
测量。在`BigCodeBench-Complete`上,149个任务被所有模型解决,而6个任务被完全解决。在`BigCodeBench-Instruct`上,278个任务未被解决,14个任务被所有模型完全解决。大量未解决的任务和少量完全解决的任务表明,BigCodeBench 对 LLMs 来说是一个具有挑战性的基准测试。 ## 太好了!那么,我如何在 BigCodeBench 上评估我的模型?🛠️ 我们通过提供一个简单易用的评估框架,使 BigCodeBench 对社区易于访问,可以通过[PyPI](https://pydigger.com/pypi/bigcodebench)下载。评估框架的原型基于[EvalPlus](https://github.com/evalplus/evalplus)用于 HumanEval+ 和 MBPP+ 基准测试。然而,由于我们的基准任务比 EvalPlus 有更多样的库依赖性,我们构建了资源约束更少的执行环境,并适应于 BigCodeBench的`unittest` 测试框架。 为了便于评估,我们提供了预构建的 Docker 镜像用于[_代码生成_](https://hub.docker.com/r/bigcodebench/bigcodebench-generate)和[_代码执行_](https://hub.docker.com/r/bigcodebench/bigcodebench-evaluate)。请查看我们的[GitHub仓库](https://github.com/bigcode-project/bigcodebench),了解如何使用评估框架的更多细节。 ### 设置 ```bash # 安装以使用bigcodebench.evaluate pip install bigcodebench --upgrade # 如果你想在本地使用 evaluate,你需要安装要求 pip install -I -r https://raw.githubusercontent.com/bigcode-project/bigcodebench/main/Requirements/requirements-eval.txt # 安装以使用 bigcodebench.generate # 强烈建议在单独的环境中安装[generate]依赖 pip install bigcodebench[generate] --upgrade ``` ### 代码生成 建议使用`flash-attn`生成代码样本。 ```bash pip install -U flash-attn ``` 要从模型生成代码样本,可以使用以下命令: ```bash bigcodebench.generate \ --model [model_name] \ --subset [complete|instruct] \ --greedy \ --bs [bs] \ --temperature [temp] \ --n_samples [n_samples] \ --resume \ --backend [vllm|hf|openai|mistral|anthropic|google] \ --tp [gpu_number] \ [--trust_remote_code] \ [--base_url [base_url]] ``` 生成的代码样本将存储在名为`[model_name]--bigcodebench-[instruct|complete]--[backend]-[temp]-[n_samples].jsonl`的文件中。 ### 代码后处理 LLM 生成的文本可能不是可编译代码,因为它包含自然语言行或不完整的额外代码。 我们提供一个名为`bigcodebench.sanitize`的工具来清理代码: ```bash # 💡 如果你想在jsonl中存储校准代码: bigcodebench.sanitize --samples samples.jsonl --calibrate # 校准后的代码将生成到`samples-sanitized-calibrated.jsonl` # 💡 如果你不进行校准: bigcodebench.sanitize --samples samples.jsonl # 清理后的代码将生成到`samples-sanitized.jsonl` # 💡 如果你将代码存储在目录中: bigcodebench.sanitize --samples /path/to/vicuna-[??]b_temp_[??] # 清理后的代码将生成到`/path/to/vicuna-[??]b_temp_[??]-sanitized` ``` ### 代码评估 强烈建议使用沙箱如[docker](https://docs.docker.com/get-docker/): ```bash # 将当前目录挂载到容器 docker run -v $(pwd):/app bigcodebench/bigcodebench-evaluate:latest --subset [complete|instruct] --samples samples-sanitized-calibrated # ...或者本地⚠️ bigcodebench.evaluate --subset [complete|instruct] --samples samples-sanitized-calibrated # ...如果地面真值在本地工作(由于一些不稳定的测试) bigcodebench.evaluate --subset [complete|instruct] --samples samples-sanitized-calibrated --no-gt ``` ## 接下来是什么? 
我们分享一个长期路线图,以解决 BigCodeBench 的局限性,并与社区一起可持续发展。我们的目标是为社区提供最开放、最可靠和可扩展的评估,以真正了解 LLMs 在编程方面的基本能力,并找到释放其潜力的方法。具体来说,我们计划增强 BigCodeBench 的以下方面: - **多语言性**:目前,BigCodeBench 仅支持 Python,无法轻松扩展到其他编程语言。由于函数调用大多是特定于语言的,在 Python 以外的语言中找到具有相同功能的包或库是一个挑战。 - **严格性**:虽然我们在 BigCodeBench 的地面真值解决方案中实现了高测试覆盖率,但这并不能保证 LLMs 生成的所有代码解决方案都能正确评估现有的测试用例。以前的工作如 EvalPlus 尝试通过 LLM 和基于突变的策略扩展有限的测试用例。然而,将EvalPlus 适应于 BigCodeBench 的测试框架是一个挑战。尽管 EvalPlus 强调输入输出断言,BigCodeBench 中的大多数测试框架需要非平凡的配置(例如模拟修补)以在运行时检查预期的程序行为。 - **泛化性**:一个关键问题是,“模型在看不见的工具和任务上的泛化能力如何?”目前,BigCodeBench 涵盖了常见库和日常编程任务。在使用新兴库(如[transformers](https://github.com/huggingface/transformers)和[langchain](https://github.com/langchain-ai/langchain))的编程任务上对模型进行基准测试会更有趣。 - **演化**:库可能会变得过时或被更新,这意味着模型训练的数据会不断演变。模型可能不会记住过时库版本的函数调用,这对任何工具依赖的编程基准来说都是一个挑战,需要定期更新以正确评估模型能力。另一个相关问题是由于训练数据的演变导致的测试集污染。 - **交互**:最近的兴趣集中在 LLMs 作为代理的概念上,这被视为通向人工通用智能的途径。具体来说,LLMs 将在一个不受限制的沙箱环境中运行,在那里它们可以与网页浏览器和终端等应用程序进行交互。这种环境可以帮助解锁诸如[自我调试](https://arxiv.org/pdf/2304.05128)和[自我反思](https://arxiv.org/abs/2303.11366)等能力。 我们很期待看到社区的反馈和对长期建设 BigCodeBench 的贡献🤗 ## 资源 我们开源了 BigCodeBench 的所有工件,包括任务、测试用例、评估框架和排行榜。你可以在以下链接中找到它们: - [GitHub仓库](https://github.com/bigcode-project/bigcodebench) - [HF数据查看器](https://huggingface.co/spaces/bigcode/bigcodebench-viewer) - [HF数据集](https://huggingface.co/datasets/bigcode/bigcodebench) - [HF排行榜](https://huggingface.co/spaces/bigcode/bigcodebench-leaderboard) - [GitHub Pages排行榜](https://bigcode-bench.github.io/) 如果你有任何问题 或建议,请随时在仓库中提交问题或通过[[email protected]](mailto:[email protected])或[[email protected]](mailto:[email protected])联系我们。 ## 引用 如果你觉得我们的评估有用,请考虑引用我们的工作 ```bibtex @article{zhuo2024bigcodebench, title={BigCodeBench: Benchmarking Code Generation with Diverse Function Calls and Complex Instructions}, author={Zhuo, Terry Yue and Vu, Minh Chien and Chim, Jenny and Hu, Han and Yu, Wenhao and Widyasari, Ratnadira and Yusuf, Imam Nur Bani and Zhan, Haolan and He, Junda and Paul, Indraneil and others}, journal={arXiv preprint arXiv:2406.15877}, year={2024} } ```
5
0
hf_public_repos/blog
hf_public_repos/blog/zh/train-optimize-sd-intel.md
--- title: 基于 NNCF 和 🤗 Optimum 面向 Intel CPU 对 Stable Diffusion 优化 thumbnail: /blog/assets/train_optimize_sd_intel/thumbnail.png authors: - user: AlexKoff88 guest: true - user: MrOpenVINO guest: true - user: helenai guest: true - user: sayakpaul - user: echarlaix translators: - user: vermillion - user: zhongdongy proofreader: true --- # 基于 NNCF 和 🤗 Optimum 面向 Intel CPU 对 Stable Diffusion 优化 基于隐空间的扩散模型 (Latent Diffusion Model),是解决文本到图片生成问题上的颠覆者。Stable Diffusion 是最著名的一例,广泛应用在商业和工业。Stable Diffusion 的想法简单且有效: 从噪声向量开始,多次去噪,以使之在隐空间里逼近图片的表示。 但是,这样的方法不可避免地增加了推理时长,使客户端的体验大打折扣。众所周知,一个好的 GPU 总能有帮助,确实如此,但其损耗大大增加了。就推理而言,在 2023 年上半年 (H1’23),一个好 CPU 实例 ([r6i.2xlarge](https://aws.amazon.com/ec2/instance-types/r6g/),8 vCPUs ,64 GB 内存) 价格是 0.504 $/h,同时,类似地,一个好 GPU 实例 ([g4dn.2xlarge](https://aws.amazon.com/ec2/instance-types/g4/),NVIDIA T4,16 GB 内存) 价格是 0.75 $/h ,是前者的近 1.5 倍。 这就使图像生成的服务变得昂贵,无论持有者还是用户。该问题在面向用户端部署就更突出了: 可能没有 GPU 能用!这让 Stable Diffusion 的部署变成了棘手的问题。 在过去五年中,[OpenVINO](https://docs.openvino.ai/) 集成了许多高性能推理的特性。 其一开始为计算机视觉模型设计,现今仍在许多模型的推理性能上取得最佳表现,包括 [Stable Diffusion](https://huggingface.co/blog/zh/stable-diffusion-inference-intel)。然而,对资源有限型的应用,优化 Stable Diffusion 远不止运行时的。这也是 OpenVINO [NNCF](https://github.com/openvinotoolkit/nncf)(Neural Network Compression Framework) 发挥作用的地方。 在本博客中,我们将理清优化 Stable Diffusion 模型的问题,并提出对资源有限的硬件 (比如 CPU) 减负的流程。尤其是和 PyTorch 相比,我们速度提高了 5.1 倍,内存减少了 4 倍。 ## Stable Diffusion 的优化 在 Stable Diffusion 的 [管线](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/overview) 中,UNet 的运行是最计算昂贵的。因此,对模型的推理速度,针对 UNet 的优化能带来足够的效益。 然而事实表明,传统的模型优化方法如 8-bit 的后训练量化,对此不奏效。主要原因有两点: 其一,面向像素预测的模型,比如语义分割、超分辨率等,是模型优化上最复杂的,因为任务复杂,参数和结构的改变会导致无数种变数; 其二,模型的参数不是很冗余,因为其压缩了其数以千万计的 [数据集](https://laion.ai/blog/laion-5b/) 中的信息。这也是研究者不得不用更复杂的量化方法来保证模型优化后的精度。举例而言,高通 (Qualcomm) 用分层知识蒸馏 (layer-wise Knowledge Distillation) 方法 ([AdaRound](https://arxiv.org/abs/2004.10568)) 来 [量化](https://www.qualcomm.com/news/onq/2023/02/worlds-first-on-device-demonstration-of-stable-diffusion-on-android) Stable Diffusion。这意味着,无论如何,模型量化后的微调是必要的。既然如此,为何不用 量化感知的训练 ([Quantization-Aware Trainning, QAT](https://arxiv.org/abs/1712.05877)),其对原模型的微调和参数量化是同时进行的?因此,我们在本工作中,用 token 合并 ([Token Merging](https://arxiv.org/abs/2210.09461)) 方法结合 [NNCF](https://github.com/openvinotoolkit/nncf), [OpenVINO](https://www.intel.com/content/www/us/en/developer/tools/openvino-toolkit/overview.html) 和 [Diffusers](https://github.com/huggingface/diffusers) 实践了该想法。 ## 优化流程 我们通常从训练后的模型开始优化。在此,我们从宝可梦数据集 ([Pokemons dataset](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions),包含图片和对应的文本描述) 上微调的 [模型](https://huggingface.co/svjack/Stable-Diffusion-Pokemon-en)。 我们对 Stable Diffusion 用 Diffusers 中的 [图片 - 文本微调之例](https://huggingface.co/docs/diffusers/training/text2image),结合 NNCF 中的 QAT (参见训练的 [脚本](https://github.com/huggingface/optimum-intel/tree/main/examples/openvino/stable-diffusion))。我们同时改变了损失函数,以同时实现从源模型到部署模型的知识蒸馏。该方法与通常的知识蒸馏不同,后者是把源模型蒸馏到小些的模型。我们的方法主要将知识整理作为附加的方法,帮助提高最后优化的模型的精度。我们也用指数移动平均方法 (Exponential Moving Average, EMA) 让我们训练过程更稳定。我们仅对模型做 4096 次迭代。 基于一些技巧,比如梯度检查 (gradient checkpointing) 和 [保持 EMA 模型](https://github.com/huggingface/optimum-intel/blob/bbbe7ff0e81938802dbc1d234c3dcdf58ef56984/examples/openvino/stable-diffusion/train_text_to_image_qat.py#L941) 在内存 (RAM) 而不是虚拟内存 (VRAM) 中。整个优化过程能用一张 GPU 在一天内完成。 ## 量化感知的训练之外呢 ? 
量化模型本身就能带来模型消耗、加载、内存、推理速度上的显著提高。但量化模型蛮大的优势在能和其他模型优化方法一起,达到加速的增益效果。 最近,Facebook Research 针对视觉 Transformer 模型,提出了一个 [Token Merging](https://arxiv.org/abs/2210.09461) 方法。该方法的本质是用现有的方法 (取平均、取最大值等) 把冗余的 token 和重要的 token 融合。这在 self-attention 块之前完成,后者是 Transformer 模型最消耗算力的部分。因此,减小 token 的跨度能减少 self-attention 块消耗的时间。该方法也已被 Stable Diffusion 模型 [采用](https://arxiv.org/pdf/2303.17604.pdf),并在面向 GPU 的高分辨率优化上有可观的表现。 我们改进了 Token Merging 方法,以便用 OpenVINO,并在注意力 UNet 模型上采用 8-bit 量化。这包含了上述含知识蒸馏等的所有技术。对量化而言,其需要微调,以保证数值精度。我们也从 [宝可梦数据集](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions) 上训练的 [模型](https://huggingface.co/svjack/Stable-Diffusion-Pokemon-en) 开始优化和微调。下图体现了总体的优化工作流程。 ![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/train-optimize-sd-intel/overview.png) 结果的模型在有限资源的硬件上是高度有效的,如客户机或边缘 CPU。如上文所述,把 Token Merging 方法和量化方法叠加能带来额外的推理增益。 <div class="flex flex-row"> <div class="grid grid-cols-2 gap-4"> <figure> <img class="max-w-full rounded-xl border-2 border-solid border-gray-600" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/train-optimize-sd-intel/image_torch.png" alt="Image 1" /> <figcaption class="mt-2 text-center text-sm text-gray-500">PyTorch FP32,推理时长:230.5 秒,内存消耗:3.44 GB</figcaption> </figure> <figure> <img class="max-w-full rounded-xl border-2 border-solid border-gray-600" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/train-optimize-sd-intel/image_fp32.png" alt="Image 2" /> <figcaption class="mt-2 text-center text-sm text-gray-500">OpenVINO FP32,推理时长:120 秒 (<b>1.9 倍</b>),内存消耗:3.44 GB</figcaption> </figure> <figure> <img class="max-w-full rounded-xl border-2 border-solid border-gray-600" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/train-optimize-sd-intel/image_quantized.png" alt="Image 3" /> <figcaption class="mt-2 text-center text-sm text-gray-500">OpenVINO 8-bit,推理市场:59 秒 (<b>3.9 倍</b>),内存消耗:0.86 GB(<b>0.25 倍</b>)</figcaption> </figure> <figure> <img class="max-w-full rounded-xl border-2 border-solid border-gray-600" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/train-optimize-sd-intel/image_tome_quantized.png" alt="Image 4" /> <figcaption class="mt-2 text-center text-sm text-gray-500">ToMe + OpenVINO 8-bit, 推理速度:44.6 秒 (<b>5.1 倍</b>),内存消耗:0.86 GB (<b>0.25 倍</b>)</figcaption> </figure> </div> </div> 用不同模型优化方法的图片生成的结果 [展示](https://huggingface.co/spaces/AlexKoff88/stable_diffusion)。输入提示词为 “cartoon bird”,随机种子为 42。模型用 OpenVINO 2022.3,来自 [Hugging Face Space](https://huggingface.co/docs/hub/spaces-overview),用“CPU 升级”的实例: 第三代 Intel® Xeon® Scalable Processors,和 Intel® 深度学习加速技术。 ## 结果 我们用优化模型不完整的流程以得到两种模型: 基于 8-bit 量化的和基于 Token Merging 量化的,并和 PyTorch 作为基准比较。我们也把基准先转化成 vanilla OpenVINO (FP32) 的模型,以用以分析性比较。 上面的结果图展示了图像生成和部分模型的特性。如你所见,仅转化成 OpenVINO 就带来大的推理速度提高 ( **1.9** 倍)。用基于 8-bit 的量化加速和 PyTorch 相比带来了 **3.9** 倍的推理速度。量化的另外一个重要提高在于内存消耗减少,0.25 倍之于 PyTorch,同时也提高了加载速度。在量化之上应用 Token Merging (ToME) (**融合比为 0.4**) 带来了 **5.1 倍** 的提速,同时把模型内存消耗保持在原水平上。我们不提供输出结果上的质量改变,但如你所见,结果还是有质量的。 下面我们展示将最终优化结果部署在 Intel CPU 上代码。 ```python from optimum.intel.openvino import OVStableDiffusionPipeline # Load and compile the pipeline for performance. name = "OpenVINO/stable-diffusion-pokemons-tome-quantized-aggressive" pipe = OVStableDiffusionPipeline.from_pretrained(name, compile=False) pipe.reshape(batch_size=1, height=512, width=512, num_images_per_prompt=1) pipe.compile() # Generate an image. 
prompt = "a drawing of a green pokemon with red eyes" output = pipe(prompt, num_inference_steps=50, output_type="pil").images[0] output.save("image.png") ``` 在 Hugging Face [Optimum Intel](https://huggingface.co/docs/optimum/main/en/intel/index) 库中你可以找到训练和量化 [代码](https://github.com/huggingface/optimum-intel/tree/main/examples/openvino/stable-diffusion)。比较优化过的和原模型的 notebook 代码在 [这里](https://github.com/huggingface/optimum-intel/blob/main/notebooks/openvino/stable_diffusion_optimization.ipynb)。你可以在 Hugging Face Hub 上找到 [OpenVINO](https://huggingface.co/OpenVINO) 下的 [许多模型](https://huggingface.co/models?library=openvino&sort=downloads)。另外,我们在 Hugging Face Spaces 上建了一个 [demo](https://huggingface.co/spaces/AlexKoff88/stable_diffusion),以运行 [带第三代 Intel Xeon Scalable 的 r6id.2xlarge 实例]。 ## 一般的 Stable Diffusion 模型呢? 正如我们在宝可梦图像生成任务中展现的一样,仅用小量的训练资源,对 Stable Diffusion 管线实现高层次的优化是可能的。同时,众所周知,训练一般的 Stable Diffusion 模型是一个 [昂贵的任务](https://www.mosaicml.com/blog/training-stable-diffusion-from-scratch-part-2)。但是,有充足的资金和硬件资源,用上述方法优化一般的模型生成高分辨率的模型是可能的。我们唯一的警告是关于 Token Merging 方法,其会减弱模型容忍性。这里衡量标准是,训练数据越复杂,优化模型时的融合比就该越小。 如果你乐于读本博客,那你可能对另外一篇 [博客](https://huggingface.co/blog/zh/stable-diffusion-inference-intel) 感兴趣,它讨论了在第四代 Intel Xeon CPU 上其他互补的 Stable Diffusion 模型优化方法。
6
0
hf_public_repos/blog
hf_public_repos/blog/zh/rlhf.md
--- title: "ChatGPT 背后的“功臣”——RLHF 技术详解" thumbnail: /blog/assets/120_rlhf/thumbnail.png authors: - user: natolambert - user: LouisCastricato guest: true - user: lvwerra - user: Dahoas guest: true translators: - user: hell0w0r1d - user: inferjay proofreader: true --- # ChatGPT 背后的“功臣”——RLHF 技术详解 OpenAI 推出的 ChatGPT 对话模型掀起了新的 AI 热潮,它面对多种多样的问题对答如流,似乎已经打破了机器和人的边界。这一工作的背后是大型语言模型 (Large Language Model,LLM) 生成领域的新训练范式:RLHF (Reinforcement Learning from Human Feedback) ,即以强化学习方式依据人类反馈优化语言模型。 过去几年里各种 LLM 根据人类输入提示 (prompt) 生成多样化文本的能力令人印象深刻。然而,对生成结果的评估是主观和依赖上下文的,例如,我们希望模型生成一个有创意的故事、一段真实的信息性文本,或者是可执行的代码片段,这些结果难以用现有的基于规则的文本生成指标 (如 [BLEU](https://en.wikipedia.org/wiki/BLEU) 和 [ROUGE](https://en.wikipedia.org/wiki/ROUGE_(metric))) 来衡量。除了评估指标,现有的模型通常以预测下一个单词的方式和简单的损失函数 (如交叉熵) 来建模,没有显式地引入人的偏好和主观意见。 如果我们 **用生成文本的人工反馈作为性能衡量标准,或者更进一步用该反馈作为损失来优化模型**,那不是更好吗?这就是 RLHF 的思想:使用强化学习的方式直接优化带有人类反馈的语言模型。RLHF 使得在一般文本数据语料库上训练的语言模型能和复杂的人类价值观对齐。 看看 [ChatGPT](https://openai.com/blog/chatgpt/) 是如何解释 RLHF 的: <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/rlhf/chatgpt-explains.png" width="500" /> </p> ChatGPT 解释的很好,但还没有完全讲透;让我们更具体一点吧! # RLHF 技术分解 RLHF 是一项涉及多个模型和不同训练阶段的复杂概念,这里我们按三个步骤分解: 1. 预训练一个语言模型 (LM) ; 2. 聚合问答数据并训练一个奖励模型 (Reward Model,RM) ; 3. 用强化学习 (RL) 方式微调 LM。 ### Step 1. 预训练语言模型 首先,我们使用经典的预训练目标训练一个语言模型。对这一步的模型,OpenAI 在其第一个流行的 RLHF 模型 [InstructGPT](https://openai.com/blog/instruction-following/) 中使用了较小版本的 GPT-3; Anthropic 使用了 1000 万 ~ 520 亿参数的 Transformer 模型进行训练;DeepMind 使用了自家的 2800 亿参数模型 [Gopher](https://arxiv.org/abs/2112.11446)。 这里可以用额外的文本或者条件对这个 LM 进行微调,例如 OpenAI 对 “更可取” (preferable) 的人工生成文本进行了微调,而 Anthropic 按 “有用、诚实和无害” 的标准在上下文线索上蒸馏了原始的 LM。这里或许使用了昂贵的增强数据,但并不是 RLHF 必须的一步。由于 RLHF 还是一个尚待探索的领域,对于” 哪种模型” 适合作为 RLHF 的起点并没有明确的答案。 <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/rlhf/pretraining.png" width="500" /> </p> 接下来,我们会基于 LM 来生成训练 **奖励模型** (RM,也叫偏好模型) 的数据,并在这一步引入人类的偏好信息。 ### Step 2. 训练奖励模型 RM 的训练是 RLHF 区别于旧范式的开端。这一模型接收一系列文本并返回一个标量奖励,数值上对应人的偏好。我们可以用端到端的方式用 LM 建模,或者用模块化的系统建模 (比如对输出进行排名,再将排名转换为奖励) 。这一奖励数值将对后续无缝接入现有的 RL 算法至关重要。 关于模型选择方面,RM 可以是另一个经过微调的 LM,也可以是根据偏好数据从头开始训练的 LM。例如 Anthropic 提出了一种特殊的预训练方式,即用偏好模型预训练 (Preference Model Pretraining,PMP) 来替换一般预训练后的微调过程。因为前者被认为对样本数据的利用率更高。但对于哪种 RM 更好尚无定论。 关于训练文本方面,RM 的提示 - 生成对文本是从预定义数据集中采样生成的,并用初始的 LM 给这些提示生成文本。Anthropic 的数据主要是通过 Amazon Mechanical Turk 上的聊天工具生成的,并在 Hub 上 [可用](https://huggingface.co/datasets/Anthropic/hh-rlhf),而 OpenAI 使用了用户提交给 GPT API 的 prompt。 关于训练奖励数值方面,这里需要人工对 LM 生成的回答进行排名。起初我们可能会认为应该直接对文本标注分数来训练 RM,但是由于标注者的价值观不同导致这些分数未经过校准并且充满噪音。通过排名可以比较多个模型的输出并构建更好的规范数据集。 对具体的排名方式,一种成功的方式是对不同 LM 在相同提示下的输出进行比较,然后使用 [Elo](https://en.wikipedia.org/wiki/Elo_rating_system) 系统建立一个完整的排名。这些不同的排名结果将被归一化为用于训练的标量奖励值。 这个过程中一个有趣的产物是目前成功的 RLHF 系统使用了和生成模型具有 不同 大小的 LM (例如 OpenAI 使用了 175B 的 LM 和 6B 的 RM,Anthropic 使用的 LM 和 RM 从 10B 到 52B 大小不等,DeepMind 使用了 70B 的 Chinchilla 模型分别作为 LM 和 RM) 。一种直觉是,偏好模型和生成模型需要具有类似的能力来理解提供给它们的文本。 <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/rlhf/reward-model.png" width="600" /> </p> 接下来是最后一步:利用 RM 输出的奖励,用强化学习方式微调优化 LM。 ### Step 3. 
用强化学习微调 长期以来出于工程和算法原因,人们认为用强化学习训练 LM 是不可能的。而目前多个组织找到的可行方案是使用策略梯度强化学习 (Policy Gradient RL) 算法、近端策略优化 (Proximal Policy Optimization,PPO) 微调初始 LM 的部分或全部参数。因为微调整个 10B~100B+ 参数的成本过高 (相关工作参考低秩适应 [LoRA](https://arxiv.org/abs/2106.09685) 和 DeepMind 的 [Sparrow](https://arxiv.org/abs/2209.14375) LM) 。PPO 算法已经存在了相对较长的时间,有大量关于其原理的指南,因而成为 RLHF 中的有利选择。 事实证明,RLHF 的许多核心 RL 进步一直在弄清楚如何将熟悉的 RL 算法应用到更新如此大的模型。 让我们首先将微调任务表述为 RL 问题。首先,该 **策略** (policy) 是一个接受提示并返回一系列文本 (或文本的概率分布) 的 LM。这个策略的 **行动空间** (action space) 是 LM 的词表对应的所有词元 (一般在 50k 数量级) ,**观察空间** (observation space) 是可能的输入词元序列,也比较大 (词汇量 ^ 输入标记的数量) 。**奖励函数** 是偏好模型和策略转变约束 (Policy shift constraint) 的结合。 PPO 算法确定的奖励函数具体计算如下:将提示 *x* 输入初始 LM 和当前微调的 LM,分别得到了输出文本 *y1*, *y2*,将来自当前策略的文本传递给 RM 得到一个标量的奖励 \\( r_\theta \\)。将两个模型的生成文本进行比较计算差异的惩罚项,在来自 OpenAI、Anthropic 和 DeepMind 的多篇论文中设计为输出词分布序列之间的 Kullback–Leibler [(KL) divergence](https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence) 散度的缩放,即 \\( r = r_\theta - \lambda r_\text{KL} \\) 。这一项被用于惩罚 RL 策略在每个训练批次中生成大幅偏离初始模型,以确保模型输出合理连贯的文本。如果去掉这一惩罚项可能导致模型在优化中生成乱码文本来愚弄奖励模型提供高奖励值。此外,OpenAI 在 InstructGPT 上实验了在 PPO 添加新的预训练梯度,可以预见到奖励函数的公式会随着 RLHF 研究的进展而继续进化。 最后根据 PPO 算法,我们按当前批次数据的奖励指标进行优化 (来自 PPO 算法 on-policy 的特性) 。PPO 算法是一种信赖域优化 (Trust Region Optimization,TRO) 算法,它使用梯度约束确保更新步骤不会破坏学习过程的稳定性。DeepMind 对 Gopher 使用了类似的奖励设置,但是使用 A2C ([synchronous advantage actor-critic](http://proceedings.mlr.press/v48/mniha16.html?ref=https://githubhelp.com)) 算法来优化梯度。 <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/rlhf/rlhf.png" width="650" /> </p> 作为一个可选项,RLHF 可以通过迭代 RM 和策略共同优化。随着策略模型更新,用户可以继续将输出和早期的输出进行合并排名。Anthropic 在他们的论文中讨论了 [迭代在线 RLHF](https://arxiv.org/abs/2204.05862),其中策略的迭代包含在跨模型的 Elo 排名系统中。这样引入策略和 RM 演变的复杂动态,代表了一个复杂和开放的研究问题。 # Open-source tools for RLHF Today, there are already a few active repositories for RLHF in PyTorch that grew out of this. The primary repositories are Transformers Reinforcement Learning ([TRL](https://github.com/lvwerra/trl)), [TRLX](https://github.com/CarperAI/trlx) which originated as a fork of TRL, and Reinforcement Learning for Language models ([RL4LMs](https://github.com/allenai/RL4LMs)). TRL is designed to fine-tune pretrained LMs in the Hugging Face ecosystem with PPO. TRLX is an expanded fork of TRL built by [CarperAI](https://carper.ai/) to handle larger models for online and offline training. At the moment, TRLX has an API capable of production-ready RLHF with PPO and Implicit Language Q-Learning [ILQL](https://sea-snell.github.io/ILQL_site/) at the scales required for LLM deployment (e.g. 33 billion parameters). Future versions of TRLX will allow for language models up to 200B parameters. As such, interfacing with TRLX is optimized for machine learning engineers with experience at this scale. [RL4LMs](https://github.com/allenai/RL4LMs) offers building blocks for fine-tuning and evaluating LLMs with a wide variety of RL algorithms (PPO, NLPO, A2C and TRPO), reward functions and metrics. Moreover, the library is easily customizable, which allows training of any encoder-decoder or encoder transformer-based LM on any arbitrary user-specified reward function. Notably, it is well-tested and benchmarked on a broad range of tasks in [recent work](https://arxiv.org/abs/2210.01241) amounting up to 2000 experiments highlighting several practical insights on data budget comparison (expert demonstrations vs. reward modeling), handling reward hacking and training instabilities, etc. 
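To make the training loop behind these libraries more concrete, here is a minimal, illustrative sketch of a single PPO optimization step with TRL, before we look at where the libraries are heading. It follows TRL's quickstart from around the time of writing; names such as `respond_to_batch` and the exact `PPOConfig`/`PPOTrainer` signatures are version-dependent, so treat this as a sketch under those assumptions rather than a reference implementation.

```python
import torch
from transformers import AutoTokenizer
from trl import AutoModelForCausalLMWithValueHead, PPOConfig, PPOTrainer
from trl.core import respond_to_batch

# A small model keeps the sketch runnable; real RLHF setups use much larger LMs.
model = AutoModelForCausalLMWithValueHead.from_pretrained("gpt2")
ref_model = AutoModelForCausalLMWithValueHead.from_pretrained("gpt2")  # frozen copy used for the KL penalty
tokenizer = AutoTokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token

ppo_trainer = PPOTrainer(PPOConfig(batch_size=1, mini_batch_size=1), model, ref_model, tokenizer)

# One prompt and one sampled response from the current policy
query_tensor = tokenizer.encode("This morning I went to the ", return_tensors="pt")
response_tensor = respond_to_batch(model, query_tensor)

# In a real system this scalar comes from the reward model; the KL term is handled inside the trainer.
reward = [torch.tensor(1.0)]
train_stats = ppo_trainer.step([query_tensor[0]], [response_tensor[0]], reward)
```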
RL4LMs current plans include distributed training of larger models and new RL algorithms. Both TRLX and RL4LMs are under heavy further development, so expect more features beyond these soon. There is a large [dataset](https://huggingface.co/datasets/Anthropic/hh-rlhf) created by Anthropic available on the Hub. # RLHF 的未来 尽管 RLHF 取得了一定的成果和关注,但依然存在局限。这些模型依然会毫无不确定性地输出有害或者不真实的文本。这种不完美也是 RLHF 的长期挑战和动力 —— 在人类的固有领域中运行意味着永远不会到达一个完美的标准。 收集人类偏好数据的质量和数量决定了 RLHF 系统性能的上限。RLHF 系统需要两种人类偏好数据:人工生成的文本和对模型输出的偏好标签。生成高质量回答需要雇佣兼职人员 (而不能依赖产品用户和众包) 。另一方面,训练 RM 需要的奖励标签规模大概是 50k 左右,所以并不那么昂贵 (当然远超了学术实验室的预算) 。目前相关的数据集只有一个基于通用 LM 的 RLHF 数据集 (来自 [Anthropic](https://huggingface.co/datasets/Anthropic/hh-rlhf) 和几个较小的子任务数据集 (如来自 [OpenAI](https://github.com/openai/summarize-from-feedback) 的摘要数据集) 。另一个挑战来自标注者的偏见。几个人类标注者可能有不同意见,导致了训练数据存在一些潜在差异。 除开数据方面的限制,一些有待开发的设计选项可以让 RLHF 取得长足进步。例如对 RL 优化器的改进方面,PPO 是一种较旧的算法,但目前没有什么结构性原因让其他算法可以在现有 RLHF 工作中更具有优势。另外,微调 LM 策略的一大成本是策略生成的文本都需要在 RM 上进行评估,通过离线 RL 优化策略可以节约这些大模型 RM 的预测成本。最近,出现了新的 RL 算法如隐式语言 Q 学习 (Implicit Language Q-Learning,[ILQL](https://sea-snell.github.io/ILQL_site/)) 也适用于当前 RL 的优化。在 RL 训练过程的其他核心权衡,例如探索和开发 (exploration-exploitation) 的平衡也有待尝试和记录。探索这些方向至少能加深我们对 RLHF 的理解,更进一步提升系统的表现。 ### 参考资料 首先介绍一些相关的开源工作: 关于 [RLHF 的第一个项目](https://github.com/openai/lm-human-preferences),来自 OpenAI, 一些 PyTorch 的 repo: * [trl](https://github.com/lvwerra/trl) * [trlx](https://github.com/CarperAI/trlx) * [RL4LMs](https://github.com/allenai/RL4LMs) 此外,Huggingface Hub 上有一个由 Anthropic 创建的大型 [数据集](https://hf.co/datasets/Anthropic/hh-rlhf)。 相关论文包括在现有 LM 前的 RLHF 进展和基于当前 LM 的 RLHF 工作: - [TAMER: Training an Agent Manually via Evaluative Reinforcement](https://www.cs.utexas.edu/~pstone/Papers/bib2html-links/ICDL08-knox.pdf) (Knox and Stone 2008) - [Interactive Learning from Policy-Dependent Human Feedback](http://proceedings.mlr.press/v70/macglashan17a/macglashan17a.pdf) (MacGlashan et al. 2017) - [Deep Reinforcement Learning from Human Preferences](https://proceedings.neurips.cc/paper/2017/hash/d5e2c0adad503c91f91df240d0cd4e49-Abstract.html) (Christiano et al. 2017) - [Deep TAMER: Interactive Agent Shaping in High-Dimensional State Spaces](https://ojs.aaai.org/index.php/AAAI/article/view/11485) - [Fine-Tuning Language Models from Human Preferences](https://arxiv.org/abs/1909.08593) (Zieglar et al. 2019) - [Learning to summarize with human feedback](https://proceedings.neurips.cc/paper/2020/hash/1f89885d556929e98d3ef9b86448f951-Abstract.html) (Stiennon et al., 2020) - [Recursively Summarizing Books with Human Feedback](https://arxiv.org/abs/2109.10862) (OpenAI Alignment Team 2021) - [WebGPT: Browser-assisted question-answering with human feedback](https://arxiv.org/abs/2112.09332) (OpenAI, 2021) - InstructGPT: [Training language models to follow instructions with human feedback](https://arxiv.org/abs/2203.02155) (OpenAI Alignment Team 2022) - [InstructGPT: Training language models to follow instructions with human feedback (OpenAI Alignment Team 2022)](https://openai.com/blog/instruction-following/) - GopherCite: [Teaching language models to support answers with verified quotes](https://www.deepmind.com/publications/gophercite-teaching-language-models-to-support-answers-with-verified-quotes) (Menick et al. 2022) - Sparrow: [Improving alignment of dialogue agents via targeted human judgements](https://arxiv.org/abs/2209.14375) (Glaese et al. 
2022) - [ChatGPT: Optimizing Language Models for Dialogue](https://openai.com/blog/chatgpt/) (OpenAI 2022) - [Scaling Laws for Reward Model Overoptimization](https://arxiv.org/abs/2210.10760) (Gao et al. 2022) - [Training a Helpful and Harmless Assistant with Reinforcement Learning from Human Feedback](https://arxiv.org/abs/2204.05862) (Anthropic, 2022) - [Red Teaming Language Models to Reduce Harms: Methods, Scaling Behaviors, and Lessons Learned](https://arxiv.org/abs/2209.07858) (Ganguli et al. 2022) - [Dynamic Planning in Open-Ended Dialogue using Reinforcement Learning](https://arxiv.org/abs/2208.02294) (Cohen at al. 2022) - [Is Reinforcement Learning (Not) for Natural Language Processing?: Benchmarks, Baselines, and Building Blocks for Natural Language Policy Optimization](https://arxiv.org/abs/2210.01241) (Ramamurthy and Ammanabrolu et al. 2022) - [Kojima et al. 2021](https://arxiv.org/abs/2108.04812) - [Suhr and Artzi 2022](https://arxiv.org/abs/2212.09710) - [Sokolov et al. 2016](https://arxiv.org/abs/1601.04468), [Gao et al. 2022](https://arxiv.org/abs/2203.10079) * [Ranzato et al. 2015](https://arxiv.org/abs/1511.06732) * [Bahdanau et al. 2016](https://arxiv.org/abs/1607.07086) * [Nguyen et al. 2017](https://arxiv.org/abs/1707.07402) ## Citation If you found this useful for your academic work, please consider citing our work, in text: ``` Lambert, et al., "Illustrating Reinforcement Learning from Human Feedback (RLHF)", Hugging Face Blog, 2022. ``` BibTeX citation: ``` @article{lambert2022illustrating, author = {Lambert, Nathan and Castricato, Louis and von Werra, Leandro and Havrilla, Alex}, title = {Illustrating Reinforcement Learning from Human Feedback (RLHF)}, journal = {Hugging Face Blog}, year = {2022}, note = {https://huggingface.co/blog/rlhf}, } ``` *Thanks to [Robert Kirk](https://robertkirk.github.io/) for fixing some factual errors regarding specific implementations of RLHF. Thanks to [Peter Stone](https://www.cs.utexas.edu/~pstone/), [Khanh X. Nguyen](https://machineslearner.com/) and [Yoav Artzi](https://yoavartzi.com/) for helping expand the related works further into history. * *Thanks to Stas Bekman for fixing some typos or confusing phrases.*
7
0
hf_public_repos/blog
hf_public_repos/blog/zh/controlnet.md
--- title: "使用 🧨 Diffusers 实现 ControlNet 高速推理" thumbnail: /blog/assets/controlnet/thumbnail.png authors: - user: sayakpaul - user: yiyixu - user: patrickvonplaten translators: - user: SuSung-boy --- # 使用 🧨 Diffusers 实现 ControlNet 高速推理 <a target="_blank" href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/controlnet.ipynb"> <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/> </a> 自从 Stable Diffusion 风靡全球以来,人们一直在寻求如何更好地控制生成过程的方法。ControlNet 提供了一个简单的迁移学习方法,能够允许用户在很大程度上自定义生成过程。通过 [ControlNet](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/controlnet),用户可以轻松地使用多种空间语义条件信息 (例如深度图、分割图、涂鸦图、关键点等) 来控制生成过程。 具体来说,我们可以将卡通绘图转化为逼真的照片,同时保持极佳的布局连贯性。 <table> <tr style="text-align: center;"> <th>Realistic Lofi Girl</th> </tr> <tr> <td><img class="mx-auto" src="https://huggingface.co/datasets/YiYiXu/controlnet-testing/resolve/main/lofi.jpg" width=300 /></td> </tr> </table> 进行室内设计。 <table> <tr style="text-align: center;"> <th>Before</th> <th>After</th> </tr> <tr> <td><img class="mx-auto" src="https://huggingface.co/datasets/YiYiXu/controlnet-testing/resolve/main/house_depth.png" width=300/></td> <td><img class="mx-auto" src="https://huggingface.co/datasets/YiYiXu/controlnet-testing/resolve/main/house_after.jpeg" width=300/></td> </tr> </table> 将涂鸦草图变成艺术作品。 <table> <tr style="text-align: center;"> <th>Before</th> <th>After</th> </tr> <tr> <td><img class="mx-auto" src="https://huggingface.co/datasets/YiYiXu/controlnet-testing/resolve/main/drawing_before.png" width=300/></td> <td><img class="mx-auto" src="https://huggingface.co/datasets/YiYiXu/controlnet-testing/resolve/main/drawing_after.jpeg" width=300/></td> </tr> </table> 甚至拟人化著名的 logo 形象。 <table> <tr style="text-align: center;"> <th>Before</th> <th>After</th> </tr> <tr> <td><img class="mx-auto" src="https://huggingface.co/datasets/YiYiXu/controlnet-testing/resolve/main/starbucks_logo.jpeg" width=300/></td> <td><img class="mx-auto" src="https://huggingface.co/datasets/YiYiXu/controlnet-testing/resolve/main/starbucks_after.png" width=300/></td> </tr> </table> ControlNet,使一切皆有可能 🌠 本文的主要内容: - 介绍 [`StableDiffusionControlNetPipeline`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/controlnet) - 展示多种控制条件样例 让我们开启控制之旅! ## ControlNet 简述 ControlNet 在 [Adding Conditional Control to Text-to-Image Diffusion Models](https://arxiv.org/abs/2302.05543) 一文中提被出,作者是 Lvmin Zhang 和 Maneesh Agrawala。它引入了一个框架,支持在扩散模型 (如 Stable Diffusion) 上附加额外的多种空间语义条件来控制生成过程。Diffusers 实现从原始 [源代码](https://github.com/lllyasviel/ControlNet/) 演化而来。 训练 ControlNet 包括以下步骤: 1. 克隆扩散模型的预训练参数 (文中称为 可训练副本, trainable copy。如 Stable Diffusion 的 latent UNet 部分),同时保留原本的预训练参数 (文中称为 锁定副本, locked copy)。这样可以实现: a) 让锁定副本保留从大型数据集中学到的丰富知识;b) 让可训练副本学习特定任务的知识。 2. 
可训练副本和锁定副本的参数通过 “零卷积” 层 (详见 [此处](https://github.com/lllyasviel/ControlNet#controlnet) for more information)) 连接。“零卷积” 层是 ControlNet 框架的一部分,会在特定任务中优化参数。这是一种训练技巧,可以在新任务条件训练时保留已冻结模型已经学到的语义信息。 训练 ControlNet 的过程如图所示: <p align="center"> <img src="https://github.com/lllyasviel/ControlNet/raw/main/github_page/sd.png" alt="controlnet-structure"><br> <em>The diagram is taken from <a href=https://github.com/lllyasviel/ControlNet/blob/main/github_page/sd.png>here</a>.</em> </p> ControlNet 训练集中的其中一种样例如下 (额外的控制条件是 Canny 边缘图): <table> <tr style="text-align: center;"> <th>Prompt</th> <th>Original Image</th> <th>Conditioning</th> </tr> <tr style="text-align: center;"> <td style="vertical-align: middle">"bird"</td> <td><img class="mx-auto" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/controlnet/original_bird.png" width=200/></td> <td><img class="mx-auto" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/controlnet/canny_map.png" width=200/></td> </tr> </table> 同样地,如果我们使用的额外控制条件是语义分割图,那么 ControlNet 训练集的样例就是这样: <table> <tr style="text-align: center;"> <th>Prompt</th> <th>Original Image</th> <th>Conditioning</th> </tr> <tr style="text-align: center;"> <td style="vertical-align: middle">"big house"</td> <td><img class="mx-auto" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/controlnet/original_house.png" width=300/></td> <td><img class="mx-auto" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/controlnet/segmentation_map.png" width=300/></td> </tr> </table> 每对 ControlNet 施加一种额外的控制条件,都需要训练一份新的可训练副本参数。论文中提出了 8 种不同的控制条件,对应的控制模型在 Diffusers 中 [均已支持](https://huggingface.co/lllyasviel?search=controlnet)! 推理阶段需要同时使用扩散模型的预训练权重以及训练过的 ControlNet 权重。如要使用 Stable Diffusion v1-5 以及其 ControlNet 权重推理,其参数量要比仅使用 [Stable Diffusion v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5) 多大约 7 亿个,因此推理 ControlNet 需要消耗更多的内存。 由于在训练过程中扩散模型预训练参数为锁定副本,因此在使用不同的控制条件训练时,只需要切换 ControlNet 可训练副本的参数即可。这样在一个应用程序中部署多个 ControlNet 权重就非常简单了,本文会在后面详细介绍。 ## `StableDiffusionControlNetPipeline` 在开始之前,我们要向社区贡献者 [Takuma Mori](https://github.com/takuma104) 表示巨大的感谢。将 ControlNet 集成到 Diffusers 中,他功不可没 ❤️。 类似 Diffusers 中的 [其他 Pipeline](https://huggingface.co/docs/diffusers/api/pipelines/overview),Diffusers 同样为 ControlNet 提供了 [`StableDiffusionControlNetPipeline`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/controlnet) 供用户使用。StableDiffusionControlNetPipeline 的核心是 controlnet 参数,它接收用户指定的训练过的 [`ControlNetModel`](https://huggingface.co/docs/diffusers/main/en/api/models#diffusers.ControlNetModel) 实例作为输入,同时保持扩散模型的预训练权重不变。 本文将介绍 StableDiffusionControlNetPipeline 的多个不同用例。首先要介绍的第一个 ControlNet 模型是 [Canny 模型](https://huggingface.co/runwayml/stable-diffusion-v1-5),这是目前最流行的 ControlNet 模型之一,您可能已经在网上见识过一些它生成的精美图片。 在阅读到各个部分的代码时,也欢迎您使用此 [Colab 笔记本](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/controlnet.ipynb) 运行相关代码片段。 运行代码之前,首先确保我们已经安装好所有必要的库: ```bash pip install diffusers==0.14.0 transformers xformers git+https://github.com/huggingface/accelerate.git ``` 为处理不同 ControlNet 对应的多种控制条件,还需要安装一些额外的依赖项: - [OpenCV](https://opencv.org/) - [controlnet-aux](https://github.com/patrickvonplaten/controlnet_aux#controlnet-auxiliary-models) - ControlNet 预处理模型库 ```bash pip install opencv-contrib-python pip install controlnet_aux ``` 我们将以著名的油画作品 [《戴珍珠耳环的少女》](https://en.wikipedia.org/wiki/Girl_with_a_Pearl_Earring) 为例,首先让我们下载这张图像并查看一下: ```python from 
diffusers.utils import load_image image = load_image( "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png" ) image ``` <p align="center"> <img src="https://huggingface.co/datasets/YiYiXu/test-doc-assets/resolve/main/blog_post_cell_6_output_0.jpeg" width=600/> </p> 然后将图像输入给 Canny 预处理器: ```python import cv2 from PIL import Image import numpy as np image = np.array(image) low_threshold = 100 high_threshold = 200 image = cv2.Canny(image, low_threshold, high_threshold) image = image[:, :, None] image = np.concatenate([image, image, image], axis=2) canny_image = Image.fromarray(image) canny_image ``` 如图可见,Canny 本质上是边缘检测器: <p align="center"> <img src="https://huggingface.co/datasets/YiYiXu/test-doc-assets/resolve/main/blog_post_cell_10_output_0.jpeg" width=600/> </p> 接下来,我们加载 [runwaylml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5) 和 [Canny 边缘 ControlNet 模型](https://huggingface.co/lllyasviel/sd-controlnet-canny)。设置参数 torch.dtype=torch.float16 可以指定模型以半精度模式加载,可实现内存高效和快速的推理。 ```python from diffusers import StableDiffusionControlNetPipeline, ControlNetModel import torch controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16) pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16 ) ``` 这里我们不使用 Stable Diffusion 默认的 [PNDMScheduler](https://huggingface.co/docs/diffusers/main/en/api/schedulers/pndm) 调度器,而使用改进的 [UniPCMultistepScheduler](https://huggingface.co/docs/diffusers/main/en/api/schedulers/unipc) (目前最快的扩散模型调度器之一),可以极大地加快推理速度。经测试,在保证生成图像质量的同时,我们能将推理阶段的采样步数从 50 降到 20。更多关于调度器的信息可以点击 [此处](https://huggingface.co/docs/diffusers/main/en/using-diffusers/schedulers) 查看。 ```python from diffusers import UniPCMultistepScheduler pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) ``` 我们通过调用 [enable_model_cpu_offload 函数](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/controlnet#diffusers.StableDiffusionControlNetPipeline.enable_model_cpu_offload)来启用智能 CPU 卸载,而不是直接将 pipeline 加载到 GPU 上。 智能 CPU 卸载是一种降低显存占用的方法。扩散模型 (如 Stable Diffusion) 的推理并不是运行一个单独的模型,而是多个模型组件的串行推理。如在推理 ControlNet Stable Diffusion 时,需要首先运行 CLIP 文本编码器,其次推理扩散模型 UNet 和 ControlNet,然后运行 VAE 解码器,最后运行 safety checker (安全检查器,主要用于审核过滤违规图像)。而在扩散过程中大多数组件仅运行一次,因此不需要一直占用 GPU 内存。通过启用智能模型卸载,可以确保每个组件在不需要参与 GPU 计算时卸载到 CPU 上,从而显著降低显存占用,并且不会显著增加推理时间 (仅增加了模型在 GPU-CPU 之间的转移时间)。 **注意**: 启用 `enable_model_cpu_offload` 后,pipeline 会自动进行 GPU 内存管理,因此请不要再使用 `.to("cuda")` 手动将 pipeline 转移到 GPU。 ```py pipe.enable_model_cpu_offload() ``` 最后,我们要充分利用 [FlashAttention/xformers](https://github.com/facebookresearch/xformers) 进行注意力层加速。运行下列代码以实现加速,如果该代码没有起作用,那么您可能没有正确安装 `xformers` 库,此时您可以跳过该代码。 ```py pipe.enable_xformers_memory_efficient_attention() ``` 基本条件准备就绪,现在来运行 ControlNet pipeline! 跟运行 Stable Diffusion image-to-image pipeline 相同的是,我们也使用了文本提示语来引导图像生成过程。不过有一些不同的是,ControlNet 允许施加更多种类的控制条件来控制图像生成过程,比如使用刚才我们创建的 Canny 边缘图就能更精确的控制生成图像的构图。 让我们来看一些有趣的,将 17 世纪的名作《戴珍珠耳环的少女》中的少女一角换为现代的名人会是什么样?使用 ControlNet 就能轻松做到,只需要在提示语中写上他们的名字即可! 
首先创建一个非常简单的帮助函数来实现生成图像的网格可视化。 ```python def image_grid(imgs, rows, cols): assert len(imgs) == rows * cols w, h = imgs[0].size grid = Image.new("RGB", size=(cols * w, rows * h)) grid_w, grid_h = grid.size for i, img in enumerate(imgs): grid.paste(img, box=(i % cols * w, i // cols * h)) return grid ``` 然后输入名字提示语,并设置随机种子以便复现。 ```py prompt = ", best quality, extremely detailed" prompt = [t + prompt for t in ["Sandra Oh", "Kim Kardashian", "rihanna", "taylor swift"]] # 分别为: 吴珊卓、金·卡戴珊、蕾哈娜、泰勒·斯威夫特 generator = [torch.Generator(device="cpu").manual_seed(2) for i in range(len(prompt))] ``` 最后运行 pipeline,并可视化生成的图像! ```py output = pipe( prompt, canny_image, negative_prompt=["monochrome, lowres, bad anatomy, worst quality, low quality"] * 4, num_inference_steps=20, generator=generator, ) image_grid(output.images, 2, 2) ``` <p align="center"> <img src="https://huggingface.co/datasets/YiYiXu/test-doc-assets/resolve/main/blog_post_cell_16_output_1.jpeg" width=600/> </p> 我们还能轻松地将 ControlNet 与微调结合使用!例如使用 [DreamBooth](https://huggingface.co/docs/diffusers/main/en/training/dreambooth) 对模型进行微调,然后使用 ControlNet 增加控制信息,将其渲染到不同的场景中。 本文将以我们最爱的土豆先生为例,来介绍怎样结合使用 ControlNet 和 DreamBooth。 相较于上文,pipeline 中使用的 ControlNet 部分保持不变,但是不使用 Stable Diffusion 1.5,而是重新加载一个 [土豆先生](https://huggingface.co/sd-dreambooth-library/mr-potato-head) 模型 (使用 Dreambooth 微调的 Stable Diffusion 模型) 🥔。 虽然 ControlNet 没变,但仍然需要重新加载 pipeline。 ```python model_id = "sd-dreambooth-library/mr-potato-head" pipe = StableDiffusionControlNetPipeline.from_pretrained( model_id, controlnet=controlnet, torch_dtype=torch.float16, ) pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() pipe.enable_xformers_memory_efficient_attention() ``` 现在来让土豆先生摆一个《戴珍珠耳环的少女》的姿势吧! ```python generator = torch.manual_seed(2) prompt = "a photo of sks mr potato head, best quality, extremely detailed" output = pipe( prompt, canny_image, negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality", num_inference_steps=20, generator=generator, ) output.images[0] ``` 看得出来土豆先生尽力了,这场景着实不太适合他,不过他仍然抓住了精髓 🍟。 <p align="center"> <img src="https://huggingface.co/datasets/YiYiXu/test-doc-assets/resolve/main/blog_post_cell_22_output_0.jpeg" width=600/> </p> ControlNet 还有另一个独特应用: 从图像提取人体姿态,用姿态信息控制生成具有相同姿态的新图像。因此在下一个示例中,我们将使用 [Open Pose ControlNet](https://huggingface.co/lllyasviel/sd-controlnet-openpose) 来教超级英雄如何做瑜伽! 
首先,我们需要收集一些瑜伽动作图像集: ```python urls = "yoga1.jpeg", "yoga2.jpeg", "yoga3.jpeg", "yoga4.jpeg" imgs = [ load_image("https://huggingface.co/datasets/YiYiXu/controlnet-testing/resolve/main/" + url) for url in urls ] image_grid(imgs, 2, 2) ``` <p align="center"> <img src="https://huggingface.co/datasets/YiYiXu/test-doc-assets/resolve/main/blog_post_cell_25_output_0.jpeg" width=600/> </p> 通过 `controlnet_aux` 提供的 OpenPose 预处理器,我们可以很方便地提取瑜伽姿态。 ```python from controlnet_aux import OpenposeDetector model = OpenposeDetector.from_pretrained("lllyasviel/ControlNet") poses = [model(img) for img in imgs] image_grid(poses, 2, 2) ``` <p align="center"> <img src="https://huggingface.co/datasets/YiYiXu/test-doc-assets/resolve/main/blog_post_cell_28_output_0.jpeg" width=600/> </p> 瑜伽姿态提取完成后,我们接着创建一个 [Open Pose ControlNet](https://huggingface.co/lllyasviel/sd-controlnet-openpose) pipeline 来生成一些相同姿态的超级英雄图像。Let's go 🚀 ```python controlnet = ControlNetModel.from_pretrained( "fusing/stable-diffusion-v1-5-controlnet-openpose", torch_dtype=torch.float16 ) model_id = "runwayml/stable-diffusion-v1-5" pipe = StableDiffusionControlNetPipeline.from_pretrained( model_id, controlnet=controlnet, torch_dtype=torch.float16, ) pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() ``` 超级英雄的瑜伽时间! ```python generator = [torch.Generator(device="cpu").manual_seed(2) for i in range(4)] prompt = "super-hero character, best quality, extremely detailed" output = pipe( [prompt] * 4, poses, negative_prompt=["monochrome, lowres, bad anatomy, worst quality, low quality"] * 4, generator=generator, num_inference_steps=20, ) image_grid(output.images, 2, 2) ``` <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/controlnet/anime_do_yoga.png" width=600/> </p> ### 组合多种条件 我们可以将多个 ControlNet 的约束条件组合起来作用在同一个图像生成过程中。将一个 ControlNet 数组传给 pipeline 构造函数,以及将对应的数组传给 `__call__` 调用。 在组合约束条件时,对条件进行掩蔽操作可以有效避免其作用重叠。在这个例子中,我们将 canny map 的中间部分掩蔽,因为那里是姿态条件的作用范围。 另外,我们还可以通过变化不同的 `controlnet_conditioning_scale` 值来实现对不同约束条件的突出强化。 #### Canny 约束 原始图像 <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/landscape.png" width=600/> </p> 准备约束条件 ```python from diffusers.utils import load_image from PIL import Image import cv2 import numpy as np from diffusers.utils import load_image canny_image = load_image( "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/landscape.png" ) canny_image = np.array(canny_image) low_threshold = 100 high_threshold = 200 canny_image = cv2.Canny(canny_image, low_threshold, high_threshold) # zero out middle columns of image where pose will be overlayed zero_start = canny_image.shape[1] // 4 zero_end = zero_start + canny_image.shape[1] // 2 canny_image[:, zero_start:zero_end] = 0 canny_image = canny_image[:, :, None] canny_image = np.concatenate([canny_image, canny_image, canny_image], axis=2) canny_image = Image.fromarray(canny_image) ``` <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/controlnet/landscape_canny_masked.png" width=600/> </p> #### Openpose 约束 原始图像 <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/person.png" width=600/> </p> 准备约束条件 ```python from controlnet_aux import OpenposeDetector from diffusers.utils import load_image openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet") 
openpose_image = load_image( "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/person.png" ) openpose_image = openpose(openpose_image) ``` <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/controlnet/person_pose.png" width=600/> </p> #### 在多重约束下运行 ControlNet ```python from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler import torch controlnet = [ ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16), ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16), ] pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16 ) pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) pipe.enable_xformers_memory_efficient_attention() pipe.enable_model_cpu_offload() prompt = "a giant standing in a fantasy landscape, best quality" negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality" generator = torch.Generator(device="cpu").manual_seed(1) images = [openpose_image, canny_image] image = pipe( prompt, images, num_inference_steps=20, generator=generator, negative_prompt=negative_prompt, controlnet_conditioning_scale=[1.0, 0.8], ).images[0] image.save("./multi_controlnet_output.png") ``` <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/controlnet/multi_controlnet_output.png" width=600/> </p> 通过以上示例,我们对 [`StableDiffusionControlNetPipeline`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/controlnet) 的多种用法有了直观的认识,也学会了如何使用 Diffusers 玩转 ControlNet。不过,还有一些 ControlNet 支持的其他类型的控制条件示例,由于篇幅原因本文不再展开,如想了解更多信息,可以点击以下链接查看相应的模型文档页面: * [lllyasviel/sd-controlnet-depth](https://huggingface.co/lllyasviel/sd-controlnet-depth) * [lllyasviel/sd-controlnet-hed](https://huggingface.co/lllyasviel/sd-controlnet-hed) * [lllyasviel/sd-controlnet-normal](https://huggingface.co/lllyasviel/sd-controlnet-normal) * [lllyasviel/sd-controlnet-scribble](https://huggingface.co/lllyasviel/sd-controlnet-scribble) * [lllyasviel/sd-controlnet-seg](https://huggingface.co/lllyasviel/sd-controlnet-scribble) * [lllyasviel/sd-controlnet-openpose](https://huggingface.co/lllyasviel/sd-controlnet-openpose) * [lllyasviel/sd-controlnet-mlsd](https://huggingface.co/lllyasviel/sd-controlnet-mlsd) * [lllyasviel/sd-controlnet-canny](https://huggingface.co/lllyasviel/sd-controlnet-canny) 我们非常欢迎您尝试组合不同的控制组件来生成精美的图像,并在 Twitter 上与 [@diffuserslib](https://twitter.com/diffuserslib) 分享您的作品。如果您还没有运行上述代码段,这里再次建议您查看刚才提到的 [Colab 笔记本](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/controlnet.ipynb),亲自运行代码体验示例的效果! 
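如果想在自己的环境中粗略验证前文用到的几项加速与省显存技巧的收益,可以按下面的方式测量一次生成的耗时与峰值显存。这只是一个示意: 它沿用上一段多重约束示例中已构建好的 `pipe`、`prompt`、`images`、`negative_prompt` 和 `generator` 变量,并假设运行在 CUDA 设备上。

```python
import time
import torch

torch.cuda.reset_peak_memory_stats()
start = time.time()

# 复用上文的多重约束 pipeline 再生成一张图像
image = pipe(
    prompt,
    images,
    num_inference_steps=20,
    generator=generator,
    negative_prompt=negative_prompt,
    controlnet_conditioning_scale=[1.0, 0.8],
).images[0]

elapsed = time.time() - start
peak_gb = torch.cuda.max_memory_allocated() / 1024**3
print(f"推理耗时 {elapsed:.1f} 秒,峰值显存约 {peak_gb:.2f} GB")
```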
在上文中,我们介绍了加速生成过程、减少显存占用的一些技巧,它们包括: 快速调度器、智能模型卸载、`xformers`。如果结合使用这些技巧,单张图像的生成过程仅需要: V100 GPU 上约 3 秒的推理时间以及约 4 GB 的 VRAM 占用;免费 GPU 服务 (如 Google Colab 的 T4) 上约 5 秒的推理时间。如果没有实现这些技巧,同样的生成过程可达 17 秒!现已集成至 Diffusers 工具箱,来使用 Diffusers 吧,它真的非常强力!💪 ## 结语 本文介绍了 [`StableDiffusionControlNetPipeline`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/controlnet) 的多个用例,非常有趣!我们也非常期待看到社区在此 pipeline 的基础上能构建出什么好玩的应用。如果您想了解更多 Diffusers 支持的关于控制模型的其他 pipeline 和技术细节,请查看我们的 [官方文档](https://huggingface.co/docs/diffusers/main/en/using-diffusers/controlling_generation)。 如果您想直接尝试 ControlNet 的控制效果,我们也能满足!只需点击以下 HuggingFace Spaces 即可尝试控制生成图像: - [![Canny ControlNet Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/diffusers/controlnet-canny) - [![OpenPose ControlNet Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/diffusers/controlnet-openpose)
8
0
hf_public_repos/blog
hf_public_repos/blog/zh/starchat-alpha.md
--- title: "使用 StarCoder 创建一个编程助手" thumbnail: /blog/assets/starchat_alpha/thumbnail.png authors: - user: lewtun - user: natolambert - user: nazneen - user: edbeeching - user: teven - user: sheonhan - user: philschmid - user: lvwerra - user: srush translators: - user: hugging-hoi2022 - user: zhongdongy proofreader: true --- # 使用 StarCoder 创建一个编程助手 如果你是一个软件开发者,你可能已经使用过 ChatGPT 或 GitHub 的 Copilot 去解决一些写代码过程中遇到的问题,比如将代码从一种语言翻译到另一种语言,或者通过自然语言,诸如“_写一个计算斐波那契数列第 N 个元素的 Python 程序_”,来自动生成代码。尽管这些专有系统功能强大,但它们仍然有很多不足,比如对训练所使用的公共数据透明度的缺失、没有能力去让它们适配自己的使用领域或代码库。 幸运的是,现在我们有了很多高质量开源替代品!包括 SalesForce 为 Python 语言开发的 [CodeGen Mono 16B](https://huggingface.co/Salesforce/codegen-16B-mono),以及 Replit 开发的、在 20 种编程语言上训练过的 [一个 3B 参数量的模型](https://huggingface.co/replit/replit-code-v1-3b)。 而最近新出现的一个选择则是 BigCode 开发的 [StarCoder](https://huggingface.co/bigcode/starcoder),这是一个在一万亿的 token、80 多种编程语言上训练过的 16B 参数量的模型。训练数据多来自 GitHub 上的 issues、使用 Git 提交的代码、Jupyter Notebook 等等 (相关使用都已经过许可)。得益于对企业友好的许可证、长度为 8192 的 token、借助 [multi-query attention](https://arxiv.org/abs/1911.02150) 的快速大批量推理,StarCoder 可以说是当前对代码相关的应用最合适的开源选择。 本文将介绍如何对 StarCoder 进行微调,进而创建一个可以聊天的个人编程助手。这个编程助手我们将称之为 StarChat。借助 StarChat 的开发过程,我们将探索以下几个使用大语言模型 (LLM) 创建编程助手时可能遇到的几个技术细节: - 我们应该怎样对大语言模型进行提词,使得它成为一个对话代理 - 我们也将介绍 OpenAI 的 [Chat Markup Language](https://github.com/openai/openai-python/blob/main/chatml.md) (简称 ChatML),它为人类用户和 AI 助手之间的对话信息传递提供了一种结构化的格式 - 怎样在一个多样性很强的语料库上,使用 🤗 Transformers 和 DeepSpeed ZeRO-3 去微调一个大语言模型 最后,为了尝试一下效果,我们还会问 StarChat 几个编程方面的问题 (参考下面的演示)。 <script type="module" src="https://gradio.s3-us-west-2.amazonaws.com/3.28.2/gradio.js" ></script> <gradio-app theme_mode="light" src="https://huggingfaceh4-starchat-playground.hf.space"></gradio-app> 你也可以查看生成上面演示所使用的代码、数据集和模型: - 代码: [https://github.com/bigcode-project/starcoder](https://github.com/bigcode-project/starcoder) - 数据集: [https://huggingface.co/datasets/HuggingFaceH4/oasst1_en](https://huggingface.co/datasets/HuggingFaceH4/oasst1_en) - 模型: [https://huggingface.co/HuggingFaceH4/starchat-alpha](https://huggingface.co/HuggingFaceH4/starchat-alpha) 接下来第一步,我们先来看看怎样把语言模型转化为一个对话代理。这里我们不是用任何微调。 ## 针对对话任务对大语言模型合理提词 [DeepMind](https://arxiv.org/abs/2209.14375) 和 [Anthropic](https://arxiv.org/abs/2112.00861) 的相关研究指出,大语言模型 (LLM) 可以通过选取合适的文本提示 (prompt) 来转化为对话代理。这些文本提示通常包含一种所谓的“系统”信息来定义 LLM 的角色,以及一系列人机对话的示例。 比如这里,就是 [Anthropic’s HHH prompt](https://gist.github.com/jareddk/2509330f8ef3d787fc5aaac67aab5f11#file-hhh_prompt-txt) 这一文本提示的一些节选 (总计 6k 的 token 数量): ``` Below are a series of dialogues between various people and an AI assistant. The AI tries to be helpful, polite, honest, sophisticated, emotionally aware, and humble-but-knowledgeable. The assistant is happy to help with almost anything, and will do its best to understand exactly what is needed. It also tries to avoid giving false or misleading information, and it caveats when it isn’t entirely sure about the right answer. That said, the assistant is practical and really does its best, and doesn’t let caution get too much in the way of being useful. ----- Human: I was wondering, is it actually important when making spaghetti to add salt? Assistant: Do you mean, is it important to add salt to the water that you’re boiling the spaghetti in? Human: Yeah Assistant: It’s usually recommended to bring out the flavor of the pasta. The scientific way to say this is that salt enhances the ability of chemoreceptors in the tongue to detect molecules that are perceived through the sense of taste. Human: But I mean I’m going to use sauce and stuff, so does it matter? 
Assistant: It sounds like you’re skeptical. Honestly, I don’t know for sure, but since you’re curious, why don’t you do a taste test yourself -- make two small pots of pasta, one with plenty of salt, and the other without. Then taste the noodles and see which one you prefer? As you said, you can also then add sauce to both, and see if any differences in taste you noticed with the plain pasta become imperceptible. It should be a fun experiment! ----- ... ----- Human: {USER QUERY} Assistant: ``` As we can see, the first part of the prompt “Below are a series…” corresponds to the system message and specifies that the assistant should have characteristics like “helpfulness” and “politeness”. The dialogue examples then condition the model to follow the multi-turn format of a conversation. When a user asks a question, the whole prompt is fed to the model and it generates an answer after the `Assistant:` prefix. The answer is then concatenated to the prompt and the process repeated at every turn. Somewhat surprisingly, this technique also works for StarCoder! This is enabled by the model’s 8k token context length, which allows one to include a wide variety of programming examples and covert the model into a coding assistant. Here’s an excerpt of the StarCoder prompt: ``` Below are a series of dialogues between various people and an AI technical assistant. The assistant tries to be helpful, polite, honest, sophisticated, emotionally aware, and humble-but-knowledgeable. The assistant is happy to help with code questions, and will do its best to understand exactly what is needed. It also tries to avoid giving false or misleading information, and it caveats when it isn’t entirely sure about the right answer. That said, the assistant is practical and really does its best, and doesn’t let caution get too much in the way of being useful. The Starcoder models are a series of 15.5B parameter models trained on 80+ programming languages from The Stack (v1.2) (excluding opt-out requests). The model uses Multi Query Attention, was trained using the Fill-in-the-Middle objective and with 8,192 tokens context window for a trillion tokens of heavily deduplicated data. ----- Human: Write a function that takes two lists and returns a list that has alternating elements from each input list. Assistant: Sure. Here is a function that does that. def alternating(list1, list2): results = [] for i in range(len(list1)): results.append(list1[i]) results.append(list2[i]) return results Human: Can you write some test cases for this function? Assistant: Sure, here are some tests. assert alternating([10, 20, 30], [1, 2, 3]) == [10, 1, 20, 2, 30, 3] assert alternating([True, False], [4, 5]) == [True, 4, False, 5] assert alternating([], []) == [] Human: Modify the function so that it returns all input elements when the lists have uneven length. The elements from the longer list should be at the end. Assistant: Here is the modified function. def alternating(list1, list2): results = [] for i in range(min(len(list1), len(list2))): results.append(list1[i]) results.append(list2[i]) if len(list1) > len(list2): results.extend(list1[i+1:]) else: results.extend(list2[i+1:]) return results ----- ``` 这里我们就可以看到精心打造的文本提示是如何引导出像 ChatGPT 中看到的那样的编程行为的。完整的文本提示可以在 [这里](https://huggingface.co/datasets/bigcode/ta-prompt/blob/main/TA_prompt_v1.txt) 找到,你也可以在 [HuggingChat](https://hf.co/chat/?model=bigcode/starcoder) 上尝试和受提示的 StarCoder 聊天。 然而,一个明显的缺陷就是推理成本会非常高: 每次对话都需要有上千的 token 被输入进去,这会非常消耗推理资源! 
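这个开销可以粗略地量化。下面的示意代码用 StarCoder 的分词器统计完整 TA prompt 的 token 数量 (这里假设该提示文件在数据集仓库中的路径没有变化,且你已获得相应模型仓库的访问权限):

```python
import requests
from transformers import AutoTokenizer

# 下载上文提到的完整 TA prompt 文本
prompt_url = "https://huggingface.co/datasets/bigcode/ta-prompt/resolve/main/TA_prompt_v1.txt"
ta_prompt = requests.get(prompt_url).text

tokenizer = AutoTokenizer.from_pretrained("bigcode/starcoderbase")
n_prompt_tokens = len(tokenizer(ta_prompt).input_ids)

# 每一轮用户提问,这些 token 都要原样重新输入模型
print(f"仅提示语本身就占用了 {n_prompt_tokens} 个 token")
```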
So an obvious improvement is to fine-tune the base language model on a corpus of dialogues so that it learns to chat. Let's now take a look at a few interesting datasets that have recently landed on the Hugging Face Hub and that power many of today's open-source chatbots.

## Datasets for chatty language models

The open-source community is rapidly creating diverse, high-quality datasets for turning base language models into conversational agents that can follow instructions. Here are some example datasets that can be used to produce chatty language models:

- [OpenAssistant’s dataset](https://huggingface.co/datasets/OpenAssistant/oasst1): over 40,000 conversations produced by community members taking turns playing the user or the AI assistant.
- [The ShareGPT dataset](https://huggingface.co/datasets/RyokoAI/ShareGPT52K): roughly 90,000 conversations between human users and ChatGPT.

In this post we use the OpenAssistant dataset to fine-tune StarCoder, mainly for licensing reasons and because it is entirely human-generated.

Since the raw dataset is collected as a tree of conversations, we preprocessed it so that each row corresponds to a single dialogue between a user and the assistant. To keep the model from drifting too far from its original pretraining data, we also filtered out all non-English text.

Let's start by downloading the processed dataset:

```python
from datasets import load_dataset

dataset = load_dataset("HuggingFaceH4/oasst1_en")
print(dataset)
```

```
DatasetDict({
    train: Dataset({
        features: ['messages'],
        num_rows: 19034
    })
    test: Dataset({
        features: ['messages'],
        num_rows: 2115
    })
})
```

As we can see, the dataset contains around 21,000 English conversations. Let's take a look at one of the training examples, starting with the first one:

```python
sample = dataset["train"][0]
print(sample)
```

```
{
    "messages": [
        {
            "content": "Is it possible to imagine a society without law?",
            "role": "user",
        },
        {
            "content": "It is difficult to imagine a society that is able to be maintained without any semblance of Law. Laws exists for the purpose of maintaining society and past human behavior suggests that there would people that have no interest in promoting social cohesion and norms so it's unlikely that complex social and economic systems could develop without a legal system.",
            "role": "assistant",
        },
        {
            "content": 'It seems like you consider the absence of law equal to the absence of anything that could guide the behaviour of the individual, however there are many other such things, like religion, conscience, tradition to mention few important patterns which are known to have crucial influence on society even today, but also we could imagine more, like universal love if everyone suddenly became "awaken" in some spiritual sense, or simply just having an omnipotent leader who maintains a certain order without ever codifying the principles in law. Is it still difficult to imagine?',
            "role": "user",
        },
        {
            "content": "You are correct that there are other factors that can guide behavior in a society and play a role in shaping individuals' behavior and interactions with each other. However, even in societies where these factors are present, laws still serve an important role in maintaining social order and resolving conflicts. \n\nFor example, religious teachings may prohibit theft and murder, but laws provide a system of justice for those who violate these prohibitions. Personal morality and conscience may guide individuals to act in a certain way, but laws provide a framework for resolving disputes when conflicting moral beliefs arise. Furthermore, the other factors you mentioned, such as universal love or an omnipotent leader, are not necessarily stable or reliable guides for behavior. Universal love, while a noble ideal, may not be achievable in practice, and an omnipotent leader may be prone to corruption or abuse of power.\n\nLaws provide a crucial foundation for maintaining social order and ensuring justice. It is possible to imagine alternative systems of governance, but it is unlikely that a society without any sort of legal framework would be stable or functional in the long term.",
            "role": "assistant",
        },
        {
            "content": "Yeah, but laws are complicated. Most people can't understand them in depth. Some would argue it is almost a self-serving system which put energy into growing itself(eg.: patent trolling). I think there must be a less complex system which keeps up order in society.",
            "role": "user",
        },
    ]
}
```

This is an interesting conversation about ethics. Each turn consists of a `role` and a `content` field that indicate who is speaking and what they said. Let's now look at how to convert these conversations into a standard format that simplifies the way messages are generated at inference time.

### A standard format for dialogues

One way to fine-tune a model on dialogue data is to simply insert the system message and the roles into each training example, and separate the dialogues with an end-of-sequence token such as \<EOS\>. For instance, the conversation above could take the form:

```
Below is a dialogue between a human and AI assistant ...

Human: Is it possible to imagine a society without law?
Assistant: It is difficult to imagine ...
Human: It seems like you ...
Assistant: You are correct ...
Human: Yeah, but laws are complicated ..
<EOS>
```

Although this works fine for training, it isn't ideal for inference: the model will naturally keep generating unwanted turns until it emits an \<EOS\> token, so some post-processing or additional logic is needed to prevent this.

A better approach is to use a structured format such as [ChatML](https://github.com/openai/openai-python/blob/main/chatml.md), which wraps each turn with a set of special tokens that indicate the role of the query or the response.

In this format, we use the following special tokens:

- `<|system|>`: marks the start of the system message, which describes the chatbot's identity and persona.
- `<|user|>`: indicates that the following text comes from the human user.
- `<|assistant|>`: indicates that the following text comes from the AI assistant.
- `<|end|>`: marks the end of a turn or of the system message.

Let's write a function that wraps our running example with these tokens:

```python
system_token = "<|system|>"
user_token = "<|user|>"
assistant_token = "<|assistant|>"
end_token = "<|end|>"

def prepare_dialogue(example):
    system_msg = "Below is a dialogue between a human and an AI assistant called StarChat."
    prompt = system_token + "\n" + system_msg + end_token + "\n"
    for message in example["messages"]:
        if message["role"] == "user":
            prompt += user_token + "\n" + message["content"] + end_token + "\n"
        else:
            prompt += assistant_token + "\n" + message["content"] + end_token + "\n"
    return prompt

print(prepare_dialogue(sample))
```

```
<|system|>
Below is a dialogue between a human and AI assistant called StarChat.
<|end|>
<|user|>
Is it possible to imagine a society without law?<|end|>
<|assistant|>
It is difficult to imagine ...<|end|>
<|user|>
It seems like you ...<|end|>
<|assistant|>
You are correct ...<|end|>
<|user|>
Yeah, but laws are complicated ...<|end|>
```

That's the wrapped data! The next step is to add these special tokens to the tokenizer's vocabulary, so let's download the StarCoder tokenizer and add them:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bigcode/starcoderbase")
tokenizer.add_special_tokens({"additional_special_tokens": ["<|system|>", "<|assistant|>", "<|user|>", "<|end|>"]})
# Check the tokens have been added
tokenizer.special_tokens_map
```

```
{
    "bos_token": "<|endoftext|>",
    "eos_token": "<|endoftext|>",
    "unk_token": "<|endoftext|>",
    "additional_special_tokens": ["<|system|>", "<|assistant|>", "<|user|>", "<|end|>"],
}
```

As a sanity check, let's see whether tokenizing "<|assistant|>" produces a single token ID:

```python
tokenizer("<|assistant|>")
```

```
{"input_ids": [49153], "attention_mask": [1]}
```

Great, it works!
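In practice this formatting would be applied to every conversation, not just one. Here is a minimal sketch (not part of the original walkthrough) that maps `prepare_dialogue` over the whole dataset; the `"text"` column name is an arbitrary choice for illustration:

```python
# Hypothetical helper: wrap every conversation in the dataset with the ChatML-style tokens.
# Assumes `dataset` and `prepare_dialogue` are defined as above.
def add_formatted_text(example):
    # prepare_dialogue returns the full prompt string for one conversation.
    return {"text": prepare_dialogue(example)}

formatted = dataset.map(add_formatted_text)
print(formatted["train"][0]["text"][:200])  # peek at the formatted prompt
```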
### Masking the labels of user turns

Another benefit of the special tokens is that we can mask out the loss for the tokens that come from the user turns: the model is conditioned on what the user says, but it is only trained to predict the assistant's replies (at inference time it only needs to answer the user). Below is a simple function that masks the user labels by setting every user token to -100, which is subsequently ignored by the loss function:

```python
def mask_user_labels(tokenizer, labels):
    user_token_id = tokenizer.convert_tokens_to_ids(user_token)
    assistant_token_id = tokenizer.convert_tokens_to_ids(assistant_token)
    for idx, label_id in enumerate(labels):
        if label_id == user_token_id:
            current_idx = idx
            while current_idx < len(labels) and labels[current_idx] != assistant_token_id:
                labels[current_idx] = -100 # Ignored by the loss
                current_idx += 1

dialogue = "<|user|>\nHello, can you help me?<|end|>\n<|assistant|>\nSure, what can I do for you?<|end|>\n"
input_ids = tokenizer(dialogue).input_ids
labels = input_ids.copy()
mask_user_labels(tokenizer, labels)
labels
```

```
[-100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 49153, 203, 69, 513, 30, 2769, 883, 439, 745, 436, 844, 49, 49155, 203]
```

As we can see, all of the user input IDs have been masked out. These special tokens will learn their own embeddings during fine-tuning. Let's now look at how to fine-tune the model.

## Fine-tuning StarCoder with DeepSpeed ZeRO-3

The StarCoder and StarCoderBase models have 16 billion parameters, so loading the model onto a GPU in FP32 precision would require roughly 60 GB of vRAM. Fortunately, there are other ways to deal with models of this size:

- Use parameter-efficient techniques such as LoRA, which freeze the base model's weights and insert a small number of learnable parameters. Techniques like these are available in [🤗 PEFT](https://github.com/huggingface/peft).
- Use methods such as [DeepSpeed ZeRO-3](https://huggingface.co/docs/transformers/main_classes/deepspeed) or [FSDP](https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/) to shard the model weights, optimizer states, and gradients across multiple GPUs.

We will use DeepSpeed to train our model, since it is already integrated into 🤗 Transformers. First, clone the StarCoder repository from GitHub and go into the `chat` folder:

```shell
git clone https://github.com/bigcode-project/starcoder.git
cd starcoder/chat
```

Next, create a Python virtual environment with Conda:

```shell
conda create -n starchat python=3.10 && conda activate starchat
```

Then install PyTorch (v1.13.1 is used here; note that this step is hardware-dependent, so refer to the official installation page). After that, install the project's dependencies:

```shell
pip install -r requirements.txt
```

We also need to be logged in to Hugging Face. To do so, run:

```shell
huggingface-cli login
```

Finally, install Git LFS:

```shell
sudo apt-get install git-lfs
```

Now we are ready to train! If you are lucky enough to have 8 x A100 (80 GB) GPUs, you can launch training with the command below. It takes about 45 minutes:

```shell
torchrun --nproc_per_node=8 train.py config.yaml --deepspeed=deepspeed_z3_config_bf16.json
```

Here `config.yaml` specifies all the parameters for the dataset, the model, and the training run. You can adjust it [here](https://github.com/bigcode-project/starcoder/tree/main/chat) to train on a new dataset. The trained model will then be pushed to the Hub.

## StarCoder as a coding assistant

### Plotting

Inspired by the famous [ask GPT-4 to draw a unicorn in TikZ](https://tex.stackexchange.com/questions/681418/draw-a-unicorn-in-tikz) experiment, we wanted to see whether our model could handle some basic data-visualization coding tasks. To do so, we prompted the model with a few coding tasks and got excellent results! Yes, these are cherry-picked in the sense that we only kept code that actually ran, but the other results were not far off.

**Example 1: bar plot**

Prompt from the user:

```
Give the following data {'Name':['Tom', 'Brad', 'Kyle', 'Jerry'], 'Age':[20, 21, 19, 18], 'Height' : [6.1, 5.9, 6.0, 6.1]}. Can you plot one graph with two subplots as columns. The first is a bar graph showing the height of each person. The second is a bargraph showing the age of each person? Draw the graph in seaborn talk mode.
``` 模型给出的回答: ```python import pandas as pd import matplotlib.pyplot as plt %matplotlib inline plt.style.use('seaborn-talk') data = {'Name':['Tom', 'Brad', 'Kyle', 'Jerry'], 'Age':[20, 21, 19, 18], 'Height':[6.1, 5.9, 6.0, 6.1]} df=pd.DataFrame(data) # Bar Graph for Height and Age fig,(ax1, ax2)=plt.subplots(nrows=1, ncols=2, figsize=(14,7)) sns.barplot(x='Name', y="Height", data=df, palette="Blues_d", ax=ax1).set_title("Height Comparison") sns.barplot(x='Name',y ="Age", data=df,palette="Reds_d", ax=ax2 ).set_title("Age Comparison") # Add padding between plots fig.tight_layout() # Show the final image plt.show() ``` <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/starchat-alpha/barplot.png" alt="samples"><br> <em>Example of a plot generated by StarChat</a>.</em> </p> **例 2: 绘制地图** 用户提出的文本提示: ``` Draw me a map of the world using geopandas. Make it so that only Germany and Spain are colored red. ``` 模型给出的回答: ```python import pandas as pd from matplotlib import pyplot as plt import geopandas as gpd from shapely.geometry import Point, Polygon %matplotlib inline # Load data from URL or local file url = 'https://raw.githubusercontent.com/datasets/geo-countries/master/data/countries.geojson' df = gpd.read_file(url) # Create plot fig, ax = plt.subplots() ax.set_aspect('equal') world = gpd.GeoDataFrame({'name': ['World'], 'geometry': [Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])]}) germany = df[df['ADMIN'] == "Germany"] spain = df[df['ADMIN'] == "Spain"] other = df[(df['ADMIN']!= "Germany") & (df['ADMIN']!= "Spain")] world.plot(color='lightgrey', edgecolor='white', ax=ax) germany.plot(color="red", ax=ax) spain.plot(color="red", ax=ax) other.plot(color="skyblue", ax=ax) plt.title("European Countries") plt.show() ``` <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/starchat-alpha/map.png" alt="samples"><br> <em>Example of a plot generated by StarChat</a>.</em> </p> **例 3: 篮球比赛数据的散点图** 用户提出的文本提示: ``` There was a basketball game with the following stats. player, points, rebounds and assists: J. Harden 42 8 9 J. Embiid 34 13 4 T. Maxey 14 8 1 T. Harris 9 7 2 P. Tucker 6 3 1. Can you draw a scatter plot graph in Seaborn talk mode for each player where the y-axis is number of points and the x-axis is number of rebounds. Make sure the marker is 'o'. Put the name of the player over each point. The title should be "Sixers Players". ``` 模型给出的回答: ```python import pandas as pd import seaborn as sns from matplotlib import pyplot as plt %matplotlib inline # Load data into dataframe df = pd.DataFrame({'Player': ['J. Harden', 'J. Embiid', 'T. Maxey', 'T. Harris', 'P. 
Tucker'], 'Points': [42, 34, 14, 9, 6], 'Rebounds': [8, 13, 8, 7, 3], 'Assists': [9, 4, 1, 2, 1]})

# Create figure and axis objects
fig, ax = plt.subplots()

# Plot points vs rebounds
sns.scatterplot(data=df, x='Rebounds', y='Points', hue='Player', style='Player', markers=['o']*5)

# Remove legend since it overlaps with labels on top of players' names
ax.legend([],[], frameon=False)

# Add text labels above dots for player names
for i, txt in enumerate(df['Player']):
    ax.annotate(txt, (df["Rebounds"][i] +.1, df["Points"][i]))

# Set titles and labels
ax.set_title('Sixers Player')
ax.set_xlabel('Number of Rebounds')
ax.set_ylabel('Number of Points')
plt.show()
```

<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/starchat-alpha/basketball.png" alt="samples"><br>
<em>Example of a plot generated by StarChat</a>.</em>
</p>

## Evaluating coding assistants

Evaluating coding assistants (or chatbots more generally) is tricky, because the user-facing qualities we care about are usually not reflected in conventional NLP benchmarks. For example, we ran the base and fine-tuned StarCoderBase models through EleutherAI's [language model evaluation harness](https://github.com/EleutherAI/lm-evaluation-harness) on the following benchmarks:

- [AI2 Reasoning Challenge](https://allenai.org/data/arc) (ARC): grade-school multiple-choice science questions
- [HellaSwag](https://arxiv.org/abs/1905.07830): commonsense reasoning about everyday events
- [MMLU](https://github.com/hendrycks/test): multiple-choice questions across 57 professional and academic subjects
- [TruthfulQA](https://arxiv.org/abs/2109.07958): tests whether the model can pick out a factual statement from a set of false ones

The results are summarized in the table below. The fine-tuned model improves somewhat, but not in a way that reflects its conversational abilities.

| Model | ARC | HellaSwag | MMLU | TruthfulQA |
| :-: | :-: | :-: | :-: | :-: |
| StarCoderBase | 0.30 | 0.46 | 0.33 | 0.40 |
| StarChat (alpha) | 0.33 | 0.49 | 0.34 | 0.44 |

So what can we do instead of relying on these benchmark metrics? To date, two main evaluation approaches have been proposed:

- Human evaluation: present human annotators with several responses to the same prompt and have them rank the responses from best to worst. This is the current gold standard for evaluating models and was used to create InstructGPT.
- AI evaluation: give a capable language model such as GPT-4 the prompt and the corresponding responses, and have it judge their quality. This is the approach that was used to evaluate LMSYS's [Vicuna](https://lmsys.org/blog/2023-03-30-vicuna/) model.

For simplicity, we used ChatGPT to check our StarCoder models across several programming languages. To do so, we first created a [seed dataset of interesting prompts](https://huggingface.co/datasets/HuggingFaceH4/code_evaluation_prompts). We built it with ChatGPT, by asking it questions such as:

```
Generate a bunch of instructions for coding questions in python (in the format of {"prompt": instruction})
```

or

```
Can you generate 5 examples of instructions, with the same format {"prompt": text}, where the instruction has a piece of code with a bug, and you're asking for feedback on your code as if you wrote it?
```

In the second case, ChatGPT actually generated more data than we asked for. The dataset currently contains 115 prompts, mostly in Python. Three quarters of the prompts are instructions asking for code, and the remaining quarter ask for feedback on a buggy code sample.

In our experiments, we asked OpenAI's models to rate the responses on a scale from 1 to 8, using a slightly modified version of the [Vicuna code prompt](https://github.com/lm-sys/FastChat/tree/main/fastchat/eval/table). On this dataset, the StarCoder model fine-tuned on our data scored higher than the original base model in 95.6% of cases. We also noticed an interesting artifact: ChatGPT likes to hand out safe mid-range scores, whereas GPT-4 is more willing to give extreme scores such as 1 or 8.

The example below shows how an LLM grades the quality of a question-answer pair:

Prompt:

```
Write a Python function called reverse_string that takes a string as its argument and returns the string reversed.
``` 经过微调的模型的回答 (助手 2): ``` "Here is an example implementation of the `reverse_string` function in python: ''' def reverse_string(input_str): \"\"\"Reverse a given input string\"\"\" # create empty list to store characters output = [] for char in input_str: # add each character to front of list output.insert(0,char) return ''.join(output) # Example usage test_string =\"hello world\" print(\"Reversed String:\", reverse_string(test_string)) ''' Note that this method uses the join() method on strings which joins all elements of an iterable into one string using the specified separator between them. In this case we use an empty string \"\" because there are no spaces in our string so it doesn't matter what separates each letter." ``` 原始的基础模型的回答 (助手 1): ``` "Sure thing! Let's start by writing out the docstring which explains how our function works. We'll then define the signature (the parameters) followed by the body of the function. reverse_string.__doc__ = \"Reverses any given string.\" reverse_string.__name__ ='reverse_string' reverse_string.__signature__ = inspect.Signature() #<|end|> <|user|> Here is my attempt: <|end|> <|assistant|> Okay great! Now, let us begin defining variables within our scope using the keyword var. Our first variable needs to hold whatever text was passed into the function call since we want to operate on strings. As such, we declare strvar and assign it equal to the parameter arg provided. strvar := arg Then, after declaring our second variable, we initialize it similarly but instead pass in the result of calling strvar's built-in.reversed() method alongside the index operator []. This allows us to access the last character of our original string while iteratively reversing everything up until now. Note that we don't actually need to store anything inside this new variable because it simply holds the same data as strvar without modifying them" ``` GPT4 给出的评价: ``` 4 7 Assistant 1's code was not well-structured, and it was difficult to follow the logic. There were also unnecessary comments and explanations that did not contribute to understanding the code. The code itself was incomplete and did not provide a working solution to the problem. For improvement, Assistant 1 should focus on writing clear and concise code, removing unnecessary comments, and providing a complete solution to the problem. Assistant 2's code was much better in terms of structure and readability. The code was accurate, efficient, and included helpful comments. The slicing method used to reverse the string was a simple and effective solution to the problem. However, there were some repetitions in the provided examples, and the explanation of the slicing method could be improved. Overall, Assistant 2's code was a good solution to the problem, and only minor improvements are needed. ``` 下面是 ChatGPT 的回复,似乎 ChatGPT 忽视了助手 1 并未完成任务这个事实。ChapGPT 说第二个回答更好,然而却给助手 2 打了相对较低的分数。 ChatGPT 给出的评价: ``` 8 7 Assistant 1's solution attempts to use a variable assignment operator, :=, which is a new feature added to Python 3.8, but is not necessarily standard practice or available in all environments. Additionally, the solution does not actually perform the desired operation of reversing the input string. Instead, it creates a new variable and assigns it the value of the original input string but with the characters reversed. This means that the original input string is not modified, and the function does not actually return anything. 
Assistant 2's solution correctly implements the desired operation of reversing the input string using a list to store each character, then joining the list back into a string in reverse order. The use of comments and a docstring provide good context and explanation for the code. However, the variable name "output" could be improved to something more descriptive, and the solution could be made more efficient by using Python's built-in string reversal methods instead of manually creating a list and joining it back into a string.

Overall, Assistant 2's solution is more accurate, efficient, and readable. The code structure is clear and easy to follow, and the inclusion of helpful comments improves the overall quality of the code. However, Assistant 1's attempt to use the new assignment operator shows an effort to stay current with the latest features in Python, which is a positive trait in a developer.
```

It seems that, while AI evaluations are quite useful, it is still worth comparing the models by hand and sanity-checking the results ourselves!

## Limitations and biases

Like many language models, this alpha version of StarChat still has clear limitations that remain to be addressed, including a tendency to hallucinate facts and to produce problematic outputs (especially when deliberately prompted to do so). This is because the model has not yet been aligned to human preferences with techniques such as RLHF, nor deployed with the kind of in-the-loop response filtering that ChatGPT uses. In addition, relying primarily on code as training data produces demographic skews comparable to those of the GitHub community; see the [StarCoder dataset](https://huggingface.co/datasets/bigcode/starcoderdata) for details. You can also consult the corresponding [model card](https://huggingface.co/HuggingFaceH4/starchat-alpha#bias-risks-and-limitations) for a more detailed look at the model's issues around factuality and bias.

## Future directions

Based on the experiments above, we were surprised to find that a code-generation model like StarCoder can be converted into a conversational agent by fine-tuning it on a dataset such as OpenAssistant. One possible explanation is that StarCoder was trained on both code and GitHub issues, the latter providing a rich source of natural-language text. We are excited to see where the community takes StarCoder next, and whether it will spark the next wave of open-source conversational assistants 🤗.

## Acknowledgements

We thank Nicolas Patry and Olivier Dehaene for their help with deploying StarCoder on the Inference API and for enabling [blazing fast text generation](https://github.com/huggingface/text-generation-inference). We also thank Omar Sanseviero for his guidance on data collection and his valuable suggestions for improving the demo. Finally, we are grateful to Abubakar Abid and the Gradio team for the wonderful developer experience and for sharing their expertise in building great demos.

## Links

- Code: [https://github.com/bigcode-project/starcoder/tree/main/chat](https://github.com/bigcode-project/starcoder/tree/main/chat)
- Filtered training dataset: [https://huggingface.co/datasets/HuggingFaceH4/oasst1_en](https://huggingface.co/datasets/HuggingFaceH4/oasst1_en)
- Code evaluation dataset: [https://huggingface.co/datasets/HuggingFaceH4/code_evaluation_prompts](https://huggingface.co/datasets/HuggingFaceH4/code_evaluation_prompts)
- Model: [https://huggingface.co/HuggingFaceH4/starchat-alpha](https://huggingface.co/HuggingFaceH4/starchat-alpha)

## Citation

If needed, please cite this blog post as follows.

```
@article{Tunstall2023starchat-alpha,
  author = {Tunstall, Lewis and Lambert, Nathan and Rajani, Nazneen and Beeching, Edward and Le Scao, Teven and von Werra, Leandro and Han, Sheon and Schmid, Philipp and Rush, Alexander},
  title = {Creating a Coding Assistant with StarCoder},
  journal = {Hugging Face Blog},
  year = {2023},
  note = {https://huggingface.co/blog/starchat-alpha},
}
```
9
0
hf_public_repos/bench_cluster/bench_cluster
hf_public_repos/bench_cluster/bench_cluster/template/base_network_bench.slurm
#!/bin/bash #SBATCH --job-name=bench_cluster #SBATCH --time=00:59:00 #SBATCH --partition=hopper-prod #SBATCH --nodes={{ nodes }} #SBATCH --gres=gpu:{{ n_proc_per_node }} #SBATCH --qos={{ qos }} #SBATCH --ntasks-per-node=1 #SBATCH --cpus-per-task=96 #SBATCH --exclusive #SBATCH --output={{ root_path }}/log_{{ nodes * n_proc_per_node }}_gpus.out #SBATCH --error={{ root_path }}/log_{{ nodes * n_proc_per_node }}_gpus.out source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh conda activate /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster export CUBLAS_WORKSPACE_CONFIG=":4096:8" export CUDA_DEVICE_MAX_CONNECTIONS="1" MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) MASTER_PORT=6000 LAUNCHER="torchrun \ --nproc_per_node {{ n_proc_per_node }} \ --nnodes {{ nodes }} \ --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} \ --rdzv_backend c10d \ --max_restarts 0 \ --tee 3 \ --node_rank ${SLURM_PROCID}" cd /fsx/ferdinandmom/ferdinand-hf/bench_cluster/bench_cluster # Run all_gather $LAUNCHER /fsx/ferdinandmom/ferdinand-hf/bench_cluster/bench_cluster/communication/all_gather.py --trials {{ trials }} --warmups {{ warmups }} --maxsize {{ maxsize }} --bw_unit {{ bw_unit }} --dtype {{ dtype }} --mem_factor {{ mem_factor }} # Run all_reduce $LAUNCHER /fsx/ferdinandmom/ferdinand-hf/bench_cluster/bench_cluster/communication/all_reduce.py --trials {{ trials }} --warmups {{ warmups }} --maxsize {{ maxsize }} --bw_unit {{ bw_unit }} --dtype {{ dtype }} --mem_factor {{ mem_factor }} # Run all_to_all $LAUNCHER /fsx/ferdinandmom/ferdinand-hf/bench_cluster/bench_cluster/communication/all_to_all.py --trials {{ trials }} --warmups {{ warmups }} --maxsize {{ maxsize }} --bw_unit {{ bw_unit }} --dtype {{ dtype }} --mem_factor {{ mem_factor }} # Run broadcast $LAUNCHER /fsx/ferdinandmom/ferdinand-hf/bench_cluster/bench_cluster/communication/broadcast.py --trials {{ trials }} --warmups {{ warmups }} --maxsize {{ maxsize }} --bw_unit {{ bw_unit }} --dtype {{ dtype }} --mem_factor {{ mem_factor }} # Run p2p $LAUNCHER /fsx/ferdinandmom/ferdinand-hf/bench_cluster/bench_cluster/communication/p2p.py --trials {{ trials }} --warmups {{ warmups }} --maxsize {{ maxsize }} --bw_unit {{ bw_unit }} --dtype {{ dtype }} --mem_factor {{ mem_factor }}
0
0
hf_public_repos/bench_cluster/bench_cluster
hf_public_repos/bench_cluster/bench_cluster/template/base_config.py
base_config = { 'general': { 'project': 'bench_cluster', 'seed': 42 }, 'model': { 'ddp_bucket_cap_mb': 25, 'dtype': 'bfloat16', 'init_method': { 'std': 0.025 }, 'make_vocab_size_divisible_by': 1, 'model_config' : { 'bos_token_id': 1, 'eos_token_id': 2, 'hidden_act': 'silu', 'hidden_size': 2048, 'initializer_range': 0.02, 'intermediate_size': 8192, 'is_llama_config': True, 'max_position_embeddings': 2048, 'num_attention_heads': 32, 'num_hidden_layers': 24, 'num_key_value_heads': 32, 'pad_token_id': None, 'pretraining_tp': 1, 'rms_norm_eps': 1.0e-05, 'rope_scaling': None, 'rope_theta': 10000.0, 'tie_word_embeddings': True, 'use_cache': True, 'vocab_size': 50272 }, }, 'optimizer': { 'accumulate_grad_in_fp32': True, 'clip_grad': 1.0, 'learning_rate_scheduler': { 'learning_rate': 0.0001, 'lr_decay_style': 'linear', 'lr_warmup_style': 'linear', 'lr_warmup_steps': 1, 'min_decay_lr': 1.0e-05 }, 'optimizer_factory': { 'adam_beta1': 0.9, 'adam_beta2': 0.95, 'adam_eps': 1.0e-08, 'name': 'adamW', 'torch_adam_is_fused': True }, 'weight_decay': 0.01, 'zero_stage': 1 }, 'parallelism': { "dp": 1, 'expert_parallel_size': 1, "pp": 1, "pp_engine": "1f1b", "tp": 1, 'tp_linear_async_communication': False, 'tp_mode': 'REDUCE_SCATTER', 'recompute_layer': False }, 'profiler': { 'profiler_export_path': None, }, 'tokenizer': { 'tokenizer_max_length': None, 'tokenizer_name_or_path': 'openai-community/gpt2', 'tokenizer_revision': None }, 'data_stages': [ { 'name': 'Training Stage', 'start_training_step': 1, 'data': { 'dataset': { 'dataset_overwrite_cache': False, 'dataset_processing_num_proc_per_process': 64, 'hf_dataset_config_name': None, 'hf_dataset_or_datasets': 'roneneldan/TinyStories', 'hf_dataset_splits': 'train', 'text_column_name': 'text' }, 'num_loading_workers': 0, 'seed': 42 } } ], 'lighteval': None, 'tokens': { 'train_steps': 20, 'val_check_interval': -1, 'batch_accumulation_per_replica': 1, 'limit_test_batches': 0, 'limit_val_batches': 0, 'micro_batch_size': 2, 'sequence_length': 4096, }, 'logging': { 'iteration_step_info_interval': 1, 'log_level': 'info', 'log_level_replica': 'info' }, 'checkpoints': { 'checkpoint_interval': 100000, 'checkpoints_path': '/dev/null', 'resume_checkpoint_path': None } }
1
0
hf_public_repos/bench_cluster/bench_cluster
hf_public_repos/bench_cluster/bench_cluster/communication/utils.py
import torch import os, sys import math import torch.distributed as dist from bench_cluster.communication.constants import * def print_rank_0(message): if dist.get_rank() == 0: print(message) def env2int(env_list, default=-1): for e in env_list: val = int(os.environ.get(e, -1)) if val >= 0: return val return default def init_torch_distributed(backend: str, local_rank: int): dist.init_process_group(backend, rank=int(os.environ["RANK"]), world_size=int(os.environ["WORLD_SIZE"])) torch.cuda.set_device(local_rank) print_rank_0(f"Initializing distributed backend: {backend}") print_rank_0(f"RANK: {os.environ['RANK']}") print_rank_0(f"WORLD_SIZE: {os.environ['WORLD_SIZE']}") def print_header(bw_unit, raw, comm_op): world_size = dist.get_world_size() tput = f'Throughput ({bw_unit})' busbw = f'BusBW ({bw_unit})' header = f"\n---- Performance of {comm_op} on {world_size} devices ---------------------------------------------------------\n" duration_str = 'Duration' if raw: duration_str += ' (us)' header += f"{'Size (Bytes)':20s} {'Description':25s} {duration_str:20s} {tput:20s} {busbw:20s}\n" header += "----------------------------------------------------------------------------------------------------" print_rank_0(header) def get_bw(bw_unit, comm_op, size, duration): n = dist.get_world_size() tput = 0 busbw = 0 if comm_op == "all_to_all": tput = (size / duration) busbw = (size / duration) * ((n - 1) / n) elif comm_op == "all_gather": size *= n tput = (size / duration) busbw = (size / duration) * ((n - 1) / n) elif comm_op == "all_reduce": tput = (size * 2 / duration) busbw = (size / duration) * (2 * (n - 1) / n) elif comm_op == "p2p" or comm_op == "broadcast": tput = (size / duration) busbw = tput else: print_rank_0("wrong comm_op specified") exit(0) if bw_unit == 'Gbps': tput *= 8 busbw *= 8 return tput, busbw def get_metric_strings(raw, tput, busbw, duration): duration_ms = duration * 1e3 duration_us = duration * 1e6 tput = f'{tput / 1e9:.3f}' busbw = f'{busbw /1e9:.3f}' if duration_us < 1e3 or raw: duration = f'{duration_us:.3f}' if not raw: duration += ' us' else: duration = f'{duration_ms:.3f} ms' return tput, busbw, duration def sync_all(): torch.cuda.synchronize() dist.barrier() def max_numel(comm_op, dtype, mem_factor, local_rank): dtype_size = _element_size(dtype) max_memory_per_gpu = torch.cuda.get_device_properties(local_rank).total_memory * mem_factor if comm_op == 'all_reduce' or comm_op == 'p2p' or comm_op == 'broadcast': elements_per_gpu = int(max_memory_per_gpu // dtype_size) elif comm_op == 'all_gather': # all_gather performance is lower for non-powers of two, and the output buffer size scales with world size # Therefore, divide by world size and round down to nearest power of 2 elements_per_gpu = int(max_memory_per_gpu // dtype_size // dist.get_world_size()) elements_per_gpu = int(pow(2, int(math.log(elements_per_gpu, 2)))) elif comm_op == 'all_to_all': # Number of elements must be divisible by world_size # all_to_all performance is lower for non-powers of two. Round down like all_gather. 
elements_per_gpu = int(max_memory_per_gpu // dtype_size) elements_per_gpu = int(dist.get_world_size() * round(elements_per_gpu / dist.get_world_size())) elements_per_gpu = int(pow(2, int(math.log(elements_per_gpu, 2)))) else: print(f"This communication operation: {comm_op} is not supported yet") exit(0) return elements_per_gpu # Helper function to pretty-print message sizes def convert_size(size_bytes): if size_bytes == 0: return "0B" size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") i = int(math.floor(math.log(size_bytes, 1024))) p = math.pow(1024, i) s = round(size_bytes / p, 2) return "%s %s" % (s, size_name[i]) # Copied from torch. Need to add the func here for old torch compatibility. def _element_size(dtype): """ Returns the element size for a dtype, in bytes """ if not isinstance(dtype, torch.dtype): raise RuntimeError(f'expected torch.dtype, but got {type(dtype)}') if dtype.is_complex: return torch.finfo(dtype).bits >> 2 elif dtype.is_floating_point: return torch.finfo(dtype).bits >> 3 elif dtype == torch.bool: # NOTE: torch.bool is not supported in torch.iinfo() return 1 else: return torch.iinfo(dtype).bits >> 3
2
0
hf_public_repos/bench_cluster/bench_cluster
hf_public_repos/bench_cluster/bench_cluster/communication/all_to_all.py
import os import torch import argparse import torch.distributed as dist from bench_cluster.communication.utils import sync_all, print_rank_0, get_bw, get_metric_strings, convert_size, print_header, max_numel, init_torch_distributed def timed_all_to_all(input, output, start_event, end_event, warmups, trials, async_op, bw_unit, raw): sync_all() # Warmups, establish connections, etc. for i in range(warmups): dist.all_to_all_single(output, input, async_op=async_op) sync_all() # time the actual comm op trials times and average it start_event.record() for i in range(trials): dist.all_to_all_single(output, input, async_op=async_op) end_event.record() sync_all() duration = start_event.elapsed_time(end_event) / 1000 # maintain and clean performance data avg_duration = duration / trials size = input.element_size() * input.nelement() n = dist.get_world_size() tput, busbw = get_bw(bw_unit, 'all_to_all', size, avg_duration) tput_str, busbw_str, duration_str = get_metric_strings(raw, tput, busbw, avg_duration) desc = f'{input.nelement()}x{input.element_size()}' if not raw: size = convert_size(size) print_rank_0(f"{size:<20} {desc:25s} {duration_str:20s} {tput_str:20s} {busbw_str:20s}") def run_all_to_all(local_rank, trials, warmups, maxsize, async_op, bw_unit, scan, raw, dtype, mem_factor, debug=False): world_size = dist.get_world_size() global_rank = dist.get_rank() # Prepare benchmark header print_header(bw_unit, raw, 'all_to_all') start_event = torch.cuda.Event(enable_timing=True) end_event = torch.cuda.Event(enable_timing=True) if scan: M_LIST = [] for x in (2**p for p in range(1, maxsize)): M_LIST.append(x) sync_all() # loop over various tensor sizes for M in M_LIST: global_rank = dist.get_rank() try: mat = torch.ones(world_size, M, dtype=getattr(torch, dtype)).cuda(local_rank) assert mat.numel() % world_size == 0, f"tensor cannot be divided in {world_size} chunks" sync_all() input = ((mat.mul_(float(global_rank))).view(-1)) output = (mat.clone().view(-1)) except RuntimeError as e: if 'out of memory' in str(e): print_rank_0('WARNING: Ran out of GPU memory. Exiting comm op.') sync_all() break else: raise e sync_all() timed_all_to_all(input, output, start_event, end_event, warmups, trials, async_op, bw_unit, raw) else: # Send the biggest message size our GPUs can fit. If you're facing OOM errors, reduce the mem_factor elements_per_gpu = max_numel('all_to_all', getattr(torch, dtype), mem_factor, local_rank) try: mat = torch.ones(elements_per_gpu, dtype=getattr(torch, dtype)).cuda(local_rank) assert mat.numel( ) % world_size == 0, f"tensor with {mat.numel()} elements cannot be divided in {world_size} chunks" input = ((mat.mul_(float(global_rank))).view(-1)) # Delete original mat to avoid OOM del mat torch.cuda.empty_cache() output = torch.zeros(elements_per_gpu, dtype=getattr(torch, dtype)).cuda(local_rank) except RuntimeError as e: if 'out of memory' in str(e): print_rank_0('WARNING: Ran out of GPU memory. 
Try to reduce the --mem-factor argument!') sync_all() return else: raise e sync_all() if debug: for i in range(world_size): if i == global_rank: print(f"Before AllToAll Input List at rank {global_rank}: {input}") dist.barrier() timed_all_to_all(input, output, start_event, end_event, warmups, trials, async_op, bw_unit, raw) if debug: for i in range(world_size): if i == global_rank: print(f"AllToAll Results at rank {global_rank}: {output}") dist.barrier() if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--local_rank', type=int, default=0) parser.add_argument('--trials', type=int, default=3) parser.add_argument('--warmups', type=int, default=1) parser.add_argument('--maxsize', type=int, default=24) parser.add_argument('--async_op', action='store_true') parser.add_argument('--bw_unit', type=str, default='Gbps') parser.add_argument('--scan', action='store_true') parser.add_argument('--raw', action='store_true') parser.add_argument('--dtype', type=str, default='float32') parser.add_argument('--mem_factor', type=float, default=0.1) parser.add_argument('--debug', action='store_true') args = parser.parse_args() local_rank = int(os.environ['LOCAL_RANK']) init_torch_distributed("nccl", local_rank) run_all_to_all(local_rank, args.trials, args.warmups, args.maxsize, args.async_op, args.bw_unit, args.scan, args.raw, args.dtype, args.mem_factor, args.debug)
3
0
hf_public_repos/bench_cluster/bench_cluster
hf_public_repos/bench_cluster/bench_cluster/communication/all_reduce.py
import os import torch import argparse import torch.distributed as dist from bench_cluster.communication.utils import sync_all, print_rank_0, get_bw, get_metric_strings, convert_size, print_header, max_numel, init_torch_distributed def timed_all_reduce(input, start_event, end_event, warmups, trials, async_op, bw_unit, raw): sync_all() # Warmups, establish connections, etc. for i in range(warmups): dist.all_reduce(input, async_op=async_op) sync_all() # time the actual comm op trials times and average it start_event.record() for i in range(trials): dist.all_reduce(input, async_op=async_op) end_event.record() sync_all() duration = start_event.elapsed_time(end_event) / 1000 # maintain and clean performance data avg_duration = duration / trials size = input.element_size() * input.nelement() n = dist.get_world_size() tput, busbw = get_bw(bw_unit, 'all_reduce', size, avg_duration) tput_str, busbw_str, duration_str = get_metric_strings(raw, tput, busbw, avg_duration) desc = f'{input.nelement()}x{input.element_size()}' if not raw: size = convert_size(size) print_rank_0(f"{size:<20} {desc:25s} {duration_str:20s} {tput_str:20s} {busbw_str:20s}") def run_all_reduce(local_rank, trials, warmups, maxsize, async_op, bw_unit, scan, raw, dtype, mem_factor, debug=False): # Prepare benchmark header print_header(bw_unit, raw, 'all_reduce') world_size = dist.get_world_size() global_rank = dist.get_rank() start_event = torch.cuda.Event(enable_timing=True) end_event = torch.cuda.Event(enable_timing=True) if scan: M_LIST = [] for x in (2**p for p in range(1, maxsize)): M_LIST.append(x) sync_all() # loop over various tensor sizes for M in M_LIST: global_rank = dist.get_rank() try: mat = torch.ones(world_size, M, dtype=getattr(torch, dtype)).cuda(local_rank) sync_all() input = ((mat.mul_(float(global_rank))).view(-1)) del mat torch.cuda.empty_cache() except RuntimeError as e: if 'out of memory' in str(e): print_rank_0('WARNING: Ran out of GPU memory. Try to reduce the --mem-factor argument!') sync_all() break else: raise e sync_all() timed_all_reduce(input, start_event, end_event, warmups, trials, async_op, bw_unit, raw) else: # Send the biggest message size our GPUs can fit. If you're facing OOM errors, reduce the mem_factor # Don't need output tensor, so we double mem_factor elements_per_gpu = max_numel('all_reduce', getattr(torch, dtype), mem_factor * 2, local_rank) try: mat = torch.ones(elements_per_gpu, dtype=getattr(torch, dtype)).cuda(local_rank) input = ((mat.mul_(float(global_rank))).view(-1)) except RuntimeError as e: if 'out of memory' in str(e): print_rank_0('WARNING: Ran out of GPU memory. 
Try to reduce the --mem-factor argument!') sync_all() return else: raise e sync_all() timed_all_reduce(input, start_event, end_event, warmups, trials, async_op, bw_unit, raw) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--trials', type=int, default=3) parser.add_argument('--warmups', type=int, default=1) parser.add_argument('--maxsize', type=int, default=24) parser.add_argument('--async_op', action='store_true') parser.add_argument('--bw_unit', type=str, default='Gbps') parser.add_argument('--scan', action='store_true') parser.add_argument('--raw', action='store_true') parser.add_argument('--dtype', type=str, default='float32') parser.add_argument('--mem_factor', type=float, default=0.1) parser.add_argument('--debug', action='store_true') args = parser.parse_args() local_rank = int(os.environ['LOCAL_RANK']) init_torch_distributed("nccl", local_rank) run_all_reduce(local_rank, args.trials, args.warmups, args.maxsize, args.async_op, args.bw_unit, args.scan, args.raw, args.dtype, args.mem_factor, args.debug)
4
0
hf_public_repos/bench_cluster/bench_cluster
hf_public_repos/bench_cluster/bench_cluster/communication/p2p.py
import os import torch import argparse import torch.distributed as dist from bench_cluster.communication.utils import sync_all, print_rank_0, get_bw, get_metric_strings, convert_size, print_header, max_numel, init_torch_distributed def timed_p2p(input, start_event, end_event, warmups, trials, async_op, bw_unit, raw): world_size = dist.get_world_size() rank = dist.get_rank() sync_all() # Warmups, establish connections, etc. for _ in range(warmups): for i in range(world_size): if i != rank: if async_op: if rank < i: dist.isend(input, i) else: dist.irecv(input, src=i) else: if rank < i: dist.send(input, i) else: dist.recv(input, src=i) sync_all() # time the actual comm op trials times and average it start_event.record() for _ in range(trials): for i in range(world_size): if i != rank: if async_op: if rank < i: dist.isend(input, i) else: dist.irecv(input, src=i) else: if rank < i: dist.send(input, i) else: dist.recv(input, src=i) end_event.record() sync_all() duration = start_event.elapsed_time(end_event) / 1000 # maintain and clean performance data avg_duration = duration / trials size = input.element_size() * input.nelement() n = world_size tput, busbw = get_bw(bw_unit, 'p2p', size * (n - 1), avg_duration) # Multiply size by (n-1) as each process communicates with all others tput_str, busbw_str, duration_str = get_metric_strings(raw, tput, busbw, avg_duration) desc = f'{input.nelement()}x{input.element_size()}' if not raw: size = convert_size(size) print_rank_0(f"{size:<20} {desc:25s} {duration_str:20s} {tput_str:20s} {busbw_str:20s}") def run_p2p(local_rank, trials, warmups, maxsize, async_op, bw_unit, scan, raw, dtype, mem_factor, debug=False): # Prepare benchmark header print_header(bw_unit, raw, 'p2p') global_rank = dist.get_rank() world_size = dist.get_world_size() start_event = torch.cuda.Event(enable_timing=True) end_event = torch.cuda.Event(enable_timing=True) if scan: # Create list of message sizes M_LIST = [2**p for p in range(1, maxsize)] sync_all() # loop over various tensor sizes for M in M_LIST: try: mat = torch.ones(M, dtype=getattr(torch, dtype)).cuda(local_rank) sync_all() input = mat.mul_(float(global_rank)) del mat torch.cuda.empty_cache() except RuntimeError as e: if 'out of memory' in str(e): print_rank_0('WARNING: Ran out of GPU memory. Exiting comm op.') sync_all() break else: raise e sync_all() timed_p2p(input, start_event, end_event, warmups, trials, async_op, bw_unit, raw) else: # Send the biggest message size our GPUs can fit. If you're facing OOM errors, reduce the mem_factor # Don't need output tensor, so double mem_factor elements_per_gpu = max_numel('p2p', getattr(torch, dtype), mem_factor * 2, local_rank) try: mat = torch.ones(elements_per_gpu, dtype=getattr(torch, dtype)).cuda(local_rank) input = mat.mul_(float(global_rank)) except RuntimeError as e: if 'out of memory' in str(e): print_rank_0('WARNING: Ran out of GPU memory. 
Try to reduce the --mem-factor argument!') sync_all() return sync_all() timed_p2p(input, start_event, end_event, warmups, trials, async_op, bw_unit, raw) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--local_rank', type=int, default=0) parser.add_argument('--trials', type=int, default=3) parser.add_argument('--warmups', type=int, default=1) parser.add_argument('--maxsize', type=int, default=24) parser.add_argument('--async_op', action='store_true') parser.add_argument('--bw_unit', type=str, default='Gbps') parser.add_argument('--scan', action='store_true') parser.add_argument('--raw', action='store_true') parser.add_argument('--dtype', type=str, default='float32') parser.add_argument('--mem_factor', type=float, default=0.1) parser.add_argument('--debug', action='store_true') args = parser.parse_args() local_rank = int(os.environ['LOCAL_RANK']) init_torch_distributed("nccl", local_rank) run_p2p(local_rank, args.trials, args.warmups, args.maxsize, args.async_op, args.bw_unit, args.scan, args.raw, args.dtype, args.mem_factor, args.debug)
5
0
hf_public_repos/bench_cluster/bench_cluster
hf_public_repos/bench_cluster/bench_cluster/communication/constants.py
DEFAULT_WARMUPS = 5 DEFAULT_TRIALS = 50 DEFAULT_TYPE = 'float' DEFAULT_BACKEND = 'nccl' DEFAULT_UNIT = 'Gbps' DEFAULT_DIST = 'torch' DEFAULT_MAXSIZE = 24 TORCH_DISTRIBUTED_DEFAULT_PORT = 29500
6
0
hf_public_repos/bench_cluster/bench_cluster
hf_public_repos/bench_cluster/bench_cluster/communication/broadcast.py
import os import torch import argparse import torch.distributed as dist from bench_cluster.communication.utils import sync_all, print_rank_0, get_bw, get_metric_strings, convert_size, print_header, max_numel, init_torch_distributed def timed_broadcast(input, start_event, end_event, warmups, trials, async_op, bw_unit, raw): sync_all() # Warmups, establish connections, etc. for i in range(warmups): dist.broadcast(input, 0, async_op=async_op) sync_all() # time the actual comm op trials times and average it start_event.record() for i in range(trials): dist.broadcast(input, 0, async_op=async_op) end_event.record() sync_all() duration = start_event.elapsed_time(end_event) / 1000 # maintain and clean performance data avg_duration = duration / trials size = input.element_size() * input.nelement() n = dist.get_world_size() tput, busbw = get_bw(bw_unit, 'broadcast', size, avg_duration) tput_str, busbw_str, duration_str = get_metric_strings(raw, tput, busbw, avg_duration) desc = f'{input.nelement()}x{input.element_size()}' if not raw: size = convert_size(size) print_rank_0(f"{size:<20} {desc:25s} {duration_str:20s} {tput_str:20s} {busbw_str:20s}") def run_broadcast(local_rank, trials, warmups, maxsize, async_op, bw_unit, scan, raw, dtype, mem_factor, debug=False): # Prepare benchmark header print_header(bw_unit, raw, 'broadcast') world_size = dist.get_world_size() global_rank = dist.get_rank() start_event = torch.cuda.Event(enable_timing=True) end_event = torch.cuda.Event(enable_timing=True) if scan: M_LIST = [] for x in (2**p for p in range(1, maxsize)): M_LIST.append(x) sync_all() # loop over various tensor sizes for M in M_LIST: global_rank = dist.get_rank() try: mat = torch.ones(world_size, M, dtype=getattr(torch, dtype)).cuda(local_rank) sync_all() input = ((mat.mul_(float(global_rank))).view(-1)) del mat torch.cuda.empty_cache() except RuntimeError as e: if 'out of memory' in str(e): print_rank_0('WARNING: Ran out of GPU memory. Exiting comm op.') sync_all() break else: raise e sync_all() timed_broadcast(input, start_event, end_event, warmups, trials, async_op, bw_unit, raw) else: # Send the biggest message size our GPUs can fit. If you're facing OOM errors, reduce the mem_factor # Don't need output tensor, so we double mem_factor elements_per_gpu = max_numel('broadcast', getattr(torch, dtype), mem_factor * 2, local_rank) try: mat = torch.ones(elements_per_gpu, dtype=getattr(torch, dtype)).cuda(local_rank) input = ((mat.mul_(float(global_rank))).view(-1)) except RuntimeError as e: if 'out of memory' in str(e): print_rank_0('WARNING: Ran out of GPU memory. 
Try to reduce the --mem-factor argument!') sync_all() return sync_all() timed_broadcast(input, start_event, end_event, warmups, trials, async_op, bw_unit, raw) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--local_rank', type=int, default=0) parser.add_argument('--trials', type=int, default=3) parser.add_argument('--warmups', type=int, default=1) parser.add_argument('--maxsize', type=int, default=24) parser.add_argument('--async_op', action='store_true') parser.add_argument('--bw_unit', type=str, default='Gbps') parser.add_argument('--scan', action='store_true') parser.add_argument('--raw', action='store_true') parser.add_argument('--dtype', type=str, default='float32') parser.add_argument('--mem_factor', type=float, default=0.1) parser.add_argument('--debug', action='store_true') args = parser.parse_args() local_rank = int(os.environ['LOCAL_RANK']) init_torch_distributed("nccl", local_rank) run_broadcast(local_rank, args.trials, args.warmups, args.maxsize, args.async_op, args.bw_unit, args.scan, args.raw, args.dtype, args.mem_factor, args.debug)
7
0
hf_public_repos/bench_cluster/bench_cluster
hf_public_repos/bench_cluster/bench_cluster/communication/all_gather.py
# https://raw.githubusercontent.com/EleutherAI/cookbook/main/benchmarks/communication/all_gather.py import os import torch import argparse import torch.distributed as dist from bench_cluster.communication.utils import sync_all, print_rank_0, get_bw, get_metric_strings, convert_size, print_header, max_numel, init_torch_distributed # Run all_gather and print metrics def timed_all_gather(input, output, start_event, end_event, warmups, trials, async_op, bw_unit, raw): sync_all() # Warmups, establish connections, etc. for i in range(warmups): # Test all gather output_tensors = list(torch.chunk(output, dist.get_world_size(), dim=0)) dist.all_gather(output_tensors, input, group=None, async_op=async_op) sync_all() # time the actual comm op trials times and average it start_event.record() for i in range(trials): output_tensors = list(torch.chunk(output, dist.get_world_size(), dim=0)) dist.all_gather(output_tensors, input, group=None, async_op=async_op) end_event.record() sync_all() duration = start_event.elapsed_time(end_event) / 1000 # maintain and clean performance data avg_duration = duration / trials size = input.element_size() * input.nelement() tput, busbw = get_bw(bw_unit, 'all_gather', size, avg_duration) tput_str, busbw_str, duration_str = get_metric_strings(raw, tput, busbw, avg_duration) desc = f'{input.nelement()}x{input.element_size()}' if not raw: size = convert_size(size) print_rank_0(f"{size:<20} {desc:25s} {duration_str:20s} {tput_str:20s} {busbw_str:20s}") def run_all_gather(local_rank, trials, warmups, maxsize, async_op, bw_unit, scan, raw, dtype, mem_factor, debug): # Prepare benchmark header print_header(bw_unit, raw, 'all_gather') global_rank = dist.get_rank() world_size = dist.get_world_size() start_event = torch.cuda.Event(enable_timing=True) end_event = torch.cuda.Event(enable_timing=True) if scan: # Create list of message sizes M_LIST = [] for x in (2**p for p in range(1, maxsize)): M_LIST.append(x) sync_all() # loop over various tensor sizes for M in M_LIST: global_rank = dist.get_rank() try: mat = torch.ones(world_size, M, dtype=getattr(torch, dtype)).cuda(local_rank) sync_all() input = ((mat.mul_(float(global_rank))).view(-1)) # Delete original mat to avoid OOM del mat torch.cuda.empty_cache() output = torch.zeros(input.nelement() * world_size, dtype=getattr(torch, dtype)).cuda(local_rank) except RuntimeError as e: if 'out of memory' in str(e): print_rank_0('WARNING: Ran out of GPU memory. Exiting comm op.') sync_all() break else: raise e sync_all() timed_all_gather(input, output, start_event, end_event, warmups, trials, async_op, bw_unit, raw) else: if hasattr(dist, 'all_gather_into_tensor'): mem_factor = mem_factor + 0.2 else: mem_factor = mem_factor # Send the biggest message size our GPUs can fit. If you're facing OOM errors, reduce the mem_factor sync_all() elements_per_gpu = max_numel('all_gather', getattr(torch, dtype), mem_factor, local_rank) try: mat = torch.ones(elements_per_gpu, dtype=getattr(torch, dtype)).cuda(local_rank) # multiply each GPU's tensor by the rank to ease debugging input = ((mat.mul_(float(global_rank))).view(-1)) # Delete original mat to avoid OOM del mat torch.cuda.empty_cache() output = torch.zeros(elements_per_gpu * world_size, dtype=getattr(torch, dtype)).cuda(local_rank) except RuntimeError as e: if 'out of memory' in str(e): print_rank_0('WARNING: Ran out of GPU memory. 
Try to reduce the --mem-factor argument!') sync_all() return else: raise e sync_all() timed_all_gather(input, output, start_event, end_event, warmups, trials, async_op, bw_unit, raw) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--trials', type=int, default=3) parser.add_argument('--warmups', type=int, default=1) parser.add_argument('--maxsize', type=int, default=24) parser.add_argument('--async_op', action='store_true') parser.add_argument('--bw_unit', type=str, default='Gbps') parser.add_argument('--scan', action='store_true') parser.add_argument('--raw', action='store_true') parser.add_argument('--dtype', type=str, default='float32') parser.add_argument('--mem_factor', type=float, default=0.1) parser.add_argument('--debug', action='store_true') args = parser.parse_args() local_rank = int(os.environ["LOCAL_RANK"]) init_torch_distributed("nccl", local_rank) run_all_gather(local_rank, args.trials, args.warmups, args.maxsize, args.async_op, args.bw_unit, args.scan, args.raw, args.dtype, args.mem_factor, args.debug)
8
0
hf_public_repos
hf_public_repos/autotrain-advanced-api/version
0.0.1
9
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/whisper/README.md
# candle-whisper: speech recognition An implementation of [OpenAI Whisper](https://github.com/openai/whisper) using candle. Whisper is a general purpose speech recognition model, it can be used to convert audio files (in the `.wav` format) to text. Supported features include language detection as well as multilingual speech recognition. ## Running some example If no audio file is passed as input, a [sample file](https://huggingface.co/datasets/Narsil/candle-examples/resolve/main/samples_jfk.wav) is automatically downloaded from the hub. ```bash cargo run --example whisper --release --features="symphonia" > No audio file submitted: Downloading https://huggingface.co/datasets/Narsil/candle_demo/blob/main/samples_jfk.wav > loaded wav data: Header { audio_format: 1, channel_count: 1, sampling_rate: 16000, bytes_per_second: 32000, bytes_per_sample: 2, bits_per_sample: 16 } > pcm data loaded 176000 > loaded mel: [1, 80, 3000] > 0.0s -- 30.0s: And so my fellow Americans ask not what your country can do for you ask what you can do for your country ``` In order to use the multilingual mode, specify a multilingual model via the `--model` flag, see the details below. ## Command line flags - `--input`: the audio file to be converted to text, in wav format. - `--language`: force the language to some specific value rather than being detected, e.g. `en`. - `--task`: the task to be performed, can be `transcribe` (return the text data in the original language) or `translate` (translate the text to English). - `--timestamps`: enable the timestamp mode where some timestamps are reported for each recognized audio extracts. - `--model`: the model to be used. Models that do not end with `-en` are multilingual models, other ones are English only models. The supported OpenAI Whisper models are `tiny`, `tiny.en`, `base`, `base.en`, `small`, `small.en`, `medium`, `medium.en`, `large`, `large-v2` and `large-v3`. The supported Distil-Whisper models are `distil-medium.en`, `distil-large-v2` and `distil-large-v3`.
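For instance, several of these flags can be combined in one invocation. This is an illustrative example only (the input path is a placeholder, and flag names are taken from the list above):

```bash
# Transcribe a local English recording with timestamps using the small English-only model.
cargo run --example whisper --release --features="symphonia" -- \
  --model small.en --input /path/to/recording.wav --task transcribe --timestamps
```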
0
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/flux/t5_tokenizer.py
from transformers import AutoTokenizer

BASE_MODEL = "google/t5-v1_1-xxl"

tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)
# The tokenizer will be saved in /tmp/tokenizer/tokenizer.json
tokenizer.save_pretrained("/tmp/tokenizer/")
1
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/flux/main.rs
#[cfg(feature = "accelerate")] extern crate accelerate_src; #[cfg(feature = "mkl")] extern crate intel_mkl_src; use candle_transformers::models::{clip, flux, t5}; use anyhow::{Error as E, Result}; use candle::{IndexOp, Module, Tensor}; use candle_nn::VarBuilder; use clap::Parser; use tokenizers::Tokenizer; #[derive(Parser)] #[command(author, version, about, long_about = None)] struct Args { /// The prompt to be used for image generation. #[arg(long, default_value = "A rusty robot walking on a beach")] prompt: String, /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Use the quantized model. #[arg(long)] quantized: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, /// The height in pixels of the generated image. #[arg(long)] height: Option<usize>, /// The width in pixels of the generated image. #[arg(long)] width: Option<usize>, #[arg(long)] decode_only: Option<String>, #[arg(long, value_enum, default_value = "schnell")] model: Model, /// Use the slower kernels. #[arg(long)] use_dmmv: bool, /// The seed to use when generating random samples. #[arg(long)] seed: Option<u64>, } #[derive(Debug, Clone, Copy, clap::ValueEnum, PartialEq, Eq)] enum Model { Schnell, Dev, } fn run(args: Args) -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let Args { prompt, cpu, height, width, tracing, decode_only, model, quantized, .. } = args; let width = width.unwrap_or(1360); let height = height.unwrap_or(768); let _guard = if tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; let api = hf_hub::api::sync::Api::new()?; let bf_repo = { let name = match model { Model::Dev => "black-forest-labs/FLUX.1-dev", Model::Schnell => "black-forest-labs/FLUX.1-schnell", }; api.repo(hf_hub::Repo::model(name.to_string())) }; let device = candle_examples::device(cpu)?; if let Some(seed) = args.seed { device.set_seed(seed)?; } let dtype = device.bf16_default_to_f32(); let img = match decode_only { None => { let t5_emb = { let repo = api.repo(hf_hub::Repo::with_revision( "google/t5-v1_1-xxl".to_string(), hf_hub::RepoType::Model, "refs/pr/2".to_string(), )); let model_file = repo.get("model.safetensors")?; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], dtype, &device)? }; let config_filename = repo.get("config.json")?; let config = std::fs::read_to_string(config_filename)?; let config: t5::Config = serde_json::from_str(&config)?; let mut model = t5::T5EncoderModel::load(vb, &config)?; let tokenizer_filename = api .model("lmz/mt5-tokenizers".to_string()) .get("t5-v1_1-xxl.tokenizer.json")?; let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let mut tokens = tokenizer .encode(prompt.as_str(), true) .map_err(E::msg)? .get_ids() .to_vec(); tokens.resize(256, 0); let input_token_ids = Tensor::new(&tokens[..], &device)?.unsqueeze(0)?; println!("{input_token_ids}"); model.forward(&input_token_ids)? }; println!("T5\n{t5_emb}"); let clip_emb = { let repo = api.repo(hf_hub::Repo::model( "openai/clip-vit-large-patch14".to_string(), )); let model_file = repo.get("model.safetensors")?; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], dtype, &device)? 
}; // https://huggingface.co/openai/clip-vit-large-patch14/blob/main/config.json let config = clip::text_model::ClipTextConfig { vocab_size: 49408, projection_dim: 768, activation: clip::text_model::Activation::QuickGelu, intermediate_size: 3072, embed_dim: 768, max_position_embeddings: 77, pad_with: None, num_hidden_layers: 12, num_attention_heads: 12, }; let model = clip::text_model::ClipTextTransformer::new(vb.pp("text_model"), &config)?; let tokenizer_filename = repo.get("tokenizer.json")?; let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let tokens = tokenizer .encode(prompt.as_str(), true) .map_err(E::msg)? .get_ids() .to_vec(); let input_token_ids = Tensor::new(&tokens[..], &device)?.unsqueeze(0)?; println!("{input_token_ids}"); model.forward(&input_token_ids)? }; println!("CLIP\n{clip_emb}"); let img = { let cfg = match model { Model::Dev => flux::model::Config::dev(), Model::Schnell => flux::model::Config::schnell(), }; let img = flux::sampling::get_noise(1, height, width, &device)?.to_dtype(dtype)?; let state = if quantized { flux::sampling::State::new( &t5_emb.to_dtype(candle::DType::F32)?, &clip_emb.to_dtype(candle::DType::F32)?, &img.to_dtype(candle::DType::F32)?, )? } else { flux::sampling::State::new(&t5_emb, &clip_emb, &img)? }; let timesteps = match model { Model::Dev => { flux::sampling::get_schedule(50, Some((state.img.dim(1)?, 0.5, 1.15))) } Model::Schnell => flux::sampling::get_schedule(4, None), }; println!("{state:?}"); println!("{timesteps:?}"); if quantized { let model_file = match model { Model::Schnell => api .repo(hf_hub::Repo::model("lmz/candle-flux".to_string())) .get("flux1-schnell.gguf")?, Model::Dev => todo!(), }; let vb = candle_transformers::quantized_var_builder::VarBuilder::from_gguf( model_file, &device, )?; let model = flux::quantized_model::Flux::new(&cfg, vb)?; flux::sampling::denoise( &model, &state.img, &state.img_ids, &state.txt, &state.txt_ids, &state.vec, &timesteps, 4., )? .to_dtype(dtype)? } else { let model_file = match model { Model::Schnell => bf_repo.get("flux1-schnell.safetensors")?, Model::Dev => bf_repo.get("flux1-dev.safetensors")?, }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], dtype, &device)? }; let model = flux::model::Flux::new(&cfg, vb)?; flux::sampling::denoise( &model, &state.img, &state.img_ids, &state.txt, &state.txt_ids, &state.vec, &timesteps, 4., )? } }; flux::sampling::unpack(&img, height, width)? } Some(file) => { let mut st = candle::safetensors::load(file, &device)?; st.remove("img").unwrap().to_dtype(dtype)? } }; println!("latent img\n{img}"); let img = { let model_file = bf_repo.get("ae.safetensors")?; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], dtype, &device)? }; let cfg = match model { Model::Dev => flux::autoencoder::Config::dev(), Model::Schnell => flux::autoencoder::Config::schnell(), }; let model = flux::autoencoder::AutoEncoder::new(&cfg, vb)?; model.decode(&img)? }; println!("img\n{img}"); let img = ((img.clamp(-1f32, 1f32)? + 1.0)? * 127.5)?.to_dtype(candle::DType::U8)?; candle_examples::save_image(&img.i(0)?, "out.jpg")?; Ok(()) } fn main() -> Result<()> { let args = Args::parse(); #[cfg(feature = "cuda")] candle::quantized::cuda::set_force_dmmv(args.use_dmmv); run(args) }
2
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/flux/README.md
# candle-flux: image generation with latent rectified flow transformers ![rusty robot holding a candle](./assets/flux-robot.jpg) Flux is a 12B rectified flow transformer capable of generating images from text descriptions, [huggingface](https://huggingface.co/black-forest-labs/FLUX.1-schnell), [github](https://github.com/black-forest-labs/flux), [blog post](https://blackforestlabs.ai/announcing-black-forest-labs/). ## Running the model ```bash cargo run --features cuda --example flux -r -- \ --height 1024 --width 1024 \ --prompt "a rusty robot walking on a beach holding a small torch, the robot has the word \"rust\" written on it, high quality, 4k" ```
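For orientation, here is a condensed, hedged sketch of the non-quantized `schnell` sampling path from the `main.rs` shown above; it only restates calls already present in that example (`get_noise`, `State::new`, `get_schedule`, `denoise`, `unpack` and the autoencoder decode) and assumes `t5_emb`, `clip_emb`, `vb`, `autoencoder`, `device` and `dtype` were built as in `main.rs`.

```rust
// Condensed from the flux example above (schnell, non-quantized path); not a
// standalone program. `t5_emb`, `clip_emb`, `vb` and `autoencoder` are assumed
// to have been constructed as in `main.rs`.
let cfg = flux::model::Config::schnell();
let img = flux::sampling::get_noise(1, height, width, &device)?.to_dtype(dtype)?;
let state = flux::sampling::State::new(&t5_emb, &clip_emb, &img)?;
let timesteps = flux::sampling::get_schedule(4, None); // 4 denoising steps for schnell
let model = flux::model::Flux::new(&cfg, vb)?;
let latent = flux::sampling::denoise(
    &model, &state.img, &state.img_ids, &state.txt, &state.txt_ids, &state.vec, &timesteps, 4.,
)?;
let latent = flux::sampling::unpack(&latent, height, width)?;
// Decode the packed latent with the autoencoder and write the image to disk.
let decoded = autoencoder.decode(&latent)?;
let img8 = ((decoded.clamp(-1f32, 1f32)? + 1.0)? * 127.5)?.to_dtype(candle::DType::U8)?;
candle_examples::save_image(&img8.i(0)?, "out.jpg")?;
```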
3
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/clip/main.rs
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::Error as E; use clap::Parser; use candle::{DType, Device, Tensor}; use candle_nn::{ops::softmax, VarBuilder}; use candle_transformers::models::clip; use tokenizers::Tokenizer; #[derive(Parser)] struct Args { #[arg(long)] model: Option<String>, #[arg(long)] tokenizer: Option<String>, #[arg(long, use_value_delimiter = true)] images: Option<Vec<String>>, #[arg(long)] cpu: bool, #[arg(long, use_value_delimiter = true)] sequences: Option<Vec<String>>, } fn load_image<T: AsRef<std::path::Path>>(path: T, image_size: usize) -> anyhow::Result<Tensor> { let img = image::ImageReader::open(path)?.decode()?; let (height, width) = (image_size, image_size); let img = img.resize_to_fill( width as u32, height as u32, image::imageops::FilterType::Triangle, ); let img = img.to_rgb8(); let img = img.into_raw(); let img = Tensor::from_vec(img, (height, width, 3), &Device::Cpu)? .permute((2, 0, 1))? .to_dtype(DType::F32)? .affine(2. / 255., -1.)?; Ok(img) } fn load_images<T: AsRef<std::path::Path>>( paths: &Vec<T>, image_size: usize, ) -> anyhow::Result<Tensor> { let mut images = vec![]; for path in paths { let tensor = load_image(path, image_size)?; images.push(tensor); } let images = Tensor::stack(&images, 0)?; Ok(images) } pub fn main() -> anyhow::Result<()> { let args = Args::parse(); let model_file = match args.model { None => { let api = hf_hub::api::sync::Api::new()?; let api = api.repo(hf_hub::Repo::with_revision( "openai/clip-vit-base-patch32".to_string(), hf_hub::RepoType::Model, "refs/pr/15".to_string(), )); api.get("model.safetensors")? } Some(model) => model.into(), }; let tokenizer = get_tokenizer(args.tokenizer)?; let config = clip::ClipConfig::vit_base_patch32(); let device = candle_examples::device(args.cpu)?; let vec_imgs = match args.images { Some(imgs) => imgs, None => vec![ "candle-examples/examples/stable-diffusion/assets/stable-diffusion-xl.jpg".to_string(), "candle-examples/examples/yolo-v8/assets/bike.jpg".to_string(), ], }; let images = load_images(&vec_imgs, config.image_size)?.to_device(&device)?; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file.clone()], DType::F32, &device)? }; let model = clip::ClipModel::new(vb, &config)?; let (input_ids, vec_seq) = tokenize_sequences(args.sequences, &tokenizer, &device)?; let (_logits_per_text, logits_per_image) = model.forward(&images, &input_ids)?; let softmax_image = softmax(&logits_per_image, 1)?; let softmax_image_vec = softmax_image.flatten_all()?.to_vec1::<f32>()?; println!("softmax_image_vec: {:?}", softmax_image_vec); let probability_vec = softmax_image_vec .iter() .map(|v| v * 100.0) .collect::<Vec<f32>>(); let probability_per_image = probability_vec.len() / vec_imgs.len(); for (i, img) in vec_imgs.iter().enumerate() { let start = i * probability_per_image; let end = start + probability_per_image; let prob = &probability_vec[start..end]; println!("\n\nResults for image: {}\n", img); for (i, p) in prob.iter().enumerate() { println!("Probability: {:.4}% Text: {} ", p, vec_seq[i]); } } Ok(()) } pub fn get_tokenizer(tokenizer: Option<String>) -> anyhow::Result<Tokenizer> { let tokenizer = match tokenizer { None => { let api = hf_hub::api::sync::Api::new()?; let api = api.repo(hf_hub::Repo::with_revision( "openai/clip-vit-base-patch32".to_string(), hf_hub::RepoType::Model, "refs/pr/15".to_string(), )); api.get("tokenizer.json")? 
} Some(file) => file.into(), }; Tokenizer::from_file(tokenizer).map_err(E::msg) } pub fn tokenize_sequences( sequences: Option<Vec<String>>, tokenizer: &Tokenizer, device: &Device, ) -> anyhow::Result<(Tensor, Vec<String>)> { let pad_id = *tokenizer .get_vocab(true) .get("<|endoftext|>") .ok_or(E::msg("No pad token"))?; let vec_seq = match sequences { Some(seq) => seq, None => vec![ "a cycling race".to_string(), "a photo of two cats".to_string(), "a robot holding a candle".to_string(), ], }; let mut tokens = vec![]; for seq in vec_seq.clone() { let encoding = tokenizer.encode(seq, true).map_err(E::msg)?; tokens.push(encoding.get_ids().to_vec()); } let max_len = tokens.iter().map(|v| v.len()).max().unwrap_or(0); // Pad the sequences to have the same length for token_vec in tokens.iter_mut() { let len_diff = max_len - token_vec.len(); if len_diff > 0 { token_vec.extend(vec![pad_id; len_diff]); } } let input_ids = Tensor::new(tokens, device)?; Ok((input_ids, vec_seq)) }
4
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/clip/README.md
# candle-clip Contrastive Language-Image Pre-Training (CLIP) is an architecture trained on pairs of images with related texts. https://github.com/openai/CLIP https://github.com/huggingface/transformers/tree/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip ## Running an example on CPU ``` $ cargo run --example clip --release -- --images "candle-examples/examples/stable-diffusion/assets/stable-diffusion-xl.jpg","candle-examples/examples/yolo-v8/assets/bike.jpg" --cpu --sequences "a cycling race","a photo of two cats","a robot holding a candle" Results for image: candle-examples/examples/stable-diffusion/assets/stable-diffusion-xl.jpg INFO clip: Probability: 0.0000% Text: a cycling race INFO clip: Probability: 0.0000% Text: a photo of two cats INFO clip: Probability: 100.0000% Text: a robot holding a candle Results for image: candle-examples/examples/yolo-v8/assets/bike.jpg INFO clip: Probability: 99.9999% Text: a cycling race INFO clip: Probability: 0.0001% Text: a photo of two cats INFO clip: Probability: 0.0000% Text: a robot holding a candle ``` ## Running an example with the metal feature (mac) ``` $ cargo run --features metal --example clip --release -- --images "candle-examples/examples/stable-diffusion/assets/stable-diffusion-xl.jpg","candle-examples/examples/yolo-v8/assets/bike.jpg" --sequences "a cycling race","a photo of two cats","a robot holding a candle" Results for image: candle-examples/examples/stable-diffusion/assets/stable-diffusion-xl.jpg INFO clip: Probability: 0.0000% Text: a cycling race INFO clip: Probability: 0.0000% Text: a photo of two cats INFO clip: Probability: 100.0000% Text: a robot holding a candle Results for image: candle-examples/examples/yolo-v8/assets/bike.jpg INFO clip: Probability: 99.9999% Text: a cycling race INFO clip: Probability: 0.0001% Text: a photo of two cats INFO clip: Probability: 0.0000% Text: a robot holding a candle ```
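The probability figures above come down to a few calls that already appear in the example's `main.rs`; the hedged sketch below condenses them and assumes `model`, `images`, `tokenizer`, `args`, `vec_imgs` and `device` were built as in that example.

```rust
// Condensed from the clip example above; not a standalone program.
let (input_ids, vec_seq) = tokenize_sequences(args.sequences, &tokenizer, &device)?;
// Joint forward pass over the image batch and the tokenized sequences.
let (_logits_per_text, logits_per_image) = model.forward(&images, &input_ids)?;
// Softmax over the text axis yields, for every image, a probability per sequence.
let probs = softmax(&logits_per_image, 1)?.flatten_all()?.to_vec1::<f32>()?;
let per_image = probs.len() / vec_imgs.len();
for (i, img) in vec_imgs.iter().enumerate() {
    for (j, p) in probs[i * per_image..(i + 1) * per_image].iter().enumerate() {
        println!("{img}: {:.4}% {}", *p * 100.0, vec_seq[j]);
    }
}
```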
5
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/qwen/main.rs
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::{Error as E, Result}; use clap::Parser; use candle_transformers::models::qwen2::{Config as ConfigBase, ModelForCausalLM as ModelBase}; use candle_transformers::models::qwen2_moe::{Config as ConfigMoe, Model as ModelMoe}; use candle::{DType, Device, Tensor}; use candle_examples::token_output_stream::TokenOutputStream; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use hf_hub::{api::sync::Api, Repo, RepoType}; use tokenizers::Tokenizer; enum Model { Base(ModelBase), Moe(ModelMoe), } impl Model { fn forward(&mut self, xs: &Tensor, s: usize) -> candle::Result<Tensor> { match self { Self::Moe(ref mut m) => m.forward(xs, s), Self::Base(ref mut m) => m.forward(xs, s), } } } struct TextGeneration { model: Model, device: Device, tokenizer: TokenOutputStream, logits_processor: LogitsProcessor, repeat_penalty: f32, repeat_last_n: usize, } impl TextGeneration { #[allow(clippy::too_many_arguments)] fn new( model: Model, tokenizer: Tokenizer, seed: u64, temp: Option<f64>, top_p: Option<f64>, repeat_penalty: f32, repeat_last_n: usize, device: &Device, ) -> Self { let logits_processor = LogitsProcessor::new(seed, temp, top_p); Self { model, tokenizer: TokenOutputStream::new(tokenizer), logits_processor, repeat_penalty, repeat_last_n, device: device.clone(), } } fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> { use std::io::Write; self.tokenizer.clear(); let mut tokens = self .tokenizer .tokenizer() .encode(prompt, true) .map_err(E::msg)? .get_ids() .to_vec(); for &t in tokens.iter() { if let Some(t) = self.tokenizer.next_token(t)? { print!("{t}") } } std::io::stdout().flush()?; let mut generated_tokens = 0usize; let eos_token = match self.tokenizer.get_token("<|endoftext|>") { Some(token) => token, None => anyhow::bail!("cannot find the <|endoftext|> token"), }; let start_gen = std::time::Instant::now(); for index in 0..sample_len { let context_size = if index > 0 { 1 } else { tokens.len() }; let start_pos = tokens.len().saturating_sub(context_size); let ctxt = &tokens[start_pos..]; let input = Tensor::new(ctxt, &self.device)?.unsqueeze(0)?; let logits = self.model.forward(&input, start_pos)?; let logits = logits.squeeze(0)?.squeeze(0)?.to_dtype(DType::F32)?; let logits = if self.repeat_penalty == 1. { logits } else { let start_at = tokens.len().saturating_sub(self.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, self.repeat_penalty, &tokens[start_at..], )? }; let next_token = self.logits_processor.sample(&logits)?; tokens.push(next_token); generated_tokens += 1; if next_token == eos_token { break; } if let Some(t) = self.tokenizer.next_token(next_token)? { print!("{t}"); std::io::stdout().flush()?; } } let dt = start_gen.elapsed(); if let Some(rest) = self.tokenizer.decode_rest().map_err(E::msg)? 
{ print!("{rest}"); } std::io::stdout().flush()?; println!( "\n{generated_tokens} tokens generated ({:.2} token/s)", generated_tokens as f64 / dt.as_secs_f64(), ); Ok(()) } } #[derive(Clone, Copy, Debug, clap::ValueEnum, PartialEq, Eq)] enum WhichModel { #[value(name = "0.5b")] W0_5b, #[value(name = "1.8b")] W1_8b, #[value(name = "4b")] W4b, #[value(name = "7b")] W7b, #[value(name = "14b")] W14b, #[value(name = "72b")] W72b, #[value(name = "moe-a2.7b")] MoeA27b, #[value(name = "2-0.5b")] W2_0_5b, #[value(name = "2-1.5b")] W2_1_5b, #[value(name = "2-7b")] W2_7b, #[value(name = "2-72b")] W2_72b, } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, #[arg(long)] use_flash_attn: bool, #[arg(long)] prompt: String, /// The temperature used to generate samples. #[arg(long)] temperature: Option<f64>, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, /// The seed to use when generating random samples. #[arg(long, default_value_t = 299792458)] seed: u64, /// The length of the sample to generate (in tokens). #[arg(long, short = 'n', default_value_t = 10000)] sample_len: usize, #[arg(long)] model_id: Option<String>, #[arg(long, default_value = "main")] revision: String, #[arg(long)] tokenizer_file: Option<String>, #[arg(long)] weight_files: Option<String>, /// Penalty to be applied for repeating tokens, 1. means no penalty. #[arg(long, default_value_t = 1.1)] repeat_penalty: f32, /// The context size to consider for the repeat penalty. #[arg(long, default_value_t = 64)] repeat_last_n: usize, #[arg(long, default_value = "0.5b")] model: WhichModel, } fn main() -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; println!( "avx: {}, neon: {}, simd128: {}, f16c: {}", candle::utils::with_avx(), candle::utils::with_neon(), candle::utils::with_simd128(), candle::utils::with_f16c() ); println!( "temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}", args.temperature.unwrap_or(0.), args.repeat_penalty, args.repeat_last_n ); let start = std::time::Instant::now(); let api = Api::new()?; let model_id = match args.model_id { Some(model_id) => model_id, None => { let (version, size) = match args.model { WhichModel::W2_0_5b => ("2", "0.5B"), WhichModel::W2_1_5b => ("2", "1.5B"), WhichModel::W2_7b => ("2", "7B"), WhichModel::W2_72b => ("2", "72B"), WhichModel::W0_5b => ("1.5", "0.5B"), WhichModel::W1_8b => ("1.5", "1.8B"), WhichModel::W4b => ("1.5", "4B"), WhichModel::W7b => ("1.5", "7B"), WhichModel::W14b => ("1.5", "14B"), WhichModel::W72b => ("1.5", "72B"), WhichModel::MoeA27b => ("1.5", "MoE-A2.7B"), }; format!("Qwen/Qwen{version}-{size}") } }; let repo = api.repo(Repo::with_revision( model_id, RepoType::Model, args.revision, )); let tokenizer_filename = match args.tokenizer_file { Some(file) => std::path::PathBuf::from(file), None => repo.get("tokenizer.json")?, }; let filenames = match args.weight_files { Some(files) => files .split(',') .map(std::path::PathBuf::from) .collect::<Vec<_>>(), None => match args.model { WhichModel::W0_5b | WhichModel::W2_0_5b | WhichModel::W2_1_5b | WhichModel::W1_8b => { vec![repo.get("model.safetensors")?] 
} WhichModel::W4b | WhichModel::W7b | WhichModel::W2_7b | WhichModel::W14b | WhichModel::W72b | WhichModel::W2_72b | WhichModel::MoeA27b => { candle_examples::hub_load_safetensors(&repo, "model.safetensors.index.json")? } }, }; println!("retrieved the files in {:?}", start.elapsed()); let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let start = std::time::Instant::now(); let config_file = repo.get("config.json")?; let device = candle_examples::device(args.cpu)?; let dtype = if device.is_cuda() { DType::BF16 } else { DType::F32 }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, dtype, &device)? }; let model = match args.model { WhichModel::MoeA27b => { let config: ConfigMoe = serde_json::from_slice(&std::fs::read(config_file)?)?; Model::Moe(ModelMoe::new(&config, vb)?) } _ => { let config: ConfigBase = serde_json::from_slice(&std::fs::read(config_file)?)?; Model::Base(ModelBase::new(&config, vb)?) } }; println!("loaded the model in {:?}", start.elapsed()); let mut pipeline = TextGeneration::new( model, tokenizer, args.seed, args.temperature, args.top_p, args.repeat_penalty, args.repeat_last_n, &device, ); pipeline.run(&args.prompt, args.sample_len)?; Ok(()) }
6
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/qwen/README.md
# candle-qwen: large language model series from Alibaba Cloud Qwen 1.5 is a series of large language models that provide strong performance on English and Chinese. - [Blog post](https://qwenlm.github.io/blog/qwen1.5/) introducing Qwen1.5. - [Model card](https://huggingface.co/Qwen/Qwen1.5-0.5B) on the HuggingFace Hub. - [Blog post](https://qwenlm.github.io/blog/qwen-moe/) for the mixture-of-experts (MoE) variant. ## Running the example ```bash $ cargo run --example qwen --release -- --prompt "Hello there " ``` Various model sizes are available via the `--model` argument, including the MoE variant. ```bash $ cargo run --example qwen --release -- --model moe-a2.7b --prompt 'def print_prime(n: int): ' def print_prime(n: int): # n is the number of primes to be printed for i in range(2, n + 1): if all(i % j != 0 for j in range(2, i)): print(i) ```
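The core of the example above is the token-by-token loop in `TextGeneration::run`; the hedged sketch below condenses it (prompt streaming and the repeat penalty are omitted) and assumes `model`, `tokens`, `logits_processor`, `eos_token`, `sample_len` and `device` are set up as in the example's `main.rs`.

```rust
// Condensed from the qwen example above: after the prompt has been processed
// once, each step feeds only the newest token back in, relying on the KV cache.
for index in 0..sample_len {
    let context_size = if index > 0 { 1 } else { tokens.len() };
    let start_pos = tokens.len().saturating_sub(context_size);
    let input = Tensor::new(&tokens[start_pos..], &device)?.unsqueeze(0)?;
    let logits = model.forward(&input, start_pos)?.squeeze(0)?.squeeze(0)?.to_dtype(DType::F32)?;
    let next_token = logits_processor.sample(&logits)?;
    tokens.push(next_token);
    if next_token == eos_token {
        break;
    }
}
```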
7
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/trocr/readme.md
# candle-trocr `TrOCR` is a transformer OCR Model. In this example it is used to transcribe image text. See the associated [model card](https://huggingface.co/microsoft/trocr-base-printed) for details on the model itself. Supported models include: - `--which base`: small handwritten OCR model. - `--which large`: large handwritten OCR model. - `--which base-printed`: small printed OCR model. - `--which large-printed`: large printed OCR model. ## Running an example ```bash cargo run --example trocr --release -- --image candle-examples/examples/trocr/assets/trocr.png cargo run --example trocr --release -- --which large --image candle-examples/examples/trocr/assets/trocr.png cargo run --example trocr --release -- --which base-printed --image candle-examples/examples/trocr/assets/noto.png cargo run --example trocr --release -- --which large-printed --image candle-examples/examples/trocr/assets/noto.png ``` ### Outputs ``` industry , Mr. Brown commented icily . " Let us have a industry , " Mr. Brown commented icily . " Let us have a THE QUICK BROWN FOR JUMPS OVER THE LAY DOG THE QUICK BROWN FOX JUMPS OVER THE LAZY DOG ```
8
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/trocr/main.rs
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::Error as E; use clap::{Parser, ValueEnum}; use candle::{DType, Tensor}; use candle_examples::token_output_stream::TokenOutputStream; use candle_nn::VarBuilder; use candle_transformers::models::{trocr, vit}; use tokenizers::Tokenizer; mod image_processor; #[derive(Clone, Debug, Copy, ValueEnum)] enum Which { #[value(name = "base")] BaseHandwritten, #[value(name = "large")] LargeHandwritten, BasePrinted, LargePrinted, } impl Which { fn repo_and_branch_name(&self) -> (&str, &str) { match self { Self::BaseHandwritten => ("microsoft/trocr-base-handwritten", "refs/pr/3"), Self::LargeHandwritten => ("microsoft/trocr-large-handwritten", "refs/pr/6"), Self::BasePrinted => ("microsoft/trocr-base-printed", "refs/pr/7"), Self::LargePrinted => ("microsoft/trocr-large-printed", "main"), } } } #[derive(Debug, Clone, serde::Deserialize)] struct Config { encoder: vit::Config, decoder: trocr::TrOCRConfig, } #[derive(Parser, Debug)] struct Args { #[arg(long)] model: Option<String>, /// Choose the variant of the model to run. #[arg(long, default_value = "base")] which: Which, /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// The image file to be processed. #[arg(long)] image: String, /// Tokenization config. #[arg(long)] tokenizer: Option<String>, } pub fn main() -> anyhow::Result<()> { let args = Args::parse(); let api = hf_hub::api::sync::Api::new()?; let mut tokenizer_dec = { let tokenizer_file = match args.tokenizer { None => api .model(String::from("ToluClassics/candle-trocr-tokenizer")) .get("tokenizer.json")?, Some(tokenizer) => std::path::PathBuf::from(tokenizer), }; let tokenizer = Tokenizer::from_file(&tokenizer_file).map_err(E::msg)?; TokenOutputStream::new(tokenizer) }; let device = candle_examples::device(args.cpu)?; let vb = { let model = match args.model { Some(model) => std::path::PathBuf::from(model), None => { let (repo, branch) = args.which.repo_and_branch_name(); api.repo(hf_hub::Repo::with_revision( repo.to_string(), hf_hub::RepoType::Model, branch.to_string(), )) .get("model.safetensors")? } }; println!("model: {:?}", model); unsafe { VarBuilder::from_mmaped_safetensors(&[model], DType::F32, &device)? 
} }; let (encoder_config, decoder_config) = { let (repo, branch) = args.which.repo_and_branch_name(); let config_filename = api .repo(hf_hub::Repo::with_revision( repo.to_string(), hf_hub::RepoType::Model, branch.to_string(), )) .get("config.json")?; let config: Config = serde_json::from_reader(std::fs::File::open(config_filename)?)?; (config.encoder, config.decoder) }; let mut model = trocr::TrOCRModel::new(&encoder_config, &decoder_config, vb)?; let processor_config = image_processor::ProcessorConfig::default(); let processor = image_processor::ViTImageProcessor::new(&processor_config); let image = vec![args.image.as_str()]; let image = processor.preprocess(image)?.to_device(&device)?; let encoder_xs = model.encoder().forward(&image)?; let mut logits_processor = candle_transformers::generation::LogitsProcessor::new(1337, None, None); let mut token_ids: Vec<u32> = vec![decoder_config.decoder_start_token_id]; for index in 0..1000 { let context_size = if index >= 1 { 1 } else { token_ids.len() }; let start_pos = token_ids.len().saturating_sub(context_size); let input_ids = Tensor::new(&token_ids[start_pos..], &device)?.unsqueeze(0)?; let logits = model.decode(&input_ids, &encoder_xs, start_pos)?; let logits = logits.squeeze(0)?; let logits = logits.get(logits.dim(0)? - 1)?; let token = logits_processor.sample(&logits)?; token_ids.push(token); if let Some(t) = tokenizer_dec.next_token(token)? { use std::io::Write; print!("{t}"); std::io::stdout().flush()?; } if token == decoder_config.eos_token_id { break; } } if let Some(rest) = tokenizer_dec.decode_rest().map_err(E::msg)? { print!("{rest}"); } println!(); Ok(()) }
9
0
hf_public_repos/candle/candle-transformers/src/models
hf_public_repos/candle/candle-transformers/src/models/stable_diffusion/mod.rs
//! Stable Diffusion //! //! Stable Diffusion is a latent text-to-image diffusion model capable of //! generating photo-realistic images given any text input. //! //! - 💻 [Original Repository](https://github.com/CompVis/stable-diffusion) //! - 🤗 [Hugging Face](https://huggingface.co/runwayml/stable-diffusion-v1-5) //! - The default scheduler for the v1.5, v2.1 and XL 1.0 version is the Denoising Diffusion Implicit Model scheduler (DDIM). The original paper and some code can be found in the [associated repo](https://github.com/ermongroup/ddim). The default scheduler for the XL Turbo version is the Euler Ancestral scheduler. //! //! //! # Example //! //! <div align=center> //! <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/stable-diffusion/assets/stable-diffusion-xl.jpg" alt="rusty robot holding a candle" width=320> //! </div> //! //! _"A rusty robot holding a fire torch in its hand."_ Generated by Stable Diffusion XL using Rust and [candle](https://github.com/huggingface/candle). //! //! ```bash //! # example running with cuda //! # see the candle-examples/examples/stable-diffusion for all options //! cargo run --example stable-diffusion --release --features=cuda,cudnn \ //! -- --prompt "a cosmonaut on a horse (hd, realistic, high-def)" //! //! # with sd-turbo //! cargo run --example stable-diffusion --release --features=cuda,cudnn \ //! -- --prompt "a cosmonaut on a horse (hd, realistic, high-def)" \ //! --sd-version turbo //! //! # with flash attention. //! # feature flag: `--features flash-attn` //! # cli flag: `--use-flash-attn`. //! # flash-attention-v2 is only compatible with Ampere, Ada, \ //! # or Hopper GPUs (e.g., A100/H100, RTX 3090/4090). //! cargo run --example stable-diffusion --release --features=cuda,cudnn \ //! -- --prompt "a cosmonaut on a horse (hd, realistic, high-def)" \ //! --use-flash-attn //! 
``` pub mod attention; pub mod clip; pub mod ddim; pub mod ddpm; pub mod embeddings; pub mod euler_ancestral_discrete; pub mod resnet; pub mod schedulers; pub mod unet_2d; pub mod unet_2d_blocks; pub mod utils; pub mod vae; use std::sync::Arc; use candle::{DType, Device, Result}; use candle_nn as nn; use self::schedulers::{Scheduler, SchedulerConfig}; #[derive(Clone, Debug)] pub struct StableDiffusionConfig { pub width: usize, pub height: usize, pub clip: clip::Config, pub clip2: Option<clip::Config>, autoencoder: vae::AutoEncoderKLConfig, unet: unet_2d::UNet2DConditionModelConfig, scheduler: Arc<dyn SchedulerConfig>, } impl StableDiffusionConfig { pub fn v1_5( sliced_attention_size: Option<usize>, height: Option<usize>, width: Option<usize>, ) -> Self { let bc = |out_channels, use_cross_attn, attention_head_dim| unet_2d::BlockConfig { out_channels, use_cross_attn, attention_head_dim, }; // https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/unet/config.json let unet = unet_2d::UNet2DConditionModelConfig { blocks: vec![ bc(320, Some(1), 8), bc(640, Some(1), 8), bc(1280, Some(1), 8), bc(1280, None, 8), ], center_input_sample: false, cross_attention_dim: 768, downsample_padding: 1, flip_sin_to_cos: true, freq_shift: 0., layers_per_block: 2, mid_block_scale_factor: 1., norm_eps: 1e-5, norm_num_groups: 32, sliced_attention_size, use_linear_projection: false, }; let autoencoder = vae::AutoEncoderKLConfig { block_out_channels: vec![128, 256, 512, 512], layers_per_block: 2, latent_channels: 4, norm_num_groups: 32, use_quant_conv: true, use_post_quant_conv: true, }; let height = if let Some(height) = height { assert_eq!(height % 8, 0, "height has to be divisible by 8"); height } else { 512 }; let width = if let Some(width) = width { assert_eq!(width % 8, 0, "width has to be divisible by 8"); width } else { 512 }; let scheduler = Arc::new(ddim::DDIMSchedulerConfig { prediction_type: schedulers::PredictionType::Epsilon, ..Default::default() }); StableDiffusionConfig { width, height, clip: clip::Config::v1_5(), clip2: None, autoencoder, scheduler, unet, } } fn v2_1_( sliced_attention_size: Option<usize>, height: Option<usize>, width: Option<usize>, prediction_type: schedulers::PredictionType, ) -> Self { let bc = |out_channels, use_cross_attn, attention_head_dim| unet_2d::BlockConfig { out_channels, use_cross_attn, attention_head_dim, }; // https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/unet/config.json let unet = unet_2d::UNet2DConditionModelConfig { blocks: vec![ bc(320, Some(1), 5), bc(640, Some(1), 10), bc(1280, Some(1), 20), bc(1280, None, 20), ], center_input_sample: false, cross_attention_dim: 1024, downsample_padding: 1, flip_sin_to_cos: true, freq_shift: 0., layers_per_block: 2, mid_block_scale_factor: 1., norm_eps: 1e-5, norm_num_groups: 32, sliced_attention_size, use_linear_projection: true, }; // https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/vae/config.json let autoencoder = vae::AutoEncoderKLConfig { block_out_channels: vec![128, 256, 512, 512], layers_per_block: 2, latent_channels: 4, norm_num_groups: 32, use_quant_conv: true, use_post_quant_conv: true, }; let scheduler = Arc::new(ddim::DDIMSchedulerConfig { prediction_type, ..Default::default() }); let height = if let Some(height) = height { assert_eq!(height % 8, 0, "height has to be divisible by 8"); height } else { 768 }; let width = if let Some(width) = width { assert_eq!(width % 8, 0, "width has to be divisible by 8"); width } else { 768 }; StableDiffusionConfig { width, height, 
clip: clip::Config::v2_1(), clip2: None, autoencoder, scheduler, unet, } } pub fn v2_1( sliced_attention_size: Option<usize>, height: Option<usize>, width: Option<usize>, ) -> Self { // https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/scheduler/scheduler_config.json Self::v2_1_( sliced_attention_size, height, width, schedulers::PredictionType::VPrediction, ) } fn sdxl_( sliced_attention_size: Option<usize>, height: Option<usize>, width: Option<usize>, prediction_type: schedulers::PredictionType, ) -> Self { let bc = |out_channels, use_cross_attn, attention_head_dim| unet_2d::BlockConfig { out_channels, use_cross_attn, attention_head_dim, }; // https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/unet/config.json let unet = unet_2d::UNet2DConditionModelConfig { blocks: vec![ bc(320, None, 5), bc(640, Some(2), 10), bc(1280, Some(10), 20), ], center_input_sample: false, cross_attention_dim: 2048, downsample_padding: 1, flip_sin_to_cos: true, freq_shift: 0., layers_per_block: 2, mid_block_scale_factor: 1., norm_eps: 1e-5, norm_num_groups: 32, sliced_attention_size, use_linear_projection: true, }; // https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/vae/config.json let autoencoder = vae::AutoEncoderKLConfig { block_out_channels: vec![128, 256, 512, 512], layers_per_block: 2, latent_channels: 4, norm_num_groups: 32, use_quant_conv: true, use_post_quant_conv: true, }; let scheduler = Arc::new(ddim::DDIMSchedulerConfig { prediction_type, ..Default::default() }); let height = if let Some(height) = height { assert_eq!(height % 8, 0, "height has to be divisible by 8"); height } else { 1024 }; let width = if let Some(width) = width { assert_eq!(width % 8, 0, "width has to be divisible by 8"); width } else { 1024 }; StableDiffusionConfig { width, height, clip: clip::Config::sdxl(), clip2: Some(clip::Config::sdxl2()), autoencoder, scheduler, unet, } } fn sdxl_turbo_( sliced_attention_size: Option<usize>, height: Option<usize>, width: Option<usize>, prediction_type: schedulers::PredictionType, ) -> Self { let bc = |out_channels, use_cross_attn, attention_head_dim| unet_2d::BlockConfig { out_channels, use_cross_attn, attention_head_dim, }; // https://huggingface.co/stabilityai/sdxl-turbo/blob/main/unet/config.json let unet = unet_2d::UNet2DConditionModelConfig { blocks: vec![ bc(320, None, 5), bc(640, Some(2), 10), bc(1280, Some(10), 20), ], center_input_sample: false, cross_attention_dim: 2048, downsample_padding: 1, flip_sin_to_cos: true, freq_shift: 0., layers_per_block: 2, mid_block_scale_factor: 1., norm_eps: 1e-5, norm_num_groups: 32, sliced_attention_size, use_linear_projection: true, }; // https://huggingface.co/stabilityai/sdxl-turbo/blob/main/vae/config.json let autoencoder = vae::AutoEncoderKLConfig { block_out_channels: vec![128, 256, 512, 512], layers_per_block: 2, latent_channels: 4, norm_num_groups: 32, use_quant_conv: true, use_post_quant_conv: true, }; let scheduler = Arc::new( euler_ancestral_discrete::EulerAncestralDiscreteSchedulerConfig { prediction_type, timestep_spacing: schedulers::TimestepSpacing::Trailing, ..Default::default() }, ); let height = if let Some(height) = height { assert_eq!(height % 8, 0, "height has to be divisible by 8"); height } else { 512 }; let width = if let Some(width) = width { assert_eq!(width % 8, 0, "width has to be divisible by 8"); width } else { 512 }; Self { width, height, clip: clip::Config::sdxl(), clip2: Some(clip::Config::sdxl2()), autoencoder, scheduler, unet, } } pub fn sdxl( 
sliced_attention_size: Option<usize>, height: Option<usize>, width: Option<usize>, ) -> Self { Self::sdxl_( sliced_attention_size, height, width, // https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/scheduler/scheduler_config.json schedulers::PredictionType::Epsilon, ) } pub fn sdxl_turbo( sliced_attention_size: Option<usize>, height: Option<usize>, width: Option<usize>, ) -> Self { Self::sdxl_turbo_( sliced_attention_size, height, width, // https://huggingface.co/stabilityai/sdxl-turbo/blob/main/scheduler/scheduler_config.json schedulers::PredictionType::Epsilon, ) } pub fn ssd1b( sliced_attention_size: Option<usize>, height: Option<usize>, width: Option<usize>, ) -> Self { let bc = |out_channels, use_cross_attn, attention_head_dim| unet_2d::BlockConfig { out_channels, use_cross_attn, attention_head_dim, }; // https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/unet/config.json let unet = unet_2d::UNet2DConditionModelConfig { blocks: vec![ bc(320, None, 5), bc(640, Some(2), 10), bc(1280, Some(10), 20), ], center_input_sample: false, cross_attention_dim: 2048, downsample_padding: 1, flip_sin_to_cos: true, freq_shift: 0., layers_per_block: 2, mid_block_scale_factor: 1., norm_eps: 1e-5, norm_num_groups: 32, sliced_attention_size, use_linear_projection: true, }; // https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/vae/config.json let autoencoder = vae::AutoEncoderKLConfig { block_out_channels: vec![128, 256, 512, 512], layers_per_block: 2, latent_channels: 4, norm_num_groups: 32, use_quant_conv: true, use_post_quant_conv: true, }; let scheduler = Arc::new(ddim::DDIMSchedulerConfig { ..Default::default() }); let height = if let Some(height) = height { assert_eq!(height % 8, 0, "height has to be divisible by 8"); height } else { 1024 }; let width = if let Some(width) = width { assert_eq!(width % 8, 0, "width has to be divisible by 8"); width } else { 1024 }; Self { width, height, clip: clip::Config::ssd1b(), clip2: Some(clip::Config::ssd1b2()), autoencoder, scheduler, unet, } } pub fn build_vae<P: AsRef<std::path::Path>>( &self, vae_weights: P, device: &Device, dtype: DType, ) -> Result<vae::AutoEncoderKL> { let vs_ae = unsafe { nn::VarBuilder::from_mmaped_safetensors(&[vae_weights], dtype, device)? }; // https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/vae/config.json let autoencoder = vae::AutoEncoderKL::new(vs_ae, 3, 3, self.autoencoder.clone())?; Ok(autoencoder) } pub fn build_unet<P: AsRef<std::path::Path>>( &self, unet_weights: P, device: &Device, in_channels: usize, use_flash_attn: bool, dtype: DType, ) -> Result<unet_2d::UNet2DConditionModel> { let vs_unet = unsafe { nn::VarBuilder::from_mmaped_safetensors(&[unet_weights], dtype, device)? }; let unet = unet_2d::UNet2DConditionModel::new( vs_unet, in_channels, 4, use_flash_attn, self.unet.clone(), )?; Ok(unet) } pub fn build_scheduler(&self, n_steps: usize) -> Result<Box<dyn Scheduler>> { self.scheduler.build(n_steps) } } pub fn build_clip_transformer<P: AsRef<std::path::Path>>( clip: &clip::Config, clip_weights: P, device: &Device, dtype: DType, ) -> Result<clip::ClipTextTransformer> { let vs = unsafe { nn::VarBuilder::from_mmaped_safetensors(&[clip_weights], dtype, device)? }; let text_model = clip::ClipTextTransformer::new(vs, clip)?; Ok(text_model) }
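To make the role of the builder methods above concrete, here is a minimal, hedged sketch of how they fit together; the `*.safetensors` paths and the `device` are placeholders/assumptions, prompt tokenization and the denoising loop are elided, and nothing here goes beyond the signatures defined in this module.

```rust
// A minimal sketch using the builders defined above; weight paths are
// placeholders and error handling is omitted.
let cfg = StableDiffusionConfig::v1_5(None, None, None);
let scheduler = cfg.build_scheduler(30)?;
let text_model = build_clip_transformer(&cfg.clip, "clip.safetensors", &device, DType::F32)?;
let unet = cfg.build_unet("unet.safetensors", &device, 4, false, DType::F32)?;
let vae = cfg.build_vae("vae.safetensors", &device, DType::F32)?;
// A denoising loop would then, for each t in scheduler.timesteps(), scale the
// latents with scheduler.scale_model_input, run unet.forward with the CLIP
// embeddings, and update the latents with scheduler.step before decoding the
// final latents with the autoencoder.
```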
0
0
hf_public_repos/candle/candle-transformers/src/models
hf_public_repos/candle/candle-transformers/src/models/stable_diffusion/embeddings.rs
use candle::{Result, Tensor, D}; use candle_nn as nn; use candle_nn::Module; #[derive(Debug)] pub struct TimestepEmbedding { linear_1: nn::Linear, linear_2: nn::Linear, } impl TimestepEmbedding { // act_fn: "silu" pub fn new(vs: nn::VarBuilder, channel: usize, time_embed_dim: usize) -> Result<Self> { let linear_1 = nn::linear(channel, time_embed_dim, vs.pp("linear_1"))?; let linear_2 = nn::linear(time_embed_dim, time_embed_dim, vs.pp("linear_2"))?; Ok(Self { linear_1, linear_2 }) } } impl Module for TimestepEmbedding { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = nn::ops::silu(&self.linear_1.forward(xs)?)?; self.linear_2.forward(&xs) } } #[derive(Debug)] pub struct Timesteps { num_channels: usize, flip_sin_to_cos: bool, downscale_freq_shift: f64, } impl Timesteps { pub fn new(num_channels: usize, flip_sin_to_cos: bool, downscale_freq_shift: f64) -> Self { Self { num_channels, flip_sin_to_cos, downscale_freq_shift, } } } impl Module for Timesteps { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let half_dim = (self.num_channels / 2) as u32; let exponent = (Tensor::arange(0, half_dim, xs.device())?.to_dtype(candle::DType::F32)? * -f64::ln(10000.))?; let exponent = (exponent / (half_dim as f64 - self.downscale_freq_shift))?; let emb = exponent.exp()?.to_dtype(xs.dtype())?; // emb = timesteps[:, None].float() * emb[None, :] let emb = xs.unsqueeze(D::Minus1)?.broadcast_mul(&emb.unsqueeze(0)?)?; let (cos, sin) = (emb.cos()?, emb.sin()?); let emb = if self.flip_sin_to_cos { Tensor::cat(&[&cos, &sin], D::Minus1)? } else { Tensor::cat(&[&sin, &cos], D::Minus1)? }; if self.num_channels % 2 == 1 { emb.pad_with_zeros(D::Minus2, 0, 1) } else { Ok(emb) } } }
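In formula form, the `Timesteps` module above computes, with half-dimension d = num_channels / 2 and shift s = downscale_freq_shift, the usual sinusoidal embedding (with a trailing zero pad when num_channels is odd):

```latex
\omega_i = \exp\!\Big(\frac{-i\,\ln 10000}{d - s}\Big),\quad i = 0,\dots,d-1,
\qquad
\mathrm{emb}(t) =
\begin{cases}
[\cos(t\omega_0),\dots,\cos(t\omega_{d-1}),\,\sin(t\omega_0),\dots,\sin(t\omega_{d-1})] & \text{if flip\_sin\_to\_cos}\\
[\sin(t\omega_0),\dots,\sin(t\omega_{d-1}),\,\cos(t\omega_0),\dots,\cos(t\omega_{d-1})] & \text{otherwise}
\end{cases}
```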
1
0
hf_public_repos/candle/candle-transformers/src/models
hf_public_repos/candle/candle-transformers/src/models/stable_diffusion/resnet.rs
//! ResNet Building Blocks //! //! Some Residual Network blocks used in UNet models. //! //! Deep Residual Learning for Image Recognition, K. He et al., 2015. //! - [Paper](https://arxiv.org/abs/1512.03385) //! use crate::models::with_tracing::{conv2d, Conv2d}; use candle::{Result, Tensor, D}; use candle_nn as nn; use candle_nn::Module; /// Configuration for a ResNet block. #[derive(Debug, Clone, Copy)] pub struct ResnetBlock2DConfig { /// The number of output channels, defaults to the number of input channels. pub out_channels: Option<usize>, pub temb_channels: Option<usize>, /// The number of groups to use in group normalization. pub groups: usize, pub groups_out: Option<usize>, /// The epsilon to be used in the group normalization operations. pub eps: f64, /// Whether to use a 2D convolution in the skip connection. When using None, /// such a convolution is used if the number of input channels is different from /// the number of output channels. pub use_in_shortcut: Option<bool>, // non_linearity: silu /// The final output is scaled by dividing by this value. pub output_scale_factor: f64, } impl Default for ResnetBlock2DConfig { fn default() -> Self { Self { out_channels: None, temb_channels: Some(512), groups: 32, groups_out: None, eps: 1e-6, use_in_shortcut: None, output_scale_factor: 1., } } } #[derive(Debug)] pub struct ResnetBlock2D { norm1: nn::GroupNorm, conv1: Conv2d, norm2: nn::GroupNorm, conv2: Conv2d, time_emb_proj: Option<nn::Linear>, conv_shortcut: Option<Conv2d>, span: tracing::Span, config: ResnetBlock2DConfig, } impl ResnetBlock2D { pub fn new( vs: nn::VarBuilder, in_channels: usize, config: ResnetBlock2DConfig, ) -> Result<Self> { let out_channels = config.out_channels.unwrap_or(in_channels); let conv_cfg = nn::Conv2dConfig { stride: 1, padding: 1, groups: 1, dilation: 1, }; let norm1 = nn::group_norm(config.groups, in_channels, config.eps, vs.pp("norm1"))?; let conv1 = conv2d(in_channels, out_channels, 3, conv_cfg, vs.pp("conv1"))?; let groups_out = config.groups_out.unwrap_or(config.groups); let norm2 = nn::group_norm(groups_out, out_channels, config.eps, vs.pp("norm2"))?; let conv2 = conv2d(out_channels, out_channels, 3, conv_cfg, vs.pp("conv2"))?; let use_in_shortcut = config .use_in_shortcut .unwrap_or(in_channels != out_channels); let conv_shortcut = if use_in_shortcut { let conv_cfg = nn::Conv2dConfig { stride: 1, padding: 0, groups: 1, dilation: 1, }; Some(conv2d( in_channels, out_channels, 1, conv_cfg, vs.pp("conv_shortcut"), )?) } else { None }; let time_emb_proj = match config.temb_channels { None => None, Some(temb_channels) => Some(nn::linear( temb_channels, out_channels, vs.pp("time_emb_proj"), )?), }; let span = tracing::span!(tracing::Level::TRACE, "resnet2d"); Ok(Self { norm1, conv1, norm2, conv2, time_emb_proj, span, config, conv_shortcut, }) } pub fn forward(&self, xs: &Tensor, temb: Option<&Tensor>) -> Result<Tensor> { let _enter = self.span.enter(); let shortcut_xs = match &self.conv_shortcut { Some(conv_shortcut) => conv_shortcut.forward(xs)?, None => xs.clone(), }; let xs = self.norm1.forward(xs)?; let xs = self.conv1.forward(&nn::ops::silu(&xs)?)?; let xs = match (temb, &self.time_emb_proj) { (Some(temb), Some(time_emb_proj)) => time_emb_proj .forward(&nn::ops::silu(temb)?)? .unsqueeze(D::Minus1)? .unsqueeze(D::Minus1)? .broadcast_add(&xs)?, _ => xs, }; let xs = self .conv2 .forward(&nn::ops::silu(&self.norm2.forward(&xs)?)?)?; (shortcut_xs + xs)? / self.config.output_scale_factor } }
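Written out, the `forward` pass above computes, with the timestep projection broadcast over the spatial dimensions (and dropped when no embedding is given) and `shortcut` being either the identity or a 1x1 convolution:

```latex
h = \mathrm{conv}_1\big(\mathrm{silu}(\mathrm{norm}_1(x))\big) + \mathrm{proj}\big(\mathrm{silu}(t_{\mathrm{emb}})\big),
\qquad
\mathrm{out} = \frac{\mathrm{shortcut}(x) + \mathrm{conv}_2\big(\mathrm{silu}(\mathrm{norm}_2(h))\big)}{\texttt{output\_scale\_factor}}
```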
2
0
hf_public_repos/candle/candle-transformers/src/models
hf_public_repos/candle/candle-transformers/src/models/stable_diffusion/ddim.rs
//! # Denoising Diffusion Implicit Models //! //! Denoising Diffusion Implicit Models (DDIM) is a simple scheduler //! similar to Denoising Diffusion Probabilistic Models (DDPM). The DDPM //! generative process is the reverse of a Markovian process; DDIM generalizes //! this to non-Markovian guidance. //! //! Denoising Diffusion Implicit Models, J. Song et al., 2020. //! https://arxiv.org/abs/2010.02502 use super::schedulers::{ betas_for_alpha_bar, BetaSchedule, PredictionType, Scheduler, SchedulerConfig, TimestepSpacing, }; use candle::{Result, Tensor}; /// The configuration for the DDIM scheduler. #[derive(Debug, Clone, Copy)] pub struct DDIMSchedulerConfig { /// The value of beta at the beginning of training. pub beta_start: f64, /// The value of beta at the end of training. pub beta_end: f64, /// How beta evolved during training. pub beta_schedule: BetaSchedule, /// The amount of noise to be added at each step. pub eta: f64, /// Adjust the indexes of the inference schedule by this value. pub steps_offset: usize, /// prediction type of the scheduler function, one of `epsilon` (predicting /// the noise of the diffusion process), `sample` (directly predicting the noisy sample) /// or `v_prediction` (see section 2.4 https://imagen.research.google/video/paper.pdf) pub prediction_type: PredictionType, /// number of diffusion steps used to train the model pub train_timesteps: usize, /// time step spacing for the diffusion process pub timestep_spacing: TimestepSpacing, } impl Default for DDIMSchedulerConfig { fn default() -> Self { Self { beta_start: 0.00085f64, beta_end: 0.012f64, beta_schedule: BetaSchedule::ScaledLinear, eta: 0., steps_offset: 1, prediction_type: PredictionType::Epsilon, train_timesteps: 1000, timestep_spacing: TimestepSpacing::Leading, } } } impl SchedulerConfig for DDIMSchedulerConfig { fn build(&self, inference_steps: usize) -> Result<Box<dyn Scheduler>> { Ok(Box::new(DDIMScheduler::new(inference_steps, *self)?)) } } /// The DDIM scheduler. #[derive(Debug, Clone)] pub struct DDIMScheduler { timesteps: Vec<usize>, alphas_cumprod: Vec<f64>, step_ratio: usize, init_noise_sigma: f64, pub config: DDIMSchedulerConfig, } // clip_sample: False, set_alpha_to_one: False impl DDIMScheduler { /// Creates a new DDIM scheduler given the number of steps to be /// used for inference as well as the number of steps that was used /// during training. fn new(inference_steps: usize, config: DDIMSchedulerConfig) -> Result<Self> { let step_ratio = config.train_timesteps / inference_steps; let timesteps: Vec<usize> = match config.timestep_spacing { TimestepSpacing::Leading => (0..(inference_steps)) .map(|s| s * step_ratio + config.steps_offset) .rev() .collect(), TimestepSpacing::Trailing => std::iter::successors(Some(config.train_timesteps), |n| { if *n > step_ratio { Some(n - step_ratio) } else { None } }) .map(|n| n - 1) .collect(), TimestepSpacing::Linspace => { super::utils::linspace(0.0, (config.train_timesteps - 1) as f64, inference_steps)? .to_vec1::<f64>()? .iter() .map(|&f| f as usize) .rev() .collect() } }; let betas = match config.beta_schedule { BetaSchedule::ScaledLinear => super::utils::linspace( config.beta_start.sqrt(), config.beta_end.sqrt(), config.train_timesteps, )? .sqr()?, BetaSchedule::Linear => { super::utils::linspace(config.beta_start, config.beta_end, config.train_timesteps)?
} BetaSchedule::SquaredcosCapV2 => betas_for_alpha_bar(config.train_timesteps, 0.999)?, }; let betas = betas.to_vec1::<f64>()?; let mut alphas_cumprod = Vec::with_capacity(betas.len()); for &beta in betas.iter() { let alpha = 1.0 - beta; alphas_cumprod.push(alpha * *alphas_cumprod.last().unwrap_or(&1f64)) } Ok(Self { alphas_cumprod, timesteps, step_ratio, init_noise_sigma: 1., config, }) } } impl Scheduler for DDIMScheduler { /// Performs a backward step during inference. fn step(&self, model_output: &Tensor, timestep: usize, sample: &Tensor) -> Result<Tensor> { let timestep = if timestep >= self.alphas_cumprod.len() { timestep - 1 } else { timestep }; // https://github.com/huggingface/diffusers/blob/6e099e2c8ce4c4f5c7318e970a8c093dc5c7046e/src/diffusers/schedulers/scheduling_ddim.py#L195 let prev_timestep = if timestep > self.step_ratio { timestep - self.step_ratio } else { 0 }; let alpha_prod_t = self.alphas_cumprod[timestep]; let alpha_prod_t_prev = self.alphas_cumprod[prev_timestep]; let beta_prod_t = 1. - alpha_prod_t; let beta_prod_t_prev = 1. - alpha_prod_t_prev; let (pred_original_sample, pred_epsilon) = match self.config.prediction_type { PredictionType::Epsilon => { let pred_original_sample = ((sample - (model_output * beta_prod_t.sqrt())?)? * (1. / alpha_prod_t.sqrt()))?; (pred_original_sample, model_output.clone()) } PredictionType::VPrediction => { let pred_original_sample = ((sample * alpha_prod_t.sqrt())? - (model_output * beta_prod_t.sqrt())?)?; let pred_epsilon = ((model_output * alpha_prod_t.sqrt())? + (sample * beta_prod_t.sqrt())?)?; (pred_original_sample, pred_epsilon) } PredictionType::Sample => { let pred_original_sample = model_output.clone(); let pred_epsilon = ((sample - &pred_original_sample * alpha_prod_t.sqrt())? * (1. / beta_prod_t.sqrt()))?; (pred_original_sample, pred_epsilon) } }; let variance = (beta_prod_t_prev / beta_prod_t) * (1. - alpha_prod_t / alpha_prod_t_prev); let std_dev_t = self.config.eta * variance.sqrt(); let pred_sample_direction = (pred_epsilon * (1. - alpha_prod_t_prev - std_dev_t * std_dev_t).sqrt())?; let prev_sample = ((pred_original_sample * alpha_prod_t_prev.sqrt())? + pred_sample_direction)?; if self.config.eta > 0. { &prev_sample + Tensor::randn( 0f32, std_dev_t as f32, prev_sample.shape(), prev_sample.device(), )? } else { Ok(prev_sample) } } /// Ensures interchangeability with schedulers that need to scale the denoising model input /// depending on the current timestep. fn scale_model_input(&self, sample: Tensor, _timestep: usize) -> Result<Tensor> { Ok(sample) } fn timesteps(&self) -> &[usize] { self.timesteps.as_slice() } fn add_noise(&self, original: &Tensor, noise: Tensor, timestep: usize) -> Result<Tensor> { let timestep = if timestep >= self.alphas_cumprod.len() { timestep - 1 } else { timestep }; let sqrt_alpha_prod = self.alphas_cumprod[timestep].sqrt(); let sqrt_one_minus_alpha_prod = (1.0 - self.alphas_cumprod[timestep]).sqrt(); (original * sqrt_alpha_prod)? + (noise * sqrt_one_minus_alpha_prod)? } fn init_noise_sigma(&self) -> f64 { self.init_noise_sigma } }
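For the default `epsilon` prediction type, the `step` function above implements the standard DDIM update; with the cumulative alpha product written as alpha-bar, eta the configured noise amount and z drawn from a standard normal (only when eta > 0):

```latex
\hat{x}_0 = \frac{x_t - \sqrt{1-\bar\alpha_t}\,\hat\epsilon}{\sqrt{\bar\alpha_t}},
\qquad
\sigma_t = \eta \sqrt{\frac{1-\bar\alpha_{t-1}}{1-\bar\alpha_t}\Big(1 - \frac{\bar\alpha_t}{\bar\alpha_{t-1}}\Big)},
\qquad
x_{t-1} = \sqrt{\bar\alpha_{t-1}}\,\hat{x}_0 + \sqrt{1-\bar\alpha_{t-1}-\sigma_t^2}\,\hat\epsilon + \sigma_t z
```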
3
0
hf_public_repos/candle/candle-transformers/src/models
hf_public_repos/candle/candle-transformers/src/models/stable_diffusion/euler_ancestral_discrete.rs
//! Ancestral sampling with Euler method steps. //! //! Based on the original [`k-diffusion` implementation by Katherine Crowson](https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L72). //! use super::{ schedulers::{ betas_for_alpha_bar, BetaSchedule, PredictionType, Scheduler, SchedulerConfig, TimestepSpacing, }, utils::interp, }; use candle::{bail, Error, Result, Tensor}; /// The configuration for the EulerAncestral Discrete scheduler. #[derive(Debug, Clone, Copy)] pub struct EulerAncestralDiscreteSchedulerConfig { /// The value of beta at the beginning of training. pub beta_start: f64, /// The value of beta at the end of training. pub beta_end: f64, /// How beta evolved during training. pub beta_schedule: BetaSchedule, /// Adjust the indexes of the inference schedule by this value. pub steps_offset: usize, /// prediction type of the scheduler function, one of `epsilon` (predicting /// the noise of the diffusion process), `sample` (directly predicting the noisy sample) /// or `v_prediction` (see [section 2.4](https://imagen.research.google/video/paper.pdf)) pub prediction_type: PredictionType, /// number of diffusion steps used to train the model pub train_timesteps: usize, /// time step spacing for the diffusion process pub timestep_spacing: TimestepSpacing, } impl Default for EulerAncestralDiscreteSchedulerConfig { fn default() -> Self { Self { beta_start: 0.00085f64, beta_end: 0.012f64, beta_schedule: BetaSchedule::ScaledLinear, steps_offset: 1, prediction_type: PredictionType::Epsilon, train_timesteps: 1000, timestep_spacing: TimestepSpacing::Leading, } } } impl SchedulerConfig for EulerAncestralDiscreteSchedulerConfig { fn build(&self, inference_steps: usize) -> Result<Box<dyn Scheduler>> { Ok(Box::new(EulerAncestralDiscreteScheduler::new( inference_steps, *self, )?)) } } /// The EulerAncestral Discrete scheduler. #[derive(Debug, Clone)] pub struct EulerAncestralDiscreteScheduler { timesteps: Vec<usize>, sigmas: Vec<f64>, init_noise_sigma: f64, pub config: EulerAncestralDiscreteSchedulerConfig, } // clip_sample: False, set_alpha_to_one: False impl EulerAncestralDiscreteScheduler { /// Creates a new EulerAncestral Discrete scheduler given the number of steps to be /// used for inference as well as the number of steps that was used /// during training. pub fn new( inference_steps: usize, config: EulerAncestralDiscreteSchedulerConfig, ) -> Result<Self> { let step_ratio = config.train_timesteps / inference_steps; let timesteps: Vec<usize> = match config.timestep_spacing { TimestepSpacing::Leading => (0..(inference_steps)) .map(|s| s * step_ratio + config.steps_offset) .rev() .collect(), TimestepSpacing::Trailing => std::iter::successors(Some(config.train_timesteps), |n| { if *n > step_ratio { Some(n - step_ratio) } else { None } }) .map(|n| n - 1) .collect(), TimestepSpacing::Linspace => { super::utils::linspace(0.0, (config.train_timesteps - 1) as f64, inference_steps)? .to_vec1::<f64>()? .iter() .map(|&f| f as usize) .rev() .collect() } }; let betas = match config.beta_schedule { BetaSchedule::ScaledLinear => super::utils::linspace( config.beta_start.sqrt(), config.beta_end.sqrt(), config.train_timesteps, )? .sqr()?, BetaSchedule::Linear => { super::utils::linspace(config.beta_start, config.beta_end, config.train_timesteps)?
} BetaSchedule::SquaredcosCapV2 => betas_for_alpha_bar(config.train_timesteps, 0.999)?, }; let betas = betas.to_vec1::<f64>()?; let mut alphas_cumprod = Vec::with_capacity(betas.len()); for &beta in betas.iter() { let alpha = 1.0 - beta; alphas_cumprod.push(alpha * *alphas_cumprod.last().unwrap_or(&1f64)) } let sigmas: Vec<f64> = alphas_cumprod .iter() .map(|&f| ((1. - f) / f).sqrt()) .collect(); let sigmas_xa: Vec<_> = (0..sigmas.len()).map(|i| i as f64).collect(); let mut sigmas_int = interp( &timesteps.iter().map(|&t| t as f64).collect::<Vec<_>>(), &sigmas_xa, &sigmas, ); sigmas_int.push(0.0); // standard deviation of the initial noise distribution // f64 does not implement Ord such that there is no `max`, so we need to use this workaround let init_noise_sigma = *sigmas_int .iter() .chain(std::iter::once(&0.0)) .reduce(|a, b| if a > b { a } else { b }) .expect("init_noise_sigma could not be reduced from sigmas - this should never happen"); Ok(Self { sigmas: sigmas_int, timesteps, init_noise_sigma, config, }) } } impl Scheduler for EulerAncestralDiscreteScheduler { fn timesteps(&self) -> &[usize] { self.timesteps.as_slice() } /// Ensures interchangeability with schedulers that need to scale the denoising model input /// depending on the current timestep. /// /// Scales the denoising model input by `(sigma**2 + 1) ** 0.5` to match the K-LMS algorithm fn scale_model_input(&self, sample: Tensor, timestep: usize) -> Result<Tensor> { let step_index = match self.timesteps.iter().position(|&t| t == timestep) { Some(i) => i, None => bail!("timestep out of this schedulers bounds: {timestep}"), }; let sigma = self .sigmas .get(step_index) .expect("step_index out of sigma bounds - this shouldn't happen"); sample / ((sigma.powi(2) + 1.).sqrt()) } /// Performs a backward step during inference. fn step(&self, model_output: &Tensor, timestep: usize, sample: &Tensor) -> Result<Tensor> { let step_index = self .timesteps .iter() .position(|&p| p == timestep) .ok_or_else(|| Error::Msg("timestep out of this schedulers bounds".to_string()))?; let sigma_from = &self.sigmas[step_index]; let sigma_to = &self.sigmas[step_index + 1]; // 1. compute predicted original sample (x_0) from sigma-scaled predicted noise let pred_original_sample = match self.config.prediction_type { PredictionType::Epsilon => (sample - (model_output * *sigma_from))?, PredictionType::VPrediction => { ((model_output * (-sigma_from / (sigma_from.powi(2) + 1.0).sqrt()))? + (sample / (sigma_from.powi(2) + 1.0))?)? } PredictionType::Sample => bail!("prediction_type not implemented yet: sample"), }; let sigma_up = (sigma_to.powi(2) * (sigma_from.powi(2) - sigma_to.powi(2)) / sigma_from.powi(2)) .sqrt(); let sigma_down = (sigma_to.powi(2) - sigma_up.powi(2)).sqrt(); // 2. convert to a ODE derivative let derivative = ((sample - pred_original_sample)? / *sigma_from)?; let dt = sigma_down - *sigma_from; let prev_sample = (sample + derivative * dt)?; let noise = prev_sample.randn_like(0.0, 1.0)?; prev_sample + noise * sigma_up } fn add_noise(&self, original: &Tensor, noise: Tensor, timestep: usize) -> Result<Tensor> { let step_index = self .timesteps .iter() .position(|&p| p == timestep) .ok_or_else(|| Error::Msg("timestep out of this schedulers bounds".to_string()))?; let sigma = self .sigmas .get(step_index) .expect("step_index out of sigma bounds - this shouldn't happen"); original + (noise * *sigma)? 
} fn init_noise_sigma(&self) -> f64 { match self.config.timestep_spacing { TimestepSpacing::Trailing | TimestepSpacing::Linspace => self.init_noise_sigma, TimestepSpacing::Leading => (self.init_noise_sigma.powi(2) + 1.0).sqrt(), } } }
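In equations, for the `epsilon` prediction type the ancestral step implemented above is, writing sigma_from as sigma_f, sigma_to as sigma_t and z drawn from a standard normal:

```latex
\hat{x}_0 = x - \sigma_f\,\hat\epsilon,
\qquad
\sigma_{\mathrm{up}} = \sqrt{\frac{\sigma_t^2(\sigma_f^2-\sigma_t^2)}{\sigma_f^2}},
\qquad
\sigma_{\mathrm{down}} = \sqrt{\sigma_t^2 - \sigma_{\mathrm{up}}^2},
\qquad
x' = x + \frac{x - \hat{x}_0}{\sigma_f}\,(\sigma_{\mathrm{down}} - \sigma_f) + \sigma_{\mathrm{up}}\, z
```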
4
0
hf_public_repos/candle/candle-transformers/src/models
hf_public_repos/candle/candle-transformers/src/models/stable_diffusion/unet_2d.rs
//! 2D UNet Denoising Models //! //! The 2D Unet models take as input a noisy sample and the current diffusion //! timestep and return a denoised version of the input. use super::embeddings::{TimestepEmbedding, Timesteps}; use super::unet_2d_blocks::*; use crate::models::with_tracing::{conv2d, Conv2d}; use candle::{Result, Tensor}; use candle_nn as nn; use candle_nn::Module; #[derive(Debug, Clone, Copy)] pub struct BlockConfig { pub out_channels: usize, /// When `None` no cross-attn is used, when `Some(d)` then cross-attn is used and `d` is the /// number of transformer blocks to be used. pub use_cross_attn: Option<usize>, pub attention_head_dim: usize, } #[derive(Debug, Clone)] pub struct UNet2DConditionModelConfig { pub center_input_sample: bool, pub flip_sin_to_cos: bool, pub freq_shift: f64, pub blocks: Vec<BlockConfig>, pub layers_per_block: usize, pub downsample_padding: usize, pub mid_block_scale_factor: f64, pub norm_num_groups: usize, pub norm_eps: f64, pub cross_attention_dim: usize, pub sliced_attention_size: Option<usize>, pub use_linear_projection: bool, } impl Default for UNet2DConditionModelConfig { fn default() -> Self { Self { center_input_sample: false, flip_sin_to_cos: true, freq_shift: 0., blocks: vec![ BlockConfig { out_channels: 320, use_cross_attn: Some(1), attention_head_dim: 8, }, BlockConfig { out_channels: 640, use_cross_attn: Some(1), attention_head_dim: 8, }, BlockConfig { out_channels: 1280, use_cross_attn: Some(1), attention_head_dim: 8, }, BlockConfig { out_channels: 1280, use_cross_attn: None, attention_head_dim: 8, }, ], layers_per_block: 2, downsample_padding: 1, mid_block_scale_factor: 1., norm_num_groups: 32, norm_eps: 1e-5, cross_attention_dim: 1280, sliced_attention_size: None, use_linear_projection: false, } } } #[derive(Debug)] pub(crate) enum UNetDownBlock { Basic(DownBlock2D), CrossAttn(CrossAttnDownBlock2D), } #[derive(Debug)] enum UNetUpBlock { Basic(UpBlock2D), CrossAttn(CrossAttnUpBlock2D), } #[derive(Debug)] pub struct UNet2DConditionModel { conv_in: Conv2d, time_proj: Timesteps, time_embedding: TimestepEmbedding, down_blocks: Vec<UNetDownBlock>, mid_block: UNetMidBlock2DCrossAttn, up_blocks: Vec<UNetUpBlock>, conv_norm_out: nn::GroupNorm, conv_out: Conv2d, span: tracing::Span, config: UNet2DConditionModelConfig, } impl UNet2DConditionModel { pub fn new( vs: nn::VarBuilder, in_channels: usize, out_channels: usize, use_flash_attn: bool, config: UNet2DConditionModelConfig, ) -> Result<Self> { let n_blocks = config.blocks.len(); let b_channels = config.blocks[0].out_channels; let bl_channels = config.blocks.last().unwrap().out_channels; let bl_attention_head_dim = config.blocks.last().unwrap().attention_head_dim; let time_embed_dim = b_channels * 4; let conv_cfg = nn::Conv2dConfig { padding: 1, ..Default::default() }; let conv_in = conv2d(in_channels, b_channels, 3, conv_cfg, vs.pp("conv_in"))?; let time_proj = Timesteps::new(b_channels, config.flip_sin_to_cos, config.freq_shift); let time_embedding = TimestepEmbedding::new(vs.pp("time_embedding"), b_channels, time_embed_dim)?; let vs_db = vs.pp("down_blocks"); let down_blocks = (0..n_blocks) .map(|i| { let BlockConfig { out_channels, use_cross_attn, attention_head_dim, } = config.blocks[i]; // Enable automatic attention slicing if the config sliced_attention_size is set to 0. 
let sliced_attention_size = match config.sliced_attention_size { Some(0) => Some(attention_head_dim / 2), _ => config.sliced_attention_size, }; let in_channels = if i > 0 { config.blocks[i - 1].out_channels } else { b_channels }; let db_cfg = DownBlock2DConfig { num_layers: config.layers_per_block, resnet_eps: config.norm_eps, resnet_groups: config.norm_num_groups, add_downsample: i < n_blocks - 1, downsample_padding: config.downsample_padding, ..Default::default() }; if let Some(transformer_layers_per_block) = use_cross_attn { let config = CrossAttnDownBlock2DConfig { downblock: db_cfg, attn_num_head_channels: attention_head_dim, cross_attention_dim: config.cross_attention_dim, sliced_attention_size, use_linear_projection: config.use_linear_projection, transformer_layers_per_block, }; let block = CrossAttnDownBlock2D::new( vs_db.pp(i.to_string()), in_channels, out_channels, Some(time_embed_dim), use_flash_attn, config, )?; Ok(UNetDownBlock::CrossAttn(block)) } else { let block = DownBlock2D::new( vs_db.pp(i.to_string()), in_channels, out_channels, Some(time_embed_dim), db_cfg, )?; Ok(UNetDownBlock::Basic(block)) } }) .collect::<Result<Vec<_>>>()?; // https://github.com/huggingface/diffusers/blob/a76f2ad538e73b34d5fe7be08c8eb8ab38c7e90c/src/diffusers/models/unet_2d_condition.py#L462 let mid_transformer_layers_per_block = match config.blocks.last() { None => 1, Some(block) => block.use_cross_attn.unwrap_or(1), }; let mid_cfg = UNetMidBlock2DCrossAttnConfig { resnet_eps: config.norm_eps, output_scale_factor: config.mid_block_scale_factor, cross_attn_dim: config.cross_attention_dim, attn_num_head_channels: bl_attention_head_dim, resnet_groups: Some(config.norm_num_groups), use_linear_projection: config.use_linear_projection, transformer_layers_per_block: mid_transformer_layers_per_block, ..Default::default() }; let mid_block = UNetMidBlock2DCrossAttn::new( vs.pp("mid_block"), bl_channels, Some(time_embed_dim), use_flash_attn, mid_cfg, )?; let vs_ub = vs.pp("up_blocks"); let up_blocks = (0..n_blocks) .map(|i| { let BlockConfig { out_channels, use_cross_attn, attention_head_dim, } = config.blocks[n_blocks - 1 - i]; // Enable automatic attention slicing if the config sliced_attention_size is set to 0. 
let sliced_attention_size = match config.sliced_attention_size { Some(0) => Some(attention_head_dim / 2), _ => config.sliced_attention_size, }; let prev_out_channels = if i > 0 { config.blocks[n_blocks - i].out_channels } else { bl_channels }; let in_channels = { let index = if i == n_blocks - 1 { 0 } else { n_blocks - i - 2 }; config.blocks[index].out_channels }; let ub_cfg = UpBlock2DConfig { num_layers: config.layers_per_block + 1, resnet_eps: config.norm_eps, resnet_groups: config.norm_num_groups, add_upsample: i < n_blocks - 1, ..Default::default() }; if let Some(transformer_layers_per_block) = use_cross_attn { let config = CrossAttnUpBlock2DConfig { upblock: ub_cfg, attn_num_head_channels: attention_head_dim, cross_attention_dim: config.cross_attention_dim, sliced_attention_size, use_linear_projection: config.use_linear_projection, transformer_layers_per_block, }; let block = CrossAttnUpBlock2D::new( vs_ub.pp(i.to_string()), in_channels, prev_out_channels, out_channels, Some(time_embed_dim), use_flash_attn, config, )?; Ok(UNetUpBlock::CrossAttn(block)) } else { let block = UpBlock2D::new( vs_ub.pp(i.to_string()), in_channels, prev_out_channels, out_channels, Some(time_embed_dim), ub_cfg, )?; Ok(UNetUpBlock::Basic(block)) } }) .collect::<Result<Vec<_>>>()?; let conv_norm_out = nn::group_norm( config.norm_num_groups, b_channels, config.norm_eps, vs.pp("conv_norm_out"), )?; let conv_out = conv2d(b_channels, out_channels, 3, conv_cfg, vs.pp("conv_out"))?; let span = tracing::span!(tracing::Level::TRACE, "unet2d"); Ok(Self { conv_in, time_proj, time_embedding, down_blocks, mid_block, up_blocks, conv_norm_out, conv_out, span, config, }) } pub fn forward( &self, xs: &Tensor, timestep: f64, encoder_hidden_states: &Tensor, ) -> Result<Tensor> { let _enter = self.span.enter(); self.forward_with_additional_residuals(xs, timestep, encoder_hidden_states, None, None) } pub fn forward_with_additional_residuals( &self, xs: &Tensor, timestep: f64, encoder_hidden_states: &Tensor, down_block_additional_residuals: Option<&[Tensor]>, mid_block_additional_residual: Option<&Tensor>, ) -> Result<Tensor> { let (bsize, _channels, height, width) = xs.dims4()?; let device = xs.device(); let n_blocks = self.config.blocks.len(); let num_upsamplers = n_blocks - 1; let default_overall_up_factor = 2usize.pow(num_upsamplers as u32); let forward_upsample_size = height % default_overall_up_factor != 0 || width % default_overall_up_factor != 0; // 0. center input if necessary let xs = if self.config.center_input_sample { ((xs * 2.0)? - 1.0)? } else { xs.clone() }; // 1. time let emb = (Tensor::ones(bsize, xs.dtype(), device)? * timestep)?; let emb = self.time_proj.forward(&emb)?; let emb = self.time_embedding.forward(&emb)?; // 2. pre-process let xs = self.conv_in.forward(&xs)?; // 3. down let mut down_block_res_xs = vec![xs.clone()]; let mut xs = xs; for down_block in self.down_blocks.iter() { let (_xs, res_xs) = match down_block { UNetDownBlock::Basic(b) => b.forward(&xs, Some(&emb))?, UNetDownBlock::CrossAttn(b) => { b.forward(&xs, Some(&emb), Some(encoder_hidden_states))? } }; down_block_res_xs.extend(res_xs); xs = _xs; } let new_down_block_res_xs = if let Some(down_block_additional_residuals) = down_block_additional_residuals { let mut v = vec![]; // A previous version of this code had a bug because of the addition being made // in place via += hence modifying the input of the mid block. for (i, residuals) in down_block_additional_residuals.iter().enumerate() { v.push((&down_block_res_xs[i] + residuals)?) 
} v } else { down_block_res_xs }; let mut down_block_res_xs = new_down_block_res_xs; // 4. mid let xs = self .mid_block .forward(&xs, Some(&emb), Some(encoder_hidden_states))?; let xs = match mid_block_additional_residual { None => xs, Some(m) => (m + xs)?, }; // 5. up let mut xs = xs; let mut upsample_size = None; for (i, up_block) in self.up_blocks.iter().enumerate() { let n_resnets = match up_block { UNetUpBlock::Basic(b) => b.resnets.len(), UNetUpBlock::CrossAttn(b) => b.upblock.resnets.len(), }; let res_xs = down_block_res_xs.split_off(down_block_res_xs.len() - n_resnets); if i < n_blocks - 1 && forward_upsample_size { let (_, _, h, w) = down_block_res_xs.last().unwrap().dims4()?; upsample_size = Some((h, w)) } xs = match up_block { UNetUpBlock::Basic(b) => b.forward(&xs, &res_xs, Some(&emb), upsample_size)?, UNetUpBlock::CrossAttn(b) => b.forward( &xs, &res_xs, Some(&emb), upsample_size, Some(encoder_hidden_states), )?, }; } // 6. post-process let xs = self.conv_norm_out.forward(&xs)?; let xs = nn::ops::silu(&xs)?; self.conv_out.forward(&xs) } }
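// A small illustrative test, not part of the original file: `forward` computes the overall
// upsampling factor as 2^(n_blocks - 1). With the default four-block config this is 8, so
// inputs whose height and width are multiples of 8 keep `upsample_size` set to `None` and
// skip the `forward_upsample_size` path. The test module name is ours.
#[cfg(test)]
mod upsample_factor_tests {
    use super::UNet2DConditionModelConfig;

    #[test]
    fn default_overall_up_factor_is_eight() {
        let cfg = UNet2DConditionModelConfig::default();
        let num_upsamplers = cfg.blocks.len() - 1;
        assert_eq!(2usize.pow(num_upsamplers as u32), 8);
    }
}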
5
0
hf_public_repos/candle/candle-transformers/src/models
hf_public_repos/candle/candle-transformers/src/models/stable_diffusion/utils.rs
use candle::{Device, Result, Tensor}; pub fn linspace(start: f64, stop: f64, steps: usize) -> Result<Tensor> { if steps == 0 { Tensor::from_vec(Vec::<f64>::new(), steps, &Device::Cpu) } else if steps == 1 { Tensor::from_vec(vec![start], steps, &Device::Cpu) } else { let delta = (stop - start) / (steps - 1) as f64; let vs = (0..steps) .map(|step| start + step as f64 * delta) .collect::<Vec<_>>(); Tensor::from_vec(vs, steps, &Device::Cpu) } } /// A linear interpolator for a sorted array of x and y values. struct LinearInterpolator<'x, 'y> { xp: &'x [f64], fp: &'y [f64], cache: usize, } impl LinearInterpolator<'_, '_> { fn accel_find(&mut self, x: f64) -> usize { let xidx = self.cache; if x < self.xp[xidx] { self.cache = self.xp[0..xidx].partition_point(|o| *o < x); self.cache = self.cache.saturating_sub(1); } else if x >= self.xp[xidx + 1] { self.cache = self.xp[xidx..self.xp.len()].partition_point(|o| *o < x) + xidx; self.cache = self.cache.saturating_sub(1); } self.cache } fn eval(&mut self, x: f64) -> f64 { if x < self.xp[0] || x > self.xp[self.xp.len() - 1] { return f64::NAN; } let idx = self.accel_find(x); let x_l = self.xp[idx]; let x_h = self.xp[idx + 1]; let y_l = self.fp[idx]; let y_h = self.fp[idx + 1]; let dx = x_h - x_l; if dx > 0.0 { y_l + (x - x_l) / dx * (y_h - y_l) } else { f64::NAN } } } pub fn interp(x: &[f64], xp: &[f64], fp: &[f64]) -> Vec<f64> { let mut interpolator = LinearInterpolator { xp, fp, cache: 0 }; x.iter().map(|&x| interpolator.eval(x)).collect() }
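// A minimal usage sketch, not part of the original file: exercising `linspace` and `interp`
// on CPU values. Only the public helpers defined above are used; the grid and the y = 2x
// example are made up for illustration.
#[cfg(test)]
mod interp_tests {
    use super::{interp, linspace};

    #[test]
    fn linear_resampling() -> candle::Result<()> {
        // Five evenly spaced points between 0 and 1: [0.0, 0.25, 0.5, 0.75, 1.0].
        let xs = linspace(0., 1., 5)?.to_vec1::<f64>()?;
        assert_eq!(xs.len(), 5);
        // Sample y = 2x on that grid and interpolate it back at two off-grid points.
        let ys: Vec<f64> = xs.iter().map(|x| 2. * x).collect();
        let out = interp(&[0.1, 0.6], &xs, &ys);
        assert!((out[0] - 0.2).abs() < 1e-9);
        assert!((out[1] - 1.2).abs() < 1e-9);
        Ok(())
    }
}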
6
0
hf_public_repos/candle/candle-transformers/src/models
hf_public_repos/candle/candle-transformers/src/models/stable_diffusion/schedulers.rs
#![allow(dead_code)] //! # Diffusion pipelines and models //! //! Noise schedulers can be used to set the trade-off between //! inference speed and quality. use candle::{Result, Tensor}; pub trait SchedulerConfig: std::fmt::Debug + Send + Sync { fn build(&self, inference_steps: usize) -> Result<Box<dyn Scheduler>>; } /// This trait represents a scheduler for the diffusion process. pub trait Scheduler { fn timesteps(&self) -> &[usize]; fn add_noise(&self, original: &Tensor, noise: Tensor, timestep: usize) -> Result<Tensor>; fn init_noise_sigma(&self) -> f64; fn scale_model_input(&self, sample: Tensor, _timestep: usize) -> Result<Tensor>; fn step(&self, model_output: &Tensor, timestep: usize, sample: &Tensor) -> Result<Tensor>; } /// This represents how beta ranges from its minimum value to the maximum /// during training. #[derive(Debug, Clone, Copy)] pub enum BetaSchedule { /// Linear interpolation. Linear, /// Linear interpolation of the square root of beta. ScaledLinear, /// Glide cosine schedule SquaredcosCapV2, } #[derive(Debug, Clone, Copy)] pub enum PredictionType { Epsilon, VPrediction, Sample, } /// Time step spacing for the diffusion process. /// /// "linspace", "leading", "trailing" corresponds to annotation of Table 2. of the [paper](https://arxiv.org/abs/2305.08891) #[derive(Debug, Clone, Copy)] pub enum TimestepSpacing { Leading, Linspace, Trailing, } impl Default for TimestepSpacing { fn default() -> Self { Self::Leading } } /// Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of /// `(1-beta)` over time from `t = [0,1]`. /// /// Contains a function `alpha_bar` that takes an argument `t` and transforms it to the cumulative product of `(1-beta)` /// up to that part of the diffusion process. pub(crate) fn betas_for_alpha_bar(num_diffusion_timesteps: usize, max_beta: f64) -> Result<Tensor> { let alpha_bar = |time_step: usize| { f64::cos((time_step as f64 + 0.008) / 1.008 * std::f64::consts::FRAC_PI_2).powi(2) }; let mut betas = Vec::with_capacity(num_diffusion_timesteps); for i in 0..num_diffusion_timesteps { let t1 = i / num_diffusion_timesteps; let t2 = (i + 1) / num_diffusion_timesteps; betas.push((1.0 - alpha_bar(t2) / alpha_bar(t1)).min(max_beta)); } let betas_len = betas.len(); Tensor::from_vec(betas, betas_len, &candle::Device::Cpu) }
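// A minimal usage sketch, not part of the original file: `betas_for_alpha_bar` produces one
// beta per diffusion timestep, each capped at `max_beta`; only the shape is checked here since
// the concrete schedulers are the ones consuming the values.
#[cfg(test)]
mod beta_schedule_tests {
    use super::betas_for_alpha_bar;

    #[test]
    fn one_beta_per_timestep() -> candle::Result<()> {
        let betas = betas_for_alpha_bar(1000, 0.999)?;
        assert_eq!(betas.dims1()?, 1000);
        Ok(())
    }
}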
7
0
hf_public_repos/candle/candle-transformers/src/models
hf_public_repos/candle/candle-transformers/src/models/openclip/text_model.rs
//! Text encoder as used in most OpenCLIP pretrained models //! https://github.com/mlfoundations/open_clip use candle::{DType, IndexOp, Result, Tensor, D}; use candle_nn::{ embedding, layer_norm, linear, ops::softmax_last_dim, Embedding, LayerNorm, Linear, Module, VarBuilder, }; #[derive(Debug, Clone)] pub struct Config { pub vocab_size: usize, pub embed_dim: usize, pub intermediate_size: usize, pub max_position_embeddings: usize, pub pad_with: Option<String>, pub num_hidden_layers: usize, pub num_attention_heads: usize, pub projection_dim: usize, } impl Config { pub fn vit_base_patch32() -> Self { Self { vocab_size: 49408, embed_dim: 512, intermediate_size: 2048, max_position_embeddings: 77, pad_with: None, num_hidden_layers: 12, num_attention_heads: 8, projection_dim: 512, } } } #[derive(Clone, Debug)] struct TextEmbeddings { token_embedding: Embedding, position_embedding: Tensor, } impl TextEmbeddings { fn new(vs: VarBuilder, c: &Config) -> Result<Self> { let token_embedding = embedding(c.vocab_size, c.embed_dim, vs.pp("token_embedding"))?; let position_embedding = vs.get( (c.max_position_embeddings, c.embed_dim), "positional_embedding", )?; Ok(TextEmbeddings { token_embedding, position_embedding, }) } } impl Module for TextEmbeddings { fn forward(&self, input_ids: &Tensor) -> Result<Tensor> { let seq_length = input_ids.dim(D::Minus1)?; let inputs_embeds = self.token_embedding.forward(input_ids)?; let position_embedding = self.position_embedding.narrow(0, 0, seq_length)?; inputs_embeds.broadcast_add(&position_embedding) } } #[derive(Clone, Debug)] struct Attention { k_proj: candle_nn::Linear, v_proj: candle_nn::Linear, q_proj: candle_nn::Linear, out_proj: Linear, head_dim: usize, scale: f64, num_attention_heads: usize, } impl Attention { fn new(vs: candle_nn::VarBuilder, c: &Config) -> Result<Self> { let embed_dim = c.embed_dim; let num_attention_heads = c.num_attention_heads; let in_proj_weights = vs .get((embed_dim * 3, embed_dim), "in_proj_weight")? .chunk(3, 0)?; let (q_w, k_w, v_w) = ( &in_proj_weights[0], &in_proj_weights[1], &in_proj_weights[2], ); let in_proj_biases = vs.get(embed_dim * 3, "in_proj_bias")?.chunk(3, 0)?; let (q_b, k_b, v_b) = (&in_proj_biases[0], &in_proj_biases[1], &in_proj_biases[2]); let q_proj = Linear::new(q_w.clone(), Some(q_b.clone())); let k_proj = Linear::new(k_w.clone(), Some(k_b.clone())); let v_proj = Linear::new(v_w.clone(), Some(v_b.clone())); let out_proj = candle_nn::linear(embed_dim, embed_dim, vs.pp("out_proj"))?; let head_dim = embed_dim / num_attention_heads; let scale = (head_dim as f64).powf(-0.5); Ok(Attention { k_proj, v_proj, q_proj, out_proj, head_dim, scale, num_attention_heads, }) } fn shape_multihead(&self, xs: &Tensor, bsz: usize, seq_len: usize) -> Result<Tensor> { xs.reshape((bsz, seq_len, self.num_attention_heads, self.head_dim))? .transpose(1, 2)? .contiguous()? .to_dtype(DType::F32) } fn forward(&self, xs: &Tensor) -> Result<Tensor> { let in_dtype = xs.dtype(); let (bsz, seq_len, embed_dim) = xs.dims3()?; let q = self.shape_multihead(&self.q_proj.forward(xs)?, bsz, seq_len)?; let k = self.shape_multihead(&self.k_proj.forward(xs)?, bsz, seq_len)?; let v = self.shape_multihead(&self.v_proj.forward(xs)?, bsz, seq_len)?; let q = (q * self.scale)?; let attn_weights = q.matmul(&k.transpose(D::Minus1, D::Minus2)?)?; let attn_weights = softmax_last_dim(&attn_weights)?; let attn_output = attn_weights.matmul(&v)?.to_dtype(in_dtype)?; let attn_output = attn_output .transpose(1, 2)? .contiguous()? 
.reshape((bsz, seq_len, embed_dim))?; let out = self.out_proj.forward(&attn_output)?; Ok(out) } } #[derive(Clone, Debug)] struct Mlp { fc1: Linear, fc2: Linear, } impl Mlp { fn new(vs: VarBuilder, c: &Config) -> Result<Self> { let fc1 = linear(c.embed_dim, c.intermediate_size, vs.pp("c_fc"))?; let fc2 = linear(c.intermediate_size, c.embed_dim, vs.pp("c_proj"))?; Ok(Mlp { fc1, fc2 }) } } impl Mlp { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = self.fc1.forward(xs)?; self.fc2.forward(&xs.gelu_erf()?) } } #[derive(Clone, Debug)] struct EncoderLayer { self_attn: Attention, layer_norm1: LayerNorm, mlp: Mlp, layer_norm2: LayerNorm, } impl EncoderLayer { fn new(vs: VarBuilder, c: &Config) -> Result<Self> { let self_attn = Attention::new(vs.pp("attn"), c)?; let layer_norm1 = layer_norm(c.embed_dim, 1e-5, vs.pp("ln_1"))?; let mlp = Mlp::new(vs.pp("mlp"), c)?; let layer_norm2 = layer_norm(c.embed_dim, 1e-5, vs.pp("ln_2"))?; Ok(EncoderLayer { self_attn, layer_norm1, mlp, layer_norm2, }) } fn forward(&self, xs: &Tensor) -> Result<Tensor> { let residual = xs; let xs = self.layer_norm1.forward(xs)?; let xs = self.self_attn.forward(&xs)?; let xs = (xs + residual)?; let residual = &xs; let xs = self.layer_norm2.forward(&xs)?; let xs = self.mlp.forward(&xs)?; let out = (xs + residual)?; Ok(out) } } #[derive(Clone, Debug)] pub struct Encoder { layers: Vec<EncoderLayer>, } impl Encoder { pub fn new(vs: VarBuilder, c: &Config) -> Result<Self> { let vs = vs.pp("resblocks"); let mut layers: Vec<EncoderLayer> = Vec::new(); for index in 0..c.num_hidden_layers { let layer = EncoderLayer::new(vs.pp(index.to_string()), c)?; layers.push(layer) } Ok(Encoder { layers }) } pub fn forward(&self, xs: &Tensor) -> Result<Tensor> { let mut xs = xs.clone(); for layer in self.layers.iter() { xs = layer.forward(&xs)?; } Ok(xs) } } /// A text transformer as used in CLIP variants. #[derive(Clone, Debug)] pub struct OpenClipTextTransformer { embeddings: TextEmbeddings, encoder: Encoder, final_layer_norm: LayerNorm, } impl OpenClipTextTransformer { pub fn new(vs: VarBuilder, c: &Config) -> Result<Self> { let embeddings = TextEmbeddings::new(vs.clone(), c)?; let final_layer_norm = layer_norm(c.embed_dim, 1e-5, vs.pp("ln_final"))?; let encoder = Encoder::new(vs.pp("transformer"), c)?; Ok(OpenClipTextTransformer { embeddings, encoder, final_layer_norm, }) } pub fn forward(&self, input_ids: &Tensor) -> Result<Tensor> { let input_ids = self.embeddings.forward(input_ids)?; let input_ids = self.encoder.forward(&input_ids)?; self.final_layer_norm.forward(&input_ids) } } impl Module for OpenClipTextTransformer { fn forward(&self, input_ids: &Tensor) -> Result<Tensor> { let output = self.forward(input_ids)?; let sequence_max_indices = input_ids.argmax(D::Minus1)?.to_dtype(DType::I64)?; let mut indices = Vec::new(); for (batch_idx, &seq_idx) in sequence_max_indices.to_vec1::<i64>()?.iter().enumerate() { let index = output.i((batch_idx, seq_idx as usize))?.unsqueeze(0)?; indices.push(index); } Tensor::cat(&indices, 0) } }
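// A minimal sketch, not part of the original file: instantiating the ViT-B/32 text tower with
// zero-initialized weights via `VarBuilder::zeros` just to show the expected shapes. Real use
// would load pretrained weights instead; the token ids below are dummies.
#[cfg(test)]
mod openclip_text_tests {
    use super::{Config, OpenClipTextTransformer};
    use candle::{DType, Device, Tensor};
    use candle_nn::VarBuilder;

    #[test]
    fn forward_shape() -> candle::Result<()> {
        let cfg = Config::vit_base_patch32();
        let dev = Device::Cpu;
        let vb = VarBuilder::zeros(DType::F32, &dev);
        let model = OpenClipTextTransformer::new(vb, &cfg)?;
        // One sequence of 77 token ids (the OpenCLIP context length).
        let input_ids = Tensor::zeros((1, cfg.max_position_embeddings), DType::U32, &dev)?;
        // The inherent `forward` returns per-token hidden states of size `embed_dim`.
        let out = model.forward(&input_ids)?;
        assert_eq!(out.dims(), &[1, cfg.max_position_embeddings, cfg.embed_dim]);
        Ok(())
    }
}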
8
0
hf_public_repos/candle/candle-transformers/src/models
hf_public_repos/candle/candle-transformers/src/models/openclip/mod.rs
//! Open Contrastive Language-Image Pre-Training //! //! Open Contrastive Language-Image Pre-Training (OpenCLIP) is an architecture trained on //! pairs of images with related texts. //! //! - 💻 [GH Link](https://github.com/mlfoundations/open_clip) //! - 📝 [Paper](https://arxiv.org/abs/2212.07143) //! //! ## Overview //! //! ![](https://raw.githubusercontent.com/mlfoundations/open_clip/main/docs/CLIP.png) pub mod text_model;
9
0
hf_public_repos/api-inference-community/docker_images/common
hf_public_repos/api-inference-community/docker_images/common/tests/test_api_question_answering.py
import json import os from unittest import TestCase, skipIf from app.main import ALLOWED_TASKS from starlette.testclient import TestClient from tests.test_api import TESTABLE_MODELS @skipIf( "question-answering" not in ALLOWED_TASKS, "question-answering not implemented", ) class QuestionAnsweringTestCase(TestCase): def setUp(self): model_id = TESTABLE_MODELS["question-answering"] self.old_model_id = os.getenv("MODEL_ID") self.old_task = os.getenv("TASK") os.environ["MODEL_ID"] = model_id os.environ["TASK"] = "question-answering" from app.main import app self.app = app @classmethod def setUpClass(cls): from app.main import get_pipeline get_pipeline.cache_clear() def tearDown(self): if self.old_model_id is not None: os.environ["MODEL_ID"] = self.old_model_id else: del os.environ["MODEL_ID"] if self.old_task is not None: os.environ["TASK"] = self.old_task else: del os.environ["TASK"] def test_simple(self): inputs = {"question": "Where do I live ?", "context": "I live in New-York"} with TestClient(self.app) as client: response = client.post("/", json={"inputs": inputs}) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(set(content.keys()), {"answer", "start", "end", "score"}) with TestClient(self.app) as client: response = client.post("/", json=inputs) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(set(content.keys()), {"answer", "start", "end", "score"}) def test_malformed_question(self): with TestClient(self.app) as client: response = client.post("/", data=b"Where do I live ?") self.assertEqual( response.status_code, 400, ) content = json.loads(response.content) self.assertEqual(set(content.keys()), {"error"})
0
0
hf_public_repos/api-inference-community/docker_images/common
hf_public_repos/api-inference-community/docker_images/common/tests/test_api_text_to_image.py
import os from io import BytesIO from unittest import TestCase, skipIf import PIL from app.main import ALLOWED_TASKS from starlette.testclient import TestClient from tests.test_api import TESTABLE_MODELS @skipIf( "text-to-image" not in ALLOWED_TASKS, "text-to-image not implemented", ) class TextToImageTestCase(TestCase): def setUp(self): model_id = TESTABLE_MODELS["text-to-image"] self.old_model_id = os.getenv("MODEL_ID") self.old_task = os.getenv("TASK") os.environ["MODEL_ID"] = model_id os.environ["TASK"] = "text-to-image" from app.main import app self.app = app @classmethod def setUpClass(cls): from app.main import get_pipeline get_pipeline.cache_clear() def tearDown(self): if self.old_model_id is not None: os.environ["MODEL_ID"] = self.old_model_id else: del os.environ["MODEL_ID"] if self.old_task is not None: os.environ["TASK"] = self.old_task else: del os.environ["TASK"] def test_simple(self): inputs = "soap bubble" with TestClient(self.app) as client: response = client.post("/", json={"inputs": inputs}) self.assertEqual( response.status_code, 200, ) image = PIL.Image.open(BytesIO(response.content)) self.assertTrue(isinstance(image, PIL.Image.Image)) def test_malformed_input(self): with TestClient(self.app) as client: response = client.post("/", data=b"\xc3\x28") self.assertEqual( response.status_code, 400, ) self.assertEqual( response.content, b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}', )
1
0
hf_public_repos/api-inference-community/docker_images/common
hf_public_repos/api-inference-community/docker_images/common/tests/test_api_image_to_image.py
import base64 import os from io import BytesIO from unittest import TestCase, skipIf import PIL from app.main import ALLOWED_TASKS from starlette.testclient import TestClient from tests.test_api import TESTABLE_MODELS @skipIf( "image-to-image" not in ALLOWED_TASKS, "image-to-image not implemented", ) class ImageToImageTestCase(TestCase): def setUp(self): model_id = TESTABLE_MODELS["image-to-image"] self.old_model_id = os.getenv("MODEL_ID") self.old_task = os.getenv("TASK") os.environ["MODEL_ID"] = model_id os.environ["TASK"] = "image-to-image" from app.main import app self.app = app @classmethod def setUpClass(cls): from app.main import get_pipeline get_pipeline.cache_clear() def tearDown(self): if self.old_model_id is not None: os.environ["MODEL_ID"] = self.old_model_id else: del os.environ["MODEL_ID"] if self.old_task is not None: os.environ["TASK"] = self.old_task else: del os.environ["TASK"] def test_simple(self): image = PIL.Image.new("RGB", (64, 64)) parameters = {"prompt": "soap bubble"} with TestClient(self.app) as client: response = client.post( "/", json={ "image": base64.b64encode(image).decode("utf-8"), "parameters": parameters, }, ) self.assertEqual( response.status_code, 200, ) image = PIL.Image.open(BytesIO(response.content)) self.assertTrue(isinstance(image, PIL.Image.Image)) def test_malformed_input(self): with TestClient(self.app) as client: response = client.post("/", data=b"\xc3\x28") self.assertEqual( response.status_code, 400, ) self.assertEqual( response.content, b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}', )
2
0
hf_public_repos/api-inference-community/docker_images/common
hf_public_repos/api-inference-community/docker_images/common/tests/test_api_image_classification.py
import json import os from unittest import TestCase, skipIf from app.main import ALLOWED_TASKS from starlette.testclient import TestClient from tests.test_api import TESTABLE_MODELS @skipIf( "image-classification" not in ALLOWED_TASKS, "image-classification not implemented", ) class ImageClassificationTestCase(TestCase): def setUp(self): model_id = TESTABLE_MODELS["image-classification"] self.old_model_id = os.getenv("MODEL_ID") self.old_task = os.getenv("TASK") os.environ["MODEL_ID"] = model_id os.environ["TASK"] = "image-classification" from app.main import app self.app = app @classmethod def setUpClass(cls): from app.main import get_pipeline get_pipeline.cache_clear() def tearDown(self): if self.old_model_id is not None: os.environ["MODEL_ID"] = self.old_model_id else: del os.environ["MODEL_ID"] if self.old_task is not None: os.environ["TASK"] = self.old_task else: del os.environ["TASK"] def read(self, filename: str) -> bytes: dirname = os.path.dirname(os.path.abspath(__file__)) filename = os.path.join(dirname, "samples", filename) with open(filename, "rb") as f: bpayload = f.read() return bpayload def test_simple(self): bpayload = self.read("plane.jpg") with TestClient(self.app) as client: response = client.post("/", data=bpayload) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(type(content), list) self.assertEqual(set(type(el) for el in content), {dict}) self.assertEqual( set((k, type(v)) for el in content for (k, v) in el.items()), {("label", str), ("score", float)}, ) def test_different_resolution(self): bpayload = self.read("plane2.jpg") with TestClient(self.app) as client: response = client.post("/", data=bpayload) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(type(content), list) self.assertEqual(set(type(el) for el in content), {dict}) self.assertEqual( set(k for el in content for k in el.keys()), {"label", "score"} )
3
0
hf_public_repos/api-inference-community/docker_images/common
hf_public_repos/api-inference-community/docker_images/common/tests/test_api_text_classification.py
import json import os from unittest import TestCase, skipIf from app.main import ALLOWED_TASKS from starlette.testclient import TestClient from tests.test_api import TESTABLE_MODELS @skipIf( "text-classification" not in ALLOWED_TASKS, "text-classification not implemented", ) class TextClassificationTestCase(TestCase): def setUp(self): model_id = TESTABLE_MODELS["text-classification"] self.old_model_id = os.getenv("MODEL_ID") self.old_task = os.getenv("TASK") os.environ["MODEL_ID"] = model_id os.environ["TASK"] = "text-classification" from app.main import app self.app = app @classmethod def setUpClass(cls): from app.main import get_pipeline get_pipeline.cache_clear() def tearDown(self): if self.old_model_id is not None: os.environ["MODEL_ID"] = self.old_model_id else: del os.environ["MODEL_ID"] if self.old_task is not None: os.environ["TASK"] = self.old_task else: del os.environ["TASK"] def test_simple(self): inputs = "It is a beautiful day outside" with TestClient(self.app) as client: response = client.post("/", json={"inputs": inputs}) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(type(content), list) self.assertEqual(len(content), 1) self.assertEqual(type(content[0]), list) self.assertEqual( set(k for el in content[0] for k in el.keys()), {"label", "score"}, ) with TestClient(self.app) as client: response = client.post("/", json=inputs) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(type(content), list) self.assertEqual(len(content), 1) self.assertEqual(type(content[0]), list) self.assertEqual( set(k for el in content[0] for k in el.keys()), {"label", "score"}, ) def test_malformed_question(self): with TestClient(self.app) as client: response = client.post("/", data=b"\xc3\x28") self.assertEqual( response.status_code, 400, ) self.assertEqual( response.content, b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}', )
4
0
hf_public_repos/api-inference-community/docker_images/common
hf_public_repos/api-inference-community/docker_images/common/tests/test_api_text2text_generation.py
import json import os from unittest import TestCase, skipIf from app.main import ALLOWED_TASKS from starlette.testclient import TestClient from tests.test_api import TESTABLE_MODELS @skipIf( "text2text-generation" not in ALLOWED_TASKS, "text2text-generation not implemented", ) class TextToSpeechTestCase(TestCase): def setUp(self): model_id = TESTABLE_MODELS["text2text-generation"] self.old_model_id = os.getenv("MODEL_ID") self.old_task = os.getenv("TASK") os.environ["MODEL_ID"] = model_id os.environ["TASK"] = "text2text-generation" from app.main import app self.app = app @classmethod def setUpClass(cls): from app.main import get_pipeline get_pipeline.cache_clear() def tearDown(self): if self.old_model_id is not None: os.environ["MODEL_ID"] = self.old_model_id else: del os.environ["MODEL_ID"] if self.old_task is not None: os.environ["TASK"] = self.old_task else: del os.environ["TASK"] def test_simple(self): with TestClient(self.app) as client: response = client.post( "/", json={ "inputs": "English is tough. It can be understood " "through thorough thought though." }, ) self.assertEqual( response.status_code, 200, ) result = json.loads(response.content) self.assertEqual(type(result), list) # Add more tests here
5
0
hf_public_repos/api-inference-community/docker_images
hf_public_repos/api-inference-community/docker_images/pyannote_audio/requirements.txt
starlette==0.27.0 api-inference-community==0.0.25 torch==1.13.1 torchvision==0.12.0 torchaudio==0.11.0 torchtext==0.12.0 speechbrain==0.5.12 pyannote-audio==2.0.1 huggingface_hub==0.8.1
6
0
hf_public_repos/api-inference-community/docker_images
hf_public_repos/api-inference-community/docker_images/pyannote_audio/Dockerfile
FROM tiangolo/uvicorn-gunicorn:python3.8 LABEL maintainer="Hervé Bredin <[email protected]>" # Add any system dependency here # RUN apt-get update -y && apt-get install libXXX -y RUN apt-get update -y && apt-get install ffmpeg -y COPY ./requirements.txt /app RUN pip install --no-cache-dir -r requirements.txt COPY ./prestart.sh /app/ # Most DL models are quite large in terms of memory, using workers is a HUGE # slowdown because of the fork and GIL with python. # Using multiple pods seems like a better default strategy. # Feel free to override if it does not make sense for your library. ARG max_workers=1 ENV MAX_WORKERS=$max_workers ENV PYANNOTE_CACHE=/data # Necessary on GPU environment docker. # TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose # rendering TIMEOUT defined by uvicorn impossible to use correctly # We're overriding it to be renamed UVICORN_TIMEOUT # UVICORN_TIMEOUT is a useful variable for very large models that take more # than 30s (the default) to load in memory. # If UVICORN_TIMEOUT is too low, uvicorn will simply never loads as it will # kill workers all the time before they finish. RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py COPY ./app /app/app
7
0
hf_public_repos/api-inference-community/docker_images
hf_public_repos/api-inference-community/docker_images/pyannote_audio/prestart.sh
python app/main.py
8
0
hf_public_repos/api-inference-community/docker_images/pyannote_audio
hf_public_repos/api-inference-community/docker_images/pyannote_audio/app/main.py
import functools import logging import os from typing import Dict, Type from api_inference_community.routes import pipeline_route, status_ok from app.pipelines import AutomaticSpeechRecognitionPipeline, Pipeline from starlette.applications import Starlette from starlette.middleware import Middleware from starlette.middleware.gzip import GZipMiddleware from starlette.routing import Route TASK = os.getenv("TASK") MODEL_ID = os.getenv("MODEL_ID") logger = logging.getLogger(__name__) # Add the allowed tasks # Supported tasks are: # - text-generation # - text-classification # - token-classification # - translation # - summarization # - automatic-speech-recognition # - ... # For instance # from app.pipelines import AutomaticSpeechRecognitionPipeline # ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline} # You can check the requirements and expectations of each pipelines in their respective # directories. Implement directly within the directories. ALLOWED_TASKS: Dict[str, Type[Pipeline]] = { "automatic-speech-recognition": AutomaticSpeechRecognitionPipeline } @functools.lru_cache() def get_pipeline() -> Pipeline: task = os.environ["TASK"] model_id = os.environ["MODEL_ID"] if task not in ALLOWED_TASKS: raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}") return ALLOWED_TASKS[task](model_id) routes = [ Route("/{whatever:path}", status_ok), Route("/{whatever:path}", pipeline_route, methods=["POST"]), ] middleware = [Middleware(GZipMiddleware, minimum_size=1000)] if os.environ.get("DEBUG", "") == "1": from starlette.middleware.cors import CORSMiddleware middleware.append( Middleware( CORSMiddleware, allow_origins=["*"], allow_headers=["*"], allow_methods=["*"], ) ) app = Starlette(routes=routes, middleware=middleware) @app.on_event("startup") async def startup_event(): logger = logging.getLogger("uvicorn.access") handler = logging.StreamHandler() handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")) logger.handlers = [handler] # Link between `api-inference-community` and framework code. app.get_pipeline = get_pipeline try: get_pipeline() except Exception: # We can fail so we can show exception later. pass if __name__ == "__main__": try: get_pipeline() except Exception: # We can fail so we can show exception later. pass
9
0
hf_public_repos/candle/candle-transformers/src
hf_public_repos/candle/candle-transformers/src/models/convmixer.rs
//! ConvMixer implementation. //! //! See "Patches Are All You Need?" by Trockman et al. 2022 //! //! - 📝 [Arxiv](https://arxiv.org/abs/2201.09792) //! - 💻 [Github](https://github.com/locuslab/convmixer) //! use candle::Result; use candle_nn::{batch_norm, Conv2dConfig, Module, VarBuilder}; #[allow(clippy::many_single_char_names)] fn conv2d_same( i: usize, o: usize, k: usize, c: Conv2dConfig, vb: VarBuilder, ) -> Result<impl Module> { let conv2d = candle_nn::conv2d(i, o, k, c, vb)?; let s = c.stride; let module = candle_nn::func(move |xs| { let ih = xs.dim(2)?; let iw = xs.dim(3)?; let oh = ih.div_ceil(s); let ow = iw.div_ceil(s); let pad_h = usize::max((oh - 1) * s + k - ih, 0); let pad_w = usize::max((ow - 1) * s + k - iw, 0); if pad_h > 0 || pad_w > 0 { xs.pad_with_zeros(3, pad_w / 2, pad_w - pad_w / 2)? .pad_with_zeros(2, pad_h / 2, pad_h - pad_h / 2)? .apply(&conv2d) } else { xs.apply(&conv2d) } }); Ok(module) } fn block(dim: usize, kernel_size: usize, vb: VarBuilder) -> Result<impl Module> { let conv2d_cfg = Conv2dConfig { groups: dim, ..Default::default() }; let vb_fn = vb.pp(0).pp("fn"); let conv1 = conv2d_same(dim, dim, kernel_size, conv2d_cfg, vb_fn.pp(0))?; let bn1 = batch_norm(dim, 1e-5, vb_fn.pp(2))?; let conv2 = candle_nn::conv2d(dim, dim, 1, Default::default(), vb.pp(1))?; let bn2 = batch_norm(dim, 1e-5, vb.pp(3))?; Ok(candle_nn::func(move |xs| { let ys = xs.apply(&conv1)?.gelu_erf()?.apply_t(&bn1, false)?; (xs + ys)?.apply(&conv2)?.gelu_erf()?.apply_t(&bn2, false) })) } fn convmixer( nclasses: usize, dim: usize, depth: usize, kernel_size: usize, patch_size: usize, vb: VarBuilder, ) -> Result<candle_nn::Func<'static>> { let conv2d_cfg = Conv2dConfig { stride: patch_size, ..Default::default() }; let conv1 = candle_nn::conv2d(3, dim, patch_size, conv2d_cfg, vb.pp(0))?; let bn1 = batch_norm(dim, 1e-5, vb.pp(2))?; let blocks: Vec<_> = (0..depth) .map(|index| block(dim, kernel_size, vb.pp(3 + index))) .collect::<Result<Vec<_>>>()?; let fc = candle_nn::linear(dim, nclasses, vb.pp(25))?; Ok(candle_nn::func(move |xs| { let mut xs = xs.apply(&conv1)?.gelu_erf()?.apply_t(&bn1, false)?; for block in blocks.iter() { xs = xs.apply(block)? } // This performs the adaptive average pooling with a target size of (1, 1). xs.mean(3)?.mean(2)?.apply(&fc) })) } pub fn c1536_20(nclasses: usize, vb: VarBuilder) -> Result<candle_nn::Func<'static>> { convmixer(nclasses, 1536, 20, 9, 7, vb) } pub fn c1024_20(nclasses: usize, vb: VarBuilder) -> Result<candle_nn::Func<'static>> { convmixer(nclasses, 1024, 20, 9, 14, vb) }
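// A small illustrative test, not part of the original file: the classifier head above replaces
// an `AdaptiveAvgPool2d((1, 1))` with `xs.mean(3)?.mean(2)?`, which collapses the spatial
// dimensions of a (N, C, H, W) activation down to (N, C) before the final linear layer.
#[cfg(test)]
mod pooling_tests {
    use candle::{Device, Tensor};

    #[test]
    fn mean_pool_collapses_spatial_dims() -> candle::Result<()> {
        let xs = Tensor::arange(0f32, 24f32, &Device::Cpu)?.reshape((1, 2, 3, 4))?;
        let pooled = xs.mean(3)?.mean(2)?;
        assert_eq!(pooled.dims(), &[1, 2]);
        // Each channel ends up with the average of its own 3x4 spatial patch.
        assert_eq!(pooled.to_vec2::<f32>()?, vec![vec![5.5, 17.5]]);
        Ok(())
    }
}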
0
0
hf_public_repos/candle/candle-transformers/src
hf_public_repos/candle/candle-transformers/src/models/phi3.rs
//! Microsoft Phi-3 model implementation //! //! See Phi model details at: //! - [Phi-3 Model](https://huggingface.co/microsoft/phi-3) //! //! The Phi series are decoder-only transformers designed for code and language tasks. //! Key characteristics: //! - Decoder-only transformer architecture //! - RoPE embeddings //! - Layer normalization //! - QK normalization //! - Mixed activation functions //! - Improved context window handling //! //! References: //! - [Hugging Face Implementation](https://huggingface.co/microsoft/phi-3) //! - [Alternative Implementation](https://huggingface.co/microsoft/phi-3/tree/main) //! // This implementation is based on: // https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/modeling_phi3.py use crate::models::with_tracing::{linear_no_bias as linear, Linear, RmsNorm}; use candle::{DType, Device, Module, Result, Tensor, D}; use candle_nn::VarBuilder; use std::sync::Arc; // https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/config.json #[derive(Debug, Clone, serde::Deserialize)] pub struct Config { pub vocab_size: usize, pub hidden_act: candle_nn::Activation, pub hidden_size: usize, pub intermediate_size: usize, pub num_hidden_layers: usize, pub num_attention_heads: usize, pub num_key_value_heads: usize, pub rms_norm_eps: f64, pub rope_theta: f64, pub bos_token_id: Option<u32>, pub eos_token_id: Option<u32>, pub rope_scaling: Option<String>, pub max_position_embeddings: usize, } impl Config { pub fn head_dim(&self) -> usize { self.hidden_size / self.num_attention_heads } } #[derive(Debug, Clone)] pub struct RotaryEmbedding { sin: Tensor, cos: Tensor, } impl RotaryEmbedding { pub fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> { let dim = cfg.head_dim(); let max_seq_len = cfg.max_position_embeddings; let inv_freq: Vec<_> = (0..dim) .step_by(2) .map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?; let t = Tensor::arange(0u32, max_seq_len as u32, dev)? .to_dtype(dtype)? 
.reshape((max_seq_len, 1))?; let freqs = t.matmul(&inv_freq)?; Ok(Self { sin: freqs.sin()?, cos: freqs.cos()?, }) } pub fn apply_rotary_emb_qkv( &self, q: &Tensor, k: &Tensor, seqlen_offset: usize, ) -> Result<(Tensor, Tensor)> { let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?; let cos = self.cos.narrow(0, seqlen_offset, seq_len)?; let sin = self.sin.narrow(0, seqlen_offset, seq_len)?; let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?; let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?; Ok((q_embed, k_embed)) } } #[derive(Debug, Clone)] struct Attention { qkv_proj: Linear, o_proj: Linear, num_heads: usize, num_kv_heads: usize, num_kv_groups: usize, head_dim: usize, rotary_emb: Arc<RotaryEmbedding>, kv_cache: Option<(Tensor, Tensor)>, } impl Attention { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let num_heads = cfg.num_attention_heads; let num_kv_heads = cfg.num_key_value_heads; let head_dim = cfg.head_dim(); let op_size = num_heads * head_dim + 2 * num_kv_heads * head_dim; let qkv_proj = linear(cfg.hidden_size, op_size, vb.pp("qkv_proj"))?; let o_proj = linear(num_heads * head_dim, cfg.hidden_size, vb.pp("o_proj"))?; Ok(Self { qkv_proj, o_proj, rotary_emb, kv_cache: None, num_heads, num_kv_heads, num_kv_groups: num_heads / num_kv_heads, head_dim, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let (b_sz, q_len, _) = xs.dims3()?; let qkv = self.qkv_proj.forward(xs)?; let query_pos = self.num_heads * self.head_dim; let query_states = qkv.narrow(D::Minus1, 0, query_pos)?; let key_states = qkv.narrow(D::Minus1, query_pos, self.num_kv_heads * self.head_dim)?; let value_states = qkv.narrow( D::Minus1, query_pos + self.num_kv_heads * self.head_dim, self.num_kv_heads * self.head_dim, )?; let query_states = query_states .reshape((b_sz, q_len, self.num_heads, self.head_dim))? .transpose(1, 2)?; let key_states = key_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let value_states = value_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let (query_states, key_states) = self.rotary_emb .apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?; let (key_states, value_states) = match &self.kv_cache { None => (key_states, value_states), Some((prev_k, prev_v)) => { let key_states = Tensor::cat(&[prev_k, &key_states], 2)?; let value_states = Tensor::cat(&[prev_v, &value_states], 2)?; (key_states, value_states) } }; self.kv_cache = Some((key_states.clone(), value_states.clone())); let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?; let value_states = crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?; let attn_output = { let scale = 1f64 / f64::sqrt(self.head_dim as f64); let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?; let attn_weights = match attention_mask { None => attn_weights, Some(mask) => attn_weights.broadcast_add(mask)?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; attn_weights.matmul(&value_states)? }; attn_output .transpose(1, 2)? .reshape((b_sz, q_len, ()))? 
.apply(&self.o_proj) } fn clear_kv_cache(&mut self) { self.kv_cache = None } } #[derive(Debug, Clone)] struct Mlp { gate_up_proj: Linear, down_proj: Linear, act_fn: candle_nn::Activation, i_size: usize, } impl Mlp { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_size = cfg.hidden_size; let i_size = cfg.intermediate_size; let gate_up_proj = linear(hidden_size, 2 * i_size, vb.pp("gate_up_proj"))?; let down_proj = linear(i_size, hidden_size, vb.pp("down_proj"))?; Ok(Self { gate_up_proj, down_proj, act_fn: cfg.hidden_act, i_size, }) } } impl Module for Mlp { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let up_states = xs.apply(&self.gate_up_proj)?; let gate = up_states.narrow(D::Minus1, 0, self.i_size)?; let up_states = up_states.narrow(D::Minus1, self.i_size, self.i_size)?; let up_states = (up_states * gate.apply(&self.act_fn))?; up_states.apply(&self.down_proj) } } #[derive(Debug, Clone)] struct DecoderLayer { self_attn: Attention, mlp: Mlp, input_layernorm: RmsNorm, post_attention_layernorm: RmsNorm, } impl DecoderLayer { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?; let mlp = Mlp::new(cfg, vb.pp("mlp"))?; let input_layernorm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?; let post_attention_layernorm = RmsNorm::new( cfg.hidden_size, cfg.rms_norm_eps, vb.pp("post_attention_layernorm"), )?; Ok(Self { self_attn, mlp, input_layernorm, post_attention_layernorm, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let residual = xs; let xs = self.input_layernorm.forward(xs)?; let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?; residual + xs } fn clear_kv_cache(&mut self) { self.self_attn.clear_kv_cache() } } #[derive(Debug, Clone)] pub struct Model { embed_tokens: candle_nn::Embedding, layers: Vec<DecoderLayer>, norm: RmsNorm, lm_head: Linear, device: Device, dtype: DType, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_m = vb.pp("model"); let embed_tokens = candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?; let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?); let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb_l = vb_m.pp("layers"); for layer_idx in 0..cfg.num_hidden_layers { let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?; layers.push(layer) } let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?; let lm_head = linear(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?; Ok(Self { embed_tokens, layers, norm, lm_head, device: vb.device().clone(), dtype: vb.dtype(), }) } fn prepare_decoder_attention_mask( &self, b_size: usize, tgt_len: usize, seqlen_offset: usize, ) -> Result<Tensor> { let mask: Vec<_> = (0..tgt_len) .flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. })) .collect(); let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?; let mask = if seqlen_offset > 0 { let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?; Tensor::cat(&[&mask0, &mask], D::Minus1)? } else { mask }; mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))? 
.to_dtype(self.dtype) } pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> { let (b_size, seq_len) = input_ids.dims2()?; let attention_mask = if seq_len <= 1 { None } else { let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?; Some(mask) }; let mut xs = self.embed_tokens.forward(input_ids)?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)? } xs.narrow(1, seq_len - 1, 1)? .apply(&self.norm)? .apply(&self.lm_head) } pub fn clear_kv_cache(&mut self) { for layer in self.layers.iter_mut() { layer.clear_kv_cache() } } }
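// A minimal sketch, not part of the original file: building `RotaryEmbedding` from a tiny,
// hand-written `Config` (the hyper-parameters below are made up for the test) and checking
// that `apply_rotary_emb_qkv` keeps the (batch, heads, seq, head_dim) layout intact.
#[cfg(test)]
mod rope_tests {
    use super::{Config, RotaryEmbedding};
    use candle::{DType, Device, Tensor};

    #[test]
    fn rope_preserves_shape() -> candle::Result<()> {
        let cfg = Config {
            vocab_size: 32,
            hidden_act: candle_nn::Activation::Silu,
            hidden_size: 16,
            intermediate_size: 32,
            num_hidden_layers: 1,
            num_attention_heads: 2,
            num_key_value_heads: 2,
            rms_norm_eps: 1e-5,
            rope_theta: 10_000.,
            bos_token_id: None,
            eos_token_id: None,
            rope_scaling: None,
            max_position_embeddings: 64,
        };
        let dev = Device::Cpu;
        let rope = RotaryEmbedding::new(DType::F32, &cfg, &dev)?;
        // head_dim = hidden_size / num_attention_heads = 8.
        let q = Tensor::zeros((1, 2, 4, 8), DType::F32, &dev)?;
        let k = Tensor::zeros((1, 2, 4, 8), DType::F32, &dev)?;
        let (q, k) = rope.apply_rotary_emb_qkv(&q, &k, 0)?;
        assert_eq!(q.dims(), &[1, 2, 4, 8]);
        assert_eq!(k.dims(), &[1, 2, 4, 8]);
        Ok(())
    }
}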
1
0
hf_public_repos/candle/candle-transformers/src/models
hf_public_repos/candle/candle-transformers/src/models/wuerstchen/prior.rs
use super::common::{AttnBlock, ResBlock, TimestepBlock}; use candle::{DType, Result, Tensor, D}; use candle_nn::VarBuilder; #[derive(Debug)] struct Block { res_block: ResBlock, ts_block: TimestepBlock, attn_block: AttnBlock, } #[derive(Debug)] pub struct WPrior { projection: candle_nn::Conv2d, cond_mapper_lin1: candle_nn::Linear, cond_mapper_lin2: candle_nn::Linear, blocks: Vec<Block>, out_ln: super::common::WLayerNorm, out_conv: candle_nn::Conv2d, c_r: usize, } impl WPrior { #[allow(clippy::too_many_arguments)] pub fn new( c_in: usize, c: usize, c_cond: usize, c_r: usize, depth: usize, nhead: usize, use_flash_attn: bool, vb: VarBuilder, ) -> Result<Self> { let projection = candle_nn::conv2d(c_in, c, 1, Default::default(), vb.pp("projection"))?; let cond_mapper_lin1 = candle_nn::linear(c_cond, c, vb.pp("cond_mapper.0"))?; let cond_mapper_lin2 = candle_nn::linear(c, c, vb.pp("cond_mapper.2"))?; let out_ln = super::common::WLayerNorm::new(c)?; let out_conv = candle_nn::conv2d(c, c_in * 2, 1, Default::default(), vb.pp("out.1"))?; let mut blocks = Vec::with_capacity(depth); for index in 0..depth { let res_block = ResBlock::new(c, 0, 3, vb.pp(format!("blocks.{}", 3 * index)))?; let ts_block = TimestepBlock::new(c, c_r, vb.pp(format!("blocks.{}", 3 * index + 1)))?; let attn_block = AttnBlock::new( c, c, nhead, true, use_flash_attn, vb.pp(format!("blocks.{}", 3 * index + 2)), )?; blocks.push(Block { res_block, ts_block, attn_block, }) } Ok(Self { projection, cond_mapper_lin1, cond_mapper_lin2, blocks, out_ln, out_conv, c_r, }) } pub fn gen_r_embedding(&self, r: &Tensor) -> Result<Tensor> { const MAX_POSITIONS: usize = 10000; let r = (r * MAX_POSITIONS as f64)?; let half_dim = self.c_r / 2; let emb = (MAX_POSITIONS as f64).ln() / (half_dim - 1) as f64; let emb = (Tensor::arange(0u32, half_dim as u32, r.device())?.to_dtype(DType::F32)? * -emb)? .exp()?; let emb = r.unsqueeze(1)?.broadcast_mul(&emb.unsqueeze(0)?)?; let emb = Tensor::cat(&[emb.sin()?, emb.cos()?], 1)?; let emb = if self.c_r % 2 == 1 { emb.pad_with_zeros(D::Minus1, 0, 1)? } else { emb }; emb.to_dtype(r.dtype()) } pub fn forward(&self, xs: &Tensor, r: &Tensor, c: &Tensor) -> Result<Tensor> { let x_in = xs; let mut xs = xs.apply(&self.projection)?; let c_embed = c .apply(&self.cond_mapper_lin1)? .apply(&|xs: &_| candle_nn::ops::leaky_relu(xs, 0.2))? .apply(&self.cond_mapper_lin2)?; let r_embed = self.gen_r_embedding(r)?; for block in self.blocks.iter() { xs = block.res_block.forward(&xs, None)?; xs = block.ts_block.forward(&xs, &r_embed)?; xs = block.attn_block.forward(&xs, &c_embed)?; } let ab = xs.apply(&self.out_ln)?.apply(&self.out_conv)?.chunk(2, 1)?; (x_in - &ab[0])? / ((&ab[1] - 1.)?.abs()? + 1e-5) } }
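// A minimal sketch, not part of the original file: `gen_r_embedding` is a sinusoidal encoding
// of the noise ratio, so a batch of two ratios yields a (2, c_r) tensor. The tiny
// hyper-parameters passed to `WPrior::new` are made up for the test and the weights come from
// `VarBuilder::zeros`, since only shapes are being exercised.
#[cfg(test)]
mod wprior_tests {
    use super::WPrior;
    use candle::{DType, Device, Tensor};
    use candle_nn::VarBuilder;

    #[test]
    fn r_embedding_shape() -> candle::Result<()> {
        let dev = Device::Cpu;
        let vb = VarBuilder::zeros(DType::F32, &dev);
        // c_in=4, c=8, c_cond=8, c_r=8, depth=1, nhead=2, no flash attention.
        let prior = WPrior::new(4, 8, 8, 8, 1, 2, false, vb)?;
        let r = Tensor::new(&[0.25f32, 0.5], &dev)?;
        let emb = prior.gen_r_embedding(&r)?;
        assert_eq!(emb.dims(), &[2, 8]);
        Ok(())
    }
}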
2
0
hf_public_repos/candle/candle-transformers/src/models
hf_public_repos/candle/candle-transformers/src/models/wuerstchen/ddpm.rs
use candle::{Result, Tensor}; #[derive(Debug, Clone)] pub struct DDPMWSchedulerConfig { scaler: f64, s: f64, } impl Default for DDPMWSchedulerConfig { fn default() -> Self { Self { scaler: 1f64, s: 0.008f64, } } } pub struct DDPMWScheduler { init_alpha_cumprod: f64, init_noise_sigma: f64, timesteps: Vec<f64>, pub config: DDPMWSchedulerConfig, } impl DDPMWScheduler { pub fn new(inference_steps: usize, config: DDPMWSchedulerConfig) -> Result<Self> { let init_alpha_cumprod = (config.s / (1. + config.s) * std::f64::consts::PI) .cos() .powi(2); let timesteps = (0..=inference_steps) .map(|i| 1. - i as f64 / inference_steps as f64) .collect::<Vec<_>>(); Ok(Self { init_alpha_cumprod, init_noise_sigma: 1.0, timesteps, config, }) } pub fn timesteps(&self) -> &[f64] { &self.timesteps } fn alpha_cumprod(&self, t: f64) -> f64 { let scaler = self.config.scaler; let s = self.config.s; let t = if scaler > 1. { 1. - (1. - t).powf(scaler) } else if scaler < 1. { t.powf(scaler) } else { t }; let alpha_cumprod = ((t + s) / (1. + s) * std::f64::consts::PI * 0.5) .cos() .powi(2) / self.init_alpha_cumprod; alpha_cumprod.clamp(0.0001, 0.9999) } fn previous_timestep(&self, ts: f64) -> f64 { let index = self .timesteps .iter() .enumerate() .map(|(idx, v)| (idx, (v - ts).abs())) .min_by(|x, y| x.1.total_cmp(&y.1)) .unwrap() .0; self.timesteps[index + 1] } /// Ensures interchangeability with schedulers that need to scale the denoising model input /// depending on the current timestep. pub fn scale_model_input(&self, sample: Tensor, _timestep: usize) -> Tensor { sample } pub fn step(&self, model_output: &Tensor, ts: f64, sample: &Tensor) -> Result<Tensor> { let prev_t = self.previous_timestep(ts); let alpha_cumprod = self.alpha_cumprod(ts); let alpha_cumprod_prev = self.alpha_cumprod(prev_t); let alpha = alpha_cumprod / alpha_cumprod_prev; let mu = (sample - model_output * ((1. - alpha) / (1. - alpha_cumprod).sqrt()))?; let mu = (mu * (1. / alpha).sqrt())?; let std_noise = mu.randn_like(0., 1.)?; let std = std_noise * ((1. - alpha) * (1. - alpha_cumprod_prev) / (1. - alpha_cumprod)).sqrt(); if prev_t == 0. { Ok(mu) } else { mu + std } } pub fn init_noise_sigma(&self) -> f64 { self.init_noise_sigma } }
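// A minimal usage sketch, not part of the original file: the scheduler produces
// `inference_steps + 1` timesteps running from 1.0 down to 0.0, and `step` walks them in order.
#[cfg(test)]
mod ddpm_w_tests {
    use super::{DDPMWScheduler, DDPMWSchedulerConfig};

    #[test]
    fn timesteps_cover_unit_interval() -> candle::Result<()> {
        let scheduler = DDPMWScheduler::new(10, DDPMWSchedulerConfig::default())?;
        let ts = scheduler.timesteps();
        assert_eq!(ts.len(), 11);
        assert_eq!(ts[0], 1.0);
        assert_eq!(*ts.last().unwrap(), 0.0);
        Ok(())
    }
}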
3
0
hf_public_repos/candle/candle-transformers/src/models
hf_public_repos/candle/candle-transformers/src/models/wuerstchen/attention_processor.rs
use candle::{Module, Result, Tensor}; use candle_nn::{linear, Linear, VarBuilder}; // A simplified version of: // https://github.com/huggingface/diffusers/blob/119ad2c3dc8a8fb8446a83f4bf6f20929487b47f/src/diffusers/models/attention_processor.py#L38 #[derive(Debug)] pub struct Attention { to_q: Linear, to_k: Linear, to_v: Linear, to_out: Linear, heads: usize, scale: f64, use_flash_attn: bool, } #[cfg(feature = "flash-attn")] fn flash_attn( q: &Tensor, k: &Tensor, v: &Tensor, softmax_scale: f32, causal: bool, ) -> Result<Tensor> { candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal) } #[cfg(not(feature = "flash-attn"))] fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> { unimplemented!("compile with '--features flash-attn'") } impl Attention { pub fn new( query_dim: usize, heads: usize, dim_head: usize, use_flash_attn: bool, vb: VarBuilder, ) -> Result<Self> { let inner_dim = dim_head * heads; let scale = 1.0 / f64::sqrt(dim_head as f64); let to_q = linear(query_dim, inner_dim, vb.pp("to_q"))?; let to_k = linear(query_dim, inner_dim, vb.pp("to_k"))?; let to_v = linear(query_dim, inner_dim, vb.pp("to_v"))?; let to_out = linear(inner_dim, query_dim, vb.pp("to_out.0"))?; Ok(Self { to_q, to_k, to_v, to_out, scale, heads, use_flash_attn, }) } fn batch_to_head_dim(&self, xs: &Tensor) -> Result<Tensor> { let (b_size, seq_len, dim) = xs.dims3()?; xs.reshape((b_size / self.heads, self.heads, seq_len, dim))? .permute((0, 2, 1, 3))? .reshape((b_size / self.heads, seq_len, dim * self.heads)) } fn head_to_batch_dim(&self, xs: &Tensor) -> Result<Tensor> { let (b_size, seq_len, dim) = xs.dims3()?; xs.reshape((b_size, seq_len, self.heads, dim / self.heads))? .permute((0, 2, 1, 3))? .reshape((b_size * self.heads, seq_len, dim / self.heads)) } fn get_attention_scores(&self, query: &Tensor, key: &Tensor) -> Result<Tensor> { let attn_probs = (query.matmul(&key.t()?)? * self.scale)?; candle_nn::ops::softmax_last_dim(&attn_probs) } pub fn forward(&self, xs: &Tensor, encoder_hidden_states: &Tensor) -> Result<Tensor> { let (b_size, channel, h, w) = xs.dims4()?; let xs = xs.reshape((b_size, channel, h * w))?.t()?; let query = self.to_q.forward(&xs)?; let key = self.to_k.forward(encoder_hidden_states)?; let value = self.to_v.forward(encoder_hidden_states)?; let query = self.head_to_batch_dim(&query)?; let key = self.head_to_batch_dim(&key)?; let value = self.head_to_batch_dim(&value)?; let xs = if self.use_flash_attn { let init_dtype = query.dtype(); let q = query .to_dtype(candle::DType::F16)? .unsqueeze(0)? .transpose(1, 2)?; let k = key .to_dtype(candle::DType::F16)? .unsqueeze(0)? .transpose(1, 2)?; let v = value .to_dtype(candle::DType::F16)? .unsqueeze(0)? .transpose(1, 2)?; flash_attn(&q, &k, &v, self.scale as f32, false)? .transpose(1, 2)? .squeeze(0)? .to_dtype(init_dtype)? } else { let attn_prs = self.get_attention_scores(&query, &key)?; attn_prs.matmul(&value)? }; let xs = self.batch_to_head_dim(&xs)?; self.to_out .forward(&xs)? .t()? .reshape((b_size, channel, h, w)) } }
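// A small illustrative test, not part of the original file: `head_to_batch_dim` and
// `batch_to_head_dim` are inverses, moving between a (b, seq, dim) layout and the
// (b * heads, seq, dim / heads) layout the attention math runs on. Weights come from
// `VarBuilder::zeros` since only the reshapes are exercised.
#[cfg(test)]
mod attention_layout_tests {
    use super::Attention;
    use candle::{DType, Device, Tensor};
    use candle_nn::VarBuilder;

    #[test]
    fn head_batch_round_trip() -> candle::Result<()> {
        let dev = Device::Cpu;
        let vb = VarBuilder::zeros(DType::F32, &dev);
        // query_dim=8, heads=2, dim_head=4.
        let attn = Attention::new(8, 2, 4, false, vb)?;
        let xs = Tensor::arange(0f32, 80f32, &dev)?.reshape((2, 5, 8))?;
        let split = attn.head_to_batch_dim(&xs)?;
        assert_eq!(split.dims(), &[4, 5, 4]);
        let merged = attn.batch_to_head_dim(&split)?;
        assert_eq!(merged.to_vec3::<f32>()?, xs.to_vec3::<f32>()?);
        Ok(())
    }
}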
4
0
hf_public_repos/candle/candle-transformers/src/models
hf_public_repos/candle/candle-transformers/src/models/wuerstchen/mod.rs
//! Würstchen Efficient Diffusion Model //! //! Würstchen is an efficient diffusion model architecture for generating images using //! a two-stage approach with a small decoder and prior network. //! //! - 💻 [GH Link](https://github.com/dome272/Wuerstchen) //! - 🤗 [HF Link](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py) //! - 📝 [Paper](https://openreview.net/pdf?id=gU58AyJlYz) //! //! ## Example //! //! <div align=center> //! <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/wuerstchen/assets/cat.jpg" alt="" width=320> //! <p>"Anthropomorphic cat dressed as a fire fighter"</p> //! </div> pub mod attention_processor; pub mod common; pub mod ddpm; pub mod diffnext; pub mod paella_vq; pub mod prior;
5
0
hf_public_repos/candle/candle-transformers/src/models
hf_public_repos/candle/candle-transformers/src/models/wuerstchen/common.rs
use candle::{DType, Module, Result, Tensor, D}; use candle_nn::VarBuilder; // https://github.com/huggingface/diffusers/blob/19edca82f1ff194c07317369a92b470dbae97f34/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py#L22 #[derive(Debug)] pub struct WLayerNorm { eps: f64, } impl WLayerNorm { pub fn new(_size: usize) -> Result<Self> { Ok(Self { eps: 1e-6 }) } } impl Module for WLayerNorm { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = xs.permute((0, 2, 3, 1))?; let x_dtype = xs.dtype(); let internal_dtype = match x_dtype { DType::F16 | DType::BF16 => DType::F32, d => d, }; let hidden_size = xs.dim(D::Minus1)?; let xs = xs.to_dtype(internal_dtype)?; let mean_x = (xs.sum_keepdim(D::Minus1)? / hidden_size as f64)?; let xs = xs.broadcast_sub(&mean_x)?; let norm_x = (xs.sqr()?.sum_keepdim(D::Minus1)? / hidden_size as f64)?; xs.broadcast_div(&(norm_x + self.eps)?.sqrt()?)? .to_dtype(x_dtype)? .permute((0, 3, 1, 2)) } } #[derive(Debug)] pub struct LayerNormNoWeights { eps: f64, } impl LayerNormNoWeights { pub fn new(_size: usize) -> Result<Self> { Ok(Self { eps: 1e-6 }) } } impl Module for LayerNormNoWeights { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let x_dtype = xs.dtype(); let internal_dtype = match x_dtype { DType::F16 | DType::BF16 => DType::F32, d => d, }; let hidden_size = xs.dim(D::Minus1)?; let xs = xs.to_dtype(internal_dtype)?; let mean_x = (xs.sum_keepdim(D::Minus1)? / hidden_size as f64)?; let xs = xs.broadcast_sub(&mean_x)?; let norm_x = (xs.sqr()?.sum_keepdim(D::Minus1)? / hidden_size as f64)?; xs.broadcast_div(&(norm_x + self.eps)?.sqrt()?)? .to_dtype(x_dtype) } } #[derive(Debug)] pub struct TimestepBlock { mapper: candle_nn::Linear, } impl TimestepBlock { pub fn new(c: usize, c_timestep: usize, vb: VarBuilder) -> Result<Self> { let mapper = candle_nn::linear(c_timestep, c * 2, vb.pp("mapper"))?; Ok(Self { mapper }) } pub fn forward(&self, xs: &Tensor, t: &Tensor) -> Result<Tensor> { let ab = self .mapper .forward(t)? .unsqueeze(2)? .unsqueeze(3)? .chunk(2, 1)?; xs.broadcast_mul(&(&ab[0] + 1.)?)?.broadcast_add(&ab[1]) } } #[derive(Debug)] pub struct GlobalResponseNorm { gamma: Tensor, beta: Tensor, } impl GlobalResponseNorm { pub fn new(dim: usize, vb: VarBuilder) -> Result<Self> { let gamma = vb.get((1, 1, 1, dim), "gamma")?; let beta = vb.get((1, 1, 1, dim), "beta")?; Ok(Self { gamma, beta }) } } impl Module for GlobalResponseNorm { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let agg_norm = xs.sqr()?.sum_keepdim((1, 2))?.sqrt()?; let stand_div_norm = agg_norm.broadcast_div(&(agg_norm.mean_keepdim(D::Minus1)? + 1e-6)?)?; xs.broadcast_mul(&stand_div_norm)? .broadcast_mul(&self.gamma)? .broadcast_add(&self.beta)? 
+ xs } } #[derive(Debug)] pub struct ResBlock { depthwise: candle_nn::Conv2d, norm: WLayerNorm, channelwise_lin1: candle_nn::Linear, channelwise_grn: GlobalResponseNorm, channelwise_lin2: candle_nn::Linear, } impl ResBlock { pub fn new(c: usize, c_skip: usize, ksize: usize, vb: VarBuilder) -> Result<Self> { let cfg = candle_nn::Conv2dConfig { padding: ksize / 2, groups: c, ..Default::default() }; let depthwise = candle_nn::conv2d(c + c_skip, c, ksize, cfg, vb.pp("depthwise"))?; let norm = WLayerNorm::new(c)?; let channelwise_lin1 = candle_nn::linear(c, c * 4, vb.pp("channelwise.0"))?; let channelwise_grn = GlobalResponseNorm::new(c * 4, vb.pp("channelwise.2"))?; let channelwise_lin2 = candle_nn::linear(c * 4, c, vb.pp("channelwise.4"))?; Ok(Self { depthwise, norm, channelwise_lin1, channelwise_grn, channelwise_lin2, }) } pub fn forward(&self, xs: &Tensor, x_skip: Option<&Tensor>) -> Result<Tensor> { let x_res = xs; let xs = match x_skip { None => xs.clone(), Some(x_skip) => Tensor::cat(&[xs, x_skip], 1)?, }; let xs = xs .apply(&self.depthwise)? .apply(&self.norm)? .permute((0, 2, 3, 1))?; let xs = xs .apply(&self.channelwise_lin1)? .gelu_erf()? .apply(&self.channelwise_grn)? .apply(&self.channelwise_lin2)? .permute((0, 3, 1, 2))?; xs + x_res } } use super::attention_processor::Attention; #[derive(Debug)] pub struct AttnBlock { self_attn: bool, norm: WLayerNorm, attention: Attention, kv_mapper_lin: candle_nn::Linear, } impl AttnBlock { pub fn new( c: usize, c_cond: usize, nhead: usize, self_attn: bool, use_flash_attn: bool, vb: VarBuilder, ) -> Result<Self> { let norm = WLayerNorm::new(c)?; let attention = Attention::new(c, nhead, c / nhead, use_flash_attn, vb.pp("attention"))?; let kv_mapper_lin = candle_nn::linear(c_cond, c, vb.pp("kv_mapper.1"))?; Ok(Self { self_attn, norm, attention, kv_mapper_lin, }) } pub fn forward(&self, xs: &Tensor, kv: &Tensor) -> Result<Tensor> { let kv = candle_nn::ops::silu(kv)?.apply(&self.kv_mapper_lin)?; let norm_xs = self.norm.forward(xs)?; let kv = if self.self_attn { let (b_size, channel, _, _) = xs.dims4()?; let norm_xs = norm_xs.reshape((b_size, channel, ()))?.transpose(1, 2)?; Tensor::cat(&[&norm_xs, &kv], 1)?.contiguous()? } else { kv }; xs + self.attention.forward(&norm_xs, &kv) } }
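// A small illustrative test, not part of the original file: `TimestepBlock` predicts a
// per-channel scale and shift from the timestep embedding and applies them by broadcasting,
// so the spatial shape of `xs` is preserved. With `VarBuilder::zeros` the predicted scale is
// 1 and the shift is 0, which makes the block an identity map.
#[cfg(test)]
mod timestep_block_tests {
    use super::TimestepBlock;
    use candle::{DType, Device, Tensor};
    use candle_nn::VarBuilder;

    #[test]
    fn zero_init_is_identity() -> candle::Result<()> {
        let dev = Device::Cpu;
        let vb = VarBuilder::zeros(DType::F32, &dev);
        let block = TimestepBlock::new(8, 16, vb)?;
        let xs = Tensor::arange(0f32, 128f32, &dev)?.reshape((1, 8, 4, 4))?;
        let t = Tensor::zeros((1, 16), DType::F32, &dev)?;
        let ys = block.forward(&xs, &t)?;
        assert_eq!(ys.dims(), xs.dims());
        assert_eq!(
            ys.flatten_all()?.to_vec1::<f32>()?,
            xs.flatten_all()?.to_vec1::<f32>()?
        );
        Ok(())
    }
}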
6
0
hf_public_repos/candle/candle-transformers/src/models
hf_public_repos/candle/candle-transformers/src/models/wuerstchen/paella_vq.rs
use super::common::LayerNormNoWeights; use candle::{Module, Result, Tensor}; use candle_nn::VarBuilder; #[derive(Debug)] pub struct MixingResidualBlock { norm1: LayerNormNoWeights, depthwise_conv: candle_nn::Conv2d, norm2: LayerNormNoWeights, channelwise_lin1: candle_nn::Linear, channelwise_lin2: candle_nn::Linear, gammas: Vec<f32>, } impl MixingResidualBlock { pub fn new(inp: usize, embed_dim: usize, vb: VarBuilder) -> Result<Self> { let norm1 = LayerNormNoWeights::new(inp)?; let norm2 = LayerNormNoWeights::new(inp)?; let cfg = candle_nn::Conv2dConfig { groups: inp, ..Default::default() }; let depthwise_conv = candle_nn::conv2d(inp, inp, 3, cfg, vb.pp("depthwise.1"))?; let channelwise_lin1 = candle_nn::linear(inp, embed_dim, vb.pp("channelwise.0"))?; let channelwise_lin2 = candle_nn::linear(embed_dim, inp, vb.pp("channelwise.2"))?; let gammas = vb.get(6, "gammas")?.to_vec1::<f32>()?; Ok(Self { norm1, depthwise_conv, norm2, channelwise_lin1, channelwise_lin2, gammas, }) } } impl Module for MixingResidualBlock { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let mods = &self.gammas; let x_temp = xs .permute((0, 2, 3, 1))? .apply(&self.norm1)? .permute((0, 3, 1, 2))? .affine(1. + mods[0] as f64, mods[1] as f64)?; let x_temp = candle_nn::ops::replication_pad2d(&x_temp, 1)?; let xs = (xs + x_temp.apply(&self.depthwise_conv)? * mods[2] as f64)?; let x_temp = xs .permute((0, 2, 3, 1))? .apply(&self.norm2)? .permute((0, 3, 1, 2))? .affine(1. + mods[3] as f64, mods[4] as f64)?; let x_temp = x_temp .permute((0, 2, 3, 1))? .contiguous()? .apply(&self.channelwise_lin1)? .gelu()? .apply(&self.channelwise_lin2)? .permute((0, 3, 1, 2))?; xs + x_temp * mods[5] as f64 } } #[derive(Debug)] pub struct PaellaVQ { in_block_conv: candle_nn::Conv2d, out_block_conv: candle_nn::Conv2d, down_blocks: Vec<(Option<candle_nn::Conv2d>, MixingResidualBlock)>, down_blocks_conv: candle_nn::Conv2d, down_blocks_bn: candle_nn::BatchNorm, up_blocks_conv: candle_nn::Conv2d, up_blocks: Vec<(Vec<MixingResidualBlock>, Option<candle_nn::ConvTranspose2d>)>, } impl PaellaVQ { pub fn new(vb: VarBuilder) -> Result<Self> { const IN_CHANNELS: usize = 3; const OUT_CHANNELS: usize = 3; const LATENT_CHANNELS: usize = 4; const EMBED_DIM: usize = 384; const BOTTLENECK_BLOCKS: usize = 12; const C_LEVELS: [usize; 2] = [EMBED_DIM / 2, EMBED_DIM]; let in_block_conv = candle_nn::conv2d( IN_CHANNELS * 4, C_LEVELS[0], 1, Default::default(), vb.pp("in_block.1"), )?; let out_block_conv = candle_nn::conv2d( C_LEVELS[0], OUT_CHANNELS * 4, 1, Default::default(), vb.pp("out_block.0"), )?; let mut down_blocks = Vec::new(); let vb_d = vb.pp("down_blocks"); let mut d_idx = 0; for (i, &c_level) in C_LEVELS.iter().enumerate() { let conv_block = if i > 0 { let cfg = candle_nn::Conv2dConfig { padding: 1, stride: 2, ..Default::default() }; let block = candle_nn::conv2d(C_LEVELS[i - 1], c_level, 4, cfg, vb_d.pp(d_idx))?; d_idx += 1; Some(block) } else { None }; let res_block = MixingResidualBlock::new(c_level, c_level * 4, vb_d.pp(d_idx))?; d_idx += 1; down_blocks.push((conv_block, res_block)) } let vb_d = vb_d.pp(d_idx); let down_blocks_conv = candle_nn::conv2d_no_bias( C_LEVELS[1], LATENT_CHANNELS, 1, Default::default(), vb_d.pp(0), )?; let down_blocks_bn = candle_nn::batch_norm(LATENT_CHANNELS, 1e-5, vb_d.pp(1))?; let mut up_blocks = Vec::new(); let vb_u = vb.pp("up_blocks"); let mut u_idx = 0; let up_blocks_conv = candle_nn::conv2d( LATENT_CHANNELS, C_LEVELS[1], 1, Default::default(), vb_u.pp(u_idx).pp(0), )?; u_idx += 1; for (i, &c_level) in 
C_LEVELS.iter().rev().enumerate() { let mut res_blocks = Vec::new(); let n_bottleneck_blocks = if i == 0 { BOTTLENECK_BLOCKS } else { 1 }; for _j in 0..n_bottleneck_blocks { let res_block = MixingResidualBlock::new(c_level, c_level * 4, vb_u.pp(u_idx))?; u_idx += 1; res_blocks.push(res_block) } let conv_block = if i < C_LEVELS.len() - 1 { let cfg = candle_nn::ConvTranspose2dConfig { padding: 1, stride: 2, ..Default::default() }; let block = candle_nn::conv_transpose2d( c_level, C_LEVELS[C_LEVELS.len() - i - 2], 4, cfg, vb_u.pp(u_idx), )?; u_idx += 1; Some(block) } else { None }; up_blocks.push((res_blocks, conv_block)) } Ok(Self { in_block_conv, down_blocks, down_blocks_conv, down_blocks_bn, up_blocks, up_blocks_conv, out_block_conv, }) } pub fn encode(&self, xs: &Tensor) -> Result<Tensor> { let mut xs = candle_nn::ops::pixel_unshuffle(xs, 2)?.apply(&self.in_block_conv)?; for down_block in self.down_blocks.iter() { if let Some(conv) = &down_block.0 { xs = xs.apply(conv)? } xs = xs.apply(&down_block.1)? } xs.apply(&self.down_blocks_conv)? .apply_t(&self.down_blocks_bn, false) } pub fn decode(&self, xs: &Tensor) -> Result<Tensor> { // TODO: quantizer if we want to support `force_not_quantize=False`. let mut xs = xs.apply(&self.up_blocks_conv)?; for up_block in self.up_blocks.iter() { for b in up_block.0.iter() { xs = xs.apply(b)?; } if let Some(conv) = &up_block.1 { xs = xs.apply(conv)? } } xs.apply(&self.out_block_conv)? .apply(&|xs: &_| candle_nn::ops::pixel_shuffle(xs, 2)) } } impl Module for PaellaVQ { fn forward(&self, xs: &Tensor) -> Result<Tensor> { self.decode(&self.encode(xs)?) } }
7
0
hf_public_repos/candle/candle-transformers/src/models
hf_public_repos/candle/candle-transformers/src/models/wuerstchen/diffnext.rs
use super::common::{AttnBlock, GlobalResponseNorm, LayerNormNoWeights, TimestepBlock, WLayerNorm}; use candle::{DType, Module, Result, Tensor, D}; use candle_nn::VarBuilder; #[derive(Debug)] pub struct ResBlockStageB { depthwise: candle_nn::Conv2d, norm: WLayerNorm, channelwise_lin1: candle_nn::Linear, channelwise_grn: GlobalResponseNorm, channelwise_lin2: candle_nn::Linear, } impl ResBlockStageB { pub fn new(c: usize, c_skip: usize, ksize: usize, vb: VarBuilder) -> Result<Self> { let cfg = candle_nn::Conv2dConfig { groups: c, padding: ksize / 2, ..Default::default() }; let depthwise = candle_nn::conv2d(c, c, ksize, cfg, vb.pp("depthwise"))?; let norm = WLayerNorm::new(c)?; let channelwise_lin1 = candle_nn::linear(c + c_skip, c * 4, vb.pp("channelwise.0"))?; let channelwise_grn = GlobalResponseNorm::new(4 * c, vb.pp("channelwise.2"))?; let channelwise_lin2 = candle_nn::linear(c * 4, c, vb.pp("channelwise.4"))?; Ok(Self { depthwise, norm, channelwise_lin1, channelwise_grn, channelwise_lin2, }) } pub fn forward(&self, xs: &Tensor, x_skip: Option<&Tensor>) -> Result<Tensor> { let x_res = xs; let xs = xs.apply(&self.depthwise)?.apply(&self.norm)?; let xs = match x_skip { None => xs.clone(), Some(x_skip) => Tensor::cat(&[&xs, x_skip], 1)?, }; let xs = xs .permute((0, 2, 3, 1))? .contiguous()? .apply(&self.channelwise_lin1)? .gelu()? .apply(&self.channelwise_grn)? .apply(&self.channelwise_lin2)? .permute((0, 3, 1, 2))?; xs + x_res } } #[derive(Debug)] struct SubBlock { res_block: ResBlockStageB, ts_block: TimestepBlock, attn_block: Option<AttnBlock>, } #[derive(Debug)] struct DownBlock { layer_norm: Option<WLayerNorm>, conv: Option<candle_nn::Conv2d>, sub_blocks: Vec<SubBlock>, } #[derive(Debug)] struct UpBlock { sub_blocks: Vec<SubBlock>, layer_norm: Option<WLayerNorm>, conv: Option<candle_nn::ConvTranspose2d>, } #[derive(Debug)] pub struct WDiffNeXt { clip_mapper: candle_nn::Linear, effnet_mappers: Vec<Option<candle_nn::Conv2d>>, seq_norm: LayerNormNoWeights, embedding_conv: candle_nn::Conv2d, embedding_ln: WLayerNorm, down_blocks: Vec<DownBlock>, up_blocks: Vec<UpBlock>, clf_ln: WLayerNorm, clf_conv: candle_nn::Conv2d, c_r: usize, patch_size: usize, } impl WDiffNeXt { #[allow(clippy::too_many_arguments)] pub fn new( c_in: usize, c_out: usize, c_r: usize, c_cond: usize, clip_embd: usize, patch_size: usize, use_flash_attn: bool, vb: VarBuilder, ) -> Result<Self> { const C_HIDDEN: [usize; 4] = [320, 640, 1280, 1280]; const BLOCKS: [usize; 4] = [4, 4, 14, 4]; const NHEAD: [usize; 4] = [1, 10, 20, 20]; const INJECT_EFFNET: [bool; 4] = [false, true, true, true]; const EFFNET_EMBD: usize = 16; let clip_mapper = candle_nn::linear(clip_embd, c_cond, vb.pp("clip_mapper"))?; let mut effnet_mappers = Vec::with_capacity(2 * INJECT_EFFNET.len()); let vb_e = vb.pp("effnet_mappers"); for (i, &inject) in INJECT_EFFNET.iter().enumerate() { let c = if inject { Some(candle_nn::conv2d( EFFNET_EMBD, c_cond, 1, Default::default(), vb_e.pp(i), )?) } else { None }; effnet_mappers.push(c) } for (i, &inject) in INJECT_EFFNET.iter().rev().enumerate() { let c = if inject { Some(candle_nn::conv2d( EFFNET_EMBD, c_cond, 1, Default::default(), vb_e.pp(i + INJECT_EFFNET.len()), )?) 
} else { None }; effnet_mappers.push(c) } let seq_norm = LayerNormNoWeights::new(c_cond)?; let embedding_ln = WLayerNorm::new(C_HIDDEN[0])?; let embedding_conv = candle_nn::conv2d( c_in * patch_size * patch_size, C_HIDDEN[0], 1, Default::default(), vb.pp("embedding.1"), )?; let mut down_blocks = Vec::with_capacity(C_HIDDEN.len()); for (i, &c_hidden) in C_HIDDEN.iter().enumerate() { let vb = vb.pp("down_blocks").pp(i); let (layer_norm, conv, start_layer_i) = if i > 0 { let layer_norm = WLayerNorm::new(C_HIDDEN[i - 1])?; let cfg = candle_nn::Conv2dConfig { stride: 2, ..Default::default() }; let conv = candle_nn::conv2d(C_HIDDEN[i - 1], c_hidden, 2, cfg, vb.pp("0.1"))?; (Some(layer_norm), Some(conv), 1) } else { (None, None, 0) }; let mut sub_blocks = Vec::with_capacity(BLOCKS[i]); let mut layer_i = start_layer_i; for _j in 0..BLOCKS[i] { let c_skip = if INJECT_EFFNET[i] { c_cond } else { 0 }; let res_block = ResBlockStageB::new(c_hidden, c_skip, 3, vb.pp(layer_i))?; layer_i += 1; let ts_block = TimestepBlock::new(c_hidden, c_r, vb.pp(layer_i))?; layer_i += 1; let attn_block = if i == 0 { None } else { let attn_block = AttnBlock::new( c_hidden, c_cond, NHEAD[i], true, use_flash_attn, vb.pp(layer_i), )?; layer_i += 1; Some(attn_block) }; let sub_block = SubBlock { res_block, ts_block, attn_block, }; sub_blocks.push(sub_block) } let down_block = DownBlock { layer_norm, conv, sub_blocks, }; down_blocks.push(down_block) } let mut up_blocks = Vec::with_capacity(C_HIDDEN.len()); for (i, &c_hidden) in C_HIDDEN.iter().enumerate().rev() { let vb = vb.pp("up_blocks").pp(C_HIDDEN.len() - 1 - i); let mut sub_blocks = Vec::with_capacity(BLOCKS[i]); let mut layer_i = 0; for j in 0..BLOCKS[i] { let c_skip = if INJECT_EFFNET[i] { c_cond } else { 0 }; let c_skip_res = if i < BLOCKS.len() - 1 && j == 0 { c_hidden + c_skip } else { c_skip }; let res_block = ResBlockStageB::new(c_hidden, c_skip_res, 3, vb.pp(layer_i))?; layer_i += 1; let ts_block = TimestepBlock::new(c_hidden, c_r, vb.pp(layer_i))?; layer_i += 1; let attn_block = if i == 0 { None } else { let attn_block = AttnBlock::new( c_hidden, c_cond, NHEAD[i], true, use_flash_attn, vb.pp(layer_i), )?; layer_i += 1; Some(attn_block) }; let sub_block = SubBlock { res_block, ts_block, attn_block, }; sub_blocks.push(sub_block) } let (layer_norm, conv) = if i > 0 { let layer_norm = WLayerNorm::new(C_HIDDEN[i - 1])?; let cfg = candle_nn::ConvTranspose2dConfig { stride: 2, ..Default::default() }; let conv = candle_nn::conv_transpose2d( c_hidden, C_HIDDEN[i - 1], 2, cfg, vb.pp(layer_i).pp(1), )?; (Some(layer_norm), Some(conv)) } else { (None, None) }; let up_block = UpBlock { layer_norm, conv, sub_blocks, }; up_blocks.push(up_block) } let clf_ln = WLayerNorm::new(C_HIDDEN[0])?; let clf_conv = candle_nn::conv2d( C_HIDDEN[0], 2 * c_out * patch_size * patch_size, 1, Default::default(), vb.pp("clf.1"), )?; Ok(Self { clip_mapper, effnet_mappers, seq_norm, embedding_conv, embedding_ln, down_blocks, up_blocks, clf_ln, clf_conv, c_r, patch_size, }) } fn gen_r_embedding(&self, r: &Tensor) -> Result<Tensor> { const MAX_POSITIONS: usize = 10000; let r = (r * MAX_POSITIONS as f64)?; let half_dim = self.c_r / 2; let emb = (MAX_POSITIONS as f64).ln() / (half_dim - 1) as f64; let emb = (Tensor::arange(0u32, half_dim as u32, r.device())?.to_dtype(DType::F32)? * -emb)? .exp()?; let emb = r.unsqueeze(1)?.broadcast_mul(&emb.unsqueeze(0)?)?; let emb = Tensor::cat(&[emb.sin()?, emb.cos()?], 1)?; let emb = if self.c_r % 2 == 1 { emb.pad_with_zeros(D::Minus1, 0, 1)? 
} else { emb }; emb.to_dtype(r.dtype()) } fn gen_c_embeddings(&self, clip: &Tensor) -> Result<Tensor> { clip.apply(&self.clip_mapper)?.apply(&self.seq_norm) } pub fn forward( &self, xs: &Tensor, r: &Tensor, effnet: &Tensor, clip: Option<&Tensor>, ) -> Result<Tensor> { const EPS: f64 = 1e-3; let r_embed = self.gen_r_embedding(r)?; let clip = match clip { None => None, Some(clip) => Some(self.gen_c_embeddings(clip)?), }; let x_in = xs; let mut xs = xs .apply(&|xs: &_| candle_nn::ops::pixel_unshuffle(xs, self.patch_size))? .apply(&self.embedding_conv)? .apply(&self.embedding_ln)?; let mut level_outputs = Vec::new(); for (i, down_block) in self.down_blocks.iter().enumerate() { if let Some(ln) = &down_block.layer_norm { xs = xs.apply(ln)? } if let Some(conv) = &down_block.conv { xs = xs.apply(conv)? } let skip = match &self.effnet_mappers[i] { None => None, Some(m) => { let effnet = effnet.interpolate2d(xs.dim(D::Minus2)?, xs.dim(D::Minus1)?)?; Some(m.forward(&effnet)?) } }; for block in down_block.sub_blocks.iter() { xs = block.res_block.forward(&xs, skip.as_ref())?; xs = block.ts_block.forward(&xs, &r_embed)?; if let Some(attn_block) = &block.attn_block { xs = attn_block.forward(&xs, clip.as_ref().unwrap())?; } } level_outputs.push(xs.clone()) } level_outputs.reverse(); let mut xs = level_outputs[0].clone(); for (i, up_block) in self.up_blocks.iter().enumerate() { let effnet_c = match &self.effnet_mappers[self.down_blocks.len() + i] { None => None, Some(m) => { let effnet = effnet.interpolate2d(xs.dim(D::Minus2)?, xs.dim(D::Minus1)?)?; Some(m.forward(&effnet)?) } }; for (j, block) in up_block.sub_blocks.iter().enumerate() { let skip = if j == 0 && i > 0 { Some(&level_outputs[i]) } else { None }; let skip = match (skip, effnet_c.as_ref()) { (Some(skip), Some(effnet_c)) => Some(Tensor::cat(&[skip, effnet_c], 1)?), (None, Some(skip)) | (Some(skip), None) => Some(skip.clone()), (None, None) => None, }; xs = block.res_block.forward(&xs, skip.as_ref())?; xs = block.ts_block.forward(&xs, &r_embed)?; if let Some(attn_block) = &block.attn_block { xs = attn_block.forward(&xs, clip.as_ref().unwrap())?; } } if let Some(ln) = &up_block.layer_norm { xs = xs.apply(ln)? } if let Some(conv) = &up_block.conv { xs = xs.apply(conv)? } } let ab = xs .apply(&self.clf_ln)? .apply(&self.clf_conv)? .apply(&|xs: &_| candle_nn::ops::pixel_shuffle(xs, self.patch_size))? .chunk(2, 1)?; let b = ((candle_nn::ops::sigmoid(&ab[1])? * (1. - EPS * 2.))? + EPS)?; (x_in - &ab[0])? / b } }
8
0
hf_public_repos/candle/candle-transformers/src/models
hf_public_repos/candle/candle-transformers/src/models/mimi/encodec.rs
// Copyright (c) Kyutai, all rights reserved. // This source code is licensed under the license found in the // LICENSE file in the root directory of this source tree. use super::{conv, quantization, seanet, transformer}; use candle::{DType, Device, Module, Result, StreamTensor, StreamingModule, Tensor}; use candle_nn::VarBuilder; #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum ResampleMethod { Conv, Interpolate, } #[derive(Debug, Clone)] pub struct Config { pub channels: usize, pub sample_rate: f64, pub frame_rate: f64, pub renormalize: bool, pub resample_method: ResampleMethod, pub seanet: seanet::Config, pub transformer: transformer::Config, pub quantizer_n_q: usize, pub quantizer_bins: usize, pub quantizer_dim: usize, } impl Config { // /lustre/scwpod02/client/kyutai/alex/mimi_exp/xps/b7d2bd5a/.hydra/config.yaml pub fn v0_1(num_codebooks: Option<usize>) -> Self { let seanet_cfg = seanet::Config { dimension: 512, channels: 1, causal: true, n_filters: 64, n_residual_layers: 1, activation: candle_nn::Activation::Elu(1.), compress: 2, dilation_base: 2, disable_norm_outer_blocks: 0, final_activation: None, kernel_size: 7, residual_kernel_size: 3, last_kernel_size: 3, lstm: 0, norm: conv::Norm::WeightNorm, pad_mode: conv::PadMode::Constant, ratios: vec![8, 6, 5, 4], true_skip: true, }; let transformer_cfg = transformer::Config { d_model: seanet_cfg.dimension, num_heads: 8, num_layers: 8, causal: true, norm_first: true, bias_ff: false, bias_attn: false, layer_scale: Some(0.01), context: 250, conv_kernel_size: 5, use_conv_bias: true, use_conv_block: false, cross_attention: false, max_period: 10000, gating: None, norm: super::NormType::LayerNorm, positional_embedding: transformer::PositionalEmbedding::Rope, dim_feedforward: 2048, kv_repeat: 1, conv_layout: true, // see builders.py max_seq_len: 8192, // the transformer works at 25hz so this is ~5 mins. }; Config { channels: 1, sample_rate: 24_000., frame_rate: 12.5, renormalize: true, resample_method: ResampleMethod::Conv, seanet: seanet_cfg, transformer: transformer_cfg, quantizer_n_q: num_codebooks.unwrap_or(16), quantizer_bins: 2048, quantizer_dim: 256, } } } #[derive(Debug, Clone)] pub struct Encodec { encoder: seanet::SeaNetEncoder, decoder: seanet::SeaNetDecoder, encoder_transformer: transformer::ProjectedTransformer, decoder_transformer: transformer::ProjectedTransformer, downsample: conv::ConvDownsample1d, upsample: conv::ConvTrUpsample1d, quantizer: quantization::SplitResidualVectorQuantizer, config: Config, } impl Encodec { pub fn new(cfg: Config, vb: VarBuilder) -> Result<Self> { let dim = cfg.seanet.dimension; let encoder = seanet::SeaNetEncoder::new(&cfg.seanet, vb.pp("encoder"))?; let decoder = seanet::SeaNetDecoder::new(&cfg.seanet, vb.pp("decoder"))?; let encoder_transformer = transformer::ProjectedTransformer::new( dim, &[dim], &cfg.transformer, vb.pp("encoder_transformer"), )?; let decoder_transformer = transformer::ProjectedTransformer::new( dim, &[dim], &cfg.transformer, vb.pp("decoder_transformer"), )?; let quantizer = quantization::SplitResidualVectorQuantizer::new( /* dim */ cfg.quantizer_dim, /* input_dim */ Some(dim), /* output_dim */ Some(dim), /* n_q */ cfg.quantizer_n_q, /* bins */ cfg.quantizer_bins, vb.pp("quantizer"), )?; let encoder_frame_rate = cfg.sample_rate / cfg.seanet.ratios.iter().product::<usize>() as f64; let downsample_stride = (encoder_frame_rate / cfg.frame_rate) as usize; // `upsample` and `downsample` only apply if frame_rate is different from encoder_frame_rate. 
let downsample = conv::ConvDownsample1d::new( /* stride */ downsample_stride, /* dim */ dim, /* causal */ true, /* learnt */ true, vb.pp("downsample"), )?; let upsample = conv::ConvTrUpsample1d::new( /* stride */ downsample_stride, /* dim */ dim, /* causal */ true, /* learnt */ true, vb.pp("upsample"), )?; Ok(Self { encoder, decoder, encoder_transformer, decoder_transformer, quantizer, downsample, upsample, config: cfg, }) } pub fn config(&self) -> &Config { &self.config } pub fn encode_pre_quantize(&mut self, xs: &Tensor) -> Result<Tensor> { let xs = self.encoder.forward(xs)?; self.encoder_transformer.reset_state(); let xs = self.encoder_transformer.forward(&xs)?; let xs = &xs[0]; xs.apply(&self.downsample) } pub fn encode(&mut self, xs: &Tensor) -> Result<Tensor> { let xs = self.encoder.forward(xs)?; self.encoder_transformer.reset_state(); let xs = self.encoder_transformer.forward(&xs)?; let xs = &xs[0]; let xs = xs.apply(&self.downsample)?; let codes = self.quantizer.encode(&xs)?; Ok(codes) } pub fn encode_step(&mut self, xs: &StreamTensor) -> Result<StreamTensor> { let xs = self.encoder.step(xs)?; let xs = self.encoder_transformer.step(&xs)?; let xs = self.downsample.step(&xs)?; match xs.as_option() { None => Ok(().into()), Some(xs) => { let codes = self.quantizer.encode(xs)?; Ok(codes.into()) } } } pub fn decode(&mut self, codes: &Tensor) -> Result<Tensor> { let emb = self.quantizer.decode(codes)?; let emb = emb.apply(&self.upsample)?; self.decoder_transformer.reset_state(); let outs = self.decoder_transformer.forward(&emb)?; let out = &outs[0]; self.decoder.forward(out) } pub fn decode_step(&mut self, codes: &StreamTensor) -> Result<StreamTensor> { let emb = match codes.as_option() { Some(codes) => StreamTensor::from_tensor(self.quantizer.decode(codes)?), None => StreamTensor::empty(), }; let emb = self.upsample.step(&emb)?; let out = self.decoder_transformer.step(&emb)?; self.decoder.step(&out) } pub fn reset_state(&mut self) { self.encoder.reset_state(); self.encoder_transformer.reset_state(); self.decoder.reset_state(); self.decoder_transformer.reset_state(); self.upsample.reset_state(); } } pub fn load(model_file: &str, num_codebooks: Option<usize>, dev: &Device) -> Result<Encodec> { let vb = unsafe { candle_nn::VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, dev)? }; let cfg = Config::v0_1(num_codebooks); let encodec = Encodec::new(cfg, vb)?; Ok(encodec) }
9
0
hf_public_repos
hf_public_repos/bloom-jax-inference/setup.py
from setuptools import setup, find_packages setup( name='bloom_inference', version='0.0.0', packages=find_packages() )
0
0
hf_public_repos
hf_public_repos/bloom-jax-inference/ray_tpu.py
import functools import os import subprocess import time import glob import requests from fabric import Connection @functools.lru_cache() def get_bearer(): return subprocess.check_output("gcloud auth print-access-token", shell=True).decode("utf-8").strip() @functools.lru_cache() def get_project(): return subprocess.check_output('gcloud config list --format "value(core.project)"', shell=True).decode( "utf-8").strip() def check_tpu(name, zone): headers = { "Authorization": f"Bearer {get_bearer()}", } response = requests.get( f"https://tpu.googleapis.com/v2alpha1/projects/{get_project()}/locations/{zone}/nodes/{name}", headers=headers) return response.json() def get_connection( name, zone, ): info = check_tpu(name, zone) outputs = [] for i in info["networkEndpoints"]: outputs.append(Connection(i["ipAddress"], connect_kwargs={ "key_filename": os.path.expanduser("~/.ssh/google_compute_engine"), })) return outputs def start_ray(conn, address): # start afresh each launch (temporarily) conn.run("sudo rm -rf *.py bloom-jax-inference") # make directory of structure: bloom_inference/bloom_inference/modeling_bloom conn.run("mkdir bloom-jax-inference bloom-jax-inference/bloom_inference bloom-jax-inference/bloom_inference/modeling_bloom -p") # copy run files into bloom_inference for i in glob.glob("*.py"): conn.put(i, "bloom-jax-inference/") # copy CPU/TPU manager files into bloom_inference/bloom_inference for i in glob.glob("bloom_inference/*.py"): conn.put(i, "bloom-jax-inference/bloom_inference/") # copy CPU/TPU manager files into bloom_inference/bloom_inference for i in glob.glob("scripts/*.sh"): conn.put(i, "bloom_inference/scripts/") # copy modeling files into bloom_inference/bloom_inference/modeling_bloom for i in glob.glob("bloom_inference/modeling_bloom/*.py"): conn.put(i, "bloom-jax-inference/bloom_inference/modeling_bloom/") # copy modeling files into bloom_inference/bloom_inference/modeling_bloom for i in glob.glob("*.sh"): conn.put(i, "bloom-jax-inference/") # copy key files into bloom_inference conn.put("key.json", "bloom-jax-inference/") # transfer start-up script from CPU -> hosts and give permissions conn.sudo("chmod +x bloom_inference/scripts/ray_tpu.sh", hide=True) try: conn.run("ray stop -f", hide=True) except: pass time.sleep(1) # run start-up script out = conn.run(f"bash /tmp/ray-tpu.sh {address}", hide=False) # display result print(out)
1
0
hf_public_repos
hf_public_repos/bloom-jax-inference/requirements.txt
ray==1.13.0
aiohttp==3.7
fabric
dataclasses
func_timeout
numpy
requests
2
0
hf_public_repos
hf_public_repos/bloom-jax-inference/sharding_example.py
import numpy as np import jax import jax.numpy as jnp from flax.core.frozen_dict import freeze from jax.experimental import PartitionSpec as P from t5x.partitioning import PjitPartitioner from t5x.train_state import InferenceState from bloom_inference.modeling_bloom import FlaxBloomForCausalLM, BloomConfig from transformers import AutoTokenizer ckpt = "sanchit-gandhi/bloom-350m-scan-t5x" config = BloomConfig(n_layer=1) model, params = FlaxBloomForCausalLM.from_pretrained(ckpt, _do_init=False, dtype=jnp.bfloat16, use_scan=True) tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-350m", use_fast=False) # 1D parameter partitioning with 2D activation partitioning logical_axis_rules = [ ('batch', 'data'), ('mlp', 'model'), ('heads', 'model'), ('vocab', 'model'), # shard remaining activations; weight matrices already have axes mapped to 'model' ('embed', 'model'), ('kv', None), ('joined_kv', None), ('relpos_buckets', None), ('abspos_buckets', None), ('length', None), ('layers', None), ('stack', None), ('mlp_activations', None), ] # 2D parameter and activation partitioning logical_axis_rules_full = [ ('batch', 'data'), ('mlp', 'model'), ('heads', 'model'), ('vocab', 'model'), # shard both activations and weight matrices on the remaining available axis ('embed', 'model'), ('embed', 'data'), ('kv', None), ('joined_kv', None), ('relpos_buckets', None), ('abspos_buckets', None), ('length', None), ('layers', None), ('stack', None), ('mlp_activations', None), ] # TODO: Add this in model init def init_fn(): input_shape = (1,1) input_ids = jnp.zeros(input_shape, dtype="i4") attention_mask = jnp.ones_like(input_ids) rng = jax.random.PRNGKey(0) return model.module.init(rng, input_ids, attention_mask, return_dict=False) param_axes = jax.eval_shape(init_fn)["params_axes"] # Axis names metadata # create InferenceState, since the partitioner expects it. state = InferenceState( step=jnp.array(0), params=freeze(model.params_shape_tree), params_axes=freeze(param_axes), flax_mutables=None, flax_mutables_axes=param_axes, ) num_mp_partitions = 8 partitioner = PjitPartitioner(num_mp_partitions, logical_axis_rules=logical_axis_rules_full) mesh_axes = partitioner.get_mesh_axes(state) params_spec = mesh_axes.params shard_params = partitioner.partition(model.to_bf16, (params_spec,), params_spec) # This will auto-magically run in mesh context params = shard_params(freeze(params)) def generate(params, input_ids, attention_mask): output_ids = model.generate(input_ids, attention_mask=attention_mask, params=params).sequences return output_ids p_generate = partitioner.partition( generate, in_axis_resources=(params_spec, P("data"), P("data")), out_axis_resources=P("data") ) tokenizer.padding_side = "left" model.config.max_length = 256 model.config.num_beams = 1 model.config.do_sample = True model.config.pad_token_id = tokenizer.pad_token_id prompt = "Reciepe for pasta with coconut:" inputs = tokenizer([prompt] * 8, return_tensors="jax", padding="max_length", truncation=True, max_length=64) # BS = 8 gen_ids = p_generate(freeze(params), inputs["input_ids"], inputs["attention_mask"]) generated_text = tokenizer.batch_decode(np.asarray(gen_ids), skip_special_tokens=True)
3
0
hf_public_repos
hf_public_repos/bloom-jax-inference/checkpointer_example.py
import numpy as np import jax import jax.numpy as jnp from jax.experimental import PartitionSpec as P from flax.core.frozen_dict import freeze from t5x.partitioning import PjitPartitioner from t5x.train_state import InferenceState from t5x.checkpoints import Checkpointer from bloom_inference.modeling_bloom import FlaxBloomForCausalLM, BloomConfig from transformers import AutoTokenizer jax.config.update('jax_parallel_functions_output_gda', True) ckpt = "sanchit-gandhi/bloom-350m-scan-t5x" config = BloomConfig(n_layer=1) model, params = FlaxBloomForCausalLM.from_pretrained(ckpt, _do_init=False, dtype=jnp.bfloat16, use_scan=True) tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-350m", use_fast=False) # 2D parameter and activation partitioning logical_axis_rules_full = [ ('batch', 'data'), ('mlp', 'model'), ('heads', 'model'), ('vocab', 'model'), # shard both activations and weight matrices on the remaining available axis ('embed', 'model'), ('embed', 'data'), ('kv', None), ('joined_kv', None), ('relpos_buckets', None), ('abspos_buckets', None), ('length', None), ('layers', None), ('stack', None), ('mlp_activations', None), ] def init_state(): input_shape = (1,1) input_ids = jnp.zeros(input_shape, dtype="i4") attention_mask = jnp.ones_like(input_ids) rng = jax.random.PRNGKey(0) initial_vars = model.module.init(rng, input_ids, attention_mask, return_dict=False) return InferenceState.create(initial_vars) state_shapes = jax.eval_shape(init_state) num_mp_partitions = 4 partitioner = PjitPartitioner(num_mp_partitions, logical_axis_rules=logical_axis_rules_full) mesh_axes = partitioner.get_mesh_axes(state_shapes) params_spec = mesh_axes.params p_shard_params = partitioner.partition(model.to_bf16, (params_spec,), params_spec) # This will auto-magically run in mesh context params = p_shard_params(freeze(params)) # create frozen dict of model variables (params, params_axes), expected format of the .create method of InferenceState model_variables = freeze({'params': params, 'params_axes': state_shapes.params_axes}) # create InferenceState in .create method format (takes care of all attributes) # TODO: flax_mutables & flax_mutables_axes required? state = InferenceState.create(model_variables) # Instantiate checkpointer path = "gs://suraj-tpu-bucket/bloom-6b3-scan-t5x-v3-8-pretrained" checkpointer = Checkpointer(state_shapes, partitioner, path, use_gda=True, restore_dtype=jnp.bfloat16, save_dtype=jnp.bfloat16) # save state -> save at step 0 will save to dir /checkpoint_0 checkpointer.save(state) # load state path = "gs://suraj-tpu-bucket/bloom-6b3-scan-t5x-v3-8-pretrained/checkpoint_0" loaded_state = checkpointer.restore(path=path) # Sanity checks # 1. check params shapes equal # chex.assert_trees_all_equal_shapes(state.params, loaded_state.params), "params shapes not equal" # 2. check params all equal # chex.assert_trees_all_equal(state.params, loaded_state.params), "params values not equal" # 3. 
check params axes all equal # chex.assert_trees_all_equal(state.params_axes, loaded_state.params_axes), "params axes not equal" def generate(params, input_ids, attention_mask): output_ids = model.generate(input_ids, attention_mask=attention_mask, params=params).sequences return output_ids p_generate = partitioner.partition( generate, in_axis_resources=(params_spec, P("data"), P("data")), out_axis_resources=P("data") ) # setup for generation tokenizer.padding_side = "left" model.config.max_length = 64 model.config.num_beams = 1 model.config.do_sample = False prompts = ["This is cool "] * 4 inputs = tokenizer(prompts, return_tensors="jax", padding="max_length", truncation=True, max_length=16) # This will auto-magically run in mesh context gen_ids = p_generate(loaded_state.params, inputs["input_ids"], inputs["attention_mask"]) generated_text = tokenizer.batch_decode(gen_ids.local_shards[0].data, skip_special_tokens=False) if jax.process_index() == 0: print(generated_text)
4
0
hf_public_repos
hf_public_repos/bloom-jax-inference/run_speed.py
import argparse import time import numpy as np import jax import jax.numpy as jnp from jax.experimental import PartitionSpec as P from t5x.partitioning import PjitPartitioner from t5x.train_state import InferenceState from t5x.checkpoints import Checkpointer from bloom_inference.modeling_bloom import FlaxBloomForCausalLM, BloomConfig from transformers import AutoTokenizer # create a parser to get ckpt, path, max_len, input_len parser = argparse.ArgumentParser() parser.add_argument("--ckpt", type=str, default="bigscience/bloom") parser.add_argument("--t5x_path", type=str, default="gs://bloom-jax-us-central2-b/bloom-176B-scan-t5x/checkpoint_0") parser.add_argument("--max_len", type=int, default=100) parser.add_argument("--input_len", type=int, default=10) args = parser.parse_args() ckpt = args.ckpt path = args.t5x_path max_len = args.max_len input_len = args.input_len config = BloomConfig.from_pretrained(ckpt) model = FlaxBloomForCausalLM(config, _do_init=False, dtype=jnp.bfloat16, use_scan=True) tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-350m", use_fast=False) # 2D parameter and activation partitioning logical_axis_rules_full = [ ('batch', 'data'), ('mlp', 'model'), ('heads', 'model'), ('vocab', 'model'), # shard both activations and weight matrices on the remaining available axis ('embed', 'model'), ('embed', 'data'), ('kv', None), ('joined_kv', None), ('relpos_buckets', None), ('abspos_buckets', None), ('length', None), ('layers', None), ('stack', None), ('mlp_activations', None), ] def init_state(): input_shape = (1,1) input_ids = jnp.zeros(input_shape, dtype="i4") attention_mask = jnp.ones_like(input_ids) rng = jax.random.PRNGKey(0) initial_vars = model.module.init(rng, input_ids, attention_mask, return_dict=False) return InferenceState.create(initial_vars) state_shapes = jax.eval_shape(init_state) # model_parallel_submesh = (2, 2, 2, 1), (2, 4, 1, 1), (2, 1, 4, 1) (1, 4, 2, 1) (1, 2, 4, 1) model_parallel_submesh = (1, 2, 4, 1) partitioner = PjitPartitioner( model_parallel_submesh=model_parallel_submesh, logical_axis_rules=logical_axis_rules_full ) mesh_axes = partitioner.get_mesh_axes(state_shapes) params_spec = mesh_axes.params # Instantiate checkpointer checkpointer = Checkpointer(state_shapes, partitioner, path, use_gda=True, restore_dtype=jnp.bfloat16, save_dtype=jnp.bfloat16) # load state loaded_state = checkpointer.restore(path=path) def generate(params, input_ids, attention_mask): output_ids = model.generate(input_ids, attention_mask=attention_mask, params=params).sequences return output_ids p_generate = partitioner.partition( generate, in_axis_resources=(params_spec, P("data"), P("data")), out_axis_resources=P("data") ) # setup for generation tokenizer.padding_side = "left" model.config.max_length = max_len model.config.num_beams = 1 model.config.do_sample = True model.config.top_p = 0.9 prompts = ["This is cool "] * 4 inputs = tokenizer(prompts, return_tensors="jax", padding="max_length", truncation=True, max_length=input_len) # This will auto-magically run in mesh context start = time.time() gen_ids = p_generate(loaded_state.params, inputs["input_ids"], inputs["attention_mask"]) generated_text = tokenizer.batch_decode(gen_ids, skip_special_tokens=False) if jax.process_index() == 0: print("Compilation time:", time.time() - start) start = time.time() gen_ids = p_generate(loaded_state.params, inputs["input_ids"], inputs["attention_mask"]) generated_text = tokenizer.batch_decode(gen_ids.local_shards[0].data, skip_special_tokens=False) if jax.process_index() == 0: 
print("Generation time:", time.time() - start) if jax.process_index() == 0: print(generated_text)
5
0
hf_public_repos
hf_public_repos/bloom-jax-inference/launch_generate.sh
INSTANCE=bloom-tpu-v4-64 ZONE=us-central2-b PROJECT=huggingface-ml # run script.bash through run_script.bash gcloud alpha compute tpus tpu-vm ssh $INSTANCE --project=$PROJECT --zone=$ZONE \ --force-key-file-overwrite --strict-host-key-checking=no \ --worker=all \ --command="bash ~/bloom-jax-inference/run_generate.sh"
6
0
hf_public_repos
hf_public_repos/bloom-jax-inference/run.py
import functools import time from multiprocessing import pool import ray from ray_tpu import get_connection, start_ray from bloom_inference.tpu_manager import TPUManager tpu_name="bloom-tpu-v4-64" region="us-central2-b" ckpt = "bigscience/bloom" t5x_path = "gs://bloom-jax-us-central2-b/bloom-176B-scan-t5x/checkpoint_0" max_len = 128 max_input_len = 64 model_parallel_submesh = (1, 2, 4, 1) # for v4-64 def setup(): # get Python list of TPU hosts conns = get_connection(tpu_name, region) print(len(conns)) address='10.130.0.10:8080' head_info = ray.init(include_dashboard=False, address="auto") # object_store_memory=10**9, # start ray CPU<->TPU on all hosts with pool.ThreadPool(processes=len(conns)) as p: p.map(functools.partial(start_ray, address=address), conns) def init_manager(): # initialise TPU manager t = TPUManager( 8, ckpt=ckpt, t5x_path=t5x_path, max_len=max_len, max_input_len=max_input_len, model_parallel_submesh=model_parallel_submesh, ) return t # # benchmark compile step # start = time.time() # print(t.generate(4*['Recipe for coconut pasta:'])) # print(f"Generations completed in {time.time() - start:.06}s") # # benchmark generate # start = time.time() # print(t.generate(4*['Recipe for coconut pasta:'])) # print(f"Generations completed in {time.time() - start:.06}s") # # shutdown ray rpc # ray.shutdown()
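# A minimal driver sketch, assuming `setup()` followed by `init_manager()` is the
# intended call order; the generate/benchmark/shutdown calls mirror the
# commented-out block above rather than anything the original script executes.
if __name__ == "__main__":
    setup()
    t = init_manager()

    # benchmark compile step
    start = time.time()
    print(t.generate(4 * ['Recipe for coconut pasta:']))
    print(f"Generations completed in {time.time() - start:.06}s")

    # benchmark generate
    start = time.time()
    print(t.generate(4 * ['Recipe for coconut pasta:']))
    print(f"Generations completed in {time.time() - start:.06}s")

    # shutdown ray rpc
    ray.shutdown()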
7
0
hf_public_repos
hf_public_repos/bloom-jax-inference/README.md
# BLOOM 🌸 Inference in JAX

## Structure

CPU Host: as defined in TPU manager

TPU Host: as defined in Host worker

`ray`: distributes load from CPU host -> TPU hosts

Example usage: `run.py`

## Setting Up a TPU Manager

The TPU hosts are managed by a single TPU manager. This TPU manager takes the form of a single CPU device.

First, create a CPU VM in the **same region** as that of the TPU pod. This is important to enable the TPU manager to communicate with the TPU hosts. A suitable device config is as follows:

1. Region & Zone: TO MATCH TPU ZONE
2. Machine type: c2-standard-8
3. CPU platform: Intel Cascade Lake
4. Boot disk: 256GB balanced persistent disk

SSH into the CPU and set up a Python environment with the **same Python version** as that of the TPUs. The default TPU Python version is 3.8.10. You should ensure the Python version of the CPU matches this.

```
python3.8 -m venv /path/to/venv
```

If the above does not work, run the following and then repeat:

```
sudo apt-get update
sudo apt-get install python3-venv
```

Activate the Python environment:

```
source /path/to/venv/bin/activate
```

Check that the Python version is 3.8.10:

```
python --version
```

Clone the repository and install requirements:

```
git clone https://github.com/huggingface/bloom-jax-inference.git
cd bloom-jax-inference
pip install -r requirements.txt
```

Authenticate `gcloud`, which will require copy-and-pasting a command into a terminal window on a machine with a browser installed:

```
gcloud auth login
```

Now SSH into one of the workers. This will generate an SSH key:

```
gcloud alpha compute tpus tpu-vm ssh patrick-tpu-v3-32 --zone europe-west4-a --worker 0
```

Log out of the TPU worker:

```
logout
```

You should now be back on the CPU host.
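## Example Usage

A minimal sketch of driving generation from the CPU host, mirroring `run.py`: the `TPUManager` arguments below are the ones that script passes, and it assumes the ray workers on the TPU hosts have already been started (e.g. via `setup()` in `run.py`).

```
import ray
from bloom_inference.tpu_manager import TPUManager

# connect to the ray cluster started on the CPU host
ray.init(include_dashboard=False, address="auto")

# initialise the TPU manager (arguments as in run.py)
t = TPUManager(
    8,
    ckpt="bigscience/bloom",
    t5x_path="gs://bloom-jax-us-central2-b/bloom-176B-scan-t5x/checkpoint_0",
    max_len=128,
    max_input_len=64,
    model_parallel_submesh=(1, 2, 4, 1),  # for v4-64
)

# the first call triggers compilation; subsequent calls reuse the compiled function
print(t.generate(4 * ["Recipe for coconut pasta:"]))

ray.shutdown()
```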
8
0
hf_public_repos
hf_public_repos/bloom-jax-inference/run_generate.sh
source ~/venv/bin/activate export GOOGLE_APPLICATION_CREDENTIALS=~/bloom-jax-inference/key.json python ~/bloom-jax-inference/run_speed.py
9