Dataset preview columns: text (string, lengths 7 to 328k), id (string, lengths 14 to 166), metadata (dict), __index_level_0__ (int64, values 0 to 459).
# coding=utf-8 # Copyright 2020 Optuna, Hugging Face # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Logging utilities.""" import functools import logging import os import sys import threading from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from logging import captureWarnings as _captureWarnings from typing import Optional import huggingface_hub.utils as hf_hub_utils from tqdm import auto as tqdm_lib _lock = threading.Lock() _default_handler: Optional[logging.Handler] = None log_levels = { "detail": logging.DEBUG, # will also print filename and line number "debug": logging.DEBUG, "info": logging.INFO, "warning": logging.WARNING, "error": logging.ERROR, "critical": logging.CRITICAL, } _default_log_level = logging.WARNING _tqdm_active = not hf_hub_utils.are_progress_bars_disabled() def _get_default_logging_level(): """ If TRANSFORMERS_VERBOSITY env var is set to one of the valid choices return that as the new default level. If it is not - fall back to `_default_log_level` """ env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, " f"has to be one of: { ', '.join(log_levels.keys()) }" ) return _default_log_level def _get_library_name() -> str: return __name__.split(".")[0] def _get_library_root_logger() -> logging.Logger: return logging.getLogger(_get_library_name()) def _configure_library_root_logger() -> None: global _default_handler with _lock: if _default_handler: # This library has already configured the library root logger. return _default_handler = logging.StreamHandler() # Set sys.stderr as stream. # set defaults based on https://github.com/pyinstaller/pyinstaller/issues/7334#issuecomment-1357447176 if sys.stderr is None: sys.stderr = open(os.devnull, "w") _default_handler.flush = sys.stderr.flush # Apply our default configuration to the library root logger. library_root_logger = _get_library_root_logger() library_root_logger.addHandler(_default_handler) library_root_logger.setLevel(_get_default_logging_level()) # if logging level is debug, we add pathname and lineno to formatter for easy debugging if os.getenv("TRANSFORMERS_VERBOSITY", None) == "detail": formatter = logging.Formatter("[%(levelname)s|%(pathname)s:%(lineno)s] %(asctime)s >> %(message)s") _default_handler.setFormatter(formatter) library_root_logger.propagate = False def _reset_library_root_logger() -> None: global _default_handler with _lock: if not _default_handler: return library_root_logger = _get_library_root_logger() library_root_logger.removeHandler(_default_handler) library_root_logger.setLevel(logging.NOTSET) _default_handler = None def get_log_levels_dict(): return log_levels def captureWarnings(capture): """ Calls the `captureWarnings` method from the logging library to enable management of the warnings emitted by the `warnings` library. 
Read more about this method here: https://docs.python.org/3/library/logging.html#integration-with-the-warnings-module All warnings will be logged through the `py.warnings` logger. Careful: this method also adds a handler to this logger if it does not already have one, and updates the logging level of that logger to the library's root logger. """ logger = get_logger("py.warnings") if not logger.handlers: logger.addHandler(_default_handler) logger.setLevel(_get_library_root_logger().level) _captureWarnings(capture) def get_logger(name: Optional[str] = None) -> logging.Logger: """ Return a logger with the specified name. This function is not supposed to be directly accessed unless you are writing a custom transformers module. """ if name is None: name = _get_library_name() _configure_library_root_logger() return logging.getLogger(name) def get_verbosity() -> int: """ Return the current level for the 🤗 Transformers's root logger as an int. Returns: `int`: The logging level. <Tip> 🤗 Transformers has following logging levels: - 50: `transformers.logging.CRITICAL` or `transformers.logging.FATAL` - 40: `transformers.logging.ERROR` - 30: `transformers.logging.WARNING` or `transformers.logging.WARN` - 20: `transformers.logging.INFO` - 10: `transformers.logging.DEBUG` </Tip>""" _configure_library_root_logger() return _get_library_root_logger().getEffectiveLevel() def set_verbosity(verbosity: int) -> None: """ Set the verbosity level for the 🤗 Transformers's root logger. Args: verbosity (`int`): Logging level, e.g., one of: - `transformers.logging.CRITICAL` or `transformers.logging.FATAL` - `transformers.logging.ERROR` - `transformers.logging.WARNING` or `transformers.logging.WARN` - `transformers.logging.INFO` - `transformers.logging.DEBUG` """ _configure_library_root_logger() _get_library_root_logger().setLevel(verbosity) def set_verbosity_info(): """Set the verbosity to the `INFO` level.""" return set_verbosity(INFO) def set_verbosity_warning(): """Set the verbosity to the `WARNING` level.""" return set_verbosity(WARNING) def set_verbosity_debug(): """Set the verbosity to the `DEBUG` level.""" return set_verbosity(DEBUG) def set_verbosity_error(): """Set the verbosity to the `ERROR` level.""" return set_verbosity(ERROR) def disable_default_handler() -> None: """Disable the default handler of the HuggingFace Transformers's root logger.""" _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().removeHandler(_default_handler) def enable_default_handler() -> None: """Enable the default handler of the HuggingFace Transformers's root logger.""" _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().addHandler(_default_handler) def add_handler(handler: logging.Handler) -> None: """adds a handler to the HuggingFace Transformers's root logger.""" _configure_library_root_logger() assert handler is not None _get_library_root_logger().addHandler(handler) def remove_handler(handler: logging.Handler) -> None: """removes given handler from the HuggingFace Transformers's root logger.""" _configure_library_root_logger() assert handler is not None and handler not in _get_library_root_logger().handlers _get_library_root_logger().removeHandler(handler) def disable_propagation() -> None: """ Disable propagation of the library log outputs. Note that log propagation is disabled by default. 
""" _configure_library_root_logger() _get_library_root_logger().propagate = False def enable_propagation() -> None: """ Enable propagation of the library log outputs. Please disable the HuggingFace Transformers's default handler to prevent double logging if the root logger has been configured. """ _configure_library_root_logger() _get_library_root_logger().propagate = True def enable_explicit_format() -> None: """ Enable explicit formatting for every HuggingFace Transformers's logger. The explicit formatter is as follows: ``` [LEVELNAME|FILENAME|LINE NUMBER] TIME >> MESSAGE ``` All handlers currently bound to the root logger are affected by this method. """ handlers = _get_library_root_logger().handlers for handler in handlers: formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s") handler.setFormatter(formatter) def reset_format() -> None: """ Resets the formatting for HuggingFace Transformers's loggers. All handlers currently bound to the root logger are affected by this method. """ handlers = _get_library_root_logger().handlers for handler in handlers: handler.setFormatter(None) def warning_advice(self, *args, **kwargs): """ This method is identical to `logger.warning()`, but if env var TRANSFORMERS_NO_ADVISORY_WARNINGS=1 is set, this warning will not be printed """ no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False) if no_advisory_warnings: return self.warning(*args, **kwargs) logging.Logger.warning_advice = warning_advice @functools.lru_cache(None) def warning_once(self, *args, **kwargs): """ This method is identical to `logger.warning()`, but will emit the warning with the same message only once Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the cache. The assumption here is that all warning messages are unique across the code. If they aren't then need to switch to another type of cache that includes the caller frame information in the hashing function. """ self.warning(*args, **kwargs) logging.Logger.warning_once = warning_once class EmptyTqdm: """Dummy tqdm which doesn't do anything.""" def __init__(self, *args, **kwargs): # pylint: disable=unused-argument self._iterator = args[0] if args else None def __iter__(self): return iter(self._iterator) def __getattr__(self, _): """Return empty function.""" def empty_fn(*args, **kwargs): # pylint: disable=unused-argument return return empty_fn def __enter__(self): return self def __exit__(self, type_, value, traceback): return class _tqdm_cls: def __call__(self, *args, **kwargs): if _tqdm_active: return tqdm_lib.tqdm(*args, **kwargs) else: return EmptyTqdm(*args, **kwargs) def set_lock(self, *args, **kwargs): self._lock = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*args, **kwargs) def get_lock(self): if _tqdm_active: return tqdm_lib.tqdm.get_lock() tqdm = _tqdm_cls() def is_progress_bar_enabled() -> bool: """Return a boolean indicating whether tqdm progress bars are enabled.""" global _tqdm_active return bool(_tqdm_active) def enable_progress_bar(): """Enable tqdm progress bar.""" global _tqdm_active _tqdm_active = True hf_hub_utils.enable_progress_bars() def disable_progress_bar(): """Disable tqdm progress bar.""" global _tqdm_active _tqdm_active = False hf_hub_utils.disable_progress_bars()
transformers/src/transformers/utils/logging.py/0
{ "file_path": "transformers/src/transformers/utils/logging.py", "repo_id": "transformers", "token_count": 4263 }
396
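The record above is the central logging module of the `transformers` library; downstream code drives it through a handful of the helpers defined there. A minimal usage sketch (assuming a local `transformers` installation; the messages are illustrative):

```python
# Sketch of typical use of the helpers defined in transformers.utils.logging.
from transformers.utils import logging

logging.set_verbosity_info()       # shorthand for set_verbosity(logging.INFO)
logging.enable_explicit_format()   # [LEVELNAME|FILENAME:LINENO] TIME >> MESSAGE
logging.disable_progress_bar()     # silences tqdm bars (and huggingface_hub's)

logger = logging.get_logger("transformers")  # the library's root logger
logger.info("verbosity is now %d", logging.get_verbosity())
logger.warning_once("emitted at most once per unique message")
```

The default verbosity can also be set without code through the `TRANSFORMERS_VERBOSITY` environment variable (`detail`, `debug`, `info`, `warning`, `error`, `critical`), as handled by `_get_default_logging_level()` above.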
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import _LazyModule, OptionalDependencyNotAvailable, is_tokenizers_available {%- if "TensorFlow" in cookiecutter.generate_tensorflow_pytorch_and_flax %} from ...utils import is_tf_available {% endif %} {%- if "PyTorch" in cookiecutter.generate_tensorflow_pytorch_and_flax %} from ...utils import is_torch_available {% endif %} {%- if "Flax" in cookiecutter.generate_tensorflow_pytorch_and_flax %} from ...utils import is_flax_available {% endif %} _import_structure = { "configuration_{{cookiecutter.lowercase_modelname}}": ["{{cookiecutter.uppercase_modelname}}_PRETRAINED_CONFIG_ARCHIVE_MAP", "{{cookiecutter.camelcase_modelname}}Config"], "tokenization_{{cookiecutter.lowercase_modelname}}": ["{{cookiecutter.camelcase_modelname}}Tokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_{{cookiecutter.lowercase_modelname}}_fast"] = ["{{cookiecutter.camelcase_modelname}}TokenizerFast"] {%- if "PyTorch" in cookiecutter.generate_tensorflow_pytorch_and_flax %} {% if cookiecutter.is_encoder_decoder_model == "False" %} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_{{cookiecutter.lowercase_modelname}}"] = [ "{{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST", "{{cookiecutter.camelcase_modelname}}ForMaskedLM", "{{cookiecutter.camelcase_modelname}}ForCausalLM", "{{cookiecutter.camelcase_modelname}}ForMultipleChoice", "{{cookiecutter.camelcase_modelname}}ForQuestionAnswering", "{{cookiecutter.camelcase_modelname}}ForSequenceClassification", "{{cookiecutter.camelcase_modelname}}ForTokenClassification", "{{cookiecutter.camelcase_modelname}}Layer", "{{cookiecutter.camelcase_modelname}}Model", "{{cookiecutter.camelcase_modelname}}PreTrainedModel", "load_tf_weights_in_{{cookiecutter.lowercase_modelname}}", ] {% else %} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_{{cookiecutter.lowercase_modelname}}"] = [ "{{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST", "{{cookiecutter.camelcase_modelname}}ForConditionalGeneration", "{{cookiecutter.camelcase_modelname}}ForQuestionAnswering", "{{cookiecutter.camelcase_modelname}}ForSequenceClassification", "{{cookiecutter.camelcase_modelname}}ForCausalLM", "{{cookiecutter.camelcase_modelname}}Model", "{{cookiecutter.camelcase_modelname}}PreTrainedModel", ] {% endif %} {% endif %} {%- if "TensorFlow" in cookiecutter.generate_tensorflow_pytorch_and_flax %} {% if cookiecutter.is_encoder_decoder_model == "False" %} try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: 
_import_structure["modeling_tf_{{cookiecutter.lowercase_modelname}}"] = [ "TF_{{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST", "TF{{cookiecutter.camelcase_modelname}}ForMaskedLM", "TF{{cookiecutter.camelcase_modelname}}ForCausalLM", "TF{{cookiecutter.camelcase_modelname}}ForMultipleChoice", "TF{{cookiecutter.camelcase_modelname}}ForQuestionAnswering", "TF{{cookiecutter.camelcase_modelname}}ForSequenceClassification", "TF{{cookiecutter.camelcase_modelname}}ForTokenClassification", "TF{{cookiecutter.camelcase_modelname}}Layer", "TF{{cookiecutter.camelcase_modelname}}Model", "TF{{cookiecutter.camelcase_modelname}}PreTrainedModel", ] {% else %} try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_tf_{{cookiecutter.lowercase_modelname}}"] = [ "TF{{cookiecutter.camelcase_modelname}}ForConditionalGeneration", "TF{{cookiecutter.camelcase_modelname}}Model", "TF{{cookiecutter.camelcase_modelname}}PreTrainedModel", ] {% endif %} {% endif %} {%- if "Flax" in cookiecutter.generate_tensorflow_pytorch_and_flax %} {% if cookiecutter.is_encoder_decoder_model == "False" %} try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_flax_{{cookiecutter.lowercase_modelname}}"] = [ "Flax{{cookiecutter.camelcase_modelname}}ForMaskedLM", "Flax{{cookiecutter.camelcase_modelname}}ForCausalLM", "Flax{{cookiecutter.camelcase_modelname}}ForMultipleChoice", "Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering", "Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification", "Flax{{cookiecutter.camelcase_modelname}}ForTokenClassification", "Flax{{cookiecutter.camelcase_modelname}}Layer", "Flax{{cookiecutter.camelcase_modelname}}Model", "Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel", ] {% else %} try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_flax_{{cookiecutter.lowercase_modelname}}"] = [ "Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration", "Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering", "Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification", "Flax{{cookiecutter.camelcase_modelname}}Model", "Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel", ] {% endif %} {% endif %} if TYPE_CHECKING: from .configuration_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.uppercase_modelname}}_PRETRAINED_CONFIG_ARCHIVE_MAP, {{cookiecutter.camelcase_modelname}}Config from .tokenization_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.camelcase_modelname}}Tokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_{{cookiecutter.lowercase_modelname}}_fast import {{cookiecutter.camelcase_modelname}}TokenizerFast {%- if "PyTorch" in cookiecutter.generate_tensorflow_pytorch_and_flax %} {% if cookiecutter.is_encoder_decoder_model == "False" %} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_{{cookiecutter.lowercase_modelname}} import ( {{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST, {{cookiecutter.camelcase_modelname}}ForMaskedLM, {{cookiecutter.camelcase_modelname}}ForCausalLM, {{cookiecutter.camelcase_modelname}}ForMultipleChoice, 
{{cookiecutter.camelcase_modelname}}ForQuestionAnswering, {{cookiecutter.camelcase_modelname}}ForSequenceClassification, {{cookiecutter.camelcase_modelname}}ForTokenClassification, {{cookiecutter.camelcase_modelname}}Layer, {{cookiecutter.camelcase_modelname}}Model, {{cookiecutter.camelcase_modelname}}PreTrainedModel, load_tf_weights_in_{{cookiecutter.lowercase_modelname}}, ) {% else %} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_{{cookiecutter.lowercase_modelname}} import ( {{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST, {{cookiecutter.camelcase_modelname}}ForConditionalGeneration, {{cookiecutter.camelcase_modelname}}ForCausalLM, {{cookiecutter.camelcase_modelname}}ForQuestionAnswering, {{cookiecutter.camelcase_modelname}}ForSequenceClassification, {{cookiecutter.camelcase_modelname}}Model, {{cookiecutter.camelcase_modelname}}PreTrainedModel, ) {% endif %} {% endif %} {%- if "TensorFlow" in cookiecutter.generate_tensorflow_pytorch_and_flax %} {% if cookiecutter.is_encoder_decoder_model == "False" %} try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_{{cookiecutter.lowercase_modelname}} import ( TF_{{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST, TF{{cookiecutter.camelcase_modelname}}ForMaskedLM, TF{{cookiecutter.camelcase_modelname}}ForCausalLM, TF{{cookiecutter.camelcase_modelname}}ForMultipleChoice, TF{{cookiecutter.camelcase_modelname}}ForQuestionAnswering, TF{{cookiecutter.camelcase_modelname}}ForSequenceClassification, TF{{cookiecutter.camelcase_modelname}}ForTokenClassification, TF{{cookiecutter.camelcase_modelname}}Layer, TF{{cookiecutter.camelcase_modelname}}Model, TF{{cookiecutter.camelcase_modelname}}PreTrainedModel, ) {% else %} try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_{{cookiecutter.lowercase_modelname}} import ( TF{{cookiecutter.camelcase_modelname}}ForConditionalGeneration, TF{{cookiecutter.camelcase_modelname}}Model, TF{{cookiecutter.camelcase_modelname}}PreTrainedModel, ) {% endif %} {% endif %} {%- if "Flax" in cookiecutter.generate_tensorflow_pytorch_and_flax %} {% if cookiecutter.is_encoder_decoder_model == "False" %} try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_{{cookiecutter.lowercase_modelname}} import ( Flax{{cookiecutter.camelcase_modelname}}ForMaskedLM, Flax{{cookiecutter.camelcase_modelname}}ForCausalLM, Flax{{cookiecutter.camelcase_modelname}}ForMultipleChoice, Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering, Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification, Flax{{cookiecutter.camelcase_modelname}}ForTokenClassification, Flax{{cookiecutter.camelcase_modelname}}Layer, Flax{{cookiecutter.camelcase_modelname}}Model, Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel, ) {% else %} try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_{{cookiecutter.lowercase_modelname}} import ( Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration, Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering, Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification, Flax{{cookiecutter.camelcase_modelname}}Model, 
Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel, ) {% endif %} {% endif %} else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
transformers/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/__init__.py/0
{ "file_path": "transformers/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/__init__.py", "repo_id": "transformers", "token_count": 4961 }
397
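The record above is the cookiecutter template for a new model's `__init__.py`; every `{{cookiecutter.*}}` placeholder is replaced when the template is instantiated. Purely as an illustration (the model name is hypothetical, not output generated by the tooling), the start of the PyTorch encoder-only branch would render roughly as follows for `lowercase_modelname = "brand_new_bert"`, `camelcase_modelname = "BrandNewBert"`, `uppercase_modelname = "BRAND_NEW_BERT"`:

```python
# Hypothetical rendering of the template above for a model named "BrandNewBert".
_import_structure = {
    "configuration_brand_new_bert": [
        "BRAND_NEW_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BrandNewBertConfig",
    ],
    "tokenization_brand_new_bert": ["BrandNewBertTokenizer"],
}

# With torch available, the template then appends entries such as:
_import_structure["modeling_brand_new_bert"] = [
    "BRAND_NEW_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
    "BrandNewBertForMaskedLM",
    "BrandNewBertModel",
    "BrandNewBertPreTrainedModel",
]
```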
{ "modelname": "Template", "uppercase_modelname": "TEMPLATE", "lowercase_modelname": "template", "camelcase_modelname": "Template", "authors": "The HuggingFace Team", "checkpoint_identifier": "brand-new-bert-base-cased", "tokenizer_type": "Based on BERT", "generate_tensorflow_pytorch_and_flax": "PyTorch, TensorFlow and Flax", "is_encoder_decoder_model": "False" }
transformers/templates/adding_a_new_model/tests/encoder-bert-tokenizer.json/0
{ "file_path": "transformers/templates/adding_a_new_model/tests/encoder-bert-tokenizer.json", "repo_id": "transformers", "token_count": 148 }
398
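This JSON record is one of the fixture contexts used by the template tests; its keys are exactly the `cookiecutter.*` variables consumed by the `__init__.py` template shown earlier. The repo's add-new-model tooling drives the substitution through cookiecutter, but the mechanism is plain Jinja; the snippet below only illustrates that mapping and is not the project's own code:

```python
# Illustration only: render one template line with the fixture context above.
# The actual generator uses cookiecutter; jinja2 stands in for the same substitution.
from jinja2 import Template

context = {
    "modelname": "Template",
    "uppercase_modelname": "TEMPLATE",
    "lowercase_modelname": "template",
    "camelcase_modelname": "Template",
    "is_encoder_decoder_model": "False",
}

line = "from .configuration_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.camelcase_modelname}}Config"
print(Template(line).render(cookiecutter=context))
# -> from .configuration_template import TemplateConfig
```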
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import dataclasses import io import itertools import json import os import unittest from copy import deepcopy from functools import partial import datasets from parameterized import parameterized import tests.trainer.test_trainer import transformers from tests.trainer.test_trainer import TrainerIntegrationCommon # noqa from transformers import AutoModel, TrainingArguments, is_torch_available, logging from transformers.integrations.deepspeed import ( HfDeepSpeedConfig, is_deepspeed_available, unset_hf_deepspeed_config, ) from transformers.testing_utils import ( CaptureLogger, CaptureStd, CaptureStderr, LoggingLevel, TestCasePlus, backend_device_count, execute_subprocess_async, mockenv_context, require_deepspeed, require_optuna, require_torch_accelerator, require_torch_multi_accelerator, slow, torch_device, ) from transformers.trainer_utils import get_last_checkpoint, set_seed from transformers.utils import SAFE_WEIGHTS_NAME, is_torch_bf16_available_on_device if is_torch_available(): import torch from tests.trainer.test_trainer import ( # noqa RegressionModelConfig, RegressionPreTrainedModel, ) # hack to restore original logging level pre #21700 get_regression_trainer = partial(tests.trainer.test_trainer.get_regression_trainer, log_level="info") set_seed(42) # default torch.distributed port DEFAULT_MASTER_PORT = "10999" T5_SMALL = "google-t5/t5-small" T5_TINY = "patrickvonplaten/t5-tiny-random" GPT2_TINY = "sshleifer/tiny-gpt2" GPTJ_TINY = "hf-internal-testing/tiny-random-gptj" def load_json(path): with open(path) as f: return json.load(f) def get_master_port(real_launcher=False): """ When using a single gpu launcher emulation (i.e. not deepspeed or python -m torch.distributed) the issue is that once the port is tied it can't be used anywhere else outside of this process, since torch.dist doesn't free the port until the process exits. Therefore for the sake of being able to run both emulated launcher and normal launcher tests we need 2 distinct ports. This function will give the right port in the right context. For real launcher it'll give the base port, for emulated launcher it'll give the base port + 1. In both cases a string is returned. 
Args: `real_launcher`: whether a real launcher is going to be used, or the emulated one """ master_port_base = os.environ.get("DS_TEST_PORT", DEFAULT_MASTER_PORT) if not real_launcher: master_port_base = str(int(master_port_base) + 1) return master_port_base def require_deepspeed_aio(test_case): """ Decorator marking a test that requires deepspeed aio (nvme) """ if not is_deepspeed_available(): return unittest.skip("test requires deepspeed")(test_case) import deepspeed from deepspeed.ops.aio import AsyncIOBuilder if not deepspeed.ops.__compatible_ops__[AsyncIOBuilder.NAME]: return unittest.skip("test requires deepspeed async-io")(test_case) else: return test_case if is_deepspeed_available(): from deepspeed.utils import logger as deepspeed_logger # noqa from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint from transformers.integrations.deepspeed import deepspeed_config, is_deepspeed_zero3_enabled # noqa def get_launcher(distributed=False): # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) num_gpus = min(2, backend_device_count(torch_device)) if distributed else 1 master_port = get_master_port(real_launcher=True) return f"deepspeed --num_nodes 1 --num_gpus {num_gpus} --master_port {master_port}".split() ZERO2 = "zero2" ZERO3 = "zero3" FP16 = "fp16" BF16 = "bf16" HF_OPTIM = "hf_optim" HF_SCHEDULER = "hf_scheduler" DS_OPTIM = "ds_optim" DS_SCHEDULER = "ds_scheduler" optims = [HF_OPTIM, DS_OPTIM] schedulers = [HF_SCHEDULER, DS_SCHEDULER] stages = [ZERO2, ZERO3] if is_torch_bf16_available_on_device(torch_device): dtypes = [FP16, BF16] else: dtypes = [FP16] def parameterized_custom_name_func(func, param_num, param): # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args)) return f"{func.__name__}_{param_based_name}" # Cartesian-product of zero stages with models to test params = list(itertools.product(stages, dtypes)) params_with_optims_and_schedulers = list(itertools.product(stages, dtypes, optims, schedulers)) @require_deepspeed @require_torch_accelerator class CoreIntegrationDeepSpeed(TestCasePlus, TrainerIntegrationCommon): """ Testing non-Trainer DeepSpeed integration """ def setUp(self): super().setUp() master_port = get_master_port(real_launcher=False) self.dist_env_1_gpu = { "MASTER_ADDR": "localhost", "MASTER_PORT": master_port, "RANK": "0", "LOCAL_RANK": "0", "WORLD_SIZE": "1", } def tearDown(self): super().tearDown() # reset the ds config global so that tests state doesn't leak unset_hf_deepspeed_config() def test_init_zero3_fp16(self): # test that zero.Init() works correctly under zero3/fp16 ds_config = { "train_batch_size": 1, "zero_optimization": { "stage": 3, }, } dschf = HfDeepSpeedConfig(ds_config) self.assertTrue(dschf.is_zero3()) self.assertTrue(is_deepspeed_zero3_enabled()) with LoggingLevel(logging.INFO): with mockenv_context(**self.dist_env_1_gpu): logger = logging.get_logger("transformers.modeling_utils") with CaptureLogger(logger) as cl: AutoModel.from_pretrained(T5_TINY) self.assertIn("Detected DeepSpeed ZeRO-3", cl.out) # now remove zero optimization del ds_config["zero_optimization"] dschf = HfDeepSpeedConfig(ds_config) self.assertFalse(dschf.is_zero3()) 
self.assertFalse(is_deepspeed_zero3_enabled()) with LoggingLevel(logging.INFO): with mockenv_context(**self.dist_env_1_gpu): logger = logging.get_logger("transformers.modeling_utils") with CaptureLogger(logger) as cl: AutoModel.from_pretrained(T5_TINY) self.assertNotIn("Detected DeepSpeed ZeRO-3", cl.out) def test_init_zero3_missing_params(self): # test that zero.Init() for missing parameters works correctly under zero3 import deepspeed import torch from transformers.models.gpt2.modeling_gpt2 import GPT2PreTrainedModel class TinyGPT2WithUninitializedWeights(GPT2PreTrainedModel): def __init__(self, config): super().__init__(config) self.transformer = AutoModel.from_pretrained(GPT2_TINY, config=config) self.new_head = torch.nn.Linear(config.hidden_size, config.vocab_size, bias=True) def forward(self, *args, **kwargs): transformer_outputs = self.transformer(*args, **kwargs) hidden_states = transformer_outputs[0] return self.new_head(hidden_states).float() def _init_weights(self, module): super()._init_weights(module) if module is self.new_head: self.new_head.weight.data.fill_(-100.0) self.new_head.bias.data.fill_(+100.0) ds_config = { "train_batch_size": 1, "zero_optimization": { "stage": 3, }, } dschf = HfDeepSpeedConfig(ds_config) self.assertTrue(dschf.is_zero3()) self.assertTrue(is_deepspeed_zero3_enabled()) with LoggingLevel(logging.INFO): with mockenv_context(**self.dist_env_1_gpu): logger = logging.get_logger("transformers.modeling_utils") with CaptureLogger(logger) as cl: model = TinyGPT2WithUninitializedWeights.from_pretrained(GPT2_TINY) self.assertIn("Detected DeepSpeed ZeRO-3", cl.out) self.assertRegex(cl.out, r"newly initialized.*new_head\.bias.*new_head\.weight") with deepspeed.zero.GatheredParameters([model.new_head.weight, model.new_head.bias]): self.assertTrue( torch.allclose(model.new_head.weight, torch.tensor(-100.0, device=model.new_head.weight.device)), ) self.assertTrue( torch.allclose(model.new_head.bias, torch.tensor(+100.0, device=model.new_head.bias.device)), ) # now remove zero optimization del ds_config["zero_optimization"] dschf = HfDeepSpeedConfig(ds_config) self.assertFalse(dschf.is_zero3()) self.assertFalse(is_deepspeed_zero3_enabled()) with LoggingLevel(logging.INFO): with mockenv_context(**self.dist_env_1_gpu): logger = logging.get_logger("transformers.modeling_utils") with CaptureLogger(logger) as cl: model = TinyGPT2WithUninitializedWeights.from_pretrained(GPT2_TINY) self.assertNotIn("Detected DeepSpeed ZeRO-3", cl.out) self.assertRegex(cl.out, r"newly initialized.*new_head\.bias.*new_head\.weight") self.assertTrue( torch.allclose(model.new_head.weight, torch.tensor(-100.0, device=model.new_head.weight.device)), ) self.assertTrue( torch.allclose(model.new_head.bias, torch.tensor(+100.0, device=model.new_head.bias.device)), ) def test_arange_bf16(self): # Tests that configuring DeepSpeed with 16 bits does not cause float `torch.arange()` tensors to be cast down. # NOTE -- this assumes that the function calls have the following downcast-preventing pattern, i.e. # `torch.arange(...,dtype=torch.int64)` followed by a cast like `.to(torch.float32)`. 🚨 If this pattern is # NOT applied (e.g. `torch.arange(...,dtype=torch.float32)` is used), DeepSpeed can automatically cast it down # at init time. See https://github.com/huggingface/transformers/issues/28685 for more info. 
ds_config = { "train_batch_size": 1, "zero_optimization": { "stage": 3, }, "bf16": {"enabled": True}, } dschf = HfDeepSpeedConfig(ds_config) self.assertTrue(dschf.is_zero3()) self.assertTrue(is_deepspeed_zero3_enabled()) with LoggingLevel(logging.INFO): with mockenv_context(**self.dist_env_1_gpu): logger = logging.get_logger("transformers.modeling_utils") with CaptureLogger(logger) as cl: model = AutoModel.from_pretrained(GPTJ_TINY) self.assertIn("Detected DeepSpeed ZeRO-3", cl.out) # The model weights are in BF16 as per deepspeed config self.assertTrue(str(model.h[0].attn.q_proj.weight.dtype) == "torch.bfloat16") good_deepspeed_sin_cos = model.h[0].attn.embed_positions # Monkeypatches the function that creates RoPE embeddings using the INCORRECT torch.arange() pattern, and # then recreates the model def bad_deepspeed_create_sinusoidal_positions(num_pos: int, dim: int) -> torch.Tensor: inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.int64) / dim)) # Incorrect pattern here: torch.arange has dtype=torch.float32 as its argument, and it will automatically # converted to BF16 by DeepSpeed sinusoid_inp = torch.einsum("i , j -> i j", torch.arange(num_pos, dtype=inv_freq.dtype), inv_freq) return torch.cat((torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)), dim=1) good_deepspeed_create_sinusoidal_positions = transformers.models.gptj.modeling_gptj.create_sinusoidal_positions transformers.models.gptj.modeling_gptj.create_sinusoidal_positions = bad_deepspeed_create_sinusoidal_positions with LoggingLevel(logging.INFO): with mockenv_context(**self.dist_env_1_gpu): logger = logging.get_logger("transformers.modeling_utils") with CaptureLogger(logger) as cl: model = AutoModel.from_pretrained(GPTJ_TINY) self.assertIn("Detected DeepSpeed ZeRO-3", cl.out) self.assertTrue(str(model.h[0].attn.q_proj.weight.dtype) == "torch.bfloat16") bad_deepspeed_sin_cos = model.h[0].attn.embed_positions # Compares the two values: the two sets of values are different, and the correct one matches the torch # (i.e. outside DeepSpeed) version. 
good_torch_sin_cos = good_deepspeed_create_sinusoidal_positions( model.config.max_position_embeddings, model.config.rotary_dim ) self.assertFalse(torch.allclose(good_deepspeed_sin_cos, bad_deepspeed_sin_cos)) self.assertTrue(torch.allclose(good_torch_sin_cos, good_deepspeed_sin_cos.cpu())) # Finally, we can see that the incorrect pattern is okay on vanilla torch, demostrating that this issue is # exclusive to DeepSpeed bad_torch_sin_cos = bad_deepspeed_create_sinusoidal_positions( model.config.max_position_embeddings, model.config.rotary_dim ) self.assertTrue(torch.allclose(bad_torch_sin_cos, good_torch_sin_cos)) class TrainerIntegrationDeepSpeedWithCustomConfig(TestCasePlus): def setUp(self): super().setUp() args = TrainingArguments(".") self.n_epochs = args.num_train_epochs self.batch_size = args.train_batch_size master_port = get_master_port(real_launcher=False) self.dist_env_1_gpu = { "MASTER_ADDR": "localhost", "MASTER_PORT": master_port, "RANK": "0", "LOCAL_RANK": "0", "WORLD_SIZE": "1", } self.ds_config_file = { "zero2": f"{self.test_file_dir_str}/ds_config_zero2.json", "zero3": f"{self.test_file_dir_str}/ds_config_zero3.json", } # use self.get_config_dict(stage) to use these to ensure the original is not modified with io.open(self.ds_config_file[ZERO2], "r", encoding="utf-8") as f: config_zero2 = json.load(f) with io.open(self.ds_config_file[ZERO3], "r", encoding="utf-8") as f: config_zero3 = json.load(f) # The following setting slows things down, so don't enable it by default unless needed by a test. # It's in the file as a demo for users since we want everything to work out of the box even if slower. config_zero3["zero_optimization"]["stage3_gather_16bit_weights_on_model_save"] = False self.ds_config_dict = { "zero2": config_zero2, "zero3": config_zero3, } def tearDown(self): super().tearDown() # reset the ds config global so that tests state doesn't leak unset_hf_deepspeed_config() def get_config_dict(self, stage): # As some tests modify the dict, always make a copy return deepcopy(self.ds_config_dict[stage]) @require_deepspeed @require_torch_accelerator class TrainerIntegrationDeepSpeed(TrainerIntegrationDeepSpeedWithCustomConfig, TrainerIntegrationCommon): """ This class is for testing directly via get_regression_trainer It mixes in `TrainerIntegrationCommon` which already has a lot of helper validation methods which we can re-use here. Important: this class' setup can only work with a single gpu because it runs within the current pytest worker. For multi-gpu tests use TestDeepSpeedWithLauncher. Note: if any of the tests of this class get run there will be at least one gpu occupied by them until this pytest worker exits. This is because the gpu memory allocated by the cuda-kernels won't be released until this pytest worker exits. This may appear as some run-away tests if you watch `nvidia-smi` while other tests that fork new processes are run. So there will be one or two "stale" processes reported in `nvidia-smi`. This is not a bug. """ # --- These tests are enough to run on one of zero stages --- # def test_hf_ds_config_mismatch(self): ds_config = self.get_config_dict(ZERO2) # Purposefully configure these values to mismatch TrainingArguments values. 
# This currently doesn't cover all keys (but it could) per_device_train_batch_size = 2 ds_config["train_micro_batch_size_per_gpu"] = per_device_train_batch_size + 2 ds_config["train_batch_size"] = 1000 gradient_accumulation_steps = 2 ds_config["gradient_accumulation_steps"] = gradient_accumulation_steps + 2 max_grad_norm = 1.0 ds_config["gradient_clipping"] = max_grad_norm + 0.1 adam_beta1, adam_beta2 = 0.9, 0.99 ds_config["optimizer"]["params"]["betas"] = [adam_beta1 - 0.1, adam_beta2 - 0.1] fp16 = True ds_config["fp16"]["enabled"] = not fp16 keys = [ "per_device_train_batch_size", "train_batch_size", "gradient_accumulation_steps", "max_grad_norm", "betas", "fp16", ] with mockenv_context(**self.dist_env_1_gpu): trainer = get_regression_trainer( local_rank=0, fp16=fp16, deepspeed=ds_config, per_device_train_batch_size=per_device_train_batch_size, gradient_accumulation_steps=gradient_accumulation_steps, max_grad_norm=max_grad_norm, adam_beta1=adam_beta1, adam_beta2=adam_beta2, ) with self.assertRaises(Exception) as context: trainer.train() for key in keys: self.assertTrue( key in str(context.exception), f"{key} is not in the exception message:\n{context.exception}", ) # Test various combos # 1. DS scheduler + DS optimizer: this is already tested by most other tests # 2. HF scheduler + HF optimizer: # 3. DS scheduler + HF optimizer: # 4. HF scheduler + DS optimizer: def test_hf_scheduler_hf_optimizer(self): a = 0 with mockenv_context(**self.dist_env_1_gpu): ds_config_zero2_dict = self.get_config_dict(ZERO2) del ds_config_zero2_dict["optimizer"] # force default HF Trainer optimizer del ds_config_zero2_dict["scheduler"] # force default HF Trainer scheduler ds_config_zero2_dict["zero_optimization"]["offload_optimizer"]["device"] = "none" ds_config_zero2_dict["fp16"]["initial_scale_power"] = 1 # force optimizer on the first step trainer = get_regression_trainer(a=a, local_rank=0, fp16=True, deepspeed=ds_config_zero2_dict) trainer.train() new_a = trainer.model.a.item() self.assertNotEqual(new_a, a) def test_ds_scheduler_hf_optimizer(self): a = 0 with mockenv_context(**self.dist_env_1_gpu): ds_config_zero2_dict = self.get_config_dict(ZERO2) del ds_config_zero2_dict["optimizer"] # force default HF Trainer optimizer ds_config_zero2_dict["zero_optimization"]["offload_optimizer"]["device"] = "none" ds_config_zero2_dict["fp16"]["initial_scale_power"] = 1 # force optimizer on the first step trainer = get_regression_trainer(a=a, local_rank=0, fp16=True, deepspeed=ds_config_zero2_dict) trainer.train() new_a = trainer.model.a.item() self.assertNotEqual(new_a, a) def test_hf_scheduler_ds_optimizer(self): a = 0 with mockenv_context(**self.dist_env_1_gpu): ds_config_zero2_dict = self.get_config_dict(ZERO2) del ds_config_zero2_dict["scheduler"] # force default HF Trainer scheduler ds_config_zero2_dict["zero_optimization"]["offload_optimizer"]["device"] = "none" ds_config_zero2_dict["fp16"]["initial_scale_power"] = 1 # force optimizer on the first step trainer = get_regression_trainer(a=a, local_rank=0, fp16=True, deepspeed=ds_config_zero2_dict) trainer.train() new_a = trainer.model.a.item() self.assertNotEqual(new_a, a) @require_deepspeed_aio def test_stage3_nvme_offload(self): with mockenv_context(**self.dist_env_1_gpu): # this actually doesn't have to be on NVMe, any storage will do since this test only # runs a simple check that we can use some directory as if it were NVMe nvme_path = self.get_auto_remove_tmp_dir() nvme_config = {"device": "nvme", "nvme_path": nvme_path} ds_config_zero3_dict = 
self.get_config_dict(ZERO3) ds_config_zero3_dict["zero_optimization"]["offload_optimizer"] = nvme_config ds_config_zero3_dict["zero_optimization"]["offload_param"] = nvme_config trainer = get_regression_trainer(local_rank=0, fp16=True, deepspeed=ds_config_zero3_dict) with CaptureLogger(deepspeed_logger) as cl: trainer.train() self.assertIn("DeepSpeed info", cl.out, "expected DeepSpeed logger output but got none") @require_optuna def test_hyperparameter_search(self): with mockenv_context(**self.dist_env_1_gpu): ds_config_zero3_dict = self.get_config_dict(ZERO3) # hyperparameter_search requires model_init() to recreate the model for each trial def model_init(): config = RegressionModelConfig(a=0, b=0, double_output=False) model = RegressionPreTrainedModel(config) return model trainer = get_regression_trainer( local_rank=0, fp16=True, model_init=model_init, deepspeed=ds_config_zero3_dict, ) n_trials = 3 with CaptureLogger(deepspeed_logger) as cl: with CaptureStd() as cs: trainer.hyperparameter_search(direction="maximize", n_trials=n_trials) self.assertIn("DeepSpeed info", cl.out, "expected DeepSpeed logger output but got none") self.assertIn(f"Trial {n_trials-1} finished with value", cs.err, "expected hyperparameter_search output") self.assertIn("Best is trial", cs.err, "expected hyperparameter_search output") # --- These tests need to run on both zero stages --- # @parameterized.expand(params, name_func=parameterized_custom_name_func) def test_hf_optimizer_with_offload(self, stage, dtype): # non-DS optimizers can be used with ZERO-offload (as long as they have both CPU and GPU implementation (except LAMB)) ds_config_dict = self.get_config_dict(stage) del ds_config_dict["optimizer"] # force default HF Trainer optimizer # force cpu offload ds_config_dict["zero_optimization"]["offload_optimizer"]["device"] = "cpu" ds_config_dict["zero_force_ds_cpu_optimizer"] = False # offload is not efficient w/o CPUAdam with mockenv_context(**self.dist_env_1_gpu): kwargs = {"local_rank": 0, "deepspeed": ds_config_dict} kwargs[dtype] = True trainer = get_regression_trainer(**kwargs) with CaptureLogger(deepspeed_logger) as cl: trainer.train() self.assertIn("DeepSpeed info", cl.out, "expected DeepSpeed logger output but got none") @parameterized.expand(params, name_func=parameterized_custom_name_func) def test_fake_notebook_no_launcher(self, stage, dtype): # this setup emulates a notebook where a launcher needs to be emulated by hand # note that unittest resets sys.stdout each test, so `CaptureStd` will work here to capture # DeepSpeed log if this test happens to run first in this pytest worker. But it will fail if # it's run not as a first test as `sys.stdout` will no longer be the same. So we either have # to reset `deepspeed_logger.handlers[0].setStream(sys.stdout)` or directly capture from the deepspeed_logger. 
with mockenv_context(**self.dist_env_1_gpu): kwargs = {"local_rank": 0, "deepspeed": self.get_config_dict(stage)} kwargs[dtype] = True trainer = get_regression_trainer(**kwargs) with CaptureLogger(deepspeed_logger) as cl: trainer.train() self.assertIn("DeepSpeed info", cl.out, "expected DeepSpeed logger output but got none") @parameterized.expand(params, name_func=parameterized_custom_name_func) def test_early_get_last_lr(self, stage, dtype): # with deepspeed's fp16 and dynamic loss scale enabled the optimizer/scheduler steps may # not run for the first few dozen steps while loss scale is too large, and thus during # that time `get_last_lr` will fail if called during that warm up stage, # # setting `logging_steps=1` forces an early `trainer._maybe_log_save_evaluate()` which calls # `self.lr_scheduler.get_last_lr()` and originally it'd fail on the very first step. with mockenv_context(**self.dist_env_1_gpu): a = b = 0.0 kwargs = { "a": a, "b": b, "local_rank": 0, "train_len": 8, "deepspeed": self.get_config_dict(stage), "per_device_train_batch_size": 8, "logging_steps": 1, } kwargs[dtype] = True trainer = get_regression_trainer(**kwargs) trainer.train() post_train_a = trainer.model.a.item() # XXX: for some reason the following check fails with zero3/fp16 and any/bf16 - not a # broken but a different qualitative outcome - as if optimizer did run # oddly getting 1.0 for both a and b from 0.0 - there is a bug somewhere # print(trainer.model.a.item()) # print(trainer.model.b.item()) # need to investigate at some point if (stage == ZERO3 and dtype == FP16) or (dtype == BF16): return # it's enough that train didn't fail for this test, but we must check that # optimizer/scheduler didn't run (since if it did this test isn't testing the right thing) self.assertEqual(post_train_a, a) @parameterized.expand(params, name_func=parameterized_custom_name_func) def test_gradient_accumulation(self, stage, dtype): # this test measures that we get identical weights and similar loss with: # 1. per_device_train_batch_size=8, gradient_accumulation_steps=1 # 2. per_device_train_batch_size=4, gradient_accumulation_steps=2 # since the 2nd should produce the effective batch of 1st, with the same results # # I can get an identical loss for a small train_len=32, plus the power of the initial # dynamic loss scale value set to: # "fp16.initial_scale_power": 1 # plus having the same WarmupLR's warmup_min_lr == warmup_max_lr in the config file # but for some reason going to train_len=64 the weights, weights start to mismatch with this setup. 
# the culprit seems to be `initial_scale_power` - putting it back to its default 32 keeps the weights identical train_len = 64 a = b = 0.0 kwargs = { "a": a, "b": b, "local_rank": 0, "train_len": train_len, "deepspeed": self.get_config_dict(stage), } kwargs[dtype] = True with mockenv_context(**self.dist_env_1_gpu): no_grad_accum_trainer = get_regression_trainer( **kwargs, per_device_train_batch_size=16, gradient_accumulation_steps=1, ) no_grad_accum_result = no_grad_accum_trainer.train() no_grad_accum_loss = no_grad_accum_result.training_loss no_grad_accum_a = no_grad_accum_trainer.model.a.item() no_grad_accum_b = no_grad_accum_trainer.model.b.item() # make sure the optimizer kicked in - if it hasn't changed from the original value of a then make train_len bigger self.assertNotEqual(no_grad_accum_a, a) with mockenv_context(**self.dist_env_1_gpu): yes_grad_accum_trainer = get_regression_trainer( **kwargs, per_device_train_batch_size=4, gradient_accumulation_steps=4, ) yes_grad_accum_result = yes_grad_accum_trainer.train() yes_grad_accum_loss = yes_grad_accum_result.training_loss yes_grad_accum_a = yes_grad_accum_trainer.model.a.item() yes_grad_accum_b = yes_grad_accum_trainer.model.b.item() self.assertNotEqual(yes_grad_accum_a, a) # training with half the batch size but accumulation steps as 2 should give the same # weights, but sometimes get a slight difference still of 1e-6 self.assertAlmostEqual(no_grad_accum_a, yes_grad_accum_a, places=5) self.assertAlmostEqual(no_grad_accum_b, yes_grad_accum_b, places=5) # Relative difference. See the note above how to get identical loss on a small bs self.assertTrue((no_grad_accum_loss - yes_grad_accum_loss) / (no_grad_accum_loss + 1e-15) <= 1e-3) def check_saved_checkpoints_deepspeed(self, output_dir, freq, total, stage, dtype): # adapted from TrainerIntegrationCommon.check_saved_checkpoints file_list = [SAFE_WEIGHTS_NAME, "training_args.bin", "trainer_state.json", "config.json"] if stage == ZERO2: ds_file_list = ["mp_rank_00_model_states.pt"] elif stage == ZERO3: ds_file_list = ["zero_pp_rank_0_mp_rank_00_model_states.pt"] else: raise ValueError(f"unknown stage {stage}") if dtype == "bf16": ds_file_list.append("bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt") for step in range(freq, total, freq): checkpoint = os.path.join(output_dir, f"checkpoint-{step}") self.assertTrue(os.path.isdir(checkpoint), f"[{stage}] {checkpoint} dir is not found") # common files for filename in file_list: path = os.path.join(checkpoint, filename) self.assertTrue(os.path.isfile(path), f"[{stage}] {path} is not found") # ds files ds_path = os.path.join(checkpoint, f"global_step{step}") for filename in ds_file_list: # filename = os.path.join(path, filename) # print(filename) path = os.path.join(ds_path, filename) self.assertTrue(os.path.isfile(path), f"[{stage}] {path} is not found") @parameterized.expand(params, name_func=parameterized_custom_name_func) def test_save_checkpoints(self, stage, dtype): # adapted from TrainerIntegrationTest.test_save_checkpoints freq = 5 output_dir = self.get_auto_remove_tmp_dir() ds_config_dict = self.get_config_dict(stage) if dtype == FP16: ds_config_dict["fp16"]["initial_scale_power"] = 1 # force optimizer on the first step # XXX: if stage == ZERO3: ds_config_dict["zero_optimization"]["stage3_gather_16bit_weights_on_model_save"] = True # save checkpoints with mockenv_context(**self.dist_env_1_gpu): kwargs = { "output_dir": output_dir, "save_steps": freq, "deepspeed": ds_config_dict, } kwargs[dtype] = True trainer = 
get_regression_trainer(**kwargs) trainer.train() total = int(self.n_epochs * 64 / self.batch_size) self.check_saved_checkpoints_deepspeed(output_dir, freq, total, stage, dtype) @parameterized.expand(params, name_func=parameterized_custom_name_func) def test_can_resume_training_errors(self, stage, dtype): with mockenv_context(**self.dist_env_1_gpu): ds_config_dict = self.get_config_dict(stage) output_dir = self.get_auto_remove_tmp_dir() kwargs = {"output_dir": output_dir, "deepspeed": ds_config_dict} kwargs[dtype] = True trainer = get_regression_trainer(**kwargs) # 1. fail to find any checkpoint - due a fresh output_dir with self.assertRaises(Exception) as context: trainer.train(resume_from_checkpoint=True) self.assertTrue( "No valid checkpoint found in output directory" in str(context.exception), f"got exception: {context.exception}", ) # 2. fail to find a bogus checkpoint with self.assertRaises(Exception) as context: checkpoint = os.path.join(output_dir, "checkpoint-5") trainer.train(resume_from_checkpoint=f"{checkpoint}-bogus") @parameterized.expand(params_with_optims_and_schedulers, name_func=parameterized_custom_name_func) def test_can_resume_training_normal(self, stage, dtype, optim, scheduler): # adapted from TrainerIntegrationTest.test_can_resume_training # test normal resume for each stage separately, error-handling is tested in a different test # ToDo: Currently, hf_optim + hf_scheduler resumes with the correct states and # also has same losses for few steps but then slowly diverges. Need to figure it out. if optim == HF_OPTIM and scheduler == HF_SCHEDULER: return output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False) ds_config_dict = self.get_config_dict(stage) if dtype == FP16: ds_config_dict["fp16"]["initial_scale_power"] = 1 # force optimizer on the first step # XXX: if stage == ZERO3: ds_config_dict["zero_optimization"]["stage3_gather_16bit_weights_on_model_save"] = True if optim == HF_OPTIM: del ds_config_dict["optimizer"] if scheduler == HF_SCHEDULER: del ds_config_dict["scheduler"] kwargs = { "output_dir": output_dir, "train_len": 128, "save_steps": 5, "learning_rate": 0.1, "deepspeed": ds_config_dict, } kwargs[dtype] = True with mockenv_context(**self.dist_env_1_gpu): trainer = get_regression_trainer(**kwargs) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() state = dataclasses.asdict(trainer.state) checkpoint = os.path.join(output_dir, "checkpoint-5") # Reinitialize trainer trainer = get_regression_trainer(**kwargs) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) # Now check with a later checkpoint that it also works when we span over one epoch checkpoint = os.path.join(output_dir, "checkpoint-15") # Reinitialize trainer and load model trainer = get_regression_trainer(**kwargs) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) # Finally, should be able to resume with the same trainer/same deepspeed engine instance # XXX: but currently this not possible due DS bug: https://github.com/microsoft/DeepSpeed/issues/1612 # trainer.train(resume_from_checkpoint=checkpoint) # a workaround needs to be used that re-creates the deepspeed engine 
@parameterized.expand(params, name_func=parameterized_custom_name_func) def test_load_state_dict_from_zero_checkpoint(self, stage, dtype): # test that we can load fp32 weights directly from the zero checkpoint into the current model output_dir = self.get_auto_remove_tmp_dir() # "./xxx", after=False, before=False) ds_config_dict = self.get_config_dict(stage) kwargs = { "output_dir": output_dir, "train_len": 4, "per_device_train_batch_size": 4, "num_train_epochs": 1, "save_strategy": "steps", "save_steps": 1, "learning_rate": 0.1, "deepspeed": ds_config_dict, } kwargs[dtype] = True with mockenv_context(**self.dist_env_1_gpu): trainer = get_regression_trainer(**kwargs) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() state = dataclasses.asdict(trainer.state) checkpoint_dir = get_last_checkpoint(output_dir) model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) (a1, b1) = model.a.item(), model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) def test_config_object(self): # test that we can switch from zero2 to zero3 in the same process for example # test is_zero, etc. output_dir = self.get_auto_remove_tmp_dir() kwargs = {"output_dir": output_dir, "train_len": 8, "fp16": True} ds_config_zero3_dict = self.get_config_dict(ZERO3) ds_config_zero2_dict = self.get_config_dict(ZERO2) with mockenv_context(**self.dist_env_1_gpu): trainer = get_regression_trainer(deepspeed=ds_config_zero3_dict, **kwargs) self.assertTrue(is_deepspeed_zero3_enabled()) # test we can repeat that and with train this time trainer = get_regression_trainer(deepspeed=ds_config_zero3_dict, **kwargs) trainer.train() self.assertTrue(is_deepspeed_zero3_enabled()) # test zero3 is disabled trainer = get_regression_trainer(deepspeed=ds_config_zero2_dict, **kwargs) self.assertFalse(is_deepspeed_zero3_enabled()) # check config obj config = deepspeed_config() self.assertTrue(bool(config), "Deepspeed config should be accessible") # with accelerate integration below line is additionally required for this test to pass trainer.accelerator.state._reset_state() del trainer # now weakref should gc the global and we shouldn't get anything here config = deepspeed_config() self.assertFalse(is_deepspeed_zero3_enabled()) self.assertFalse(bool(config), "Deepspeed config should not be accessible") @parameterized.expand(params, name_func=parameterized_custom_name_func) def test_load_best_model(self, stage, dtype): # Test that forced deepspeed reinit doesn't break the model. 
the forced re-init after # loading the best model in Trainer is there to workaround this bug in Deepspeed # https://github.com/microsoft/DeepSpeed/issues/1612 # # The test is derived from a repro script submitted in this Issue: # https://github.com/huggingface/transformers/issues/17114 # # One additional feature of this test is that we use a non-AdamW optimizer to test that # deepspeed doesn't fallback to AdamW, which would prevent the optimizer states from loading # correctly from transformers import T5ForConditionalGeneration, T5Tokenizer, Trainer # noqa output_dir = self.get_auto_remove_tmp_dir() # "./xxx", after=False, before=False) ds_config_dict = self.get_config_dict(stage) del ds_config_dict["optimizer"] # will use HF Trainer optimizer del ds_config_dict["scheduler"] # will use HF Trainer scheduler ds_config_dict["zero_force_ds_cpu_optimizer"] = False # offload is not efficient w/o CPUAdam # must use this setting to get the reload path exercised ds_config_dict["zero_optimization"]["stage3_gather_16bit_weights_on_model_save"] = True with mockenv_context(**self.dist_env_1_gpu): args_dict = { "per_device_train_batch_size": 1, "per_device_eval_batch_size": 1, "gradient_accumulation_steps": 1, "learning_rate": 1e-4, "num_train_epochs": 1, "do_train": True, "do_eval": True, "optim": "adafactor", "evaluation_strategy": "steps", "eval_steps": 1, "save_strategy": "steps", "save_steps": 1, "load_best_model_at_end": True, "max_steps": 1, "deepspeed": ds_config_dict, "report_to": "none", } training_args = TrainingArguments(output_dir, **args_dict) tokenizer = T5Tokenizer.from_pretrained(T5_TINY) model = T5ForConditionalGeneration.from_pretrained(T5_TINY) def _add_eos_to_examples(example): example["input_text"] = f"question: {example['question']} context: {example['context']}" example["target_text"] = example["answers"]["text"][0] if len(example["answers"]["text"]) > 0 else "" return example def _convert_to_features(example_batch): input_encodings = tokenizer.batch_encode_plus( example_batch["input_text"], pad_to_max_length=True, max_length=512, truncation=True ) target_encodings = tokenizer.batch_encode_plus( example_batch["target_text"], pad_to_max_length=True, max_length=16, truncation=True ) encodings = { "input_ids": input_encodings["input_ids"], "attention_mask": input_encodings["attention_mask"], "labels": target_encodings["input_ids"], } return encodings def get_dataset(): data_file = str(self.tests_dir / "fixtures/tests_samples/SQUAD/sample.json") data_files = {"train": data_file, "validation": data_file} raw_datasets = datasets.load_dataset("json", data_files=data_files, field="data") train_dataset = raw_datasets["train"].map(_add_eos_to_examples).map(_convert_to_features, batched=True) valid_dataset = deepcopy(train_dataset) return train_dataset, valid_dataset train_dataset, eval_dataset = get_dataset() trainer = Trainer( model=model, tokenizer=tokenizer, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, ) trainer.train() # crash 1 was here trainer.evaluate() # crash 2 was here @slow @require_deepspeed @require_torch_accelerator class TestDeepSpeedWithLauncher(TestCasePlus): """This class is for testing via an external script - can do multiple gpus""" # Tests to devise # # # 1. 
predict_with_generate on multigpu - need to figure out how to give input sequences so that # the 2 gpus will generate prediction sequences that aren't of the same length - this is because # we had to code a special feature to sync the gpus when the predicted sequences aren't of the # same length. In general this will tested as a side-effect through a variety of other tests - # it'll simply hang trying to synchronize with other gpus if this problem is encountered. So as # long as we have a few full tests running on zero3 + predict_with_generate this should be # mostly covered. # # but there are 5 variations on beam search in `generate`- with identical code branched with `if # synced_gpus` # # 2. most tests should probably be run on both: zero2 and zero3 configs # @parameterized.expand(params, name_func=parameterized_custom_name_func) @require_torch_multi_accelerator def test_basic_distributed(self, stage, dtype): self.run_and_check(stage=stage, dtype=dtype, distributed=True) def test_do_eval_no_train(self): # testing only zero3 since zero2 makes no sense with inference self.run_and_check( stage=ZERO3, dtype=FP16, eval_steps=1, distributed=False, do_train=False, do_eval=True, ) @parameterized.expand(params, name_func=parameterized_custom_name_func) def test_fp32_non_distributed(self, stage, dtype): # real model needs too much GPU memory under stage2+fp32, so using tiny random model here - # therefore no quality checks, just basic completion checks are done self.run_and_check( stage=stage, dtype=dtype, model_name=T5_TINY, distributed=False, do_train=True, do_eval=True, quality_checks=False, fp32=True, ) @parameterized.expand(params, name_func=parameterized_custom_name_func) @require_torch_multi_accelerator def test_fp32_distributed(self, stage, dtype): # real model needs too much GPU memory under stage2+fp32, so using tiny random model here - # therefore no quality checks, just basic completion checks are done self.run_and_check( stage=stage, dtype=dtype, model_name=T5_TINY, distributed=True, do_train=True, do_eval=True, quality_checks=False, fp32=True, ) @parameterized.expand(params, name_func=parameterized_custom_name_func) def test_resume_train_not_from_ds_checkpoint(self, stage, dtype): # do normal training and then resume not from the deepspeed checkpoint but explicitly from # the saved model dir do_train = True do_eval = False kwargs = { "stage": stage, "dtype": dtype, "eval_steps": 1, "distributed": True, "do_train": do_train, "do_eval": do_eval, } # 1. normal training output_dir = self.run_and_check(**kwargs) # 2. now resume explicitly from the saved weights, by passing --model_name_or_path output_dir # - i.e. 
the same path the model was saved to in step 1 output_dir = self.run_trainer(**kwargs, model_name=output_dir) self.do_checks(output_dir, do_train=do_train, do_eval=do_eval) @parameterized.expand(["bf16", "fp16", "fp32"]) @require_torch_multi_accelerator def test_inference(self, dtype): if dtype == "bf16" and not is_torch_bf16_available_on_device(torch_device): self.skipTest("test requires bfloat16 hardware support") # this is just inference, so no optimizer should be loaded # it only works for z3 (makes no sense with z1-z2) fp32 = True if dtype == "fp32" else False self.run_and_check( stage=ZERO3, dtype=FP16, model_name=T5_TINY, distributed=True, do_train=False, do_eval=True, quality_checks=False, fp32=fp32, ) def do_checks(self, output_dir, do_train=True, do_eval=True, quality_checks=True): if do_train: train_metrics = load_json(os.path.join(output_dir, "train_results.json")) self.assertIn("train_samples_per_second", train_metrics) if quality_checks: self.assertGreater(train_metrics["train_samples_per_second"], 0.5) if do_eval: eval_metrics = load_json(os.path.join(output_dir, "eval_results.json")) self.assertIn("eval_bleu", eval_metrics) if quality_checks: self.assertGreater(eval_metrics["eval_bleu"], 1) # XXX: need to do better validation beyond just that the run was successful def run_and_check( self, stage, dtype, model_name: str = T5_SMALL, eval_steps: int = 10, distributed: bool = True, do_train: bool = True, do_eval: bool = True, quality_checks: bool = True, fp32: bool = False, extra_args_str: str = None, remove_args_str: str = None, ): # we are doing quality testing so using a small real model output_dir = self.run_trainer( stage=stage, dtype=dtype, model_name=model_name, eval_steps=eval_steps, num_train_epochs=1, do_train=do_train, do_eval=do_eval, distributed=distributed, fp32=fp32, extra_args_str=extra_args_str, remove_args_str=remove_args_str, ) self.do_checks(output_dir, do_train=do_train, do_eval=do_eval, quality_checks=quality_checks) return output_dir def run_trainer( self, stage: str, dtype: str, model_name: str, eval_steps: int = 10, num_train_epochs: int = 1, do_train: bool = False, do_eval: bool = True, distributed: bool = True, fp32: bool = False, extra_args_str: str = None, remove_args_str: str = None, ): max_len = 32 data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro" output_dir = self.get_auto_remove_tmp_dir() args = f""" --model_name_or_path {model_name} --train_file {data_dir}/train.json --validation_file {data_dir}/val.json --output_dir {output_dir} --overwrite_output_dir --max_source_length {max_len} --max_target_length {max_len} --val_max_target_length {max_len} --warmup_steps 8 --predict_with_generate --save_steps 0 --eval_steps {eval_steps} --group_by_length --label_smoothing_factor 0.1 --source_lang en --target_lang ro --report_to none """.split() args.extend(["--source_prefix", '"translate English to Romanian: "']) if not fp32: args.extend([f"--{dtype}"]) actions = 0 if do_train: actions += 1 args.extend( f""" --do_train --num_train_epochs {str(num_train_epochs)} --max_train_samples 16 --per_device_train_batch_size 2 --learning_rate 3e-3 """.split() ) if do_eval: actions += 1 args.extend( """ --do_eval --max_eval_samples 16 --per_device_eval_batch_size 2 """.split() ) assert actions > 0, "need at least do_train or do_eval for the test to run" if extra_args_str is not None: args.extend(extra_args_str.split()) # currently only works for bool args if remove_args_str is not None: remove_args = remove_args_str.split() args = [x for x in args
if x not in remove_args] ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_{stage}.json".split() script = [f"{self.examples_dir_str}/pytorch/translation/run_translation.py"] launcher = get_launcher(distributed) cmd = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(cmd, env=self.get_env()) return output_dir @parameterized.expand(params, name_func=parameterized_custom_name_func) def test_clm(self, stage, dtype): # this test exercises model.resize_token_embeddings() which requires param gathering outside # of forward - it's not used by `run_translation.py`, but it is in `run_clm.py` data_dir = self.tests_dir / "fixtures" output_dir = self.get_auto_remove_tmp_dir() args = f""" --model_name_or_path {GPT2_TINY} --train_file {data_dir}/sample_text.txt --validation_file {data_dir}/sample_text.txt --output_dir {output_dir} --overwrite_output_dir --do_train --do_eval --max_train_samples 16 --max_eval_samples 16 --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --num_train_epochs 1 --warmup_steps 8 --block_size 64 --report_to none """.split() args.extend([f"--{dtype}"]) ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_{stage}.json".split() script = [f"{self.examples_dir_str}/pytorch/language-modeling/run_clm.py"] launcher = get_launcher(distributed=True) cmd = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(cmd, env=self.get_env()) def test_clm_from_config_zero3_fp16(self): # this test exercises AutoModel.from_config(config) - to ensure zero.Init is called data_dir = self.tests_dir / "fixtures" output_dir = self.get_auto_remove_tmp_dir() args = f""" --model_type gpt2 --tokenizer_name {GPT2_TINY} --train_file {data_dir}/sample_text.txt --validation_file {data_dir}/sample_text.txt --output_dir {output_dir} --overwrite_output_dir --do_train --max_train_samples 4 --per_device_train_batch_size 2 --num_train_epochs 1 --warmup_steps 8 --block_size 8 --fp16 --report_to none """.split() ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_zero3.json".split() script = [f"{self.examples_dir_str}/pytorch/language-modeling/run_clm.py"] launcher = get_launcher(distributed=True) cmd = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die with CaptureStderr() as cs: execute_subprocess_async(cmd, env=self.get_env()) self.assertIn("Detected DeepSpeed ZeRO-3", cs.err)
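# Illustrative sketch, not part of the original test file: the tests above point
# "--deepspeed" at ds_config_zero{2,3}.json fixtures whose exact contents are not
# shown here. A minimal ZeRO-3 config of the kind the HF Trainer integration
# accepts could look roughly like the dict below; the "auto" placeholders are
# filled in by the Trainer from its TrainingArguments.
EXAMPLE_DS_CONFIG_ZERO3_SKETCH = {
    "fp16": {"enabled": "auto"},
    "zero_optimization": {
        "stage": 3,
        "stage3_gather_16bit_weights_on_model_save": True,
    },
    "gradient_accumulation_steps": "auto",
    "train_micro_batch_size_per_gpu": "auto",
}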
transformers/tests/deepspeed/test_deepspeed.py/0
{ "file_path": "transformers/tests/deepspeed/test_deepspeed.py", "repo_id": "transformers", "token_count": 25323 }
399
# coding=utf-8 # Copyright 2020 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class ConstraintTest(unittest.TestCase): def test_input_types(self): # For consistency across different places the DisjunctiveConstraint is called, # dc.token_ids is a list of integers. It is also initialized only by integers. cset = [[1, 2, 4], [1, 2, 3, 4]] dc = DisjunctiveConstraint(cset) self.assertTrue(isinstance(dc.token_ids, list)) with self.assertRaises(ValueError): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]])) with self.assertRaises(ValueError): DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])]) def test_check_illegal_input(self): # We can't have constraints that are complete subsets of another. This leads to a perverse # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint? # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it). cset = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(ValueError): DisjunctiveConstraint(cset) # fails here def test_example_progression(self): cset = [[1, 2, 3], [1, 2, 4]] dc = DisjunctiveConstraint(cset) stepped, completed, reset = dc.update(1) desired = stepped is True and completed is False and reset is False self.assertTrue(desired) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1]) stepped, completed, reset = dc.update(2) desired = stepped is True and completed is False and reset is False self.assertTrue(desired) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1, 2]) stepped, completed, reset = dc.update(3) desired = stepped is True and completed is True and reset is False self.assertTrue(desired) self.assertTrue(dc.completed) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3]) def test_example_progression_unequal_three_mid_and_reset(self): cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] dc = DisjunctiveConstraint(cset) stepped, completed, reset = dc.update(1) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1]) stepped, completed, reset = dc.update(2) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1, 2]) stepped, completed, reset = dc.update(4) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1, 2, 4]) stepped, completed, reset = dc.update(5) self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5]) dc.reset() stepped, completed, reset = dc.update(1) self.assertTrue(not dc.completed) self.assertTrue(dc.remaining() == 3) self.assertTrue(dc.current_seq == [1]) stepped, completed, reset = dc.update(2) self.assertTrue(not dc.completed) self.assertTrue(dc.remaining() == 2) self.assertTrue(dc.current_seq == [1, 2]) stepped, completed, reset = dc.update(5) self.assertTrue(dc.completed) # Completed! self.assertTrue(dc.remaining() == 0) self.assertTrue(dc.current_seq == [1, 2, 5])
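# Illustrative sketch, not part of the original test file: in normal use a
# DisjunctiveConstraint is handed to constrained beam search through the
# `constraints` argument of `model.generate`. The checkpoint name is a
# placeholder and the helper is never called by these tests.
def _example_disjunctive_constraint_usage():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small")
    model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-small")

    # the generated text must contain one of these alternatives; as tested above,
    # the alternatives may not be token-level subsets of one another
    alternatives = tokenizer(["Kaffee", "Tee"], add_special_tokens=False).input_ids
    constraint = DisjunctiveConstraint(alternatives)

    inputs = tokenizer("translate English to German: Would you like coffee or tea?", return_tensors="pt")
    return model.generate(**inputs, constraints=[constraint], num_beams=4)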
transformers/tests/generation/test_beam_constraints.py/0
{ "file_path": "transformers/tests/generation/test_beam_constraints.py", "repo_id": "transformers", "token_count": 1723 }
400
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import AlbertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_MODEL_FOR_PRETRAINING_MAPPING from transformers.models.albert.modeling_tf_albert import ( TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFAlbertForMaskedLM, TFAlbertForMultipleChoice, TFAlbertForPreTraining, TFAlbertForQuestionAnswering, TFAlbertForSequenceClassification, TFAlbertForTokenClassification, TFAlbertModel, ) class TFAlbertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=16, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_input_mask = True self.use_token_type_ids = True self.use_labels = True self.vocab_size = 99 self.embedding_size = 16 self.hidden_size = 32 self.num_hidden_layers = 2 self.num_attention_heads = 4 self.intermediate_size = 37 self.hidden_act = "gelu" self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 16 self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.scope = None def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = AlbertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, embedding_size=self.embedding_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, 
attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def create_and_check_albert_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFAlbertModel(config=config) # inputs = {'input_ids': input_ids, # 'attention_mask': input_mask, # 'token_type_ids': token_type_ids} # sequence_output, pooled_output = model(**inputs) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_albert_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFAlbertForPreTraining(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, self.num_labels)) def create_and_check_albert_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFAlbertForMaskedLM(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_albert_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFAlbertForSequenceClassification(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_albert_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFAlbertForQuestionAnswering(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_albert_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = TFAlbertForMultipleChoice(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, 
"token_type_ids": multiple_choice_token_type_ids, } result = model(inputs) self.parent.assertListEqual(list(result["logits"].shape), [self.batch_size, self.num_choices]) def create_and_check_albert_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFAlbertForTokenClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertListEqual(list(result["logits"].shape), [self.batch_size, self.seq_length, self.num_labels]) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class TFAlbertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFAlbertModel, TFAlbertForPreTraining, TFAlbertForMaskedLM, TFAlbertForSequenceClassification, TFAlbertForQuestionAnswering, TFAlbertForTokenClassification, TFAlbertForMultipleChoice, ) if is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": TFAlbertModel, "fill-mask": TFAlbertForMaskedLM, "question-answering": TFAlbertForQuestionAnswering, "text-classification": TFAlbertForSequenceClassification, "token-classification": TFAlbertForTokenClassification, "zero-shot": TFAlbertForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False # special case for ForPreTraining model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING): inputs_dict["sentence_order_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) return inputs_dict def setUp(self): self.model_tester = TFAlbertModelTester(self) self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_albert_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_albert_model(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_albert_for_pretraining(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_albert_for_masked_lm(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_albert_for_multiple_choice(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_albert_for_sequence_classification(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_albert_for_question_answering(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = 
TFAlbertModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_tf class TFAlbertModelIntegrationTest(unittest.TestCase): @slow def test_inference_masked_lm(self): model = TFAlbertForPreTraining.from_pretrained("albert/albert-base-v2") input_ids = tf.constant([[0, 1, 2, 3, 4, 5]]) output = model(input_ids)[0] expected_shape = [1, 6, 30000] self.assertEqual(output.shape, expected_shape) expected_slice = tf.constant( [ [ [4.595668, 0.74462754, -1.818147], [4.5954347, 0.7454184, -1.8188258], [4.5954905, 0.7448235, -1.8182316], ] ] ) tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
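# Illustrative sketch, not part of the original test file: outside the tester
# harness above, the same "tiny config -> model -> shape check" round trip looks
# roughly like this; the helper is never called by these tests.
def _example_tiny_tf_albert_forward():
    config = AlbertConfig(
        vocab_size=99,
        embedding_size=16,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
    )
    model = TFAlbertModel(config)
    outputs = model(tf.constant([[1, 2, 3, 4, 5]]))
    return outputs.last_hidden_state.shape  # expected: (1, 5, 32)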
transformers/tests/models/albert/test_modeling_tf_albert.py/0
{ "file_path": "transformers/tests/models/albert/test_modeling_tf_albert.py", "repo_id": "transformers", "token_count": 5943 }
401
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeq2SeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPT2LMHeadModel, TFRobertaForMaskedLM, TFT5ForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class NewModelConfig(BertConfig): model_type = "new-model" if is_tf_available(): class TFNewModel(TFBertModel): config_class = NewModelConfig @require_tf class TFAutoModelTest(unittest.TestCase): @slow def test_model_from_pretrained(self): model_name = "google-bert/bert-base-cased" config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = TFAutoModel.from_pretrained(model_name) self.assertIsNotNone(model) self.assertIsInstance(model, TFBertModel) @slow def test_model_for_pretraining_from_pretrained(self): model_name = "google-bert/bert-base-cased" config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = TFAutoModelForPreTraining.from_pretrained(model_name) self.assertIsNotNone(model) self.assertIsInstance(model, TFBertForPreTraining) @slow def test_model_for_causal_lm(self): for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, GPT2Config) model = TFAutoModelForCausalLM.from_pretrained(model_name) model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True) 
self.assertIsNotNone(model) self.assertIsInstance(model, TFGPT2LMHeadModel) @slow def test_lmhead_model_from_pretrained(self): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = TFAutoModelWithLMHead.from_pretrained(model_name) self.assertIsNotNone(model) self.assertIsInstance(model, TFBertForMaskedLM) @slow def test_model_for_masked_lm(self): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = TFAutoModelForMaskedLM.from_pretrained(model_name) model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True) self.assertIsNotNone(model) self.assertIsInstance(model, TFBertForMaskedLM) @slow def test_model_for_encoder_decoder_lm(self): for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, T5Config) model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name) model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True) self.assertIsNotNone(model) self.assertIsInstance(model, TFT5ForConditionalGeneration) @slow def test_sequence_classification_model_from_pretrained(self): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google-bert/bert-base-uncased"]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = TFAutoModelForSequenceClassification.from_pretrained(model_name) self.assertIsNotNone(model) self.assertIsInstance(model, TFBertForSequenceClassification) @slow def test_question_answering_model_from_pretrained(self): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google-bert/bert-base-uncased"]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = TFAutoModelForQuestionAnswering.from_pretrained(model_name) self.assertIsNotNone(model) self.assertIsInstance(model, TFBertForQuestionAnswering) @slow @require_tensorflow_probability def test_table_question_answering_model_from_pretrained(self): for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, TapasConfig) model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name) model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained( model_name, output_loading_info=True ) self.assertIsNotNone(model) self.assertIsInstance(model, TFTapasForQuestionAnswering) def test_from_pretrained_identifier(self): model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER) self.assertIsInstance(model, TFBertForMaskedLM) self.assertEqual(model.num_parameters(), 14410) self.assertEqual(model.num_parameters(only_trainable=True), 14410) def test_from_identifier_from_model_type(self): model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER) self.assertIsInstance(model, TFRobertaForMaskedLM) self.assertEqual(model.num_parameters(), 14410) self.assertEqual(model.num_parameters(only_trainable=True), 14410) def test_from_pretrained_with_tuple_values(self): # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel model 
= TFAutoModel.from_pretrained("sgugger/funnel-random-tiny") self.assertIsInstance(model, TFFunnelModel) config = copy.deepcopy(model.config) config.architectures = ["FunnelBaseModel"] model = TFAutoModel.from_config(config) model.build_in_name_scope() self.assertIsInstance(model, TFFunnelBaseModel) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) model = TFAutoModel.from_pretrained(tmp_dir) self.assertIsInstance(model, TFFunnelBaseModel) def test_new_model_registration(self): try: AutoConfig.register("new-model", NewModelConfig) auto_classes = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for auto_class in auto_classes: with self.subTest(auto_class.__name__): # Wrong config class will raise an error with self.assertRaises(ValueError): auto_class.register(BertConfig, TFNewModel) auto_class.register(NewModelConfig, TFNewModel) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(ValueError): auto_class.register(BertConfig, TFBertModel) # Now that the config is registered, it can be used as any other config with the auto-API tiny_config = BertModelTester(self).get_config() config = NewModelConfig(**tiny_config.to_dict()) model = auto_class.from_config(config) model.build_in_name_scope() self.assertIsInstance(model, TFNewModel) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) new_model = auto_class.from_pretrained(tmp_dir) self.assertIsInstance(new_model, TFNewModel) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def test_repo_not_found(self): with self.assertRaisesRegex( EnvironmentError, "bert-base is not a local folder and is not a valid model identifier" ): _ = TFAutoModel.from_pretrained("bert-base") def test_revision_not_found(self): with self.assertRaisesRegex( EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): _ = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa") def test_model_file_not_found(self): with self.assertRaisesRegex( EnvironmentError, "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin", ): _ = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model") def test_model_from_pt_suggestion(self): with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"): _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") def test_cached_model_has_minimum_calls_to_head(self): # Make sure we have cached the model. 
_ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert") with RequestCounter() as counter: _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert") self.assertEqual(counter["GET"], 0) self.assertEqual(counter["HEAD"], 1) self.assertEqual(counter.total_calls, 1) # With a sharded checkpoint _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded") with RequestCounter() as counter: _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded") self.assertEqual(counter["GET"], 0) self.assertEqual(counter["HEAD"], 1) self.assertEqual(counter.total_calls, 1)
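# Illustrative sketch, not part of the original test file: the registration test
# above reduces to the pattern below. NewModelConfig and TFNewModel are the
# classes defined at the top of this module; the helper is never called by these
# tests and would only work with TensorFlow installed.
def _example_register_custom_tf_auto_class():
    AutoConfig.register("new-model", NewModelConfig)
    TFAutoModel.register(NewModelConfig, TFNewModel)

    config = NewModelConfig(vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4)
    # resolves to TFNewModel through the freshly registered mapping
    return TFAutoModel.from_config(config)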
transformers/tests/models/auto/test_modeling_tf_auto.py/0
{ "file_path": "transformers/tests/models/auto/test_modeling_tf_auto.py", "repo_id": "transformers", "token_count": 5886 }
402
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import pickle import unittest from transformers import AutoTokenizer from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.models.bert_japanese.tokenization_bert_japanese import ( VOCAB_FILES_NAMES, BertJapaneseTokenizer, CharacterTokenizer, JumanppTokenizer, MecabTokenizer, SudachiTokenizer, WordpieceTokenizer, ) from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi_projection from ...test_tokenization_common import TokenizerTesterMixin @custom_tokenizers class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "cl-tohoku/bert-base-japanese" tokenizer_class = BertJapaneseTokenizer test_rust_tokenizer = False space_between_special_tokens = True def setUp(self): super().setUp() vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは", "世界", "##世界", "、", "##、", "。", "##。", "アップルストア", "外国", "##人", "参政", "##権", "此れ", "は", "猫", "です", ] self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def get_input_output_texts(self, tokenizer): input_text = "こんにちは、世界。 \nこんばんは、世界。" output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。" return input_text, output_text def get_clean_sequence(self, tokenizer): input_text, output_text = self.get_input_output_texts(tokenizer) ids = tokenizer.encode(output_text, add_special_tokens=False) text = tokenizer.decode(ids, clean_up_tokenization_spaces=False) return text, ids def test_pretokenized_inputs(self): pass # TODO add if relevant def test_maximum_encoding_length_pair_input(self): pass # TODO add if relevant def test_maximum_encoding_length_single_input(self): pass # TODO add if relevant def test_full_tokenizer(self): tokenizer = self.tokenizer_class(self.vocab_file) tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。") self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14]) def test_pickle_mecab_tokenizer(self): tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab") self.assertIsNotNone(tokenizer) text = "こんにちは、世界。\nこんばんは、世界。" tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14]) filename = os.path.join(self.tmpdirname, "tokenizer.bin") with open(filename, "wb") as handle: pickle.dump(tokenizer, handle) with open(filename, "rb") as handle: tokenizer_new = pickle.load(handle) tokens_loaded = tokenizer_new.tokenize(text) self.assertListEqual(tokens, tokens_loaded) def test_mecab_full_tokenizer_with_mecab_kwargs(self): tokenizer = self.tokenizer_class( 
self.vocab_file, word_tokenizer_type="mecab", mecab_kwargs={"mecab_dic": "ipadic"} ) text = "アップルストア" tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, ["アップルストア"]) def test_mecab_tokenizer_ipadic(self): tokenizer = MecabTokenizer(mecab_dic="ipadic") self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"], ) def test_mecab_tokenizer_unidic_lite(self): try: tokenizer = MecabTokenizer(mecab_dic="unidic_lite") except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"], ) def test_mecab_tokenizer_unidic(self): try: import unidic self.assertTrue( os.path.isdir(unidic.DICDIR), "The content of unidic was not downloaded. Run `python -m unidic download` before running this test case. Note that this requires 2.1GB on disk.", ) tokenizer = MecabTokenizer(mecab_dic="unidic") except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"], ) def test_mecab_tokenizer_lower(self): tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic") self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"], ) def test_mecab_tokenizer_with_option(self): try: tokenizer = MecabTokenizer( do_lower_case=True, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic" ) except RuntimeError: # if dict doesn't exist in the system, previous code raises this error. return self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"], ) def test_mecab_tokenizer_no_normalize(self): tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic") self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"], ) @require_sudachi_projection def test_pickle_sudachi_tokenizer(self): tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi") self.assertIsNotNone(tokenizer) text = "こんにちは、世界。\nこんばんは、世界。" tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14]) filename = os.path.join(self.tmpdirname, "tokenizer.bin") with open(filename, "wb") as handle: pickle.dump(tokenizer, handle) with open(filename, "rb") as handle: tokenizer_new = pickle.load(handle) tokens_loaded = tokenizer_new.tokenize(text) self.assertListEqual(tokens, tokens_loaded) @require_sudachi_projection def test_sudachi_tokenizer_core(self): tokenizer = SudachiTokenizer(sudachi_dict_type="core") # fmt: off self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "], ) # fmt: on @require_sudachi_projection def test_sudachi_tokenizer_split_mode_A(self): tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A") self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"]) @require_sudachi_projection def test_sudachi_tokenizer_split_mode_B(self): tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B") 
self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"]) @require_sudachi_projection def test_sudachi_tokenizer_split_mode_C(self): tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C") self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"]) @require_sudachi_projection def test_sudachi_full_tokenizer_with_sudachi_kwargs_split_mode_B(self): tokenizer = self.tokenizer_class( self.vocab_file, word_tokenizer_type="sudachi", sudachi_kwargs={"sudachi_split_mode": "B"} ) self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "##人", "参政", "##権"]) @require_sudachi_projection def test_sudachi_tokenizer_projection(self): tokenizer = SudachiTokenizer( sudachi_dict_type="core", sudachi_split_mode="A", sudachi_projection="normalized_nouns" ) self.assertListEqual(tokenizer.tokenize("これはねこです。"), ["此れ", "は", "猫", "です", "。"]) @require_sudachi_projection def test_sudachi_full_tokenizer_with_sudachi_kwargs_sudachi_projection(self): tokenizer = self.tokenizer_class( self.vocab_file, word_tokenizer_type="sudachi", sudachi_kwargs={"sudachi_projection": "normalized_nouns"} ) self.assertListEqual(tokenizer.tokenize("これはねこです。"), ["此れ", "は", "猫", "です", "。"]) @require_sudachi_projection def test_sudachi_tokenizer_lower(self): tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core") self.assertListEqual(tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),[" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "]) # fmt: skip @require_sudachi_projection def test_sudachi_tokenizer_no_normalize(self): tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core") self.assertListEqual(tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),[" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "]) # fmt: skip @require_sudachi_projection def test_sudachi_tokenizer_trim_whitespace(self): tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core") self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"], ) @require_jumanpp def test_pickle_jumanpp_tokenizer(self): tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp") self.assertIsNotNone(tokenizer) text = "こんにちは、世界。\nこんばんは、世界。" tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14]) filename = os.path.join(self.tmpdirname, "tokenizer.bin") with open(filename, "wb") as handle: pickle.dump(tokenizer, handle) with open(filename, "rb") as handle: tokenizer_new = pickle.load(handle) tokens_loaded = tokenizer_new.tokenize(text) self.assertListEqual(tokens, tokens_loaded) @require_jumanpp def test_jumanpp_tokenizer(self): tokenizer = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"]) # fmt: skip @require_jumanpp def test_jumanpp_tokenizer_lower(self): tokenizer = JumanppTokenizer(do_lower_case=True) self.assertListEqual(tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],) # fmt: skip @require_jumanpp def 
test_jumanpp_tokenizer_no_normalize(self): tokenizer = JumanppTokenizer(normalize_text=False) self.assertListEqual(tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],) # fmt: skip @require_jumanpp def test_jumanpp_tokenizer_trim_whitespace(self): tokenizer = JumanppTokenizer(trim_whitespace=True) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"], ) @require_jumanpp def test_jumanpp_full_tokenizer_with_jumanpp_kwargs_trim_whitespace(self): tokenizer = self.tokenizer_class( self.vocab_file, word_tokenizer_type="jumanpp", jumanpp_kwargs={"trim_whitespace": True} ) text = "こんにちは、世界。\nこんばんは、世界。" tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14]) @require_jumanpp def test_jumanpp_tokenizer_ext(self): tokenizer = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"), ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"], ) def test_wordpiece_tokenizer(self): vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"] # fmt: skip vocab = {} for i, token in enumerate(vocab_tokens): vocab[token] = i tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]") self.assertListEqual(tokenizer.tokenize(""), []) self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"]) self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"]) self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"]) # fmt: skip def test_sentencepiece_tokenizer(self): tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp") subword_tokenizer = tokenizer.subword_tokenizer tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。") self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"]) # fmt: skip tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは") self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"]) def test_sequence_builders(self): tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese") text = tokenizer.encode("ありがとう。", add_special_tokens=False) text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_2 + [3] @custom_tokenizers class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "cl-tohoku/bert-base-japanese" tokenizer_class = BertJapaneseTokenizer test_rust_tokenizer = False def setUp(self): super().setUp() vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"] self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def get_tokenizer(self, **kwargs): return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, 
subword_tokenizer_type="character", **kwargs) def get_input_output_texts(self, tokenizer): input_text = "こんにちは、世界。 \nこんばんは、世界。" output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。" return input_text, output_text def test_pretokenized_inputs(self): pass # TODO add if relevant def test_maximum_encoding_length_pair_input(self): pass # TODO add if relevant def test_maximum_encoding_length_single_input(self): pass # TODO add if relevant def test_full_tokenizer(self): tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character") tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。") self.assertListEqual(tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]) # fmt: skip self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] ) def test_character_tokenizer(self): vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"] vocab = {} for i, token in enumerate(vocab_tokens): vocab[token] = i tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]") self.assertListEqual(tokenizer.tokenize(""), []) self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"]) self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"]) def test_sequence_builders(self): tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char") text = tokenizer.encode("ありがとう。", add_special_tokens=False) text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_2 + [3] @custom_tokenizers class AutoTokenizerCustomTest(unittest.TestCase): def test_tokenizer_bert_japanese(self): EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese" tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID) self.assertIsInstance(tokenizer, BertJapaneseTokenizer) class BertTokenizerMismatchTest(unittest.TestCase): def test_tokenizer_mismatch_warning(self): EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese" with self.assertLogs("transformers", level="WARNING") as cm: BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID) self.assertTrue( cm.records[0].message.startswith( "The tokenizer class you load from this checkpoint is not the same type as the class this function" " is called from." ) ) EXAMPLE_BERT_ID = "google-bert/bert-base-cased" with self.assertLogs("transformers", level="WARNING") as cm: BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID) self.assertTrue( cm.records[0].message.startswith( "The tokenizer class you load from this checkpoint is not the same type as the class this function" " is called from." ) )
transformers/tests/models/bert_japanese/test_tokenization_bert_japanese.py/0
{ "file_path": "transformers/tests/models/bert_japanese/test_tokenization_bert_japanese.py", "repo_id": "transformers", "token_count": 10470 }
403
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform" import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def prepare_blenderbot_inputs_dict( config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = np.where(input_ids != config.pad_token_id, 1, 0) if decoder_attention_mask is None: decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0) if head_mask is None: head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class FlaxBlenderbotModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=50, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.initializer_range = initializer_range def prepare_config_and_inputs(self): input_ids = 
np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size) input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1) decoder_input_ids = shift_tokens_right(input_ids, 1, 2) config = BlenderbotConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False, ) inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def check_use_cache_forward(self, model_class_name, config, inputs_dict): max_decoder_length = 20 model = model_class_name(config) encoder_outputs = model.encode(inputs_dict["input_ids"]) decoder_input_ids, decoder_attention_mask = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs) decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4") decoder_position_ids = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), ) outputs_cache = model.decode( decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, ) decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model.decode( decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, ) outputs = model.decode(decoder_input_ids, encoder_outputs) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict): max_decoder_length = 20 model = model_class_name(config) encoder_outputs = model.encode(inputs_dict["input_ids"]) decoder_input_ids, decoder_attention_mask = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) decoder_attention_mask_cache = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])), ], axis=-1, ) past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs) decoder_position_ids = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), ) outputs_cache = model.decode( decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, ) decoder_position_ids = 
jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model.decode( decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, ) outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") @require_flax class BlenderbotHeadTests(unittest.TestCase): vocab_size = 99 def _get_config_and_data(self): input_ids = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ], dtype=np.int64, ) batch_size = input_ids.shape[0] config = BlenderbotConfig( vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, ) return config, input_ids, batch_size # @timeout_decorator.timeout(1) # not working with the decorator so far def test_lm_forward(self): config, input_ids, batch_size = self._get_config_and_data() lm_model = FlaxBlenderbotForConditionalGeneration(config) outputs = lm_model(input_ids=input_ids) expected_shape = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs["logits"].shape, expected_shape) def test_lm_uneven_forward(self): config = BlenderbotConfig( vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, ) lm_model = FlaxBlenderbotForConditionalGeneration(config) context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64) summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64) outputs = lm_model(input_ids=context, decoder_input_ids=summary) expected_shape = (*summary.shape, config.vocab_size) self.assertEqual(outputs["logits"].shape, expected_shape) def test_shift_tokens_right(self): input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64) shifted = shift_tokens_right(input_ids, 1, 2) n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum() n_pad_after = np.equal(shifted, 1).astype(np.float32).sum() self.assertEqual(shifted.shape, input_ids.shape) self.assertEqual(n_pad_after, n_pad_before - 1) self.assertTrue(np.equal(shifted[:, 0], 2).all()) @require_flax class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin): is_encoder_decoder = True all_model_classes = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def setUp(self): self.model_tester = FlaxBlenderbotModelTester(self) def test_use_cache_forward(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: 
self.model_tester.check_use_cache_forward(model_class, config, inputs_dict) def test_use_cache_forward_with_attn_mask(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict) def test_encode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def encode_jitted(input_ids, attention_mask=None, **kwargs): return model.encode(input_ids=input_ids, attention_mask=attention_mask) with self.subTest("JIT Enabled"): jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = encode_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def test_decode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): model = model_class(config) encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"]) prepared_inputs_dict = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs): return model.decode( decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, ) with self.subTest("JIT Enabled"): jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = decode_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) @slow def test_model_from_pretrained(self): for model_class_name in self.all_model_classes: model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill") # FlaxBlenderbotForSequenceClassification expects eos token in input_ids input_ids = np.ones((1, 1)) * model.config.eos_token_id outputs = model(input_ids) self.assertIsNotNone(outputs) @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.") @slow def test_generation_from_short_input_same_as_parlai_3B(self): FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25} TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True} model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True) tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B") src_text = ["Sam"] model_inputs = tokenizer(src_text, return_tensors="jax") generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS) tgt_text = 'Sam is a great name. It means "sun" in Gaelic.' generated_txt = tokenizer.batch_decode(generated_utterances, **TOK_DECODE_KW) assert generated_txt[0].strip() == tgt_text
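
# A brief note on `shift_tokens_right`, exercised in `test_shift_tokens_right` above: it prepends the
# decoder start token and drops the last position, leaving the shape unchanged. For example, with
# pad_token_id=1 and decoder_start_token_id=2, [[71, 82, 2]] becomes [[2, 71, 82]].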
transformers/tests/models/blenderbot/test_modeling_flax_blenderbot.py/0
{ "file_path": "transformers/tests/models/blenderbot/test_modeling_flax_blenderbot.py", "repo_id": "transformers", "token_count": 8215 }
404
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch BLIP-2 model. """ import inspect import tempfile import unittest import numpy as np import requests from transformers import CONFIG_MAPPING, Blip2Config, Blip2QFormerConfig, Blip2VisionConfig from transformers.testing_utils import ( require_torch, require_torch_multi_accelerator, require_vision, slow, torch_device, ) from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import Blip2ForConditionalGeneration, Blip2Model, Blip2VisionModel from transformers.models.blip_2.modeling_blip_2 import BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import Blip2Processor class Blip2VisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=1e-10, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return Blip2VisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = Blip2VisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = (self.image_size, self.image_size) patch_size = 
(self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class Blip2VisionModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as BLIP-2's vision encoder does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (Blip2VisionModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = Blip2VisionModelTester(self) self.config_tester = ConfigTester( self, config_class=Blip2VisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="BLIP-2's vision encoder does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="Blip2VisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="Blip2VisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = Blip2VisionModel.from_pretrained(model_name) self.assertIsNotNone(model) class Blip2QFormerModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, 
max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope self.bos_token_id = bos_token_id def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return Blip2QFormerConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, ) # this class is based on `OPTModelTester` found in tests/models/opt/test_modeling_opt.py class Blip2TextModelDecoderOnlyTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, num_labels=3, word_embed_proj_dim=16, type_sequence_label_size=2, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.embed_dim = embed_dim self.num_labels = num_labels self.type_sequence_label_size = type_sequence_label_size self.word_embed_proj_dim = word_embed_proj_dim self.is_encoder_decoder = False def prepare_config_and_inputs(self): config = self.get_config() input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(3) input_ids[:, -1] = self.eos_token_id # Eos Token attention_mask = input_ids.ne(self.pad_token_id) return config, input_ids, attention_mask def get_config(self): return CONFIG_MAPPING["opt"]( vocab_size=self.vocab_size, hidden_size=self.hidden_size, 
num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, is_encoder_decoder=False, word_embed_proj_dim=self.word_embed_proj_dim, ) # this model tester uses a decoder-only language model (OPT) class Blip2ForConditionalGenerationDecoderOnlyModelTester: def __init__( self, parent, vision_kwargs=None, qformer_kwargs=None, text_kwargs=None, is_training=True, num_query_tokens=10 ): if vision_kwargs is None: vision_kwargs = {} if qformer_kwargs is None: qformer_kwargs = {} if text_kwargs is None: text_kwargs = {} self.parent = parent self.vision_model_tester = Blip2VisionModelTester(parent, **vision_kwargs) self.qformer_model_tester = Blip2QFormerModelTester(parent, **qformer_kwargs) self.text_model_tester = Blip2TextModelDecoderOnlyTester(parent, **text_kwargs) self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test self.is_training = is_training self.num_query_tokens = num_query_tokens def prepare_config_and_inputs(self): _, pixel_values = self.vision_model_tester.prepare_config_and_inputs() _, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return Blip2Config.from_vision_qformer_text_configs( vision_config=self.vision_model_tester.get_config(), qformer_config=self.qformer_model_tester.get_config(), text_config=self.text_model_tester.get_config(), num_query_tokens=self.num_query_tokens, ) def create_and_check_for_conditional_generation(self, config, input_ids, attention_mask, pixel_values): model = Blip2ForConditionalGeneration(config).to(torch_device).eval() with torch.no_grad(): result = model(pixel_values, input_ids, attention_mask) expected_seq_length = self.num_query_tokens + self.text_model_tester.seq_length self.parent.assertEqual( result.logits.shape, (self.vision_model_tester.batch_size, expected_seq_length, self.text_model_tester.vocab_size), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask, "labels": input_ids, } return config, inputs_dict @require_torch class Blip2ForConditionalGenerationDecoderOnlyTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (Blip2ForConditionalGeneration,) if is_torch_available() else () fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False test_torchscript = False def setUp(self): self.model_tester = Blip2ForConditionalGenerationDecoderOnlyModelTester(self) def test_for_conditional_generation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_conditional_generation(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def 
test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="Blip2Model does not have input/output embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="There's no base Blip2Model") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="There's no base Blip2Model") def test_save_load_fast_init_to_base(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_load_vision_qformer_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save Blip2Config and check if we can load Blip2VisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = Blip2VisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save Blip2Config and check if we can load Blip2QFormerConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) qformer_config = Blip2QFormerConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.qformer_config.to_dict(), qformer_config.to_dict()) @slow def test_model_from_pretrained(self): for model_name in BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST: model = Blip2ForConditionalGeneration.from_pretrained(model_name) self.assertIsNotNone(model) # this class is based on `T5ModelTester` found in tests/models/t5/test_modeling_t5.py class Blip2TextModelTester: def __init__( self, parent, vocab_size=99, batch_size=12, encoder_seq_length=7, decoder_seq_length=9, # For common tests is_training=True, use_attention_mask=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.scope = None self.decoder_layers = decoder_layers def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None decoder_attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) decoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels 
= None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = self.get_config() return ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) def get_config(self): return CONFIG_MAPPING["t5"]( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) # this model tester uses an encoder-decoder language model (T5) class Blip2ModelTester: def __init__( self, parent, vision_kwargs=None, qformer_kwargs=None, text_kwargs=None, is_training=True, num_query_tokens=10 ): if vision_kwargs is None: vision_kwargs = {} if qformer_kwargs is None: qformer_kwargs = {} if text_kwargs is None: text_kwargs = {} self.parent = parent self.vision_model_tester = Blip2VisionModelTester(parent, **vision_kwargs) self.qformer_model_tester = Blip2QFormerModelTester(parent, **qformer_kwargs) self.text_model_tester = Blip2TextModelTester(parent, **text_kwargs) self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test self.is_training = is_training self.num_query_tokens = num_query_tokens def prepare_config_and_inputs(self): _, pixel_values = self.vision_model_tester.prepare_config_and_inputs() ( _, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = self.text_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values, decoder_input_ids, decoder_attention_mask, lm_labels def get_config(self): return Blip2Config.from_vision_qformer_text_configs( vision_config=self.vision_model_tester.get_config(), qformer_config=self.qformer_model_tester.get_config(), text_config=self.text_model_tester.get_config(), num_query_tokens=self.num_query_tokens, ) def create_and_check_for_conditional_generation( self, config, input_ids, attention_mask, pixel_values, decoder_input_ids, decoder_attention_mask, labels ): model = Blip2ForConditionalGeneration(config).to(torch_device).eval() with torch.no_grad(): result = model(pixel_values, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask) self.parent.assertEqual( result.logits.shape, ( self.vision_model_tester.batch_size, self.text_model_tester.seq_length, self.text_model_tester.vocab_size, ), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, pixel_values, decoder_input_ids, decoder_attention_mask, labels, ) = config_and_inputs inputs_dict = { "pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "labels": labels, } return config, inputs_dict @require_torch class Blip2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (Blip2ForConditionalGeneration, Blip2Model) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": Blip2Model, "image-to-text": Blip2ForConditionalGeneration, "visual-question-answering": 
Blip2ForConditionalGeneration, } if is_torch_available() else {} ) fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False test_torchscript = False def setUp(self): self.model_tester = Blip2ModelTester(self) def test_for_conditional_generation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_conditional_generation(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="Blip2Model does not have input/output embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="There's no base Blip2Model") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="There's no base Blip2Model") def test_save_load_fast_init_to_base(self): pass @unittest.skip(reason="Does not work on the tiny model as we keep hitting edge cases.") def test_cpu_offload(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_load_vision_qformer_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save Blip2Config and check if we can load Blip2VisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = Blip2VisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save Blip2Config and check if we can load Blip2QFormerConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) qformer_config = Blip2QFormerConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.qformer_config.to_dict(), qformer_config.to_dict()) @slow def test_model_from_pretrained(self): for model_name in BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST: model = Blip2ForConditionalGeneration.from_pretrained(model_name) self.assertIsNotNone(model) def test_get_text_features(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() inputs_dict = { "input_ids": torch.LongTensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]).to(torch_device), "attention_mask": torch.LongTensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]).to(torch_device), "decoder_input_ids": torch.LongTensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]).to(torch_device), } model = Blip2Model(config).to(torch_device) model.eval() text_features = model.get_text_features(**inputs_dict) self.assertEqual(text_features[0].shape, (1, 10, config.text_config.vocab_size)) def test_get_image_features(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() keys_to_pop = ["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] for key in keys_to_pop: inputs_dict.pop(key) model = Blip2Model(config).to(torch_device) model.eval() image_features = 
model.get_image_features(**inputs_dict)
        self.assertEqual(
            image_features[0].shape,
            (
                self.model_tester.vision_model_tester.batch_size,
                self.model_tester.vision_model_tester.seq_length,
                config.vision_config.hidden_size,
            ),
        )

    def test_get_qformer_features(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        keys_to_pop = ["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"]
        for key in keys_to_pop:
            inputs_dict.pop(key)
        model = Blip2Model(config).to(torch_device)
        model.eval()
        qformer_features = model.get_qformer_features(**inputs_dict)
        self.assertEqual(
            qformer_features[0].shape,
            (self.model_tester.vision_model_tester.batch_size, 10, config.vision_config.hidden_size),
        )

    # override from common to deal with nested configurations (`vision_config`, `text_config` and `qformer_config`)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for key in ["vision_config", "qformer_config", "text_config"]:
            setattr(configs_no_init, key, _config_zero_init(getattr(configs_no_init, key)))
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )


# We will verify our results on an image of a woman and her dog on the beach
def prepare_img():
    url = "https://huggingface.co/hf-internal-testing/blip-test-image/resolve/main/demo.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@require_vision
@require_torch
@slow
class Blip2ModelIntegrationTest(unittest.TestCase):
    def test_inference_opt(self):
        processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
        model = Blip2ForConditionalGeneration.from_pretrained(
            "Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16
        ).to(torch_device)

        # prepare image
        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device, dtype=torch.float16)

        predictions = model.generate(**inputs)
        generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip()

        # Test output
        self.assertEqual(predictions[0].tolist(), [2, 102, 693, 2828, 15, 5, 4105, 19, 10, 2335, 50118])
        self.assertEqual("a woman sitting on the beach with a dog", generated_text)

        # image and context
        prompt = "Question: which city is this? 
Answer:" inputs = processor(images=image, text=prompt, return_tensors="pt").to(torch_device, dtype=torch.float16) predictions = model.generate(**inputs) generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip() # Test output self.assertEqual( predictions[0].tolist(), [2, 24, 18, 45, 10, 343, 6, 24, 18, 10, 4105, 50118], ) self.assertEqual(generated_text, "it's not a city, it's a beach") def test_inference_opt_batched_beam_search(self): processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b") model = Blip2ForConditionalGeneration.from_pretrained( "Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16 ).to(torch_device) # prepare image image = prepare_img() inputs = processor(images=[image, image], return_tensors="pt").to(torch_device, dtype=torch.float16) predictions = model.generate(**inputs, num_beams=2) # Test output (in this case, slightly different from greedy search) self.assertEqual(predictions[0].tolist(), [2, 102, 693, 2828, 15, 5, 4105, 19, 69, 2335, 50118]) self.assertEqual(predictions[1].tolist(), [2, 102, 693, 2828, 15, 5, 4105, 19, 69, 2335, 50118]) def test_inference_t5(self): processor = Blip2Processor.from_pretrained("Salesforce/blip2-flan-t5-xl") model = Blip2ForConditionalGeneration.from_pretrained( "Salesforce/blip2-flan-t5-xl", torch_dtype=torch.float16 ).to(torch_device) # prepare image image = prepare_img() inputs = processor(images=image, return_tensors="pt").to(torch_device, dtype=torch.float16) predictions = model.generate(**inputs) generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip() # Test output self.assertEqual(predictions[0].tolist(), [0, 2335, 1556, 28, 1782, 30, 8, 2608, 1]) self.assertEqual("woman playing with dog on the beach", generated_text) # image and context prompt = "Question: which city is this? 
Answer:"
        inputs = processor(images=image, text=prompt, return_tensors="pt").to(torch_device, dtype=torch.float16)

        predictions = model.generate(**inputs)
        generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip()

        # Test output
        self.assertEqual(
            predictions[0].tolist(),
            [0, 3, 7, 152, 67, 839, 1],
        )
        self.assertEqual(generated_text, "san diego")

    def test_inference_t5_batched_beam_search(self):
        processor = Blip2Processor.from_pretrained("Salesforce/blip2-flan-t5-xl")
        model = Blip2ForConditionalGeneration.from_pretrained(
            "Salesforce/blip2-flan-t5-xl", torch_dtype=torch.float16
        ).to(torch_device)

        # prepare image
        image = prepare_img()
        inputs = processor(images=[image, image], return_tensors="pt").to(torch_device, dtype=torch.float16)

        predictions = model.generate(**inputs, num_beams=2)

        # Test output (in this case, slightly different from greedy search)
        self.assertEqual(predictions[0].tolist(), [0, 2335, 1556, 28, 1782, 30, 8, 2608, 1])
        self.assertEqual(predictions[1].tolist(), [0, 2335, 1556, 28, 1782, 30, 8, 2608, 1])

    @require_torch_multi_accelerator
    def test_inference_opt_multi_accelerator(self):
        processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
        model = Blip2ForConditionalGeneration.from_pretrained(
            "Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16, device_map="balanced"
        )

        # prepare image
        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(0, dtype=torch.float16)

        predictions = model.generate(**inputs)
        generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip()

        # Test output
        self.assertEqual(predictions[0].tolist(), [2, 102, 693, 2828, 15, 5, 4105, 19, 10, 2335, 50118])
        self.assertEqual("a woman sitting on the beach with a dog", generated_text)

        # image and context
        prompt = "Question: which city is this? Answer:"
        inputs = processor(images=image, text=prompt, return_tensors="pt").to(0, dtype=torch.float16)

        predictions = model.generate(**inputs)
        generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip()

        # Test output
        self.assertEqual(
            predictions[0].tolist(),
            [2, 24, 18, 45, 10, 343, 6, 24, 18, 10, 4105, 50118],
        )
        self.assertEqual(generated_text, "it's not a city, it's a beach")

    @require_torch_multi_accelerator
    def test_inference_t5_multi_accelerator(self):
        processor = Blip2Processor.from_pretrained("Salesforce/blip2-flan-t5-xl")
        device_map = {
            "query_tokens": 0,
            "vision_model": 0,
            "language_model": 1,
            "language_projection": 0,
            "qformer": 0,
        }

        model = Blip2ForConditionalGeneration.from_pretrained(
            "Salesforce/blip2-flan-t5-xl", torch_dtype=torch.float16, device_map=device_map
        )

        # prepare image
        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(0, dtype=torch.float16)

        predictions = model.generate(**inputs)
        generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip()

        # Test output
        self.assertEqual(predictions[0].tolist(), [0, 2335, 1556, 28, 1782, 30, 8, 2608, 1])
        self.assertEqual("woman playing with dog on the beach", generated_text)

        # image and context
        prompt = "Question: which city is this? Answer:"
        inputs = processor(images=image, text=prompt, return_tensors="pt").to(0, dtype=torch.float16)

        predictions = model.generate(**inputs)
        generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip()

        # Test output
        self.assertEqual(
            predictions[0].tolist(),
            [0, 3, 7, 152, 67, 839, 1],
        )
        self.assertEqual(generated_text, "san diego")
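
# Illustrative sketch only (not collected as a test): every integration test above follows the same
# caption-generation pattern. The helper name below is hypothetical; its arguments mirror objects the
# tests already build (a Blip2Processor, a Blip2ForConditionalGeneration, a PIL image).
def _example_blip2_caption(model, processor, image, device="cpu"):
    """Greedy caption generation, mirroring the processor/generate/batch_decode calls used above."""
    inputs = processor(images=image, return_tensors="pt").to(device)
    generated_ids = model.generate(**inputs)
    return processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()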
transformers/tests/models/blip_2/test_modeling_blip_2.py/0
{ "file_path": "transformers/tests/models/blip_2/test_modeling_blip_2.py", "repo_id": "transformers", "token_count": 17607 }
405
# coding=utf-8 # Copyright 2018 HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from transformers import AddedToken, CamembertTokenizer, CamembertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import is_torch_available from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model") FRAMEWORK = "pt" if is_torch_available() else "tf" @require_sentencepiece @require_tokenizers class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "almanach/camembert-base" tokenizer_class = CamembertTokenizer rust_tokenizer_class = CamembertTokenizerFast test_rust_tokenizer = True test_sentencepiece = True def setUp(self): super().setUp() # We have a SentencePiece fixture for testing tokenizer = CamembertTokenizer(SAMPLE_VOCAB) tokenizer.save_pretrained(self.tmpdirname) @unittest.skip( "Token maps are not equal because someone set the probability of ('<unk>NOTUSED', -100), so it's never encoded for fast" ) def test_special_tokens_map_equal(self): return def test_convert_token_and_id(self): """Test ``_convert_token_to_id`` and ``_convert_id_to_token``.""" token = "<pad>" token_id = 1 # 1 is the offset id, but in the spm vocab it's 3 self.assertEqual(self.get_tokenizer().convert_tokens_to_ids(token), token_id) self.assertEqual(self.get_tokenizer().convert_ids_to_tokens(token_id), token) def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "<s>NOTUSED") self.assertEqual(vocab_keys[1], "<pad>") self.assertEqual(vocab_keys[-1], "<mask>") self.assertEqual(len(vocab_keys), 1_005) def test_vocab_size(self): self.assertEqual(self.get_tokenizer().vocab_size, 1_000) def test_rust_and_python_bpe_tokenizers(self): tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB) tokenizer.save_pretrained(self.tmpdirname) rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname) sequence = "I was born in 92000, and this is falsé." ids = tokenizer.encode(sequence) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) # <unk> tokens are not the same for `rust` than for `slow`. # Because spm gives back raw token instead of `unk` in EncodeAsPieces # tokens = tokenizer.tokenize(sequence) tokens = tokenizer.convert_ids_to_tokens(ids) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) def test_rust_and_python_full_tokenizers(self): if not self.test_rust_tokenizer: return tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() sequence = "I was born in 92000, and this is falsé." 
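        # The slow (SentencePiece-based) tokenizer and the fast (Rust) tokenizer should agree on this
        # sentence: first on the raw tokens, then on the ids without special tokens, then on the full
        # encoding with special tokens.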
tokens = tokenizer.tokenize(sequence) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) rust_tokenizer = self.get_rust_tokenizer() ids = tokenizer.encode(sequence) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) @slow def test_tokenizer_integration(self): expected_encoding = {'input_ids': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # fmt: skip # camembert is a french model. So we also use french texts. sequences = [ "Le transformeur est un modèle d'apprentissage profond introduit en 2017, " "utilisé principalement dans le domaine du traitement automatique des langues (TAL).", "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus " "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches " "telles que la traduction et la synthèse de texte.", ] self.tokenizer_integration_test_util( expected_encoding=expected_encoding, model_name="almanach/camembert-base", revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf", sequences=sequences, ) # Overwritten because we have to use from slow (online pretrained is wrong, the tokenizer.json has a whole) def test_added_tokens_serialization(self): self.maxDiff = None # Utility to test the added vocab def _test_added_vocab_and_eos(expected, tokenizer_class, expected_eos, temp_dir): tokenizer = tokenizer_class.from_pretrained(temp_dir) self.assertTrue(str(expected_eos) not in tokenizer.additional_special_tokens) self.assertIn(new_eos, tokenizer.added_tokens_decoder.values()) self.assertEqual(tokenizer.added_tokens_decoder[tokenizer.eos_token_id], new_eos) self.assertDictEqual(expected, tokenizer.added_tokens_decoder) return tokenizer new_eos = AddedToken("[NEW_EOS]", rstrip=False, lstrip=True, normalized=False) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): # Load a slow tokenizer from the hub, init with the new token for fast to also include it tokenizer = self.tokenizer_class.from_pretrained(pretrained_name, eos_token=new_eos) EXPECTED_ADDED_TOKENS_DECODER = tokenizer.added_tokens_decoder with self.subTest("Hub -> Slow: Test loading a slow tokenizer from the hub)"): self.assertEqual(tokenizer._eos_token, new_eos) self.assertIn(new_eos, list(tokenizer.added_tokens_decoder.values())) with tempfile.TemporaryDirectory() as tmp_dir_2: tokenizer.save_pretrained(tmp_dir_2) with self.subTest( "Hub -> Slow -> Slow: Test saving this slow tokenizer and reloading it in the fast class" ): _test_added_vocab_and_eos( EXPECTED_ADDED_TOKENS_DECODER, self.tokenizer_class, new_eos, 
tmp_dir_2 ) if self.rust_tokenizer_class is not None: with self.subTest( "Hub -> Slow -> Fast: Test saving this slow tokenizer and reloading it in the fast class" ): tokenizer_fast = _test_added_vocab_and_eos( EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_2 ) with tempfile.TemporaryDirectory() as tmp_dir_3: tokenizer_fast.save_pretrained(tmp_dir_3) with self.subTest( "Hub -> Slow -> Fast -> Fast: Test saving this fast tokenizer and reloading it in the fast class" ): _test_added_vocab_and_eos( EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_3 ) with self.subTest( "Hub -> Slow -> Fast -> Slow: Test saving this slow tokenizer and reloading it in the slow class" ): _test_added_vocab_and_eos( EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_3 ) with self.subTest("Hub -> Fast: Test loading a fast tokenizer from the hub)"): if self.rust_tokenizer_class is not None: tokenizer_fast = self.rust_tokenizer_class.from_pretrained( pretrained_name, eos_token=new_eos, from_slow=True ) self.assertEqual(tokenizer_fast._eos_token, new_eos) self.assertIn(new_eos, list(tokenizer_fast.added_tokens_decoder.values())) # We can't test the following because for BC we kept the default rstrip lstrip in slow not fast. Will comment once normalization is alright with self.subTest("Hub -> Fast == Hub -> Slow: make sure slow and fast tokenizer match"): self.assertDictEqual(EXPECTED_ADDED_TOKENS_DECODER, tokenizer_fast.added_tokens_decoder) EXPECTED_ADDED_TOKENS_DECODER = tokenizer_fast.added_tokens_decoder with tempfile.TemporaryDirectory() as tmp_dir_4: tokenizer_fast.save_pretrained(tmp_dir_4) with self.subTest("Hub -> Fast -> Fast: saving Fast1 locally and loading"): _test_added_vocab_and_eos( EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_4 ) with self.subTest("Hub -> Fast -> Slow: saving Fast1 locally and loading"): _test_added_vocab_and_eos( EXPECTED_ADDED_TOKENS_DECODER, self.tokenizer_class, new_eos, tmp_dir_4 )
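
# Comment-only recap of the round trip exercised above, assuming `name` is a checkpoint id and
# `tmp_dir` a local directory:
#
#     slow = CamembertTokenizer.from_pretrained(name, eos_token=AddedToken("[NEW_EOS]", lstrip=True, rstrip=False, normalized=False))
#     slow.save_pretrained(tmp_dir)
#     fast = CamembertTokenizerFast.from_pretrained(tmp_dir)
#     assert fast.added_tokens_decoder[fast.eos_token_id] == slow.added_tokens_decoder[slow.eos_token_id]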
transformers/tests/models/camembert/test_tokenization_camembert.py/0
{ "file_path": "transformers/tests/models/camembert/test_tokenization_camembert.py", "repo_id": "transformers", "token_count": 5477 }
406
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the TensorFlow CLIP model. """ from __future__ import annotations import inspect import os import tempfile import unittest from importlib import import_module import requests from transformers import CLIPConfig, CLIPTextConfig, CLIPVisionConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCLIPModel, TFCLIPTextModel, TFCLIPVisionModel, TFSharedEmbeddings from transformers.modeling_tf_utils import keras from transformers.models.clip.modeling_tf_clip import TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import CLIPProcessor class TFCLIPVisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return CLIPVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = TFCLIPVisionModel(config=config) result = model(pixel_values, training=False) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, 
self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class TFCLIPVisionModelTest(TFModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as CLIP does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (TFCLIPVisionModel,) if is_tf_available() else () test_pruning = False test_resize_embeddings = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFCLIPVisionModelTester(self) self.config_tester = ConfigTester(self, config_class=CLIPVisionConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_inputs_embeds(self): # CLIP does not use inputs_embeds pass def test_graph_mode_with_inputs_embeds(self): # CLIP does not use inputs_embeds pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (keras.layers.Layer)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, keras.layers.Layer)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True # in CLIP, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) image_size = (self.model_tester.image_size, self.model_tester.image_size) patch_size = (self.model_tester.patch_size, self.model_tester.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_len = num_patches + 1 for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.attentions 
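            # Each self-attention tensor is expected to have shape (batch_size, num_heads, seq_len, seq_len),
            # where seq_len is the number of image patches + 1 for the [CLS] token.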
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) # CLIP has a different seq_length image_size = (self.model_tester.image_size, self.model_tester.image_size) patch_size = (self.model_tester.patch_size, self.model_tester.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_length = num_patches + 1 self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) @slow def test_model_from_pretrained(self): for model_name in TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFCLIPVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) @slow def test_saved_model_creation_extended(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True if hasattr(config, "use_cache"): config.use_cache = True # in CLIP, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) image_size = (self.model_tester.image_size, self.model_tester.image_size) patch_size = (self.model_tester.patch_size, self.model_tester.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_len = num_patches + 1 for model_class in self.all_model_classes: class_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) num_out = len(model(class_inputs_dict)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, saved_model=True) saved_model_dir = os.path.join(tmpdirname, "saved_model", "1") model = keras.models.load_model(saved_model_dir) outputs = model(class_inputs_dict) output_hidden_states = outputs["hidden_states"] output_attentions = outputs["attentions"] # Check num outputs self.assertEqual(len(outputs), num_out) # Check num layers expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(output_hidden_states), expected_num_layers) self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers) # Check attention outputs image_size = (self.model_tester.image_size, self.model_tester.image_size) patch_size = (self.model_tester.patch_size, self.model_tester.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_len = num_patches + 1 self.assertListEqual( list(output_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, 
seq_len], ) # Check hidden states self.assertListEqual( list(output_hidden_states[0].shape[-2:]), [seq_len, self.model_tester.hidden_size], ) class TFCLIPTextModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) # make sure the first token has attention mask `1` to ensure that, after combining the causal mask, there # is still at least one token being attended to for each batch. # TODO: Change `random_attention_mask` in PT/TF/Flax common test file, after a discussion with the team. input_mask = tf.concat( [tf.ones_like(input_mask[:, :1], dtype=input_mask.dtype), input_mask[:, 1:]], axis=-1 ) config = self.get_config() return config, input_ids, input_mask def get_config(self): return CLIPTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, input_ids, input_mask): model = TFCLIPTextModel(config=config) result = model(input_ids, attention_mask=input_mask, training=False) result = model(input_ids, training=False) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class TFCLIPTextModelTest(TFModelTesterMixin, unittest.TestCase): all_model_classes = (TFCLIPTextModel,) if is_tf_available() else () test_pruning = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFCLIPTextModelTester(self) self.config_tester = ConfigTester(self, config_class=CLIPTextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_inputs_embeds(self): # CLIP does not use inputs_embeds pass @slow def test_model_from_pretrained(self): for model_name in TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = 
TFCLIPTextModel.from_pretrained(model_name) self.assertIsNotNone(model) @slow def test_saved_model_creation_extended(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True if hasattr(config, "use_cache"): config.use_cache = True for model_class in self.all_model_classes: class_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) num_out = len(model(class_inputs_dict)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, saved_model=True) saved_model_dir = os.path.join(tmpdirname, "saved_model", "1") model = keras.models.load_model(saved_model_dir) outputs = model(class_inputs_dict) output_hidden_states = outputs["hidden_states"] output_attentions = outputs["attentions"] # Check number of outputs self.assertEqual(len(outputs), num_out) # Check number of layers expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) # Check hidden states self.assertEqual(len(output_hidden_states), expected_num_layers) self.assertListEqual( list(output_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], ) # Check attention outputs self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers) seq_length = self.model_tester.seq_length key_length = getattr(self.model_tester, "key_length", seq_length) self.assertListEqual( list(output_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, key_length], ) class TFCLIPModelTester: def __init__(self, parent, is_training=True): self.parent = parent self.text_model_tester = TFCLIPTextModelTester(parent) self.vision_model_tester = TFCLIPVisionModelTester(parent) self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return CLIPConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64 ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = TFCLIPModel(config) result = model(input_ids, pixel_values, attention_mask, training=False) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "return_loss": True, } return config, inputs_dict @require_tf class TFCLIPModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TFCLIPModel,) if is_tf_available() else () pipeline_model_mapping = {"feature-extraction": TFCLIPModel} if is_tf_available() else {} test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False test_onnx = False def setUp(self): self.model_tester = TFCLIPModelTester(self) def test_model(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) # hidden_states are tested in individual model tests def test_hidden_states_output(self): pass # input_embeds are tested in individual model tests def test_inputs_embeds(self): pass # CLIPModel does not have input/output embeddings def test_model_common_attributes(self): pass # overwrite from common since `TFCLIPModelTester` set `return_loss` to `True` and causes the preparation of # `symbolic_inputs` failed. def test_keras_save_load(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # remove `return_loss` to make code work if self.__class__.__name__ == "TFCLIPModelTest": inputs_dict.pop("return_loss", None) tf_main_layer_classes = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__),) for module_member_name in dir(module) if module_member_name.endswith("MainLayer") # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")] for module_member in (getattr(module, module_member_name),) if isinstance(module_member, type) and keras.layers.Layer in module_member.__bases__ and getattr(module_member, "_keras_serializable", False) } for main_layer_class in tf_main_layer_classes: # T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter if "T5" in main_layer_class.__name__: # Take the same values than in TFT5ModelTester for this shared layer shared = TFSharedEmbeddings(99, 32, name="shared") config.use_cache = inputs_dict.pop("use_cache", None) main_layer = main_layer_class(config, embed_tokens=shared) else: main_layer = main_layer_class(config) symbolic_inputs = { name: keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items() } model = keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs)) outputs = model(inputs_dict) with tempfile.TemporaryDirectory() as tmpdirname: filepath = os.path.join(tmpdirname, "keras_model.h5") model.save(filepath) if "T5" in main_layer_class.__name__: model = keras.models.load_model( filepath, custom_objects={ main_layer_class.__name__: main_layer_class, "TFSharedEmbeddings": TFSharedEmbeddings, }, ) else: model = keras.models.load_model( filepath, custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(model, keras.Model) after_outputs = model(inputs_dict) self.assert_outputs_same(after_outputs, outputs) @slow def test_model_from_pretrained(self): for model_name in TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFCLIPModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip(reason="Currently `saved_model` doesn't work with nested outputs.") @slow def test_saved_model_creation(self): pass @unittest.skip(reason="Currently `saved_model` doesn't work with nested outputs.") @slow def test_saved_model_creation_extended(self): pass @unittest.skip(reason="`saved_model` doesn't work with nested outputs so no preparation happens.") @slow def test_prepare_serving_output(self): pass # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @require_vision @require_tf class TFCLIPModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "openai/clip-vit-base-patch32" 
model = TFCLIPModel.from_pretrained(model_name) processor = CLIPProcessor.from_pretrained(model_name) image = prepare_img() inputs = processor( text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="tf" ) outputs = model(**inputs, training=False) # verify the logits self.assertEqual( outputs.logits_per_image.shape, tf.TensorShape((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])), ) self.assertEqual( outputs.logits_per_text.shape, tf.TensorShape((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), ) expected_logits = tf.constant([[24.5701, 19.3049]]) tf.debugging.assert_near(outputs.logits_per_image, expected_logits, atol=1e-3)
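# The integration test above stops at checking the raw similarity logits. The sketch below is not
# invoked by the test suite; it only illustrates how those logits are usually turned into
# image-to-text match probabilities, reusing `prepare_img` and the same public checkpoint.
# The helper name is illustrative and not part of the original test file.
def _clip_similarity_sketch():
    model_name = "openai/clip-vit-base-patch32"
    model = TFCLIPModel.from_pretrained(model_name)
    processor = CLIPProcessor.from_pretrained(model_name)
    inputs = processor(
        text=["a photo of a cat", "a photo of a dog"], images=prepare_img(), padding=True, return_tensors="tf"
    )
    outputs = model(**inputs, training=False)
    # Softmax over the text axis gives, for each image, a distribution over the candidate captions.
    probs = tf.nn.softmax(outputs.logits_per_image, axis=1)
    return probs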
transformers/tests/models/clip/test_modeling_tf_clip.py/0
{ "file_path": "transformers/tests/models/clip/test_modeling_tf_clip.py", "repo_id": "transformers", "token_count": 12017 }
407
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the TensorFlow Data2VecVision model. """ from __future__ import annotations import collections.abc import inspect import unittest import numpy as np from transformers import Data2VecVisionConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFData2VecVisionForImageClassification, TFData2VecVisionForSemanticSegmentation, TFData2VecVisionModel, ) from transformers.modeling_tf_utils import keras from transformers.models.data2vec.modeling_tf_data2vec_vision import ( TF_DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class TFData2VecVisionModelTester: def __init__( self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, out_indices=[0, 1, 2, 3], ): self.parent = parent self.vocab_size = 100 self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.out_indices = out_indices self.num_labels = num_labels def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None pixel_labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels, pixel_labels def get_config(self): return Data2VecVisionConfig( vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, 
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = TFData2VecVisionModel(config=config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (
            self.image_size
            if isinstance(self.image_size, collections.abc.Iterable)
            else (self.image_size, self.image_size)
        )
        patch_size = (
            self.patch_size
            if isinstance(self.patch_size, collections.abc.Iterable)
            else (self.patch_size, self.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = TFData2VecVisionForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = TFData2VecVisionForSemanticSegmentation(config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_for_keras_fit(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, _, _ = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": tf.zeros((self.batch_size))}
        return config, inputs_dict


@require_tf
class TFData2VecVisionModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as Data2VecVision does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
""" all_model_classes = ( (TFData2VecVisionModel, TFData2VecVisionForImageClassification, TFData2VecVisionForSemanticSegmentation) if is_tf_available() else () ) pipeline_model_mapping = ( {"feature-extraction": TFData2VecVisionModel, "image-classification": TFData2VecVisionForImageClassification} if is_tf_available() else {} ) test_pruning = False test_onnx = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = TFData2VecVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=Data2VecVisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="Data2VecVision does not use inputs_embeds") def test_inputs_embeds(self): # Data2VecVision does not use inputs_embeds pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (keras.layers.Layer)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, keras.layers.Layer)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_image_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True # in Data2VecVision, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) image_size = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) patch_size = ( self.model_tester.patch_size if isinstance(self.model_tester.patch_size, collections.abc.Iterable) else (self.model_tester.patch_size, self.model_tester.patch_size) ) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_len = num_patches + 1 encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] 
config.output_attentions = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) # Data2VecVision has a different seq_length image_size = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) patch_size = ( self.model_tester.patch_size if isinstance(self.model_tester.patch_size, collections.abc.Iterable) else (self.model_tester.patch_size, self.model_tester.patch_size) ) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_length = num_patches + 1 self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) # Overriding this method since the base method won't be compatible with Data2VecVision. @slow def test_keras_fit(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Since `TFData2VecVisionModel` cannot operate with the default `fit()` method. 
if model_class.__name__ != "TFData2VecVisionModel": model = model_class(config) if getattr(model, "hf_compute_loss", None): # Test that model correctly compute the loss with kwargs _, prepared_for_class = self.model_tester.prepare_config_and_inputs_for_keras_fit() label_names = {"labels"} self.assertGreater(len(label_names), 0, msg="No matching label names found!") labels = {key: val for key, val in prepared_for_class.items() if key in label_names} inputs_minus_labels = { key: val for key, val in prepared_for_class.items() if key not in label_names } self.assertGreater(len(inputs_minus_labels), 0) model.compile(optimizer=keras.optimizers.SGD(0.0), run_eagerly=True) # Make sure the model fits without crashing regardless of where we pass the labels history1 = model.fit( prepared_for_class, validation_data=prepared_for_class, steps_per_epoch=1, validation_steps=1, shuffle=False, ) val_loss1 = history1.history["val_loss"][0] history2 = model.fit( inputs_minus_labels, labels, validation_data=(inputs_minus_labels, labels), steps_per_epoch=1, validation_steps=1, shuffle=False, ) val_loss2 = history2.history["val_loss"][0] self.assertTrue(np.allclose(val_loss1, val_loss2, atol=1e-2, rtol=1e-3)) def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=2e-4, name="outputs", attributes=None): # We override with a slightly higher tol value, as semseg models tend to diverge a bit more super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol, name, attributes) # Overriding this method since the base method won't be compatible with Data2VecVision. def test_loss_computation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Since `TFData2VecVisionModel` won't have labels against which we # could compute loss. 
if model_class.__name__ != "TFData2VecVisionModel": model = model_class(config) if getattr(model, "hf_compute_loss", None): # The number of elements in the loss should be the same as the number of elements in the label _, prepared_for_class = self.model_tester.prepare_config_and_inputs_for_keras_fit() added_label = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0] ] loss_size = tf.size(added_label) # Test that model correctly compute the loss with kwargs possible_input_names = {"input_ids", "pixel_values", "input_features"} input_name = possible_input_names.intersection(set(prepared_for_class)).pop() model_input = prepared_for_class.pop(input_name) loss = model(model_input, **prepared_for_class)[0] self.assertEqual(loss.shape, [loss_size]) # Test that model correctly compute the loss with a dict _, prepared_for_class = self.model_tester.prepare_config_and_inputs_for_keras_fit() loss = model(**prepared_for_class)[0] self.assertEqual(loss.shape, [loss_size]) # Test that model correctly compute the loss with a tuple label_keys = prepared_for_class.keys() - inputs_dict.keys() signature = inspect.signature(model.call).parameters signature_names = list(signature.keys()) # Create a dictionary holding the location of the tensors in the tuple tuple_index_mapping = {0: input_name} for label_key in label_keys: label_key_index = signature_names.index(label_key) tuple_index_mapping[label_key_index] = label_key sorted_tuple_index_mapping = sorted(tuple_index_mapping.items()) # Initialize a list with their default values, update the values and convert to a tuple list_input = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default) for index, value in sorted_tuple_index_mapping: list_input[index] = prepared_for_class[value] tuple_input = tuple(list_input) # Send to model loss = model(tuple_input[:-1])[0] self.assertEqual(loss.shape, [loss_size]) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in TF_DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFData2VecVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf @require_vision class TFData2VecVisionModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( BeitImageProcessor.from_pretrained("facebook/data2vec-vision-base-ft1k") if is_vision_available() else None ) @slow def test_inference_image_classification_head_imagenet_1k(self): model = TFData2VecVisionForImageClassification.from_pretrained("facebook/data2vec-vision-base-ft1k") image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="tf") # forward pass outputs = model(**inputs) logits = outputs.logits # verify the logits expected_shape = tf.convert_to_tensor([1, 1000]) self.assertEqual(logits.shape, expected_shape) expected_slice = tf.convert_to_tensor([0.3277, -0.1395, 0.0911]) tf.debugging.assert_near(logits[0, :3], expected_slice, atol=1e-4) expected_top2 = [model.config.label2id[i] for i in ["remote control, remote", "tabby, tabby cat"]] self.assertEqual(tf.nn.top_k(outputs.logits[0], 2).indices.numpy().tolist(), 
expected_top2)
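# For reference, a small sketch (not part of the test suite) of how the ImageNet logits checked
# above are mapped to a human-readable label; it reuses `prepare_img` and the same checkpoint,
# and the helper name is illustrative only.
def _data2vec_vision_label_sketch():
    model = TFData2VecVisionForImageClassification.from_pretrained("facebook/data2vec-vision-base-ft1k")
    image_processor = BeitImageProcessor.from_pretrained("facebook/data2vec-vision-base-ft1k")
    inputs = image_processor(images=prepare_img(), return_tensors="tf")
    logits = model(**inputs).logits  # shape (1, 1000)
    predicted_class = int(tf.math.argmax(logits, axis=-1)[0])
    # `id2label` is populated from the checkpoint's config.
    return model.config.id2label[predicted_class]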
transformers/tests/models/data2vec/test_modeling_tf_data2vec_vision.py/0
{ "file_path": "transformers/tests/models/data2vec/test_modeling_tf_data2vec_vision.py", "repo_id": "transformers", "token_count": 9984 }
408
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch DeiT model. """ import unittest import warnings from transformers import DeiTConfig from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_accelerator, require_torch_fp16, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.auto.modeling_auto import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class DeiTModelTester: def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.encoder_stride = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 2 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return DeiTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, 
intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, ) def create_and_check_model(self, config, pixel_values, labels): model = DeiTModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels): model = DeiTForMaskedImageModeling(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images config.num_channels = 1 model = DeiTForMaskedImageModeling(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = DeiTForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) # test greyscale images config.num_channels = 1 model = DeiTForImageClassification(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, pixel_values, labels, ) = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as DeiT does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "image-feature-extraction": DeiTModel, "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = DeiTModelTester(self) self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_image_modeling(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) # special case for DeiTForImageClassificationWithTeacher model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def test_training(self): if not self.model_tester.is_training: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class.__name__ in MODEL_MAPPING_NAMES.values() or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return config.use_cache = False config.return_dict = True for model_class in self.all_model_classes: if model_class.__name__ in MODEL_MAPPING_NAMES.values() or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue model = model_class(config) model.gradient_checkpointing_enable() model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def 
test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class.__name__
                not in [
                    *MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES.values(),
                    *MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES.values(),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_accelerator
    @require_torch_fp16
    def test_inference_fp16(self):
        r"""
        A small test to make sure that inference works in half precision
without any problem. """ model = DeiTModel.from_pretrained( "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto" ) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt") pixel_values = inputs.pixel_values.to(torch_device) # forward pass to make sure inference works in fp16 with torch.no_grad(): _ = model(pixel_values)
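# As a companion to the integration tests above, a minimal sketch (not executed by the test suite)
# of how the distilled classifier's logits are mapped to a label; it reuses `prepare_img` and the
# same checkpoint, and the helper name is illustrative only.
def _deit_label_sketch():
    model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
    image_processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
    inputs = image_processor(images=prepare_img(), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # shape (1, 1000)
    predicted_class = logits.argmax(-1).item()
    # `id2label` comes from the checkpoint's config and maps indices to ImageNet class names.
    return model.config.id2label[predicted_class]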
transformers/tests/models/deit/test_modeling_deit.py/0
{ "file_path": "transformers/tests/models/deit/test_modeling_deit.py", "repo_id": "transformers", "token_count": 7266 }
409
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers import DistilBertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.distilbert.modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, ) class FlaxDistilBertModelTester(unittest.TestCase): def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_choices = num_choices def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = DistilBertConfig( vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, tie_weights_=True, ) return config, input_ids, attention_mask def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict @require_flax class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase): all_model_classes = ( ( FlaxDistilBertModel, FlaxDistilBertForMaskedLM, 
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
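# A short sketch (not run by the test suite) of how the masked-LM variant of the same checkpoint
# can be queried. The hard-coded ids are illustrative: 101/102/103 are the usual [CLS]/[SEP]/[MASK]
# ids of the uncased BERT vocabulary that DistilBERT reuses, and the remaining id simply stands in
# for an ordinary word token. The helper name is illustrative only.
def _distilbert_masked_lm_sketch():
    model = FlaxDistilBertForMaskedLM.from_pretrained("distilbert-base-uncased")
    input_ids = np.array([[101, 7592, 103, 102]])  # [CLS] <word> [MASK] [SEP]
    logits = model(input_ids).logits  # shape (1, 4, vocab_size)
    # Most likely vocabulary id for the masked position (index 2).
    return int(jnp.argmax(logits[0, 2]))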
transformers/tests/models/distilbert/test_modeling_flax_distilbert.py/0
{ "file_path": "transformers/tests/models/distilbert/test_modeling_flax_distilbert.py", "repo_id": "transformers", "token_count": 2492 }
410
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch DPT model. """ import unittest from transformers import Dinov2Config, DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DPTForDepthEstimation from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class DPTModelTester: def __init__( self, parent, batch_size=2, num_channels=3, image_size=32, patch_size=16, use_labels=True, num_labels=3, is_training=True, hidden_size=4, num_hidden_layers=2, num_attention_heads=2, intermediate_size=8, out_features=["stage1", "stage2"], apply_layernorm=False, reshape_hidden_states=False, neck_hidden_sizes=[2, 2], fusion_hidden_size=6, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.patch_size = patch_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.out_features = out_features self.apply_layernorm = apply_layernorm self.reshape_hidden_states = reshape_hidden_states self.use_labels = use_labels self.num_labels = num_labels self.is_training = is_training self.neck_hidden_sizes = neck_hidden_sizes self.fusion_hidden_size = fusion_hidden_size # DPT's sequence length self.seq_length = (self.image_size // self.patch_size) ** 2 + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return DPTConfig( backbone_config=self.get_backbone_config(), backbone=None, neck_hidden_sizes=self.neck_hidden_sizes, fusion_hidden_size=self.fusion_hidden_size, ) def get_backbone_config(self): return Dinov2Config( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, is_training=self.is_training, out_features=self.out_features, reshape_hidden_states=self.reshape_hidden_states, ) def create_and_check_for_depth_estimation(self, config, pixel_values, labels): config.num_labels = self.num_labels 
model = DPTForDepthEstimation(config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as DPT does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (DPTForDepthEstimation,) if is_torch_available() else () pipeline_model_mapping = {"depth-estimation": DPTForDepthEstimation} if is_torch_available() else {} test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = DPTModelTester(self) self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="DPT with AutoBackbone does not have a base model and hence no input_embeddings") def test_inputs_embeds(self): pass def test_for_depth_estimation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs) def test_training(self): for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True if model_class.__name__ in MODEL_MAPPING_NAMES.values(): continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_cache = False config.return_dict = True if model_class.__name__ in MODEL_MAPPING_NAMES.values() or not model_class.supports_gradient_checkpointing: continue model = model_class(config) model.to(torch_device) model.gradient_checkpointing_enable() model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) # Skip the check for the backbone backbone_params = [] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip(reason="DPT with AutoBackbone does not have a base model and hence no input_embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="DPT with AutoBackbone does not have a base model") 
def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="DPT with AutoBackbone does not have a base model") def test_save_load_fast_init_to_base(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @slow def test_model_from_pretrained(self): for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = DPTForDepthEstimation.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision @slow class DPTModelIntegrationTest(unittest.TestCase): def test_inference_depth_estimation_dinov2(self): image_processor = DPTImageProcessor.from_pretrained("facebook/dpt-dinov2-small-kitti") model = DPTForDepthEstimation.from_pretrained("facebook/dpt-dinov2-small-kitti").to(torch_device) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) predicted_depth = outputs.predicted_depth # verify the predicted depth expected_shape = torch.Size((1, 576, 736)) self.assertEqual(predicted_depth.shape, expected_shape) expected_slice = torch.tensor( [[6.0433, 7.1636, 7.4268], [6.9047, 7.2471, 7.2355], [7.9261, 8.0631, 8.0244]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.predicted_depth[0, :3, :3], expected_slice, atol=1e-4)) def test_inference_depth_estimation_beit(self): image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-beit-base-384") model = DPTForDepthEstimation.from_pretrained("Intel/dpt-beit-base-384").to(torch_device) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) predicted_depth = outputs.predicted_depth # verify the predicted depth expected_shape = torch.Size((1, 384, 384)) self.assertEqual(predicted_depth.shape, expected_shape) expected_slice = torch.tensor( [[2669.7061, 2663.7144, 2674.9399], [2633.9326, 2650.9092, 2665.4270], [2621.8271, 2632.0129, 2637.2290]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.predicted_depth[0, :3, :3], expected_slice, atol=1e-4)) def test_inference_depth_estimation_swinv2(self): image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-swinv2-tiny-256") model = DPTForDepthEstimation.from_pretrained("Intel/dpt-swinv2-tiny-256").to(torch_device) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) predicted_depth = outputs.predicted_depth # verify the predicted depth expected_shape = torch.Size((1, 256, 256)) self.assertEqual(predicted_depth.shape, expected_shape) expected_slice = torch.tensor( [[1032.7719, 1025.1886, 1030.2661], [1023.7619, 1021.0075, 1024.9121], [1022.5667, 1018.8522, 1021.4145]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.predicted_depth[0, :3, :3], expected_slice, atol=1e-4))
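# Following the depth-estimation checks above, a brief sketch (not part of the test suite) of the
# usual post-processing step: resizing the predicted depth map back to the input image resolution.
# It reuses `prepare_img` and one of the checkpoints exercised above; the helper name is
# illustrative only.
def _dpt_depth_resize_sketch():
    image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-swinv2-tiny-256")
    model = DPTForDepthEstimation.from_pretrained("Intel/dpt-swinv2-tiny-256")
    image = prepare_img()
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        predicted_depth = model(**inputs).predicted_depth  # shape (1, H', W') at the model's working resolution
    # Interpolate back to the original (height, width) of the PIL image.
    resized = torch.nn.functional.interpolate(
        predicted_depth.unsqueeze(1), size=image.size[::-1], mode="bicubic", align_corners=False
    )
    return resized.squeeze(1)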
transformers/tests/models/dpt/test_modeling_dpt_auto_backbone.py/0
{ "file_path": "transformers/tests/models/dpt/test_modeling_dpt_auto_backbone.py", "repo_id": "transformers", "token_count": 5432 }
411
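For reference, a minimal standalone sketch of the depth-estimation inference path the DPT integration tests above exercise. The checkpoint name and API calls are taken directly from test_inference_depth_estimation_dinov2; the local image path is a placeholder and not part of the test suite.

import torch
from PIL import Image
from transformers import DPTForDepthEstimation, DPTImageProcessor

checkpoint = "facebook/dpt-dinov2-small-kitti"  # same checkpoint as the integration test
image_processor = DPTImageProcessor.from_pretrained(checkpoint)
model = DPTForDepthEstimation.from_pretrained(checkpoint)
model.eval()

image = Image.open("cats.png")  # placeholder image path
inputs = image_processor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# predicted_depth is (batch_size, height, width); the test expects (1, 576, 736) for this checkpoint
print(outputs.predicted_depth.shape)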
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Encodec model. """ import copy import inspect import os import tempfile import unittest from typing import Dict, List, Tuple import numpy as np from datasets import Audio, load_dataset from transformers import AutoProcessor, EncodecConfig from transformers.testing_utils import ( is_torch_available, require_torch, slow, torch_device, ) from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EncodecModel def prepare_inputs_dict( config, input_ids=None, input_values=None, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if input_ids is not None: encoder_dict = {"input_ids": input_ids} else: encoder_dict = {"input_values": input_values} decoder_dict = {"decoder_input_ids": decoder_input_ids} if decoder_input_ids is not None else {} return {**encoder_dict, **decoder_dict} @require_torch class EncodecModelTester: def __init__( self, parent, # `batch_size` needs to be an even number if the model has some outputs with batch dim != 0. 
batch_size=12, num_channels=2, is_training=False, intermediate_size=40, hidden_size=32, num_filters=8, num_residual_layers=1, upsampling_ratios=[8, 4], num_lstm_layers=1, codebook_size=64, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.is_training = is_training self.intermediate_size = intermediate_size self.hidden_size = hidden_size self.num_filters = num_filters self.num_residual_layers = num_residual_layers self.upsampling_ratios = upsampling_ratios self.num_lstm_layers = num_lstm_layers self.codebook_size = codebook_size def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.num_channels, self.intermediate_size], scale=1.0) config = self.get_config() inputs_dict = {"input_values": input_values} return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def prepare_config_and_inputs_for_model_class(self, model_class): config, inputs_dict = self.prepare_config_and_inputs() inputs_dict["audio_codes"] = ids_tensor([1, self.batch_size, 1, self.num_channels], self.codebook_size).type( torch.int32 ) inputs_dict["audio_scales"] = [None] return config, inputs_dict def get_config(self): return EncodecConfig( audio_channels=self.num_channels, chunk_in_sec=None, hidden_size=self.hidden_size, num_filters=self.num_filters, num_residual_layers=self.num_residual_layers, upsampling_ratios=self.upsampling_ratios, num_lstm_layers=self.num_lstm_layers, codebook_size=self.codebook_size, ) def create_and_check_model_forward(self, config, inputs_dict): model = EncodecModel(config=config).to(torch_device).eval() input_values = inputs_dict["input_values"] result = model(input_values) self.parent.assertEqual( result.audio_values.shape, (self.batch_size, self.num_channels, self.intermediate_size) ) @require_torch class EncodecModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (EncodecModel,) if is_torch_available() else () is_encoder_decoder = True test_pruning = False test_headmasking = False test_resize_embeddings = False pipeline_model_mapping = {"feature-extraction": EncodecModel} if is_torch_available() else {} input_name = "input_values" def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): # model does not have attention and does not support returning hidden states inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if "output_attentions" in inputs_dict: inputs_dict.pop("output_attentions") if "output_hidden_states" in inputs_dict: inputs_dict.pop("output_hidden_states") return inputs_dict def setUp(self): self.model_tester = EncodecModelTester(self) self.config_tester = ConfigTester( self, config_class=EncodecConfig, hidden_size=37, common_properties=[], has_text_modality=False ) def test_config(self): self.config_tester.run_common_tests() def test_model_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["input_values", "padding_mask", "bandwidth"] 
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) @unittest.skip("The EncodecModel is not transformers based, thus it does not have `inputs_embeds` logics") def test_inputs_embeds(self): pass @unittest.skip("The EncodecModel is not transformers based, thus it does not have `inputs_embeds` logics") def test_model_common_attributes(self): pass @unittest.skip("The EncodecModel is not transformers based, thus it does not have the usual `attention` logic") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip("The EncodecModel is not transformers based, thus it does not have the usual `attention` logic") def test_torchscript_output_attentions(self): pass @unittest.skip("The EncodecModel is not transformers based, thus it does not have the usual `hidden_states` logic") def test_torchscript_output_hidden_state(self): pass def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) main_input_name = model_class.main_input_name try: main_input = inputs[main_input_name] model(main_input) traced_model = torch.jit.trace(model, main_input) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): if layer_name in loaded_model_state_dict: p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) # Avoid memory leak. Without this, each call increase RAM usage by ~20MB. 
# (Even with this call, there are still memory leak by ~0.04MB) self.clear_torch_jit_class_registry() @unittest.skip("The EncodecModel is not transformers based, thus it does not have the usual `attention` logic") def test_attention_outputs(self): pass def test_feed_forward_chunking(self): (original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: torch.manual_seed(0) config = copy.deepcopy(original_config) config.chunk_length_s = None config.overlap = None config.sampling_rate = 10 model = model_class(config) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) inputs["input_values"] = inputs["input_values"].repeat(1, 1, 10) hidden_states_no_chunk = model(**inputs)[0] torch.manual_seed(0) config.chunk_length_s = 1 config.overlap = 0 config.sampling_rate = 10 model = model_class(config) model.to(torch_device) model.eval() hidden_states_with_chunk = model(**inputs)[0] self.assertTrue(torch.allclose(hidden_states_no_chunk, hidden_states_with_chunk, atol=1e-3)) @unittest.skip("The EncodecModel is not transformers based, thus it does not have the usual `hidden_states` logic") def test_hidden_states_output(self): pass def test_determinism(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_determinism(first, second): # outputs are not tensors but list (since each sequence don't have the same frame_length) out_1 = first.cpu().numpy() out_2 = second.cpu().numpy() out_1 = out_1[~np.isnan(out_1)] out_2 = out_2[~np.isnan(out_2)] max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): first = model(**self._prepare_for_class(inputs_dict, model_class))[0] second = model(**self._prepare_for_class(inputs_dict, model_class))[0] if isinstance(first, tuple) and isinstance(second, tuple): for tensor1, tensor2 in zip(first, second): check_determinism(tensor1, tensor2) else: check_determinism(first, second) def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(t): t[t != t] = 0 return t def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): with torch.no_grad(): tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs) def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, Dict): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values(), dict_object.values() ): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 ), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." 
), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = ["conv"] ignore_init = ["lstm"] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) elif not any(x in name for x in ignore_init): self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_identity_shortcut(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() config.use_conv_shortcut = False self.model_tester.create_and_check_model_forward(config, inputs_dict) def normalize(arr): norm = np.linalg.norm(arr) normalized_arr = arr / norm return normalized_arr def compute_rmse(arr1, arr2): arr1_normalized = normalize(arr1) arr2_normalized = normalize(arr2) return np.sqrt(((arr1_normalized - arr2_normalized) ** 2).mean()) @slow @require_torch class EncodecIntegrationTest(unittest.TestCase): def test_integration_24kHz(self): expected_rmse = { "1.5": 0.0025, "24.0": 0.0015, } expected_codesums = { "1.5": [371955], "24.0": [6659962], } librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") model_id = "facebook/encodec_24khz" model = EncodecModel.from_pretrained(model_id).to(torch_device) processor = AutoProcessor.from_pretrained(model_id) librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=processor.sampling_rate)) audio_sample = librispeech_dummy[-1]["audio"]["array"] inputs = processor( raw_audio=audio_sample, sampling_rate=processor.sampling_rate, return_tensors="pt", ).to(torch_device) for bandwidth, expected_rmse in expected_rmse.items(): with torch.no_grad(): # use max bandwith for best possible reconstruction encoder_outputs = model.encode(inputs["input_values"], bandwidth=float(bandwidth)) audio_code_sums = [a[0].sum().cpu().item() for a in encoder_outputs[0]] # make sure audio encoded codes are correct self.assertListEqual(audio_code_sums, expected_codesums[bandwidth]) audio_codes, scales = encoder_outputs.to_tuple() input_values_dec = model.decode(audio_codes, scales, inputs["padding_mask"])[0] input_values_enc_dec = model( inputs["input_values"], inputs["padding_mask"], bandwidth=float(bandwidth) )[-1] # make sure forward and decode gives same result self.assertTrue(torch.allclose(input_values_dec, input_values_enc_dec, atol=1e-3)) # make sure shape matches self.assertTrue(inputs["input_values"].shape == input_values_enc_dec.shape) arr = inputs["input_values"][0].cpu().numpy() arr_enc_dec = input_values_enc_dec[0].cpu().numpy() # make sure audios are more or less equal # the RMSE of two random gaussian noise vectors with ~N(0, 1) is around 1.0 rmse = compute_rmse(arr, arr_enc_dec) self.assertTrue(rmse < expected_rmse) def test_integration_48kHz(self): expected_rmse = { 
"3.0": 0.001, "24.0": 0.0005, } expected_codesums = { "3.0": [144259, 146765, 156435, 176871, 161971], "24.0": [1568553, 1294948, 1306190, 1464747, 1663150], } librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") model_id = "facebook/encodec_48khz" model = EncodecModel.from_pretrained(model_id).to(torch_device) model = model.eval() processor = AutoProcessor.from_pretrained(model_id) librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=processor.sampling_rate)) audio_sample = librispeech_dummy[-1]["audio"]["array"] # transform mono to stereo audio_sample = np.array([audio_sample, audio_sample]) inputs = processor(raw_audio=audio_sample, sampling_rate=processor.sampling_rate, return_tensors="pt").to( torch_device ) for bandwidth, expected_rmse in expected_rmse.items(): with torch.no_grad(): # use max bandwith for best possible reconstruction encoder_outputs = model.encode( inputs["input_values"], inputs["padding_mask"], bandwidth=float(bandwidth), return_dict=False ) audio_code_sums = [a[0].sum().cpu().item() for a in encoder_outputs[0]] # make sure audio encoded codes are correct self.assertListEqual(audio_code_sums, expected_codesums[bandwidth]) audio_codes, scales = encoder_outputs input_values_dec = model.decode(audio_codes, scales, inputs["padding_mask"])[0] input_values_enc_dec = model( inputs["input_values"], inputs["padding_mask"], bandwidth=float(bandwidth) )[-1] # make sure forward and decode gives same result self.assertTrue(torch.allclose(input_values_dec, input_values_enc_dec, atol=1e-3)) # make sure shape matches self.assertTrue(inputs["input_values"].shape == input_values_enc_dec.shape) arr = inputs["input_values"][0].cpu().numpy() arr_enc_dec = input_values_enc_dec[0].cpu().numpy() # make sure audios are more or less equal # the RMSE of two random gaussian noise vectors with ~N(0, 1) is around 1.0 rmse = compute_rmse(arr, arr_enc_dec) self.assertTrue(rmse < expected_rmse) def test_batch_48kHz(self): expected_rmse = { "3.0": 0.001, "24.0": 0.0005, } expected_codesums = { "3.0": [ [72410, 79137, 76694, 90854, 73023, 82980, 72707, 54842], [85561, 81870, 76953, 48967, 79315, 85442, 81479, 107241], ], "24.0": [ [72410, 79137, 76694, 90854, 73023, 82980, 72707, 54842], [85561, 81870, 76953, 48967, 79315, 85442, 81479, 107241], ], } librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") model_id = "facebook/encodec_48khz" model = EncodecModel.from_pretrained(model_id).to(torch_device) processor = AutoProcessor.from_pretrained(model_id, chunk_length_s=1, overlap=0.01) librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=processor.sampling_rate)) audio_samples = [ np.array([audio_sample["array"], audio_sample["array"]]) for audio_sample in librispeech_dummy[-2:]["audio"] ] inputs = processor(raw_audio=audio_samples, sampling_rate=processor.sampling_rate, return_tensors="pt") input_values = inputs["input_values"].to(torch_device) for bandwidth, expected_rmse in expected_rmse.items(): with torch.no_grad(): # use max bandwith for best possible reconstruction encoder_outputs = model.encode(input_values, bandwidth=float(bandwidth), return_dict=False) audio_code_sums_0 = [a[0][0].sum().cpu().item() for a in encoder_outputs[0]] audio_code_sums_1 = [a[0][1].sum().cpu().item() for a in encoder_outputs[0]] # make sure audio encoded codes are correct self.assertListEqual(audio_code_sums_0, expected_codesums[bandwidth][0]) 
                self.assertListEqual(audio_code_sums_1, expected_codesums[bandwidth][1])

                audio_codes, scales = encoder_outputs
                input_values_dec = model.decode(audio_codes, scales)[0]
                input_values_enc_dec = model(input_values, bandwidth=float(bandwidth))[-1]

                # make sure forward and decode gives same result
                self.assertTrue(torch.allclose(input_values_dec, input_values_enc_dec, atol=1e-3))

                # make sure shape matches
                self.assertTrue(input_values.shape == input_values_enc_dec.shape)

                arr = input_values[0].cpu().numpy()
                arr_enc_dec = input_values_enc_dec[0].cpu().numpy()

                # make sure audios are more or less equal
                # the RMSE of two random gaussian noise vectors with ~N(0, 1) is around 1.0
                rmse = compute_rmse(arr, arr_enc_dec)
                self.assertTrue(rmse < expected_rmse)
transformers/tests/models/encodec/test_modeling_encodec.py/0
{ "file_path": "transformers/tests/models/encodec/test_modeling_encodec.py", "repo_id": "transformers", "token_count": 11925 }
412
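As a rough companion to the integration tests above, here is a minimal sketch of the Encodec encode/decode round trip they verify. The checkpoint, processor usage, and dummy LibriSpeech dataset are taken from test_integration_24kHz; the bandwidth value is one of the bandwidths that test iterates over.

import torch
from datasets import Audio, load_dataset
from transformers import AutoProcessor, EncodecModel

model_id = "facebook/encodec_24khz"
model = EncodecModel.from_pretrained(model_id)
processor = AutoProcessor.from_pretrained(model_id)

librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=processor.sampling_rate))
audio_sample = librispeech_dummy[-1]["audio"]["array"]

inputs = processor(raw_audio=audio_sample, sampling_rate=processor.sampling_rate, return_tensors="pt")

with torch.no_grad():
    # encode the waveform into discrete codebook indices at a target bandwidth ...
    encoder_outputs = model.encode(inputs["input_values"], bandwidth=24.0)
    # ... then decode the codes back into a waveform
    audio_values = model.decode(
        encoder_outputs.audio_codes, encoder_outputs.audio_scales, inputs["padding_mask"]
    )[0]

# the reconstruction has the same shape as the input, as the tests assert
print(inputs["input_values"].shape, audio_values.shape)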
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Falcon model. """ import tempfile import unittest from parameterized import parameterized from transformers import ( AutoModelForCausalLM, AutoTokenizer, FalconConfig, is_torch_available, set_seed, ) from transformers.testing_utils import require_bitsandbytes, require_torch, require_torch_sdpa, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) class FalconModelTester: def __init__( self, parent, batch_size=3, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return FalconConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, 
num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=1, new_decoder_architecture=True, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = FalconModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = FalconModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = FalconForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = FalconForCausalLM(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = 
output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if is_torch_available() else () ) all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": FalconModel, "question-answering": FalconForQuestionAnswering, "text-classification": FalconForSequenceClassification, "text-generation": FalconForCausalLM, "token-classification": FalconForTokenClassification, "zero-shot": FalconForSequenceClassification, } if is_torch_available() else {} ) test_headmasking = False test_pruning = False # TODO (ydshieh): Check this. See https://app.circleci.com/pipelines/github/huggingface/transformers/79245/workflows/9490ef58-79c2-410d-8f51-e3495156cf9c/jobs/1012146 def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return True def setUp(self): self.model_tester = FalconModelTester(self) self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_position_embedding_types(self): config, *inputs = self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: config.alibi = alibi self.model_tester.create_and_check_model(config, *inputs) def test_falcon_sequence_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = FalconForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_falcon_sequence_classification_model_for_single_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "single_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = FalconForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def 
test_falcon_sequence_classification_model_for_multi_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "multi_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor( [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float) model = FalconForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_past_key_values_format(self): # Falcon can have different numbers of KV-heads than the number of query heads, so we need # to override this test to use the right head counts. for model_class in self.all_generative_model_classes: config, inputs = self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(config, "use_cache"): return model = model_class(config).to(torch_device) if "use_cache" not in inputs: inputs["use_cache"] = True outputs = model(**inputs) # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format) if "past_key_values" not in outputs: return num_hidden_layers = ( getattr(config, "decoder_layers", None) or getattr(config, "num_decoder_layers", None) or config.num_hidden_layers ) num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads) embed_dim = getattr(config, "d_model", config.hidden_size) per_head_embed_dim = embed_dim // num_attention_heads past_kv = outputs["past_key_values"] self.assertEqual(len(past_kv), num_hidden_layers) batch_size, seq_length = inputs["input_ids"].shape for i in range(num_hidden_layers): if config.new_decoder_architecture: num_attention_heads = config.num_attention_heads elif config.multi_query: num_attention_heads = 1 self.assertEqual(len(past_kv[0]), 2) # K V for the decoder = 2 self.assertEqual( past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) @parameterized.expand([("linear",), ("dynamic",)]) def test_model_rope_scaling(self, scaling_type): config, _ = self.model_tester.prepare_config_and_inputs_for_common() short_input = ids_tensor([1, 10], config.vocab_size) long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size) set_seed(42) # Fixed seed at init time so the two models get the same random weights original_model = FalconModel(config) original_model.to(torch_device) original_model.eval() original_short_output = original_model(short_input).last_hidden_state original_long_output = original_model(long_input).last_hidden_state set_seed(42) # Fixed seed at init time so the two models get the same random weights config.rope_scaling = {"type": scaling_type, "factor": 10.0} scaled_model = FalconModel(config) scaled_model.to(torch_device) scaled_model.eval() scaled_short_output = scaled_model(short_input).last_hidden_state scaled_long_output = scaled_model(long_input).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. 
if scaling_type == "dynamic": self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5)) else: self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5)) # The output should be different for long inputs self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5)) @require_torch_sdpa @slow def test_eager_matches_sdpa_generate(self): max_new_tokens = 30 if len(self.all_generative_model_classes) == 0: self.skipTest(f"{self.__class__.__name__} tests a model that does support generate: skipping this test") for model_class in self.all_generative_model_classes: if not model_class._supports_sdpa: self.skipTest(f"{model_class.__name__} does not support SDPA") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() dummy_input = inputs_dict[model_class.main_input_name] if dummy_input.dtype in [torch.float32, torch.bfloat16]: dummy_input = dummy_input.to(torch.float16) # make sure that all models have enough positions for generation if hasattr(config, "max_position_embeddings"): config.max_position_embeddings = max_new_tokens + dummy_input.shape[1] + 1 model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) model_sdpa = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, low_cpu_mem_usage=True, ).to(torch_device) self.assertTrue(model_sdpa.config._attn_implementation == "sdpa") model_eager = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, low_cpu_mem_usage=True, attn_implementation="eager", ).to(torch_device) self.assertTrue(model_eager.config._attn_implementation == "eager") # NOTE: This check is disabled for Falcon as the non-SDPA/SDPA implementation is in the same class (legacy reason). # for name, submodule in model_eager.named_modules(): # if "SdpaAttention" in submodule.__class__.__name__: # raise ValueError("The eager model should not have SDPA attention layers") # has_sdpa = False # for name, submodule in model_sdpa.named_modules(): # if "SdpaAttention" in submodule.__class__.__name__: # has_sdpa = True # break # if not has_sdpa: # raise ValueError("The SDPA model should have SDPA attention layers") # Just test that a large cache works as expected res_eager = model_eager.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=max_new_tokens, do_sample=False ) res_sdpa = model_sdpa.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=max_new_tokens, do_sample=False ) self.assertTrue(torch.allclose(res_eager, res_sdpa)) @require_torch class FalconLanguageGenerationTest(unittest.TestCase): @slow def test_lm_generate_falcon(self): tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b") model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b") model.eval() model.to(torch_device) inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device) EXPECTED_OUTPUT = ( "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday." 
) output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19) output_str = tokenizer.batch_decode(output_ids)[0] self.assertEqual(output_str, EXPECTED_OUTPUT) @slow def test_lm_generation_big_models(self): # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]: tokenizer = AutoTokenizer.from_pretrained(repo) model = FalconForCausalLM.from_pretrained(repo) model.eval() model.to(torch_device) inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device) # We just test that these run without errors - the models are randomly initialized # and so the actual text outputs will be garbage model.generate(**inputs, do_sample=False, max_new_tokens=4) model.generate(**inputs, do_sample=True, max_new_tokens=4) model.generate(**inputs, num_beams=2, max_new_tokens=4) @slow def test_lm_generation_use_cache(self): # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers with torch.no_grad(): for repo in [ "Rocketknight1/falcon-rw-1b", "Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b", ]: tokenizer = AutoTokenizer.from_pretrained(repo) model = FalconForCausalLM.from_pretrained(repo) model.eval() model.to(device=torch_device) inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device) # Test results are the same with and without cache outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False) outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True) self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0) @require_bitsandbytes @slow def test_batched_generation(self): tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-7b", padding_side="left") tokenizer.pad_token = tokenizer.eos_token model = AutoModelForCausalLM.from_pretrained( "tiiuae/falcon-7b", device_map="auto", load_in_4bit=True, ) test_text = "A sequence: 1, 2" # should generate the rest of the sequence unpadded_inputs = tokenizer([test_text], return_tensors="pt").to("cuda:0") unpadded_gen_out = model.generate(**unpadded_inputs, max_new_tokens=20) unpadded_gen_text = tokenizer.batch_decode(unpadded_gen_out, skip_special_tokens=True) dummy_text = "This is a longer text " * 2 # forces left-padding on `test_text` padded_inputs = tokenizer([test_text, dummy_text], return_tensors="pt", padding=True).to("cuda:0") padded_gen_out = model.generate(**padded_inputs, max_new_tokens=20) padded_gen_text = tokenizer.batch_decode(padded_gen_out, skip_special_tokens=True) expected_output = "A sequence: 1, 2, 3, 4, 5, 6, 7, 8, " self.assertLess(unpadded_inputs.input_ids.shape[-1], padded_inputs.input_ids.shape[-1]) # left-padding exists self.assertEqual(unpadded_gen_text[0], expected_output) self.assertEqual(padded_gen_text[0], expected_output)
transformers/tests/models/falcon/test_modeling_falcon.py/0
{ "file_path": "transformers/tests/models/falcon/test_modeling_falcon.py", "repo_id": "transformers", "token_count": 11320 }
413
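For orientation, a minimal sketch of the greedy-generation path that FalconLanguageGenerationTest checks above; the checkpoint, prompt, and decoding arguments come from test_lm_generate_falcon rather than being assumptions.

import torch
from transformers import AutoTokenizer, FalconForCausalLM

checkpoint = "Rocketknight1/falcon-rw-1b"  # same checkpoint as test_lm_generate_falcon
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = FalconForCausalLM.from_pretrained(checkpoint)
model.eval()

inputs = tokenizer("My favorite food is", return_tensors="pt")

with torch.no_grad():
    # deterministic decoding; generation uses the KV cache by default, which is
    # what test_lm_generation_use_cache compares against use_cache=False
    output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)

print(tokenizer.batch_decode(output_ids)[0])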
# coding=utf-8 # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import unittest from huggingface_hub import hf_hub_download from transformers import GitConfig, GitProcessor, GitVisionConfig, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_FOR_CAUSAL_LM_MAPPING, GitForCausalLM, GitModel, GitVisionModel from transformers.models.git.modeling_git import GIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class GitVisionModelTester: def __init__( self, parent, batch_size=12, image_size=32, patch_size=16, num_channels=3, is_training=True, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return GitVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = GitVisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) 
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class GitVisionModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as GIT does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (GitVisionModel,) if is_torch_available() else () fx_compatible = True test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = GitVisionModelTester(self) self.config_tester = ConfigTester(self, config_class=GitVisionConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="GIT does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="GitVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="GitVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in GIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = GitVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) class GitModelTester: def __init__( self, parent, num_channels=3, image_size=32, patch_size=16, batch_size=13, text_seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, initializer_range=0.02, num_labels=3, scope=None, ): self.parent = parent self.num_channels = num_channels self.image_size = image_size self.patch_size = 
patch_size self.batch_size = batch_size self.text_seq_length = text_seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.num_labels = num_labels self.scope = scope # make sure the BOS, EOS and PAD tokens are within the vocab self.bos_token_id = vocab_size - 1 self.eos_token_id = vocab_size - 1 self.pad_token_id = vocab_size - 1 # for GIT, the sequence length is the sum of the text and patch tokens, + 1 due to the CLS token self.seq_length = self.text_seq_length + int((self.image_size / self.patch_size) ** 2) + 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.text_seq_length]) pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, input_ids, input_mask, pixel_values def get_config(self): """ Returns a tiny configuration by default. """ return GitConfig( vision_config={ "num_channels": self.num_channels, "image_size": self.image_size, "patch_size": self.patch_size, "hidden_size": self.hidden_size, "projection_dim": 32, "num_hidden_layers": self.num_hidden_layers, "num_attention_heads": self.num_attention_heads, }, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, ) def create_and_check_model(self, config, input_ids, input_mask, pixel_values): model = GitModel(config=config) model.to(torch_device) model.eval() # inference with pixel values result = model(input_ids, attention_mask=input_mask, pixel_values=pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) # inference without pixel values result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size) ) def create_and_check_for_causal_lm(self, config, input_ids, input_mask, pixel_values): model = GitForCausalLM(config=config) model.to(torch_device) model.eval() # inference with pixel values result = model(input_ids, attention_mask=input_mask, pixel_values=pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) # inference without pixel values result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.vocab_size)) # training result = model(input_ids, 
attention_mask=input_mask, pixel_values=pixel_values, labels=input_ids) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertTrue(result.loss.item() > 0) def _test_beam_search_generate(self, config, input_ids, input_mask, pixel_values): model = GitForCausalLM(config=config) model.to(torch_device) model.eval() # generate generated_ids = model.generate( input_ids, attention_mask=input_mask, pixel_values=pixel_values, do_sample=False, max_length=20, num_beams=2, num_return_sequences=2, ) self.parent.assertEqual(generated_ids.shape, (self.batch_size * 2, 20)) def _test_batched_generate_captioning(self, config, input_ids, input_mask, pixel_values): model = GitForCausalLM(config=config) model.to(torch_device) model.eval() # generate generated_ids = model.generate( input_ids=None, # captioning -> no input_ids attention_mask=None, pixel_values=pixel_values, do_sample=False, max_length=20, num_beams=2, num_return_sequences=2, ) self.parent.assertEqual(generated_ids.shape, (self.batch_size * 2, 20)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, pixel_values, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": input_mask, "pixel_values": pixel_values, } return config, inputs_dict @require_torch class GitModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (GitModel, GitForCausalLM) if is_torch_available() else () all_generative_model_classes = (GitForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": GitModel, "image-to-text": GitForCausalLM, "text-generation": GitForCausalLM} if is_torch_available() else {} ) fx_compatible = False test_torchscript = False # special case for GitForCausalLM model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in get_values(MODEL_FOR_CAUSAL_LM_MAPPING): inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device, ) return inputs_dict def setUp(self): self.model_tester = GitModelTester(self) self.config_tester = ConfigTester(self, config_class=GitConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) def test_beam_search_generate(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester._test_beam_search_generate(*config_and_inputs) def test_batched_generate_captioning(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester._test_batched_generate_captioning(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type 
self.model_tester.create_and_check_model(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in GIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = GitModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip(reason="GIT has pixel values as additional input") def test_beam_search_generate_dict_outputs_use_cache(self): pass @unittest.skip(reason="GIT has pixel values as additional input") def test_contrastive_generate(self): pass @unittest.skip(reason="GIT has pixel values as additional input") def test_contrastive_generate_dict_outputs_use_cache(self): pass @unittest.skip(reason="GIT has pixel values as additional input") def test_greedy_generate_dict_outputs_use_cache(self): pass @require_torch @require_vision @slow class GitModelIntegrationTest(unittest.TestCase): def test_forward_pass(self): processor = GitProcessor.from_pretrained("microsoft/git-base") model = GitForCausalLM.from_pretrained("microsoft/git-base") model.to(torch_device) image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = processor(images=image, text="hello world", return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = torch.Size((1, 201, 30522)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor( [[-0.9514, -0.9512, -0.9507], [-0.5454, -0.5453, -0.5453], [-0.8862, -0.8857, -0.8848]], device=torch_device, ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)) def test_inference_image_captioning(self): processor = GitProcessor.from_pretrained("microsoft/git-base") model = GitForCausalLM.from_pretrained("microsoft/git-base") model.to(torch_device) image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = processor(images=image, return_tensors="pt") pixel_values = inputs.pixel_values.to(torch_device) outputs = model.generate( pixel_values=pixel_values, max_length=20, output_scores=True, return_dict_in_generate=True ) generated_caption = processor.batch_decode(outputs.sequences, skip_special_tokens=True)[0] expected_shape = torch.Size((1, 9)) self.assertEqual(outputs.sequences.shape, expected_shape) self.assertEquals(generated_caption, "two cats laying on a pink blanket") self.assertTrue(outputs.scores[-1].shape, expected_shape) expected_slice = torch.tensor([[-0.8805, -0.8803, -0.8799]], device=torch_device) self.assertTrue(torch.allclose(outputs.scores[-1][0, :3], expected_slice, atol=1e-4)) def test_visual_question_answering(self): processor = GitProcessor.from_pretrained("microsoft/git-base-textvqa") model = GitForCausalLM.from_pretrained("microsoft/git-base-textvqa") model.to(torch_device) # prepare image file_path = hf_hub_download(repo_id="nielsr/textvqa-sample", filename="bus.png", repo_type="dataset") image = Image.open(file_path).convert("RGB") inputs = processor(images=image, return_tensors="pt") pixel_values = inputs.pixel_values.to(torch_device) # prepare question question = "what does the front of the bus say at the top?" 
        input_ids = processor(text=question, add_special_tokens=False).input_ids
        input_ids = [processor.tokenizer.cls_token_id] + input_ids
        input_ids = torch.tensor(input_ids).unsqueeze(0).to(torch_device)

        generated_ids = model.generate(pixel_values=pixel_values, input_ids=input_ids, max_length=20)
        generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]

        expected_shape = torch.Size((1, 15))
        self.assertEqual(generated_ids.shape, expected_shape)
        self.assertEqual(generated_caption, "what does the front of the bus say at the top? special")

    def test_batched_generation(self):
        processor = GitProcessor.from_pretrained("microsoft/git-base-coco")
        model = GitForCausalLM.from_pretrained("microsoft/git-base-coco")
        model.to(torch_device)

        # create batch of size 2
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(images=[image, image], return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # we have to prepare `input_ids` with the same batch size as `pixel_values`
        start_token_id = model.config.bos_token_id
        input_ids = torch.tensor([[start_token_id], [start_token_id]], device=torch_device)
        generated_ids = model.generate(pixel_values=pixel_values, input_ids=input_ids, max_length=50)
        generated_captions = processor.batch_decode(generated_ids, skip_special_tokens=True)

        self.assertEqual(generated_captions, ["two cats sleeping on a pink blanket next to remotes."] * 2)
transformers/tests/models/git/test_modeling_git.py/0
{ "file_path": "transformers/tests/models/git/test_modeling_git.py", "repo_id": "transformers", "token_count": 9579 }
414
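The GIT integration tests in the record above double as usage documentation. A minimal captioning sketch distilled from GitModelIntegrationTest.test_inference_image_captioning follows; it is not part of the dataset record, the checkpoint id comes from the test, and the image path is a stand-in for any RGB image.

import torch
from PIL import Image
from transformers import GitForCausalLM, GitProcessor

processor = GitProcessor.from_pretrained("microsoft/git-base")
model = GitForCausalLM.from_pretrained("microsoft/git-base").eval()

image = Image.open("cats.png")  # stand-in path; the test uses a COCO fixture image
pixel_values = processor(images=image, return_tensors="pt").pixel_values

# plain captioning needs no text prompt: only pixel_values are passed, as in the test above
with torch.no_grad():
    generated_ids = model.generate(pixel_values=pixel_values, max_length=20)

caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(caption)  # the test expects "two cats laying on a pink blanket" for its fixture image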
# coding=utf-8 # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import inspect import os import tempfile import unittest from transformers import ImageGPTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST, ImageGPTForCausalImageModeling, ImageGPTForImageClassification, ImageGPTModel, ) if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class ImageGPTModelTester: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=True, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_token_type_ids = use_token_type_ids self.use_input_mask = use_input_mask self.use_labels = use_labels self.use_mc_token_ids = use_mc_token_ids self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = None def get_large_model_config(self): return ImageGPTConfig.from_pretrained("imagegpt") def prepare_config_and_inputs( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): pixel_values = ids_tensor([self.batch_size, self.seq_length], self.vocab_size - 1) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) mc_token_ids = None if self.use_mc_token_ids: mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length) sequence_labels = 
None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config( gradient_checkpointing=gradient_checkpointing, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, ) head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) return ( config, pixel_values, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def get_config( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): return ImageGPTConfig( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_inner=self.intermediate_size, activation_function=self.hidden_act, resid_pdrop=self.hidden_dropout_prob, attn_pdrop=self.attention_probs_dropout_prob, n_positions=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, use_cache=True, gradient_checkpointing=gradient_checkpointing, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 513 config.max_position_embeddings = 1024 return config def prepare_config_and_inputs_for_decoder(self): ( config, pixel_values, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, pixel_values, input_mask, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_imagegpt_model(self, config, pixel_values, input_mask, head_mask, token_type_ids, *args): model = ImageGPTModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values, token_type_ids=token_type_ids, head_mask=head_mask) result = model(pixel_values, token_type_ids=token_type_ids) result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(len(result.past_key_values), config.n_layer) def create_and_check_lm_head_model(self, config, pixel_values, input_mask, head_mask, token_type_ids, *args): model = ImageGPTForCausalImageModeling(config) model.to(torch_device) model.eval() labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size - 1) result = model(pixel_values, token_type_ids=token_type_ids, labels=labels) self.parent.assertEqual(result.loss.shape, ()) # ImageGPTForCausalImageModeling doens't have tied input- and output embeddings self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size - 1)) def create_and_check_imagegpt_for_image_classification( self, config, pixel_values, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, *args ): config.num_labels = self.num_labels model = ImageGPTForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, attention_mask=input_mask, 
token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, pixel_values, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = { "pixel_values": pixel_values, "token_type_ids": token_type_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class ImageGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (ImageGPTForCausalImageModeling, ImageGPTForImageClassification, ImageGPTModel) if is_torch_available() else () ) all_generative_model_classes = (ImageGPTForCausalImageModeling,) if is_torch_available() else () pipeline_model_mapping = ( {"image-feature-extraction": ImageGPTModel, "image-classification": ImageGPTForImageClassification} if is_torch_available() else {} ) test_missing_keys = False input_name = "pixel_values" # as ImageGPTForImageClassification isn't included in any auto mapping, we add labels here def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "ImageGPTForImageClassification": inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict # we overwrite the _check_scores method of GenerationTesterMixin, as ImageGPTForCausalImageModeling doesn't have tied input- and output embeddings def _check_scores(self, batch_size, scores, length, config): expected_shape = (batch_size, config.vocab_size - 1) self.assertIsInstance(scores, tuple) self.assertEqual(len(scores), length) self.assertListEqual([iter_scores.shape for iter_scores in scores], [expected_shape] * len(scores)) def setUp(self): self.model_tester = ImageGPTModelTester(self) self.config_tester = ConfigTester(self, config_class=ImageGPTConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_imagegpt_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_imagegpt_model(*config_and_inputs) def test_imagegpt_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*config_and_inputs) def test_imagegpt_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_imagegpt_for_image_classification(*config_and_inputs) @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @slow def test_model_from_pretrained(self): for model_name in IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = 
ImageGPTModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["input_ids"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_resize_tokens_embeddings(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) if self.model_tester.is_training is False: model.eval() model_vocab_size = config.vocab_size # Retrieve the embeddings and clone theme model_embed = model.resize_token_embeddings(model_vocab_size) cloned_embeddings = model_embed.weight.clone() # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary inputs_dict["pixel_values"].clamp_(max=model_vocab_size - 15 - 1) # Check that adding and removing tokens has not modified the first part of the embedding matrix. 
models_equal = True for p1, p2 in zip(cloned_embeddings, model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_resize_embeddings_untied(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return original_config.tie_word_embeddings = False # if model cannot untied embeddings -> leave test if original_config.tie_word_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config).to(torch_device) # if no output embeddings -> leave test if model.get_output_embeddings() is None: continue # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_vocab_size = config.vocab_size model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary inputs_dict["pixel_values"].clamp_(max=model_vocab_size - 15 - 1) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) pixel_values = inputs["pixel_values"] del inputs["pixel_values"] wte = model.get_input_embeddings() inputs["inputs_embeds"] = wte(pixel_values) with torch.no_grad(): model(**inputs)[0] def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) try: pixel_values = inputs["pixel_values"] traced_model = torch.jit.trace(model, pixel_values) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) 
except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): if layer_name in loaded_model_state_dict: p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) @unittest.skip("The model doesn't support left padding") # and it's not used enough to be worth fixing :) def test_left_padding_compatibility(self): pass # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class ImageGPTModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small") if is_vision_available() else None @slow def test_inference_causal_lm_head(self): model = ImageGPTForCausalImageModeling.from_pretrained("openai/imagegpt-small").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1024, 512)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor( [[2.3445, 2.6889, 2.7313], [1.0530, 1.2416, 0.5699], [0.2205, 0.7749, 0.3953]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))
transformers/tests/models/imagegpt/test_modeling_imagegpt.py/0
{ "file_path": "transformers/tests/models/imagegpt/test_modeling_imagegpt.py", "repo_id": "transformers", "token_count": 10627 }
415
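For reference, the forward pass that ImageGPTModelIntegrationTest.test_inference_causal_lm_head verifies in the record above reduces to roughly the following standalone sketch; the checkpoint id and expected logits shape are taken from the test, and the image path is a placeholder.

import torch
from PIL import Image
from transformers import ImageGPTForCausalImageModeling, ImageGPTImageProcessor

image_processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
model = ImageGPTForCausalImageModeling.from_pretrained("openai/imagegpt-small").eval()

image = Image.open("cats.png")  # placeholder; the test uses a COCO fixture image
inputs = image_processor(images=image, return_tensors="pt")  # the image is encoded as a sequence of color-cluster tokens

with torch.no_grad():
    outputs = model(**inputs)

print(outputs.logits.shape)  # the test expects torch.Size([1, 1024, 512]) for imagegpt-small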
# coding=utf-8 # Copyright Iz Beltagy, Matthew E. Peters, Arman Cohan and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class TFLEDModelTester: config_cls = LEDConfig config_updates = {} hidden_act = "gelu" def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, attention_window=4, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.attention_window = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after self.key_length = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. 
Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests self.encoder_seq_length = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def prepare_config_and_inputs_for_common(self): input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size) eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1) input_ids = tf.concat([input_ids, eos_tensor], axis=1) decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.config_cls( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates, ) inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids) global_attention_mask = tf.concat( [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1, ) inputs_dict["global_attention_mask"] = global_attention_mask return config, inputs_dict def check_decoder_model_past_large_inputs(self, config, inputs_dict): model = TFLEDModel(config=config).get_decoder() input_ids = inputs_dict["input_ids"] input_ids = input_ids[:1, :] attention_mask = inputs_dict["attention_mask"][:1, :] self.batch_size = 1 # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8) # append to next input_ids and next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1]) # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def prepare_led_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, ): if attention_mask is None: attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8) if decoder_attention_mask is None: decoder_attention_mask = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8), ], axis=-1, ) if head_mask is None: head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: decoder_head_mask = 
tf.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else () pipeline_model_mapping = ( { "conversational": TFLEDForConditionalGeneration, "feature-extraction": TFLEDModel, "summarization": TFLEDForConditionalGeneration, "text2text-generation": TFLEDForConditionalGeneration, "translation": TFLEDForConditionalGeneration, } if is_tf_available() else {} ) is_encoder_decoder = True test_pruning = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFLEDModelTester(self) self.config_tester = ConfigTester(self, config_class=LEDConfig) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"]) num_global_attn_indices = 2 inputs_dict["global_attention_mask"] = tf.where( tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices, 1, inputs_dict["global_attention_mask"], ) config.return_dict = True seq_length = self.model_tester.seq_length encoder_seq_length = self.model_tester.encoder_seq_length def check_decoder_attentions_output(outputs): decoder_attentions = outputs.decoder_attentions self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length], ) def check_encoder_attentions_output(outputs): attentions = [t.numpy() for t in outputs.encoder_attentions] global_attentions = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length], ) self.assertListEqual( list(global_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices], ) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["use_cache"] = False config.output_hidden_states = False model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) out_len = len(outputs) self.assertEqual(config.output_hidden_states, False) check_encoder_attentions_output(outputs) if self.is_encoder_decoder: model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(config.output_hidden_states, False) check_decoder_attentions_output(outputs) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = 
model(self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(config.output_hidden_states, False) check_encoder_attentions_output(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True config.output_hidden_states = True model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs)) self.assertEqual(model.config.output_hidden_states, True) check_encoder_attentions_output(outputs) @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.") def test_saved_model_creation(self): pass def test_generate_with_headmasking(self): # TODO: Head-masking not yet implement pass def _long_tensor(tok_lst): return tf.constant(tok_lst, dtype=tf.int32) TOLERANCE = 1e-4 @slow @require_tf class TFLEDModelIntegrationTest(unittest.TestCase): def test_inference_no_head(self): model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led # change to intended input here input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]]) decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]]) inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids) output = model(**inputs_dict)[0] expected_shape = (1, 1024, 768) self.assertEqual(output.shape, expected_shape) # change to expected output here expected_slice = tf.convert_to_tensor( [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]], ) tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3) def test_inference_with_head(self): model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384") # change to intended input here input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]]) decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]]) inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids) output = model(**inputs_dict)[0] expected_shape = (1, 1024, model.config.vocab_size) self.assertEqual(output.shape, expected_shape) # change to expected output here expected_slice = tf.convert_to_tensor( [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]], ) tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
transformers/tests/models/led/test_modeling_tf_led.py/0
{ "file_path": "transformers/tests/models/led/test_modeling_tf_led.py", "repo_id": "transformers", "token_count": 6455 }
416
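The TF-LED tester in the record above builds a global_attention_mask by hand; a rough standalone sketch of the same pattern is given below. The checkpoint id and the mask construction mirror the tests, while the tokenizer class, the input text, and the single-token decoder prompt are assumptions for illustration.

import tensorflow as tf
from transformers import LEDTokenizer, TFLEDForConditionalGeneration

tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")

inputs = tokenizer("a long document to summarize ...", return_tensors="tf")
input_ids = inputs["input_ids"]

# mirror the tester: local attention everywhere, global attention on one selected token
global_attention_mask = tf.concat(
    [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1
)

# assumed decoder prompt: a single decoder_start_token_id per batch entry
decoder_input_ids = tf.ones((1, 1), dtype=tf.int32) * model.config.decoder_start_token_id

outputs = model(
    input_ids,
    attention_mask=inputs["attention_mask"],
    decoder_input_ids=decoder_input_ids,
    global_attention_mask=global_attention_mask,
)
print(outputs.logits.shape)  # (batch, decoder_sequence_length, vocab_size)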
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device if is_torch_available(): import torch from transformers import GPT2LMHeadModel @require_torch @require_sentencepiece @require_tokenizers class MegatronGPT2IntegrationTest(unittest.TestCase): @slow @unittest.skip("Model is not available.") def test_inference_no_head(self): directory = "nvidia/megatron-gpt2-345m/" if "MYDIR" in os.environ: directory = os.path.join(os.environ["MYDIR"], directory) model = GPT2LMHeadModel.from_pretrained(directory) model.to(torch_device) model.half() input_ids = torch.tensor( [[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]], device=torch_device, dtype=torch.long, ) with torch.no_grad(): output = model(input_ids).logits expected_shape = torch.Size((1, 9, 50257)) self.assertEqual(output.shape, expected_shape) expected_diag = torch.tensor( [ 4.9414, -0.2920, -1.2148, -4.0273, -0.5161, -5.2109, -1.2412, -1.8301, -1.7734, -4.7148, -0.2317, -1.0811, -2.1777, 0.4141, -3.7969, -4.0586, -2.5332, -3.3809, 4.3867, ], device=torch_device, dtype=torch.half, ) for i in range(19): r, c = 8 * i // 17, 2792 * i # along the diagonal computed, expected = output[0, r, c], expected_diag[i] msg = f"row={r} col={c} computed={computed} expected={expected}" self.assertAlmostEqual(computed, expected, delta=1e-4, msg=msg)
transformers/tests/models/megatron_gpt2/test_modeling_megatron_gpt2.py/0
{ "file_path": "transformers/tests/models/megatron_gpt2/test_modeling_megatron_gpt2.py", "repo_id": "transformers", "token_count": 1284 }
417
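The Megatron-GPT2 test in the record above is skipped because the converted checkpoint is not published; it expects a locally converted directory. A hedged sketch of the pattern it exercises follows; the directory layout, the $MYDIR convention, the sample input ids, and the fp16 setting all come from the test itself, while the device handling is an assumption.

import os
import torch
from transformers import GPT2LMHeadModel

# the converted Megatron checkpoint is expected on disk, optionally under $MYDIR (as in the test)
directory = "nvidia/megatron-gpt2-345m/"
if "MYDIR" in os.environ:
    directory = os.path.join(os.environ["MYDIR"], directory)

device = "cuda" if torch.cuda.is_available() else "cpu"
model = GPT2LMHeadModel.from_pretrained(directory).to(device)
if device == "cuda":
    model.half()  # the test runs the 345M checkpoint in fp16 on GPU

input_ids = torch.tensor(
    [[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]], dtype=torch.long, device=device
)
with torch.no_grad():
    logits = model(input_ids).logits
print(logits.shape)  # the test expects torch.Size([1, 9, 50257])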
# coding=utf-8 # Copyright 2021, The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch MVP model. """ import copy import tempfile import unittest import timeout_decorator # noqa from transformers import MvpConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, require_torch_fp16, slow, torch_device, ) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpTokenizer, ) from transformers.models.mvp.modeling_mvp import MvpDecoder, MvpEncoder, shift_tokens_right def prepare_mvp_inputs_dict( config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = input_ids.ne(config.pad_token_id) if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class MvpModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id def 
prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp( 3, ) input_ids[:, -1] = self.eos_token_id # Eos Token decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.get_config() inputs_dict = prepare_mvp_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def get_config(self): return MvpConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, ) def get_pipeline_config(self): config = self.get_config() config.max_position_embeddings = 100 config.vocab_size = 300 return config def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = MvpModel(config=config).get_decoder().to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] head_mask = inputs_dict["head_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = MvpModel(config=config).to(torch_device).eval() outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = MvpEncoder.from_pretrained(tmpdirname).to(torch_device) encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = 
model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = MvpDecoder.from_pretrained(tmpdirname).to(torch_device) last_hidden_state_2 = decoder( input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, encoder_attention_mask=inputs_dict["attention_mask"], )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) @require_torch class MvpHeadTests(unittest.TestCase): vocab_size = 99 def _get_config_and_data(self): input_ids = torch.tensor( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ], dtype=torch.long, device=torch_device, ) batch_size = input_ids.shape[0] config = MvpConfig( vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, ) return config, input_ids, batch_size def test_sequence_classification_forward(self): config, input_ids, batch_size = self._get_config_and_data() labels = _long_tensor([2] * batch_size).to(torch_device) config.num_labels = 3 model = MvpForSequenceClassification(config) model.to(torch_device) outputs = model(input_ids=input_ids, decoder_input_ids=input_ids, labels=labels) expected_shape = torch.Size((batch_size, config.num_labels)) self.assertEqual(outputs["logits"].shape, expected_shape) self.assertIsInstance(outputs["loss"].item(), float) def test_question_answering_forward(self): config, input_ids, batch_size = self._get_config_and_data() sequence_labels = ids_tensor([batch_size], 2).to(torch_device) model = MvpForQuestionAnswering(config) model.to(torch_device) outputs = model( input_ids=input_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.assertEqual(outputs["start_logits"].shape, input_ids.shape) self.assertEqual(outputs["end_logits"].shape, input_ids.shape) self.assertIsInstance(outputs["loss"].item(), float) @timeout_decorator.timeout(1) def test_lm_forward(self): config, input_ids, batch_size = self._get_config_and_data() lm_labels = ids_tensor([batch_size, input_ids.shape[1]], self.vocab_size).to(torch_device) lm_model = MvpForConditionalGeneration(config) lm_model.to(torch_device) outputs = lm_model(input_ids=input_ids, labels=lm_labels) expected_shape = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs["logits"].shape, expected_shape) self.assertIsInstance(outputs["loss"].item(), float) def test_lm_uneven_forward(self): config = MvpConfig( vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, ) lm_model = MvpForConditionalGeneration(config).to(torch_device) context = torch.tensor( [[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], device=torch_device, dtype=torch.long ) summary = torch.tensor([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], device=torch_device, dtype=torch.long) outputs = lm_model(input_ids=context, decoder_input_ids=summary, labels=summary) expected_shape = (*summary.shape, 
config.vocab_size) self.assertEqual(outputs["logits"].shape, expected_shape) def test_generate_beam_search(self): input_ids = torch.tensor([[71, 82, 2], [68, 34, 2]], device=torch_device, dtype=torch.long) config = MvpConfig( vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, ) lm_model = MvpForConditionalGeneration(config).to(torch_device) lm_model.eval() max_length = 5 generated_ids = lm_model.generate( input_ids.clone(), do_sample=True, num_return_sequences=1, num_beams=2, no_repeat_ngram_size=3, max_length=max_length, ) self.assertEqual(generated_ids.shape, (input_ids.shape[0], max_length)) def test_shift_tokens_right(self): input_ids = torch.tensor([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=torch.long) shifted = shift_tokens_right(input_ids, 1, 2) n_pad_before = input_ids.eq(1).float().sum() n_pad_after = shifted.eq(1).float().sum() self.assertEqual(shifted.shape, input_ids.shape) self.assertEqual(n_pad_after, n_pad_before - 1) self.assertTrue(torch.eq(shifted[:, 0], 2).all()) @slow def test_tokenization(self): tokenizer = MvpTokenizer.from_pretrained("RUCAIBox/mvp") examples = [" Hello world", " DomDramg"] # need leading spaces for equality fairseq_results = [ torch.tensor([0, 20920, 232, 2]), torch.tensor([0, 11349, 495, 4040, 571, 2]), ] for ex, desired_result in zip(examples, fairseq_results): mvp_toks = tokenizer.encode(ex, return_tensors="pt").squeeze() assert_tensors_close(desired_result.long(), mvp_toks, prefix=ex) @require_torch_fp16 def test_generate_fp16(self): config, input_ids, batch_size = self._get_config_and_data() attention_mask = input_ids.ne(1).to(torch_device) model = MvpForConditionalGeneration(config).eval().to(torch_device) model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def test_dummy_inputs(self): config, *_ = self._get_config_and_data() model = MvpForConditionalGeneration(config).eval().to(torch_device) model(**model.dummy_inputs) def test_resize_tokens_embeddings_more(self): config, input_ids, _ = self._get_config_and_data() def _get_embs(m): return (m.get_input_embeddings().weight.data.clone(), m.get_output_embeddings().weight.data.clone()) model = MvpForConditionalGeneration(config).eval().to(torch_device) input, output = _get_embs(model) self.assertTrue(torch.eq(input, output).all()) new_vocab_size = 45 model.resize_token_embeddings(new_vocab_size) input_new, output_new = _get_embs(model) self.assertEqual(input_new.shape, (new_vocab_size, config.d_model)) self.assertEqual(output_new.shape, (new_vocab_size, config.d_model)) self.assertTrue(torch.eq(input_new, output_new).all()) @require_torch class MvpModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (MvpModel, MvpForConditionalGeneration, MvpForSequenceClassification, MvpForQuestionAnswering) if is_torch_available() else () ) all_generative_model_classes = (MvpForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( { "conversational": MvpForConditionalGeneration, "feature-extraction": MvpModel, "fill-mask": MvpForConditionalGeneration, "question-answering": MvpForQuestionAnswering, "summarization": MvpForConditionalGeneration, "text-classification": MvpForSequenceClassification, 
"text-generation": MvpForCausalLM, "text2text-generation": MvpForConditionalGeneration, "translation": MvpForConditionalGeneration, "zero-shot": MvpForSequenceClassification, } if is_torch_available() else {} ) is_encoder_decoder = True fx_compatible = False test_pruning = False test_missing_keys = False # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast") ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def setUp(self): self.model_tester = MvpModelTester(self) self.config_tester = ConfigTester(self, config_class=MvpConfig) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) # MvpForSequenceClassification does not support inputs_embeds def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (MvpModel, MvpForConditionalGeneration, MvpForQuestionAnswering): model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): model(**inputs)[0] @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = MvpForConditionalGeneration(config).eval().to(torch_device) model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def assert_tensors_close(a, b, atol=1e-12, prefix=""): """If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error.""" if a is None and b is None: return True try: if torch.allclose(a, b, atol=atol): return True raise except Exception: pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item() 
if a.numel() > 100: msg = f"tensor values are {pct_different:.1%} percent different." else: msg = f"{a} != {b}" if prefix: msg = prefix + ": " + msg raise AssertionError(msg) def _long_tensor(tok_lst): return torch.tensor(tok_lst, dtype=torch.long, device=torch_device) @require_torch @require_sentencepiece @require_tokenizers class MvpModelIntegrationTests(unittest.TestCase): @cached_property def default_tokenizer(self): return MvpTokenizer.from_pretrained("RUCAIBox/mvp") @slow def test_inference_no_head(self): model = MvpModel.from_pretrained("RUCAIBox/mvp").to(torch_device) input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) attention_mask = input_ids.ne(model.config.pad_token_id) with torch.no_grad(): output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state expected_shape = torch.Size((1, 11, 1024)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[0.3461, 0.3624, 0.2689], [0.3461, 0.3624, 0.2689], [-0.1562, 1.1637, -0.3784]], device=torch_device ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-3)) @slow def test_summarization_inference(self): model = MvpForConditionalGeneration.from_pretrained("RUCAIBox/mvp").to(torch_device) tok = self.default_tokenizer PGE_ARTICLE = """ Listen to local radio broadcasts for advertisements that reference casinos in your area.\nIf none are in your area, listen to national radio broadcasts for advertisements of casinos in other areas.\nNote the location that is mentioned in each advertisement that involves a casino.\nIf no locations are mentioned, note any additional contact information, such as a website or phone number. Use that information to find out where the casinos are.;\n,\n\nIf you learn about more than 1 casino on the radio, use the Internet to search the distance between your location and each casino. Sites such as maps.google.com or mapquest.com will help you in this search.'""" # fmt: skip EXPECTED_SUMMARY = "Listen to the radio.\nUse the Internet." 
dct = tok.batch_encode_plus( [PGE_ARTICLE], return_tensors="pt", ).to(torch_device) hypotheses_batch = model.generate(**dct) decoded = tok.batch_decode(hypotheses_batch, skip_special_tokens=True) self.assertEqual(EXPECTED_SUMMARY, decoded[0]) class MvpStandaloneDecoderModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=2, encoder_attention_heads=4, decoder_attention_heads=4, max_position_embeddings=30, is_encoder_decoder=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None, ): self.parent = parent self.batch_size = batch_size self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.d_model = d_model self.hidden_size = d_model self.num_hidden_layers = decoder_layers self.decoder_layers = decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.encoder_attention_heads = encoder_attention_heads self.decoder_attention_heads = decoder_attention_heads self.num_attention_heads = decoder_attention_heads self.eos_token_id = eos_token_id self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.use_cache = use_cache self.max_position_embeddings = max_position_embeddings self.is_encoder_decoder = is_encoder_decoder self.scope = None self.decoder_key_length = decoder_seq_length self.base_model_out_len = 2 self.decoder_attention_idx = 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = MvpConfig( vocab_size=self.vocab_size, d_model=self.d_model, encoder_layers=self.decoder_layers, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, encoder_attention_heads=self.encoder_attention_heads, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, is_encoder_decoder=self.is_encoder_decoder, ) return ( config, input_ids, attention_mask, lm_labels, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, attention_mask, lm_labels, ) = self.prepare_config_and_inputs() encoder_hidden_states = floats_tensor([self.batch_size, self.decoder_seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, lm_labels, ) def create_and_check_decoder_model_past( self, config, input_ids, attention_mask, lm_labels, ): config.use_cache = True model = MvpDecoder(config=config).to(torch_device).eval() # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) 
self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past_key_values = outputs["past_key_values"] # create hypothetical next token and extend to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def create_and_check_decoder_model_attention_mask_past( self, config, input_ids, attention_mask, lm_labels, ): model = MvpDecoder(config=config).to(torch_device).eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"] # create hypothetical next token and extend to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=attn_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class MvpStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (MvpDecoder, MvpForCausalLM) if is_torch_available() else () all_generative_model_classes = (MvpForCausalLM,) if is_torch_available() else () fx_compatible = True test_pruning = False is_encoder_decoder = False def setUp( self, ): self.model_tester = MvpStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=MvpConfig) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) def test_decoder_model_attn_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs) def test_retain_grad_hidden_states_attentions(self): # decoder cannot keep gradients return
transformers/tests/models/mvp/test_modeling_mvp.py/0
{ "file_path": "transformers/tests/models/mvp/test_modeling_mvp.py", "repo_id": "transformers", "token_count": 15208 }
418
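# A minimal, illustrative sketch (not part of the test module above) of the cached-vs-uncached
# equivalence pattern that MvpStandaloneDecoderModelTester.create_and_check_decoder_model_past
# exercises: a full forward pass over [prompt + next token] should match an incremental pass
# that reuses past_key_values from the prompt. The tiny config values and the import path for
# MvpDecoder are assumptions made for illustration, not the suite's defaults.
import torch

from transformers import MvpConfig
from transformers.models.mvp.modeling_mvp import MvpDecoder

config = MvpConfig(
    vocab_size=99,
    d_model=16,
    encoder_layers=2,
    decoder_layers=2,
    encoder_attention_heads=4,
    decoder_attention_heads=4,
    decoder_ffn_dim=32,
    use_cache=True,
)
decoder = MvpDecoder(config).eval()

prompt = torch.randint(0, config.vocab_size, (1, 7))
next_token = torch.randint(0, config.vocab_size, (1, 1))

with torch.no_grad():
    past = decoder(prompt, use_cache=True)["past_key_values"]
    full = decoder(torch.cat([prompt, next_token], dim=-1))["last_hidden_state"]
    incremental = decoder(next_token, past_key_values=past)["last_hidden_state"]

# The hidden state of the final position must agree between the two code paths.
assert torch.allclose(full[:, -1], incremental[:, 0], atol=1e-3)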
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import tempfile import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from transformers import OneFormerImageProcessor from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle, prepare_metadata from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput if is_vision_available(): from PIL import Image class OneFormerImageProcessorTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, size=None, do_resize=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], num_labels=10, do_reduce_labels=False, ignore_index=255, repo_path="shi-labs/oneformer_demo", class_info_file="ade20k_panoptic.json", num_text=10, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.class_info_file = class_info_file self.num_text = num_text self.repo_path = repo_path # for the post_process_functions self.batch_size = 2 self.num_queries = 10 self.num_classes = 10 self.height = 3 self.width = 4 self.num_labels = num_labels self.do_reduce_labels = do_reduce_labels self.ignore_index = ignore_index def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "num_labels": self.num_labels, "do_reduce_labels": self.do_reduce_labels, "ignore_index": self.ignore_index, "class_info_file": self.class_info_file, "num_text": self.num_text, } def get_expected_values(self, image_inputs, batched=False): """ This function computes the expected height and width when providing images to OneFormerImageProcessor, assuming do_resize is set to True with a scalar size. 
""" if not batched: image = image_inputs[0] if isinstance(image, Image.Image): w, h = image.size else: h, w = image.shape[1], image.shape[2] if w < h: expected_height = int(self.size["shortest_edge"] * h / w) expected_width = self.size["shortest_edge"] elif w > h: expected_height = self.size["shortest_edge"] expected_width = int(self.size["shortest_edge"] * w / h) else: expected_height = self.size["shortest_edge"] expected_width = self.size["shortest_edge"] else: expected_values = [] for image in image_inputs: expected_height, expected_width = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) expected_height = max(expected_values, key=lambda item: item[0])[0] expected_width = max(expected_values, key=lambda item: item[1])[1] return expected_height, expected_width def get_fake_oneformer_outputs(self): return OneFormerForUniversalSegmentationOutput( # +1 for null class class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)), masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)), ) def expected_output_image_shape(self, images): height, width = self.get_expected_values(images, batched=True) return self.num_channels, height, width def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class OneFormerImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None # only for test_image_processing_common.test_image_proc_to_json_string image_processing_class = image_processing_class def setUp(self): self.image_processor_tester = OneFormerImageProcessorTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_proc_properties(self): image_processor = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processor, "image_mean")) self.assertTrue(hasattr(image_processor, "image_std")) self.assertTrue(hasattr(image_processor, "do_normalize")) self.assertTrue(hasattr(image_processor, "do_resize")) self.assertTrue(hasattr(image_processor, "size")) self.assertTrue(hasattr(image_processor, "ignore_index")) self.assertTrue(hasattr(image_processor, "class_info_file")) self.assertTrue(hasattr(image_processor, "num_text")) self.assertTrue(hasattr(image_processor, "repo_path")) self.assertTrue(hasattr(image_processor, "metadata")) self.assertTrue(hasattr(image_processor, "do_reduce_labels")) def comm_get_image_processor_inputs( self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np" ): image_processor = self.image_processing_class(**self.image_processor_dict) # prepare image and target num_labels = self.image_processor_tester.num_labels annotations = None instance_id_to_semantic_id = None image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False) if with_segmentation_maps: high = num_labels if is_instance_map: labels_expanded = list(range(num_labels)) * 2 instance_id_to_semantic_id = dict(enumerate(labels_expanded)) annotations = [ np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs 
] if segmentation_type == "pil": annotations = [Image.fromarray(annotation) for annotation in annotations] inputs = image_processor( image_inputs, ["semantic"] * len(image_inputs), annotations, return_tensors="pt", instance_id_to_semantic_id=instance_id_to_semantic_id, pad_and_return_pixel_mask=True, ) return inputs def test_init_without_params(self): pass def test_call_with_segmentation_maps(self): def common(is_instance_map=False, segmentation_type=None): inputs = self.comm_get_image_processor_inputs( with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type ) mask_labels = inputs["mask_labels"] class_labels = inputs["class_labels"] pixel_values = inputs["pixel_values"] text_inputs = inputs["text_inputs"] # check the batch_size for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs): self.assertEqual(mask_label.shape[0], class_label.shape[0]) # this ensures padding has happened self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:]) self.assertEqual(len(text_input), self.image_processor_tester.num_text) common() common(is_instance_map=True) common(is_instance_map=False, segmentation_type="pil") common(is_instance_map=True, segmentation_type="pil") def test_binary_mask_to_rle(self): fake_binary_mask = np.zeros((20, 50)) fake_binary_mask[0, 20:] = 1 fake_binary_mask[1, :15] = 1 fake_binary_mask[5, :10] = 1 rle = binary_mask_to_rle(fake_binary_mask) self.assertEqual(len(rle), 4) self.assertEqual(rle[0], 21) self.assertEqual(rle[1], 45) def test_post_process_semantic_segmentation(self): feature_extractor = self.image_processing_class( num_labels=self.image_processor_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processor_tester.num_text, repo_path="shi-labs/oneformer_demo", ) outputs = self.image_processor_tester.get_fake_oneformer_outputs() segmentation = feature_extractor.post_process_semantic_segmentation(outputs) self.assertEqual(len(segmentation), self.image_processor_tester.batch_size) self.assertEqual( segmentation[0].shape, ( self.image_processor_tester.height, self.image_processor_tester.width, ), ) target_sizes = [(1, 4) for i in range(self.image_processor_tester.batch_size)] segmentation = feature_extractor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes) self.assertEqual(segmentation[0].shape, target_sizes[0]) def test_post_process_instance_segmentation(self): image_processor = self.image_processing_class( num_labels=self.image_processor_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processor_tester.num_text, repo_path="shi-labs/oneformer_demo", ) outputs = self.image_processor_tester.get_fake_oneformer_outputs() segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0) self.assertTrue(len(segmentation) == self.image_processor_tester.batch_size) for el in segmentation: self.assertTrue("segmentation" in el) self.assertTrue("segments_info" in el) self.assertEqual(type(el["segments_info"]), list) self.assertEqual( el["segmentation"].shape, (self.image_processor_tester.height, self.image_processor_tester.width) ) segmentation_with_opts = image_processor.post_process_instance_segmentation( outputs, threshold=0, target_sizes=[(1, 4) for _ in range(self.image_processor_tester.batch_size)], task_type="panoptic", ) self.assertTrue(len(segmentation_with_opts) == self.image_processor_tester.batch_size) for el in 
segmentation_with_opts: self.assertTrue("segmentation" in el) self.assertTrue("segments_info" in el) self.assertEqual(type(el["segments_info"]), list) self.assertEqual(el["segmentation"].shape, (1, 4)) def test_post_process_panoptic_segmentation(self): image_processor = self.image_processing_class( num_labels=self.image_processor_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processor_tester.num_text, repo_path="shi-labs/oneformer_demo", ) outputs = self.image_processor_tester.get_fake_oneformer_outputs() segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0) self.assertTrue(len(segmentation) == self.image_processor_tester.batch_size) for el in segmentation: self.assertTrue("segmentation" in el) self.assertTrue("segments_info" in el) self.assertEqual(type(el["segments_info"]), list) self.assertEqual( el["segmentation"].shape, (self.image_processor_tester.height, self.image_processor_tester.width) ) def test_can_load_with_local_metadata(self): # Create a temporary json file class_info = { "0": {"isthing": 0, "name": "foo"}, "1": {"isthing": 0, "name": "bar"}, "2": {"isthing": 1, "name": "baz"}, } metadata = prepare_metadata(class_info) with tempfile.TemporaryDirectory() as tmpdirname: metadata_path = os.path.join(tmpdirname, "metadata.json") with open(metadata_path, "w") as f: json.dump(class_info, f) config_dict = self.image_processor_dict config_dict["class_info_file"] = metadata_path config_dict["repo_path"] = tmpdirname image_processor = self.image_processing_class(**config_dict) self.assertEqual(image_processor.metadata, metadata)
transformers/tests/models/oneformer/test_image_processing_oneformer.py/0
{ "file_path": "transformers/tests/models/oneformer/test_image_processing_oneformer.py", "repo_id": "transformers", "token_count": 6432 }
419
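# A small, illustrative sketch (not part of the test module above) of the run-length encoding
# convention that test_binary_mask_to_rle relies on. Assumption, inferred from that test's
# assertions: binary_mask_to_rle flattens the mask row-major and returns alternating
# (1-indexed start offset, run length) values for the runs of ones.
import numpy as np

from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle

mask = np.zeros((2, 5))
mask[0, 3:] = 1  # flattened positions 3-4 are ones
mask[1, :2] = 1  # flattened positions 5-6 are ones, giving one contiguous run of four

rle = binary_mask_to_rle(mask)
print(rle)  # expected under the assumption above: [4, 4] (ones start at offset 4 and run for 4 pixels)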
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch OwlViT model. """ import inspect import os import tempfile import unittest import numpy as np import requests from transformers import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig from transformers.testing_utils import ( require_torch, require_torch_accelerator, require_torch_fp16, require_vision, slow, torch_device, ) from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import OwlViTForObjectDetection, OwlViTModel, OwlViTTextModel, OwlViTVisionModel from transformers.models.owlvit.modeling_owlvit import OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import OwlViTProcessor class OwlViTVisionModelTester: def __init__( self, parent, batch_size=12, image_size=32, patch_size=2, num_channels=3, is_training=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return OwlViTVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = OwlViTVisionModel(config=config).to(torch_device) model.eval() pixel_values = pixel_values.to(torch.float32) with torch.no_grad(): result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) num_patches = (self.image_size // self.patch_size) ** 2 
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class OwlViTVisionModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as OWLVIT does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (OwlViTVisionModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = OwlViTVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=OwlViTVisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="OWLVIT does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="OWL-ViT does not support training yet") def test_training(self): pass @unittest.skip(reason="OWL-ViT does not support training yet") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="OwlViTVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="OwlViTVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = OwlViTVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) class OwlViTTextModelTester: def __init__( self, parent, batch_size=12, num_queries=4, seq_length=16, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=12, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=16, 
initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.num_queries = num_queries self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size * self.num_queries, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size * self.num_queries, self.seq_length]) if input_mask is not None: num_text, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(num_text,)) for idx, start_index in enumerate(rnd_start_indices): input_mask[idx, :start_index] = 1 input_mask[idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return OwlViTTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, input_ids, input_mask): model = OwlViTTextModel(config=config).to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids=input_ids, attention_mask=input_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size * self.num_queries, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size * self.num_queries, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class OwlViTTextModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (OwlViTTextModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_head_masking = False def setUp(self): self.model_tester = OwlViTTextModelTester(self) self.config_tester = ConfigTester(self, config_class=OwlViTTextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="OWL-ViT does not support training yet") def test_training(self): pass @unittest.skip(reason="OWL-ViT does not support training yet") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def 
test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="OWLVIT does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="OwlViTTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="OwlViTTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = OwlViTTextModel.from_pretrained(model_name) self.assertIsNotNone(model) class OwlViTModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = OwlViTTextModelTester(parent, **text_kwargs) self.vision_model_tester = OwlViTVisionModelTester(parent, **vision_kwargs) self.is_training = is_training self.text_config = self.text_model_tester.get_config().to_dict() self.vision_config = self.vision_model_tester.get_config().to_dict() self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return OwlViTConfig.from_text_vision_configs(self.text_config, self.vision_config, projection_dim=64) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = OwlViTModel(config).to(torch_device).eval() with torch.no_grad(): result = model( input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, ) image_logits_size = ( self.vision_model_tester.batch_size, self.text_model_tester.batch_size * self.text_model_tester.num_queries, ) text_logits_size = ( self.text_model_tester.batch_size * self.text_model_tester.num_queries, self.vision_model_tester.batch_size, ) self.parent.assertEqual(result.logits_per_image.shape, image_logits_size) self.parent.assertEqual(result.logits_per_text.shape, text_logits_size) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask, "return_loss": False, } return config, inputs_dict @require_torch class OwlViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (OwlViTModel,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": OwlViTModel, "zero-shot-object-detection": OwlViTForObjectDetection, } if is_torch_available() else {} ) fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False def setUp(self): self.model_tester = OwlViTModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass 
@unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="OwlViTModel does not have input/output embeddings") def test_model_common_attributes(self): pass # override as the `logit_scale` parameter initilization is different for OWLVIT def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: # check if `logit_scale` is initilized as per the original implementation if name == "logit_scale": self.assertAlmostEqual( param.data.item(), np.log(1 / 0.07), delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init).to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] # OWLVIT needs pixel_values traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") loaded_model = loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save OwlViTConfig and check if we can load OwlViTVisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = OwlViTVisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save OwlViTConfig and check if we can load OwlViTTextConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: 
config.save_pretrained(tmp_dir_name) text_config = OwlViTTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) @slow def test_model_from_pretrained(self): for model_name in OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = OwlViTModel.from_pretrained(model_name) self.assertIsNotNone(model) class OwlViTForObjectDetectionTester: def __init__(self, parent, is_training=True): self.parent = parent self.text_model_tester = OwlViTTextModelTester(parent) self.vision_model_tester = OwlViTVisionModelTester(parent) self.is_training = is_training self.text_config = self.text_model_tester.get_config().to_dict() self.vision_config = self.vision_model_tester.get_config().to_dict() self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, pixel_values, input_ids, attention_mask def get_config(self): return OwlViTConfig.from_text_vision_configs(self.text_config, self.vision_config, projection_dim=64) def create_and_check_model(self, config, pixel_values, input_ids, attention_mask): model = OwlViTForObjectDetection(config).to(torch_device).eval() with torch.no_grad(): result = model( pixel_values=pixel_values, input_ids=input_ids, attention_mask=attention_mask, return_dict=True, ) pred_boxes_size = ( self.vision_model_tester.batch_size, (self.vision_model_tester.image_size // self.vision_model_tester.patch_size) ** 2, 4, ) pred_logits_size = ( self.vision_model_tester.batch_size, (self.vision_model_tester.image_size // self.vision_model_tester.patch_size) ** 2, 4, ) pred_class_embeds_size = ( self.vision_model_tester.batch_size, (self.vision_model_tester.image_size // self.vision_model_tester.patch_size) ** 2, self.text_model_tester.hidden_size, ) self.parent.assertEqual(result.pred_boxes.shape, pred_boxes_size) self.parent.assertEqual(result.logits.shape, pred_logits_size) self.parent.assertEqual(result.class_embeds.shape, pred_class_embeds_size) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, input_ids, attention_mask = config_and_inputs inputs_dict = { "pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class OwlViTForObjectDetectionTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (OwlViTForObjectDetection,) if is_torch_available() else () fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False def setUp(self): self.model_tester = OwlViTForObjectDetectionTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="OwlViTModel does not have input/output embeddings") def test_model_common_attributes(self): pass 
@unittest.skip(reason="Test_initialization is tested in individual model tests") def test_initialization(self): pass @unittest.skip(reason="Test_forward_signature is tested in individual model tests") def test_forward_signature(self): pass @unittest.skip(reason="Test_save_load_fast_init_from_base is tested in individual model tests") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="OWL-ViT does not support training yet") def test_training(self): pass @unittest.skip(reason="OWL-ViT does not support training yet") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init).to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] # OWLVIT needs pixel_values traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") loaded_model = loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) @slow def test_model_from_pretrained(self): for model_name in OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = OwlViTForObjectDetection.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @require_vision @require_torch class OwlViTModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "google/owlvit-base-patch32" model = 
OwlViTModel.from_pretrained(model_name).to(torch_device) processor = OwlViTProcessor.from_pretrained(model_name) image = prepare_img() inputs = processor( text=[["a photo of a cat", "a photo of a dog"]], images=image, max_length=16, padding="max_length", return_tensors="pt", ).to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits self.assertEqual( outputs.logits_per_image.shape, torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])), ) self.assertEqual( outputs.logits_per_text.shape, torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), ) expected_logits = torch.tensor([[3.4613, 0.9403]], device=torch_device) self.assertTrue(torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)) @slow def test_inference_object_detection(self): model_name = "google/owlvit-base-patch32" model = OwlViTForObjectDetection.from_pretrained(model_name).to(torch_device) processor = OwlViTProcessor.from_pretrained(model_name) image = prepare_img() inputs = processor( text=[["a photo of a cat", "a photo of a dog"]], images=image, max_length=16, padding="max_length", return_tensors="pt", ).to(torch_device) with torch.no_grad(): outputs = model(**inputs) num_queries = int((model.config.vision_config.image_size / model.config.vision_config.patch_size) ** 2) self.assertEqual(outputs.pred_boxes.shape, torch.Size((1, num_queries, 4))) expected_slice_boxes = torch.tensor( [[0.0691, 0.0445, 0.1373], [0.1592, 0.0456, 0.3192], [0.1632, 0.0423, 0.2478]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)) @slow def test_inference_one_shot_object_detection(self): model_name = "google/owlvit-base-patch32" model = OwlViTForObjectDetection.from_pretrained(model_name).to(torch_device) processor = OwlViTProcessor.from_pretrained(model_name) image = prepare_img() query_image = prepare_img() inputs = processor( images=image, query_images=query_image, max_length=16, padding="max_length", return_tensors="pt", ).to(torch_device) with torch.no_grad(): outputs = model.image_guided_detection(**inputs) num_queries = int((model.config.vision_config.image_size / model.config.vision_config.patch_size) ** 2) self.assertEqual(outputs.target_pred_boxes.shape, torch.Size((1, num_queries, 4))) expected_slice_boxes = torch.tensor( [[0.0691, 0.0445, 0.1373], [0.1592, 0.0456, 0.3192], [0.1632, 0.0423, 0.2478]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.target_pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)) @slow @require_torch_accelerator @require_torch_fp16 def test_inference_one_shot_object_detection_fp16(self): model_name = "google/owlvit-base-patch32" model = OwlViTForObjectDetection.from_pretrained(model_name, torch_dtype=torch.float16).to(torch_device) processor = OwlViTProcessor.from_pretrained(model_name) image = prepare_img() query_image = prepare_img() inputs = processor( images=image, query_images=query_image, max_length=16, padding="max_length", return_tensors="pt", ).to(torch_device) with torch.no_grad(): outputs = model.image_guided_detection(**inputs) # No need to check the logits, we just check inference runs fine. num_queries = int((model.config.vision_config.image_size / model.config.vision_config.patch_size) ** 2) self.assertEqual(outputs.target_pred_boxes.shape, torch.Size((1, num_queries, 4)))
transformers/tests/models/owlvit/test_modeling_owlvit.py/0
{ "file_path": "transformers/tests/models/owlvit/test_modeling_owlvit.py", "repo_id": "transformers", "token_count": 15371 }
420
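# An illustrative usage sketch (not part of the test module above) of the zero-shot detection
# path exercised by OwlViTModelIntegrationTest. The checkpoint name, prompts, and example image
# URL are taken from that test; everything else mirrors its calls and is not an official recipe.
import requests
import torch
from PIL import Image

from transformers import OwlViTForObjectDetection, OwlViTProcessor

model_name = "google/owlvit-base-patch32"
model = OwlViTForObjectDetection.from_pretrained(model_name).eval()
processor = OwlViTProcessor.from_pretrained(model_name)

image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(
    text=[["a photo of a cat", "a photo of a dog"]],
    images=image,
    padding="max_length",
    max_length=16,
    return_tensors="pt",
)

with torch.no_grad():
    outputs = model(**inputs)

# One predicted box per image patch: (image_size / patch_size) ** 2 query positions.
num_patches = (model.config.vision_config.image_size // model.config.vision_config.patch_size) ** 2
assert outputs.pred_boxes.shape == (1, num_patches, 4)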
# coding=utf-8 # Copyright 2024 The Qwen team, Alibaba Group and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Qwen2 model. """ import gc import tempfile import unittest import pytest from transformers import AutoTokenizer, Qwen2Config, is_torch_available, set_seed from transformers.testing_utils import ( backend_empty_cache, require_bitsandbytes, require_flash_attn, require_torch, require_torch_gpu, require_torch_sdpa, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( Qwen2ForCausalLM, Qwen2ForSequenceClassification, Qwen2Model, ) class Qwen2ModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, max_window_layers=3, use_sliding_window=True, sliding_window=2, num_attention_heads=4, num_key_value_heads=2, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, pad_token_id=0, bos_token_id=1, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.max_window_layers = max_window_layers self.use_sliding_window = use_sliding_window self.sliding_window = sliding_window self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.scope = scope # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.prepare_config_and_inputs def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = torch.tril(torch.ones(self.batch_size, self.seq_length)).to(torch_device) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None 
token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return Qwen2Config( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, max_window_layers=self.max_window_layers, use_sliding_window=self.use_sliding_window, sliding_window=self.sliding_window, num_attention_heads=self.num_attention_heads, num_key_value_heads=self.num_key_value_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, bos_token_id=self.bos_token_id, ) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.create_and_check_model with Llama->Qwen2 def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = Qwen2Model(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.create_and_check_model_as_decoder with Llama->Qwen2 def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = Qwen2Model(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.create_and_check_for_causal_lm with Llama->Qwen2 def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = Qwen2ForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.create_and_check_decoder_model_past_large_inputs with Llama->Qwen2 def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = Qwen2ForCausalLM(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( 
input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.prepare_config_and_inputs_for_common def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch # Copied from tests.models.mistral.test_modeling_mistral.MistralModelTest with Mistral->Qwen2 class Qwen2ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (Qwen2Model, Qwen2ForCausalLM, Qwen2ForSequenceClassification) if is_torch_available() else () all_generative_model_classes = (Qwen2ForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": Qwen2Model, "text-classification": Qwen2ForSequenceClassification, "text-generation": Qwen2ForCausalLM, "zero-shot": Qwen2ForSequenceClassification, } if is_torch_available() else {} ) test_headmasking = False test_pruning = False # TODO (ydshieh): Check this. 
See https://app.circleci.com/pipelines/github/huggingface/transformers/79245/workflows/9490ef58-79c2-410d-8f51-e3495156cf9c/jobs/1012146 def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return True def setUp(self): self.model_tester = Qwen2ModelTester(self) self.config_tester = ConfigTester(self, config_class=Qwen2Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_Qwen2_sequence_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() print(config) config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = Qwen2ForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_Qwen2_sequence_classification_model_for_single_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "single_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = Qwen2ForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_Qwen2_sequence_classification_model_for_multi_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "multi_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor( [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float) model = Qwen2ForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) @unittest.skip("Qwen2 buffers include complex numbers, which breaks this test") def test_save_load_fast_init_from_base(self): pass @unittest.skip("Qwen2 uses GQA on all models so the KV cache is a non standard format") def test_past_key_values_format(self): pass @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test @slow def test_flash_attn_2_generate_padding_right(self): import torch for model_class in self.all_generative_model_classes: config, _ = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: 
model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.float16, low_cpu_mem_usage=True).to( torch_device ) dummy_input = torch.LongTensor([[0, 2, 3, 4], [0, 2, 3, 4]]).to(torch_device) dummy_attention_mask = torch.LongTensor([[1, 1, 1, 1], [1, 1, 1, 0]]).to(torch_device) model.generate(dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=1, do_sample=False) model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, attn_implementation="flash_attention_2", low_cpu_mem_usage=True, ).to(torch_device) with self.assertRaises(ValueError): _ = model.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=1, do_sample=False ) @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test @slow def test_flash_attn_2_generate_use_cache(self): import torch max_new_tokens = 30 for model_class in self.all_generative_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() dummy_input = inputs_dict[model_class.main_input_name] if dummy_input.dtype in [torch.float32, torch.bfloat16]: dummy_input = dummy_input.to(torch.float16) # make sure that all models have enough positions for generation if hasattr(config, "max_position_embeddings"): config.max_position_embeddings = max_new_tokens + dummy_input.shape[1] + 1 model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) # NOTE: Qwen2 apparently does not support right padding + use_cache with FA2. dummy_attention_mask[:, -1] = 1 model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, attn_implementation="flash_attention_2", low_cpu_mem_usage=True, ).to(torch_device) # Just test that a large cache works as expected _ = model.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=max_new_tokens, do_sample=False, use_cache=True, ) @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test @slow def test_flash_attn_2_inference_padding_right(self): self.skipTest("Qwen2 flash attention does not support right padding") @require_torch class Qwen2IntegrationTest(unittest.TestCase): @slow def test_model_450m_logits(self): input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338] model = Qwen2ForCausalLM.from_pretrained("Qwen/Qwen2-450m-beta", device_map="auto") input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device) with torch.no_grad(): out = model(input_ids).logits.cpu() # Expected mean on dim = -1 EXPECTED_MEAN = torch.tensor([[-2.5548, -2.5737, -3.0600, -2.5906, -2.8478, -2.8118, -2.9325, -2.7694]]) torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2) # slicing logits[0, 0, 0:30] EXPECTED_SLICE = torch.tensor([-5.8781, -5.8616, -0.1052, -4.7200, -5.8781, -5.8774, -5.8773, -5.8777, -5.8781, -5.8780, -5.8781, -5.8779, -1.0787, 1.7583, -5.8779, -5.8780, -5.8783, -5.8778, -5.8776, -5.8781, -5.8784, -5.8778, -5.8778, -5.8777, -5.8779, -5.8778, -5.8776, -5.8780, -5.8779, -5.8781]) # fmt: skip print(out[0, 0, :30]) torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-4, rtol=1e-4) del model backend_empty_cache(torch_device) gc.collect() @slow def test_model_450m_generation(self): EXPECTED_TEXT_COMPLETION = """My favourite condiment is 100% ketchup. I love it on everything. 
I’m not a big""" prompt = "My favourite condiment is " tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-450m-beta", use_fast=False) model = Qwen2ForCausalLM.from_pretrained("Qwen/Qwen2-450m-beta", device_map="auto") input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device) # greedy generation outputs generated_ids = model.generate(input_ids, max_new_tokens=20, temperature=0) text = tokenizer.decode(generated_ids[0], skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text) del model backend_empty_cache(torch_device) gc.collect() @require_bitsandbytes @slow @require_flash_attn def test_model_450m_long_prompt(self): EXPECTED_OUTPUT_TOKEN_IDS = [306, 338] # An input with 4097 tokens that is above the size of the sliding window input_ids = [1] + [306, 338] * 2048 model = Qwen2ForCausalLM.from_pretrained( "Qwen/Qwen2-450m-beta", device_map="auto", load_in_4bit=True, attn_implementation="flash_attention_2", ) input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device) generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0) self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist()) # Assisted generation assistant_model = model assistant_model.generation_config.num_assistant_tokens = 2 assistant_model.generation_config.num_assistant_tokens_schedule = "constant" generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0) self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist()) del assistant_model del model backend_empty_cache(torch_device) gc.collect() @slow @require_torch_sdpa def test_model_450m_long_prompt_sdpa(self): EXPECTED_OUTPUT_TOKEN_IDS = [306, 338] # An input with 4097 tokens that is above the size of the sliding window input_ids = [1] + [306, 338] * 2048 model = Qwen2ForCausalLM.from_pretrained( "Qwen/Qwen2-450m-beta", device_map="auto", attn_implementation="sdpa", ) input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device) generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0) self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist()) # Assisted generation assistant_model = model assistant_model.generation_config.num_assistant_tokens = 2 assistant_model.generation_config.num_assistant_tokens_schedule = "constant" generated_ids = assistant_model.generate(input_ids, max_new_tokens=4, temperature=0) self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist()) del assistant_model backend_empty_cache(torch_device) gc.collect() EXPECTED_TEXT_COMPLETION = """My favourite condiment is 100% ketchup. I love it on everything. I’m not a big""" prompt = "My favourite condiment is " tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-450m-beta", use_fast=False) input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device) # greedy generation outputs generated_ids = model.generate(input_ids, max_new_tokens=20, temperature=0) text = tokenizer.decode(generated_ids[0], skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text) @slow def test_speculative_generation(self): EXPECTED_TEXT_COMPLETION = ( "My favourite condiment is 100% Sriracha. 
I love the heat, the tang and the fact costs" ) prompt = "My favourite condiment is " tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-7B-beta", use_fast=False) model = Qwen2ForCausalLM.from_pretrained("Qwen/Qwen2-450m-beta", device_map="auto", torch_dtype=torch.float16) assistant_model = Qwen2ForCausalLM.from_pretrained( "Qwen/Qwen2-450m-beta", device_map="auto", torch_dtype=torch.float16 ) input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device) # greedy generation outputs set_seed(0) generated_ids = model.generate( input_ids, max_new_tokens=20, do_sample=True, temperature=0.3, assistant_model=assistant_model ) text = tokenizer.decode(generated_ids[0], skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text) del model backend_empty_cache(torch_device) gc.collect()
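# Minimal illustrative sketch of the cache-consistency pattern exercised by
# create_and_check_decoder_model_past_large_inputs above: logits for appended tokens
# must match whether the model re-reads the full sequence or reuses past_key_values.
# A randomly initialised tiny GPT-2 config is assumed here purely to keep the sketch
# runnable offline; the pattern is the same for any causal LM, Qwen2 included.
import torch
from transformers import GPT2Config, GPT2LMHeadModel

tiny_model = GPT2LMHeadModel(GPT2Config(vocab_size=99, n_embd=32, n_layer=2, n_head=2))
tiny_model.eval()  # disable dropout so both passes are deterministic

prefix = torch.randint(0, 99, (1, 8))
next_tokens = torch.randint(0, 99, (1, 3))

with torch.no_grad():
    past = tiny_model(prefix, use_cache=True).past_key_values
    logits_with_cache = tiny_model(next_tokens, past_key_values=past).logits
    logits_full = tiny_model(torch.cat([prefix, next_tokens], dim=-1)).logits[:, -3:]

# equivalent up to numerical noise, mirroring the torch.allclose check in the tester
assert torch.allclose(logits_with_cache, logits_full, atol=1e-3)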
transformers/tests/models/qwen2/test_modeling_qwen2.py/0
{ "file_path": "transformers/tests/models/qwen2/test_modeling_qwen2.py", "repo_id": "transformers", "token_count": 11541 }
421
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch RegNet model. """ import unittest from transformers import RegNetConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import RegNetForImageClassification, RegNetModel from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class RegNetModelTester: def __init__( self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.embeddings_size = embeddings_size self.hidden_sizes = hidden_sizes self.depths = depths self.is_training = is_training self.use_labels = use_labels self.hidden_act = hidden_act self.num_labels = num_labels self.scope = scope self.num_stages = len(hidden_sizes) def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return RegNetConfig( num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, ) def create_and_check_model(self, config, pixel_values, labels): model = RegNetModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), ) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.num_labels model = RegNetForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, 
unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as RegNet does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( {"image-feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification} if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = RegNetModelTester(self) self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False) def test_config(self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def create_and_test_config_common_properties(self): return @unittest.skip(reason="RegNet does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="RegNet does not support input and output embeddings") def test_model_common_attributes(self): pass def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config=config) for name, module in model.named_modules(): if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)): self.assertTrue( torch.all(module.weight == 1), msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) self.assertTrue( torch.all(module.bias == 0), msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_stages = self.model_tester.num_stages self.assertEqual(len(hidden_states), expected_num_stages + 1) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 2, self.model_tester.image_size // 2], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() layers_type = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: config.layer_type = layer_type inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in 
REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = RegNetModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class RegNetModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None ) @slow def test_inference_image_classification_head(self): model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
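# Quick sketch of the stride arithmetic behind the shape assertion in
# create_and_check_model above (last_hidden_state is B x C x H//32 x W//32): a
# stride-2 stem followed by four stride-2 stages downsamples by 2**5 = 32. The plain
# conv stack below is only a stand-in for RegNet's stem and stages, not its real layers.
import torch
from torch import nn

stand_in = nn.Sequential(
    nn.Conv2d(3, 8, kernel_size=3, stride=2, padding=1),  # stem: /2
    *[nn.Conv2d(8, 8, kernel_size=3, stride=2, padding=1) for _ in range(4)],  # 4 stages: /2 each
)
x = torch.randn(1, 3, 32, 32)  # image_size=32, as in RegNetModelTester
print(stand_in(x).shape)  # torch.Size([1, 8, 1, 1]) -> spatial dims are image_size // 32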
transformers/tests/models/regnet/test_modeling_regnet.py/0
{ "file_path": "transformers/tests/models/regnet/test_modeling_regnet.py", "repo_id": "transformers", "token_count": 4167 }
422
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) # Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTester with Roberta->RobertaPreLayerNorm class FlaxRobertaPreLayerNormModelTester(unittest.TestCase): def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_choices = num_choices def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) config = RobertaPreLayerNormConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, 
initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, attention_mask def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, token_type_ids, attention_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict def prepare_config_and_inputs_for_decoder(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, token_type_ids, attention_mask = config_and_inputs config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,FacebookAI/roberta-base->andreasmadsen/efficient_mlm_m0.40 class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase): test_head_masking = True all_model_classes = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def setUp(self): self.model_tester = FlaxRobertaPreLayerNormModelTester(self) @slow def test_model_from_pretrained(self): for model_class_name in self.all_model_classes: model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True) outputs = model(np.ones((1, 1))) self.assertIsNotNone(outputs) @require_flax class TFRobertaPreLayerNormModelIntegrationTest(unittest.TestCase): @slow def test_inference_masked_lm(self): model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True) input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32) output = model(input_ids)[0] expected_shape = [1, 11, 50265] self.assertEqual(list(output.shape), expected_shape) # compare the actual values for a slice. EXPECTED_SLICE = np.array( [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32 ) self.assertTrue(np.allclose(output[:, :3, :3], EXPECTED_SLICE, atol=1e-4)) @slow def test_inference_no_head(self): model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True) input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32) output = model(input_ids)[0] # compare the actual values for a slice. EXPECTED_SLICE = np.array( [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32 ) self.assertTrue(np.allclose(output[:, :3, :3], EXPECTED_SLICE, atol=1e-4))
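# Small sketch of what the decoder-input helpers in prepare_config_and_inputs_for_decoder
# above produce: ids_tensor([batch, seq], vocab_size=2) is simply a random 0/1 attention
# mask and floats_tensor a random hidden-state tensor, which is all the cross-attention
# shape checks need. Shapes mirror the tester defaults (batch_size=13, seq_length=7,
# hidden_size=32); numpy stands in for the test helpers here.
import numpy as np

rng = np.random.default_rng(0)
encoder_hidden_states = rng.standard_normal((13, 7, 32), dtype=np.float32)
encoder_attention_mask = rng.integers(0, 2, size=(13, 7))  # random mask of 0s and 1s
print(encoder_hidden_states.shape, encoder_attention_mask.shape)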
transformers/tests/models/roberta_prelayernorm/test_modeling_flax_roberta_prelayernorm.py/0
{ "file_path": "transformers/tests/models/roberta_prelayernorm/test_modeling_flax_roberta_prelayernorm.py", "repo_id": "transformers", "token_count": 3449 }
423
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import shutil import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_tf_cross_test, require_tf, require_torch, require_torchvision, require_vision, ) from transformers.utils import is_tf_available, is_torch_available, is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, SamImageProcessor, SamProcessor if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf @require_vision @require_torchvision class SamProcessorTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() image_processor = SamImageProcessor() processor = SamProcessor(image_processor) processor.save_pretrained(self.tmpdirname) def get_image_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor def tearDown(self): shutil.rmtree(self.tmpdirname) def prepare_image_inputs(self): """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True. """ image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)] image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] return image_inputs def prepare_mask_inputs(self): """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True. 
""" mask_inputs = [np.random.randint(255, size=(30, 400), dtype=np.uint8)] mask_inputs = [Image.fromarray(x) for x in mask_inputs] return mask_inputs def test_save_load_pretrained_additional_features(self): processor = SamProcessor(image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0) processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, SamImageProcessor) def test_image_processor_no_masks(self): image_processor = self.get_image_processor() processor = SamProcessor(image_processor=image_processor) image_input = self.prepare_image_inputs() input_feat_extract = image_processor(image_input, return_tensors="np") input_processor = processor(images=image_input, return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) for image in input_feat_extract.pixel_values: self.assertEqual(image.shape, (3, 1024, 1024)) for original_size in input_feat_extract.original_sizes: np.testing.assert_array_equal(original_size, np.array([30, 400])) for reshaped_input_size in input_feat_extract.reshaped_input_sizes: np.testing.assert_array_equal( reshaped_input_size, np.array([77, 1024]) ) # reshaped_input_size value is before padding def test_image_processor_with_masks(self): image_processor = self.get_image_processor() processor = SamProcessor(image_processor=image_processor) image_input = self.prepare_image_inputs() mask_input = self.prepare_mask_inputs() input_feat_extract = image_processor(images=image_input, segmentation_maps=mask_input, return_tensors="np") input_processor = processor(images=image_input, segmentation_maps=mask_input, return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) for label in input_feat_extract.labels: self.assertEqual(label.shape, (256, 256)) @require_torch def test_post_process_masks(self): image_processor = self.get_image_processor() processor = SamProcessor(image_processor=image_processor) dummy_masks = [torch.ones((1, 3, 5, 5))] original_sizes = [[1764, 2646]] reshaped_input_size = [[683, 1024]] masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size) self.assertEqual(masks[0].shape, (1, 3, 1764, 2646)) masks = processor.post_process_masks( dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size) ) self.assertEqual(masks[0].shape, (1, 3, 1764, 2646)) # should also work with np dummy_masks = [np.ones((1, 3, 5, 5))] masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size)) self.assertEqual(masks[0].shape, (1, 3, 1764, 2646)) dummy_masks = [[1, 0], [0, 1]] with self.assertRaises(ValueError): masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size)) @require_vision @require_tf class TFSamProcessorTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() image_processor = SamImageProcessor() processor = SamProcessor(image_processor) processor.save_pretrained(self.tmpdirname) def get_image_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor def 
tearDown(self): shutil.rmtree(self.tmpdirname) def prepare_image_inputs(self): """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True. """ image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)] image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] return image_inputs def test_save_load_pretrained_additional_features(self): processor = SamProcessor(image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0) processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, SamImageProcessor) def test_image_processor(self): image_processor = self.get_image_processor() processor = SamProcessor(image_processor=image_processor) image_input = self.prepare_image_inputs() input_feat_extract = image_processor(image_input, return_tensors="np") input_processor = processor(images=image_input, return_tensors="np") input_feat_extract.pop("original_sizes") # pop original_sizes as it is popped in the processor input_feat_extract.pop("reshaped_input_sizes") # pop reshaped_input_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) @require_tf def test_post_process_masks(self): image_processor = self.get_image_processor() processor = SamProcessor(image_processor=image_processor) dummy_masks = [tf.ones((1, 3, 5, 5))] original_sizes = [[1764, 2646]] reshaped_input_size = [[683, 1024]] masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf") self.assertEqual(masks[0].shape, (1, 3, 1764, 2646)) masks = processor.post_process_masks( dummy_masks, tf.convert_to_tensor(original_sizes), tf.convert_to_tensor(reshaped_input_size), return_tensors="tf", ) self.assertEqual(masks[0].shape, (1, 3, 1764, 2646)) # should also work with np dummy_masks = [np.ones((1, 3, 5, 5))] masks = processor.post_process_masks( dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf" ) self.assertEqual(masks[0].shape, (1, 3, 1764, 2646)) dummy_masks = [[1, 0], [0, 1]] with self.assertRaises(tf.errors.InvalidArgumentError): masks = processor.post_process_masks( dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf" ) @require_vision @require_torchvision class SamProcessorEquivalenceTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() image_processor = SamImageProcessor() processor = SamProcessor(image_processor) processor.save_pretrained(self.tmpdirname) def get_image_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor def tearDown(self): shutil.rmtree(self.tmpdirname) def prepare_image_inputs(self): """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True. 
""" image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)] image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] return image_inputs @is_pt_tf_cross_test def test_post_process_masks_equivalence(self): image_processor = self.get_image_processor() processor = SamProcessor(image_processor=image_processor) dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32) tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)] pt_dummy_masks = [torch.tensor(dummy_masks)] original_sizes = [[1764, 2646]] reshaped_input_size = [[683, 1024]] tf_masks = processor.post_process_masks( tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf" ) pt_masks = processor.post_process_masks( pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt" ) self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy())) @is_pt_tf_cross_test def test_image_processor_equivalence(self): image_processor = self.get_image_processor() processor = SamProcessor(image_processor=image_processor) image_input = self.prepare_image_inputs() pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy() pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy() tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy() tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy() self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor)) self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract)) self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
transformers/tests/models/sam/test_processor_sam.py/0
{ "file_path": "transformers/tests/models/sam/test_processor_sam.py", "repo_id": "transformers", "token_count": 4944 }
424
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Hubert model. """ import math import unittest import pytest from transformers import SEWConfig, is_torch_available from transformers.testing_utils import require_soundfile, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SEWForCTC, SEWForSequenceClassification, SEWModel, Wav2Vec2FeatureExtractor, Wav2Vec2Processor, ) from transformers.models.hubert.modeling_hubert import _compute_mask_indices class SEWModelTester: def __init__( self, parent, batch_size=13, seq_length=1024, # speech is longer is_training=False, hidden_size=32, feat_extract_norm="group", feat_extract_dropout=0.0, feat_extract_activation="gelu", conv_dim=(64, 32, 32), conv_stride=(5, 2, 1), conv_kernel=(10, 3, 1), conv_bias=False, num_conv_pos_embeddings=31, num_conv_pos_embedding_groups=2, squeeze_factor=2, num_hidden_layers=2, num_attention_heads=2, hidden_dropout=0.1, intermediate_size=20, layer_norm_eps=1e-5, hidden_act="gelu", initializer_range=0.02, vocab_size=32, do_stable_layer_norm=False, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_dropout = feat_extract_dropout self.feat_extract_activation = feat_extract_activation self.conv_dim = conv_dim self.conv_stride = conv_stride self.conv_kernel = conv_kernel self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.squeeze_factor = squeeze_factor self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout = hidden_dropout self.intermediate_size = intermediate_size self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.vocab_size = vocab_size self.do_stable_layer_norm = do_stable_layer_norm self.scope = scope output_seq_length = self.seq_length for kernel, stride in zip(self.conv_kernel, self.conv_stride): output_seq_length = (output_seq_length - (kernel - 1)) / stride self.output_seq_length = int(math.ceil(output_seq_length)) self.encoder_seq_length = self.output_seq_length // self.squeeze_factor def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() return config, input_values, attention_mask def get_config(self): return SEWConfig( hidden_size=self.hidden_size, feat_extract_norm=self.feat_extract_norm, 
feat_extract_dropout=self.feat_extract_dropout, feat_extract_activation=self.feat_extract_activation, conv_dim=self.conv_dim, conv_stride=self.conv_stride, conv_kernel=self.conv_kernel, conv_bias=self.conv_bias, num_conv_pos_embeddings=self.num_conv_pos_embeddings, num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, squeeze_factor=self.squeeze_factor, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, hidden_dropout=self.hidden_dropout, intermediate_size=self.intermediate_size, layer_norm_eps=self.layer_norm_eps, hidden_act=self.hidden_act, initializer_range=self.initializer_range, vocab_size=self.vocab_size, ) def create_and_check_model(self, config, input_values, attention_mask): model = SEWModel(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size) ) def create_and_check_batch_inference(self, config, input_values, *args): # test does not pass for models making use of `group_norm` # check: https://github.com/pytorch/fairseq/issues/3227 model = SEWModel(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0.0 batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state for i in range(input_values.shape[0]): input_slice = input_values[i : i + 1, : input_lengths[i]] output = model(input_slice).last_hidden_state batch_output = batch_outputs[i : i + 1, : output.shape[1]] self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3)) def check_ctc_loss(self, config, input_values, *args): model = SEWForCTC(config=config) model.to(torch_device) # make sure that dropout is disabled model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 model.config.ctc_loss_reduction = "sum" sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() model.config.ctc_loss_reduction = "mean" mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() self.parent.assertTrue(isinstance(sum_loss, float)) self.parent.assertTrue(isinstance(mean_loss, float)) def check_ctc_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = SEWForCTC(config=config) model.to(torch_device) model.train() # freeze feature encoder model.freeze_feature_encoder() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 if 
max_length_labels[i] < labels.shape[-1]: # it's important that we make sure that target lengths are at least # one shorter than logit lengths to prevent -inf labels[i, max_length_labels[i] - 1 :] = -100 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_seq_classifier_loss(self, config, input_values, *args): model = SEWForSequenceClassification(config=config) model.to(torch_device) # make sure that dropout is disabled model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() unmasked_loss = model(input_values, labels=labels).loss.item() self.parent.assertTrue(isinstance(masked_loss, float)) self.parent.assertTrue(isinstance(unmasked_loss, float)) self.parent.assertTrue(masked_loss != unmasked_loss) def check_seq_classifier_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = SEWForSequenceClassification(config=config) model.to(torch_device) model.train() # freeze everything but the classification head model.freeze_base_model() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_labels_out_of_vocab(self, config, input_values, *args): model = SEWForCTC(config) model.to(torch_device) model.train() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100) with pytest.raises(ValueError): model(input_values, labels=labels) def prepare_config_and_inputs_for_common(self): config, input_values, attention_mask = self.prepare_config_and_inputs() inputs_dict = {"input_values": input_values, "attention_mask": attention_mask} return config, inputs_dict @require_torch class SEWModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (SEWForCTC, SEWModel, SEWForSequenceClassification) if is_torch_available() else () pipeline_model_mapping = ( { "audio-classification": SEWForSequenceClassification, "automatic-speech-recognition": SEWForCTC, "feature-extraction": SEWModel, } if is_torch_available() else {} ) test_pruning = False test_headmasking = False def setUp(self): self.model_tester = SEWModelTester(self) self.config_tester = ConfigTester(self, config_class=SEWConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_ctc_train(self): config_and_inputs 
= self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) # Hubert has no inputs_embeds def test_inputs_embeds(self): pass # `input_ids` is renamed to `input_values` def test_forward_signature(self): pass # SEW cannot resize token embeddings # since it has no tokens embeddings def test_resize_tokens_embeddings(self): pass # SEW has no inputs_embeds # and thus the `get_input_embeddings` fn # is not implemented def test_model_common_attributes(self): pass def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) # set layer drop to 0 model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_seq_classifier_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_loss(*config_and_inputs) def test_seq_classifier_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_training(*config_and_inputs) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "masked_spec_embed", "quantizer.weight_proj.weight", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: 
module.masked_spec_embed.data.fill_(3) @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass @slow def test_model_from_pretrained(self): model = SEWModel.from_pretrained("asapp/sew-tiny-100k") self.assertIsNotNone(model) @require_torch class SEWUtilsTest(unittest.TestCase): def test_compute_mask_indices(self): batch_size = 4 sequence_length = 60 mask_prob = 0.5 mask_length = 1 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) self.assertListEqual(mask.sum(axis=-1).tolist(), [mask_prob * sequence_length for _ in range(batch_size)]) def test_compute_mask_indices_overlap(self): batch_size = 4 sequence_length = 80 mask_prob = 0.5 mask_length = 4 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) # because of overlap mask don't have to add up exactly to `mask_prob * sequence_length`, but have to be smaller or equal for batch_sum in mask.sum(axis=-1): self.assertTrue(int(batch_sum) <= mask_prob * sequence_length) @require_torch @require_soundfile @slow class SEWModelIntegrationTest(unittest.TestCase): def _load_datasamples(self, num_samples): from datasets import load_dataset ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").filter( lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)] )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_inference_pretrained_batched(self): model = SEWModel.from_pretrained("asapp/sew-tiny-100k").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("asapp/sew-tiny-100k") input_speech = self._load_datasamples(2) inputs = processor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) with torch.no_grad(): outputs = model(input_values).last_hidden_state # expected outputs taken from the original SEW implementation expected_outputs_first = torch.tensor( [ [ [0.1509, 0.5372, 0.3061, -0.1694], [-0.1700, 0.5764, 0.2753, -0.1299], [0.1281, 0.7949, 0.2342, -0.1624], [-0.1627, 0.6710, 0.2215, -0.1317], ], [ [0.0408, 1.4355, 0.8605, -0.0968], [0.0393, 1.2368, 0.6826, 0.0364], [-0.1269, 1.9215, 1.1677, -0.1297], [-0.1654, 1.6524, 0.6877, -0.0196], ], ], device=torch_device, ) expected_outputs_last = torch.tensor( [ [ [1.3379, -0.1450, -0.1500, -0.0515], [0.8364, -0.1680, -0.1248, -0.0689], [1.2791, -0.1507, -0.1523, -0.0564], [0.8208, -0.1690, -0.1199, -0.0751], ], [ [0.6959, -0.0861, -0.1235, -0.0861], [0.4700, -0.1686, -0.1141, -0.1199], [1.0776, -0.1137, -0.0124, -0.0472], [0.5774, -0.1675, -0.0376, -0.0823], ], ], device=torch_device, ) expected_output_sum = 62146.7422 self.assertTrue(torch.allclose(outputs[:, :4, :4], expected_outputs_first, atol=5e-3)) self.assertTrue(torch.allclose(outputs[:, -4:, -4:], expected_outputs_last, atol=5e-3)) self.assertTrue(abs(outputs.sum() - expected_output_sum) < 5) def test_inference_ctc_batched(self): model = SEWForCTC.from_pretrained("asapp/sew-tiny-100k-ft-ls100h").to(torch_device) processor = Wav2Vec2Processor.from_pretrained("asapp/sew-tiny-100k-ft-ls100h", do_lower_case=True) input_speech = self._load_datasamples(2) inputs = processor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) with torch.no_grad(): logits = model(input_values).logits 
predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "swet covered brian's body trickling into the tightloine closs hat was the only garment he wore", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS)
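# Sketch of the greedy CTC decoding that processor.batch_decode applies to the argmax
# ids in test_inference_ctc_batched above: consecutive duplicate ids are merged and the
# blank (pad) id is dropped. The frame ids and blank id below are made-up example values.
import itertools

def greedy_ctc_collapse(frame_ids, blank_id=0):
    # merge consecutive duplicates, then remove blanks
    return [i for i, _ in itertools.groupby(frame_ids) if i != blank_id]

print(greedy_ctc_collapse([0, 7, 7, 0, 0, 3, 3, 3, 0, 7]))  # [7, 3, 7]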
transformers/tests/models/sew/test_modeling_sew.py/0
{ "file_path": "transformers/tests/models/sew/test_modeling_sew.py", "repo_id": "transformers", "token_count": 10134 }
425
# coding=utf-8 # Copyright 2024 BigCode and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Starcoder2 model. """ import tempfile import unittest import pytest from transformers import Starcoder2Config, is_torch_available from transformers.testing_utils import ( require_bitsandbytes, require_flash_attn, require_torch, require_torch_gpu, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( AutoTokenizer, Starcoder2ForCausalLM, Starcoder2ForSequenceClassification, Starcoder2Model, ) # Copied from transformers.tests.models.mistral.test_modeling_mistral.Starcoder2ModelTester with Mistral->Starcoder2 class Starcoder2ModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, num_key_value_heads=2, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, pad_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.pad_token_id = pad_token_id self.scope = scope # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.prepare_config_and_inputs def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = torch.tril(torch.ones(self.batch_size, self.seq_length)).to(torch_device) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = 
ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels # Ignore copy def get_config(self): return Starcoder2Config( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, num_key_value_heads=self.num_key_value_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, eos_token_id=self.pad_token_id, bos_token_id=self.pad_token_id, ) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.create_and_check_model with Llama->Starcoder2 def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = Starcoder2Model(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.create_and_check_model_as_decoder with Llama->Starcoder2 def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = Starcoder2Model(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.create_and_check_for_causal_lm with Llama->Starcoder2 def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = Starcoder2ForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.create_and_check_decoder_model_past_large_inputs with Llama->Starcoder2 def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = Starcoder2ForCausalLM(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # 
create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.prepare_config_and_inputs_for_common def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch # Copied from transformers.tests.models.mistral.test_modeling_mistral.MistralModelTest with Mistral->Starcoder2 class Starcoder2ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (Starcoder2Model, Starcoder2ForCausalLM, Starcoder2ForSequenceClassification) if is_torch_available() else () ) all_generative_model_classes = (Starcoder2ForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": Starcoder2Model, "text-classification": Starcoder2ForSequenceClassification, "text-generation": Starcoder2ForCausalLM, "zero-shot": Starcoder2ForSequenceClassification, } if is_torch_available() else {} ) test_headmasking = False test_pruning = False # TODO (ydshieh): Check this. 
See https://app.circleci.com/pipelines/github/huggingface/transformers/79245/workflows/9490ef58-79c2-410d-8f51-e3495156cf9c/jobs/1012146 def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return True def setUp(self): self.model_tester = Starcoder2ModelTester(self) self.config_tester = ConfigTester(self, config_class=Starcoder2Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_Starcoder2_sequence_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() print(config) config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = Starcoder2ForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_Starcoder2_sequence_classification_model_for_single_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "single_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = Starcoder2ForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_Starcoder2_sequence_classification_model_for_multi_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "multi_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor( [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float) model = Starcoder2ForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) @unittest.skip("Starcoder2 buffers include complex numbers, which breaks this test") def test_save_load_fast_init_from_base(self): pass @unittest.skip("Starcoder2 uses GQA on all models so the KV cache is a non standard format") def test_past_key_values_format(self): pass @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test @slow def test_flash_attn_2_generate_padding_right(self): import torch for model_class in self.all_generative_model_classes: config, _ = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with 
tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.float16, low_cpu_mem_usage=True).to( torch_device ) dummy_input = torch.LongTensor([[0, 2, 3, 4], [0, 2, 3, 4]]).to(torch_device) dummy_attention_mask = torch.LongTensor([[1, 1, 1, 1], [1, 1, 1, 0]]).to(torch_device) model.generate(dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=1, do_sample=False) model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, attn_implementation="flash_attention_2", low_cpu_mem_usage=True, ).to(torch_device) with self.assertRaises(ValueError): _ = model.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=1, do_sample=False ) @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test @slow def test_flash_attn_2_generate_use_cache(self): import torch max_new_tokens = 30 for model_class in self.all_generative_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() dummy_input = inputs_dict[model_class.main_input_name] if dummy_input.dtype in [torch.float32, torch.bfloat16]: dummy_input = dummy_input.to(torch.float16) # make sure that all models have enough positions for generation if hasattr(config, "max_position_embeddings"): config.max_position_embeddings = max_new_tokens + dummy_input.shape[1] + 1 model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) # NOTE: Starcoder2 apparently does not support right padding + use_cache with FA2. dummy_attention_mask[:, -1] = 1 model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, attn_implementation="flash_attention_2", low_cpu_mem_usage=True, ).to(torch_device) # Just test that a large cache works as expected _ = model.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=max_new_tokens, do_sample=False, use_cache=True, ) @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test @slow def test_flash_attn_2_inference_padding_right(self): self.skipTest("Starcoder2 flash attention does not support right padding") @slow @require_torch_gpu class Starcoder2IntegrationTest(unittest.TestCase): def test_starcoder2_batched_generation_sdpa(self): EXPECTED_TEXT = [ "Hello my name is Younes and I am a student at the University of Liverpool. I am currently studying for my MSc in Computer Science. I am interested in the field of Machine Learning and I am currently working on", "def hello_world():\n\treturn 'Hello World!'\n\[email protected]('/hello/<name>')\ndef hello_name(name):\n\treturn 'Hello %s!' % name\n\n@app", ] model_id = "bigcode/starcoder2-7b" model = Starcoder2ForCausalLM.from_pretrained( model_id, torch_dtype=torch.float16, device_map="auto", attn_implementation="sdpa" ) tokenizer = AutoTokenizer.from_pretrained(model_id) tokenizer.pad_token = tokenizer.eos_token text = ["Hello my name is Younes and", "def hello_world():"] inputs = tokenizer(text, return_tensors="pt", padding=True).to(torch_device) output = model.generate(**inputs, max_new_tokens=40, do_sample=False) output_text = tokenizer.batch_decode(output, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT, output_text) def test_starcoder2_batched_generation_eager(self): EXPECTED_TEXT = [ "Hello my name is Younes and I am a student at the University of Liverpool. I am currently studying for my MSc in Computer Science. 
I am interested in the field of Machine Learning and I am currently working on", "def hello_world():\n\treturn 'Hello World!'\n\[email protected]('/hello/<name>')\ndef hello_name(name):\n\treturn 'Hello %s!' % name\n\n@app", ] model_id = "bigcode/starcoder2-7b" model = Starcoder2ForCausalLM.from_pretrained( model_id, torch_dtype=torch.float16, device_map="auto", attn_implementation="eager" ) tokenizer = AutoTokenizer.from_pretrained(model_id) tokenizer.pad_token = tokenizer.eos_token text = ["Hello my name is Younes and", "def hello_world():"] inputs = tokenizer(text, return_tensors="pt", padding=True).to(torch_device) output = model.generate(**inputs, max_new_tokens=40, do_sample=False) output_text = tokenizer.batch_decode(output, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT, output_text) @require_flash_attn def test_starcoder2_batched_generation_fa2(self): EXPECTED_TEXT = [ "Hello my name is Younes and I am a student at the University of Liverpool. I am currently studying for my MSc in Computer Science. I am interested in the field of Machine Learning and I am currently working on", "def hello_world():\n\treturn 'Hello World!'\n\[email protected]('/hello/<name>')\ndef hello_name(name):\n\treturn 'Hello %s!' % name\n\n@app", ] model_id = "bigcode/starcoder2-7b" model = Starcoder2ForCausalLM.from_pretrained( model_id, torch_dtype=torch.float16, device_map="auto", attn_implementation="flash_attention_2" ) tokenizer = AutoTokenizer.from_pretrained(model_id) tokenizer.pad_token = tokenizer.eos_token text = ["Hello my name is Younes and", "def hello_world():"] inputs = tokenizer(text, return_tensors="pt", padding=True).to(torch_device) output = model.generate(**inputs, max_new_tokens=40, do_sample=False) output_text = tokenizer.batch_decode(output, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT, output_text) @require_bitsandbytes def test_starcoder2_batched_generation_4bit(self): EXPECTED_TEXT = [ 'Hello my name is Younes and I am a student at the University of Maryland. I am currently working on a project that is related to the topic of "How to make a game". I am currently working on a project', 'def hello_world():\n\treturn "Hello World"\n\[email protected](\'/hello/<name>\')\ndef hello_name(name):\n\treturn "Hello " + name\n\[email protected]', ] model_id = "bigcode/starcoder2-7b" model = Starcoder2ForCausalLM.from_pretrained(model_id, load_in_4bit=True) tokenizer = AutoTokenizer.from_pretrained(model_id) tokenizer.pad_token = tokenizer.eos_token text = ["Hello my name is Younes and", "def hello_world():"] inputs = tokenizer(text, return_tensors="pt", padding=True).to(torch_device) output = model.generate(**inputs, max_new_tokens=40, do_sample=False) output_text = tokenizer.batch_decode(output, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT, output_text)
transformers/tests/models/starcoder2/test_modeling_starcoder2.py/0
{ "file_path": "transformers/tests/models/starcoder2/test_modeling_starcoder2.py", "repo_id": "transformers", "token_count": 10451 }
426
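# --- Minimal sketch (not part of the test file above): what Starcoder2ModelTester's
# get_config() + create_and_check_model boil down to, assuming a transformers version
# that ships Starcoder2. The tiny hyperparameters mirror the tester defaults; the
# randomly initialized model is only meant to show the expected output shape.
import torch

from transformers import Starcoder2Config, Starcoder2Model

tiny_config = Starcoder2Config(
    vocab_size=99,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    num_key_value_heads=2,  # grouped-query attention: 4 query heads share 2 KV heads
    intermediate_size=37,
    max_position_embeddings=512,
    pad_token_id=0,
)

tiny_model = Starcoder2Model(tiny_config).eval()
input_ids = torch.randint(0, tiny_config.vocab_size, (13, 7))  # (batch_size, seq_length)

with torch.no_grad():
    hidden = tiny_model(input_ids).last_hidden_state

# Same shape assertion as create_and_check_model: (batch_size, seq_length, hidden_size)
assert hidden.shape == (13, 7, 32)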
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class TimmBackboneModelTester: def __init__( self, parent, out_indices=None, out_features=None, stage_names=None, backbone="resnet18", batch_size=3, image_size=32, num_channels=3, is_training=True, use_pretrained_backbone=True, ): self.parent = parent self.out_indices = out_indices if out_indices is not None else [4] self.stage_names = stage_names self.out_features = out_features self.backbone = backbone self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.use_pretrained_backbone = use_pretrained_backbone self.is_training = is_training def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return TimmBackboneConfig( image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone, ) def create_and_check_model(self, config, pixel_values): model = TimmBackbone(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) self.parent.assertEqual( result.feature_map[-1].shape, (self.batch_size, model.channels[-1], 14, 14), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch @require_timm class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TimmBackbone,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {} test_resize_embeddings = False test_head_masking = False test_pruning = False has_attentions = False def setUp(self): self.config_class = PretrainedConfig self.model_tester = TimmBackboneModelTester(self) self.config_tester = ConfigTester(self, config_class=self.config_class, has_text_modality=False) def test_config(self): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() 
self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def test_timm_transformer_backbone_equivalence(self): timm_checkpoint = "resnet18" transformers_checkpoint = "microsoft/resnet-18" timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True) transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint) self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features)) self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names)) self.assertEqual(timm_model.channels, transformers_model.channels) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices, (-1,)) self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1]) timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3]) transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3]) self.assertEqual(timm_model.out_indices, transformers_model.out_indices) self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features)) self.assertEqual(timm_model.channels, transformers_model.channels) @unittest.skip("TimmBackbone doesn't support feed forward chunking") def test_feed_forward_chunking(self): pass @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute") def test_hidden_states_output(self): pass @unittest.skip("TimmBackbone initialization is managed on the timm side") def test_initialization(self): pass @unittest.skip("TimmBackbone models doesn't have inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip("TimmBackbone models doesn't have inputs_embeds") def test_model_common_attributes(self): pass @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint") def test_from_pretrained_no_checkpoint(self): pass @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone") def test_save_load(self): pass @unittest.skip("model weights aren't tied in TimmBackbone.") def test_tie_model_weights(self): pass @unittest.skip("model weights aren't tied in TimmBackbone.") def test_tied_model_weights_key_ignore(self): pass @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone") def test_load_save_without_tied_weights(self): pass @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone") def test_model_weights_reload_no_missing_tied_weights(self): pass @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.") def test_channels(self): pass @unittest.skip("TimmBackbone doesn't support output_attentions.") def test_torchscript_output_attentions(self): pass @unittest.skip("Safetensors is not supported by timm.") def test_can_use_safetensors(self): pass @unittest.skip("Need to use a timm backbone and there is no tiny model available.") def test_model_is_small(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an 
OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = self.has_attentions # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0][-1] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: attentions = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) if self.has_attentions: self.assertIsNotNone(attentions.grad) # TimmBackbone config doesn't have out_features attribute def test_create_from_modified_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() result = model(**inputs_dict) self.assertEqual(len(result.feature_maps), len(config.out_indices)) self.assertEqual(len(model.channels), len(config.out_indices)) # Check output of last stage is taken if out_features=None, out_indices=None modified_config = copy.deepcopy(config) modified_config.out_indices = None model = model_class(modified_config) model.to(torch_device) model.eval() result = model(**inputs_dict) self.assertEqual(len(result.feature_maps), 1) self.assertEqual(len(model.channels), 1) # Check backbone can be initialized with fresh weights modified_config = copy.deepcopy(config) modified_config.use_pretrained_backbone = False model = model_class(modified_config) model.to(torch_device) model.eval() result = model(**inputs_dict)
transformers/tests/models/timm_backbone/test_modeling_timm_backbone.py/0
{ "file_path": "transformers/tests/models/timm_backbone/test_modeling_timm_backbone.py", "repo_id": "transformers", "token_count": 4294 }
427
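# --- Minimal sketch (not part of the test file above) of the timm/transformers backbone
# equivalence that test_timm_transformer_backbone_equivalence checks. Assumes `timm` is
# installed and that the "resnet18" / "microsoft/resnet-18" checkpoints can be downloaded;
# channel counts and feature-map shapes are only printed here, whereas the test asserts
# that they match.
import torch

from transformers import AutoBackbone

timm_backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
hf_backbone = AutoBackbone.from_pretrained("microsoft/resnet-18", out_indices=[1, 2, 3])

pixel_values = torch.rand(1, 3, 224, 224)
with torch.no_grad():
    timm_features = timm_backbone(pixel_values).feature_maps
    hf_features = hf_backbone(pixel_values).feature_maps

# Per-stage channel counts reported by both wrappers should agree.
print(timm_backbone.channels, hf_backbone.channels)
print([tuple(f.shape) for f in timm_features])
print([tuple(f.shape) for f in hf_features])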
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import os import pickle import tempfile import unittest from transformers import UMT5Config, is_torch_available from transformers.models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from transformers.utils import is_torch_fx_available from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_fx_available(): from transformers.utils.fx import symbolic_trace if is_torch_available(): import torch from transformers import ( AutoTokenizer, UMT5EncoderModel, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5ForSequenceClassification, UMT5ForTokenClassification, UMT5Model, ) # Copied from test.models.t5.test_modeling_t5.T5ModelTester with T5->UMT5 class UMT5ModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=7, # For common tests is_training=True, use_attention_mask=True, use_labels=False, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.scope = None self.decoder_layers = decoder_layers def get_large_model_config(self): return UMT5Config.from_pretrained("google/umt5-base") def prepare_inputs_dict( self, config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = input_ids.ne(config.pad_token_id) if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.num_decoder_layers, 
config.num_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones( config.num_decoder_layers, config.num_attention_heads, device=torch_device ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input input_ids = input_ids.clamp(self.pad_token_id + 2) input_ids[:, -1] = self.eos_token_id # Eos Token decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1) config = self.get_config() config.encoder_attention_heads = config.num_attention_heads input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids) return config, input_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_pipeline_config(self): return UMT5Config( vocab_size=166, # t5 forces 100 extra tokens d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) def get_config(self): return UMT5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) def create_and_check_model( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = UMT5Model(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) decoder_output = result.last_hidden_state decoder_past = result.past_key_values encoder_output = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size)) self.parent.assertEqual(decoder_output.size(), (self.batch_size, 
self.decoder_seq_length, self.hidden_size)) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(decoder_past), config.num_layers) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0]), 4) def create_and_check_decoder_model_past( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = UMT5Model(config=config).get_decoder().to(torch_device).eval() # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) output, past_key_values = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_model_fp16_forward( self, config, input_dict, ): model = UMT5Model(config=config).to(torch_device).half().eval() output = model(**input_dict)["last_hidden_state"] self.parent.assertFalse(torch.isnan(output).any().item()) def create_and_check_with_sequence_classification_head( self, config, input_dict, ): labels = torch.tensor([1] * self.batch_size, dtype=torch.long, device=torch_device) model = UMT5ForSequenceClassification(config=config).to(torch_device).eval() outputs = model(**input_dict, labels=labels) # self.parent.assertEqual(len(outputs), 4) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, config.num_labels)) self.parent.assertEqual(outputs["loss"].size(), ()) @require_torch class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (UMT5Model, UMT5ForConditionalGeneration, UMT5ForSequenceClassification, UMT5ForQuestionAnswering) if is_torch_available() else () ) all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( { "conversational": UMT5ForConditionalGeneration, "feature-extraction": UMT5Model, "question-answering": UMT5ForQuestionAnswering, "summarization": UMT5ForConditionalGeneration, "text-classification": UMT5ForSequenceClassification, "text2text-generation": UMT5ForConditionalGeneration, "translation": UMT5ForConditionalGeneration, "zero-shot": UMT5ForSequenceClassification, } if is_torch_available() else {} ) is_encoder_decoder = True fx_compatible = False test_pruning = False test_missing_keys = True test_torchscript = True # The small UMT5 model needs higher percentages for CPU/MP tests model_split_percents = [0.8, 0.9] def setUp(self): self.model_tester = UMT5ModelTester(self) # `QAPipelineTests` is not working well with slow tokenizers (for some models) and we 
don't want to touch the file # `src/transformers/data/processors/squad.py` (where this test fails for this model) def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "QAPipelineTests" and not tokenizer_name.endswith("Fast"): return True return False def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False): if not is_torch_fx_available() or not self.fx_compatible: return configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.return_dict = False for model_class in self.all_model_classes: if model_class.__name__ == "UMT5ForSequenceClassification": continue model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=output_loss) try: if model.config.is_encoder_decoder: model.config.use_cache = False # FSTM still requires this hack -> FSTM should probably be refactored similar to BART afterward labels = inputs.get("labels", None) input_names = [ "attention_mask", "decoder_attention_mask", "decoder_input_ids", "input_features", "input_ids", "input_values", ] if labels is not None: input_names.append("labels") filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} input_names = list(filtered_inputs.keys()) model_output = model(**filtered_inputs) traced_model = symbolic_trace(model, input_names) traced_output = traced_model(**filtered_inputs) else: input_names = [ "attention_mask", "bbox", "input_features", "input_ids", "input_values", "pixel_values", "token_type_ids", "visual_feats", "visual_pos", ] labels = inputs.get("labels", None) start_positions = inputs.get("start_positions", None) end_positions = inputs.get("end_positions", None) if labels is not None: input_names.append("labels") if start_positions is not None: input_names.append("start_positions") if end_positions is not None: input_names.append("end_positions") filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} input_names = list(filtered_inputs.keys()) if model.__class__.__name__ in set(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES.values()) and ( not hasattr(model.config, "problem_type") or model.config.problem_type is None ): model.config.problem_type = "single_label_classification" traced_model = symbolic_trace(model, input_names) traced_output = traced_model(**filtered_inputs) model_output = model(**filtered_inputs) except Exception as e: self.fail(f"Couldn't trace module: {e}") def flatten_output(output): flatten = [] for x in output: if isinstance(x, (tuple, list)): flatten += flatten_output(x) elif not isinstance(x, torch.Tensor): continue else: flatten.append(x) return flatten model_output = flatten_output(model_output) traced_output = flatten_output(traced_output) num_outputs = len(model_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], traced_output[i]), f"traced {i}th output doesn't match model {i}th output for {model_class}", ) # Test that the model can be serialized and restored properly with tempfile.TemporaryDirectory() as tmp_dir_name: pkl_file_name = os.path.join(tmp_dir_name, "model.pkl") try: with open(pkl_file_name, "wb") as f: pickle.dump(traced_model, f) with open(pkl_file_name, "rb") as f: loaded = pickle.load(f) except Exception as e: self.fail(f"Couldn't serialize / deserialize the traced model: {e}") loaded_output = loaded(**filtered_inputs) loaded_output = 
flatten_output(loaded_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], loaded_output[i]), f"serialized model {i}th output doesn't match model {i}th output for {model_class}", ) # Avoid memory leak. Without this, each call increase RAM usage by ~20MB. # (Even with this call, there are still memory leak by ~0.04MB) self.clear_torch_jit_class_registry() # UMT5ForSequenceClassification does not support inputs_embeds def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering): model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): model(**inputs)[0] def test_with_sequence_classification_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_sequence_classification_head(*config_and_inputs) @unittest.skip("Test has a segmentation fault on torch 1.8.0") def test_export_to_onnx(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() model = UMT5Model(config_and_inputs[0]).to(torch_device) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( model, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f"{tmpdirname}/t5_test.onnx", export_params=True, opset_version=9, input_names=["input_ids", "decoder_input_ids"], ) @unittest.skipIf(torch_device == "cpu", "Cant do half precision") def test_model_fp16_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs) def test_generate_with_head_masking(self): attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"] config_and_inputs = self.model_tester.prepare_config_and_inputs() config = config_and_inputs[0] model = UMT5ForConditionalGeneration(config).eval() model.to(torch_device) head_masking = { "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device), "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device), "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device), } for attn_name, (name, mask) in zip(attention_names, head_masking.items()): head_masks = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": head_masks["decoder_head_mask"] = torch.ones( config.num_decoder_layers, config.num_heads, device=torch_device ) out = model.generate( config_and_inputs[1]["input_ids"], num_beams=1, max_length=3, output_attentions=True, return_dict_in_generate=True, **head_masks, ) # We check the state of decoder_attentions and cross_attentions just from the last step attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in 
attn_weights]), 0.0) @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.") def test_disk_offload(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass # Copied from tests.models.t5.test_modeling_t5.T5EncoderOnlyModelTester with T5->UMT5 class UMT5EncoderOnlyModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, # For common tests use_attention_mask=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, is_training=False, dropout_rate=0.1, initializer_factor=0.002, is_encoder_decoder=False, eos_token_id=1, pad_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length # For common tests self.seq_length = self.encoder_seq_length self.use_attention_mask = use_attention_mask self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.is_encoder_decoder = is_encoder_decoder self.scope = None self.is_training = is_training def get_large_model_config(self): return UMT5Config.from_pretrained("google-t5/t5-base") def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) config = UMT5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, is_encoder_decoder=self.is_encoder_decoder, ) return ( config, input_ids, attention_mask, ) def create_and_check_model( self, config, input_ids, attention_mask, ): model = UMT5EncoderModel(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, attention_mask=attention_mask, ) result = model(input_ids=input_ids) encoder_output = result.last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size)) def create_and_check_model_fp16_forward( self, config, input_ids, attention_mask, ): model = UMT5EncoderModel(config=config).to(torch_device).half().eval() output = model(input_ids, attention_mask=attention_mask)["last_hidden_state"] self.parent.assertFalse(torch.isnan(output).any().item()) 
def create_and_check_with_token_classification_head( self, config, input_ids, attention_mask, ): labels = torch.tensor([1] * self.seq_length * self.batch_size, dtype=torch.long, device=torch_device) model = UMT5ForTokenClassification(config=config).to(torch_device).eval() outputs = model( input_ids=input_ids, labels=labels, attention_mask=attention_mask, ) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.seq_length, config.num_labels)) self.parent.assertEqual(outputs["loss"].size(), ()) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict # Copied from tests.models.t5.test_modeling_t5.T5EncoderOnlyModelTest with T5->UMT5 class UMT5EncoderOnlyModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (UMT5EncoderModel, UMT5ForTokenClassification) if is_torch_available() else () test_pruning = False test_resize_embeddings = False test_model_parallel = True pipeline_model_mapping = ( { "token-classification": UMT5ForTokenClassification, } if is_torch_available() else {} ) all_parallelizable_model_classes = (UMT5EncoderModel,) if is_torch_available() else () def setUp(self): self.model_tester = UMT5EncoderOnlyModelTester(self) self.config_tester = ConfigTester(self, config_class=UMT5Config, d_model=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skipIf(torch_device == "cpu", "Cant do half precision") def test_model_fp16_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs) def test_with_token_classification_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_token_classification_head(*config_and_inputs) @require_torch @require_sentencepiece @require_tokenizers class Umt5IntegrationTest(unittest.TestCase): @slow @unittest.skip( "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. 
Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" ) def test_small_integration_test(self): """ For comparison run the kaggle notbook available here : https://www.kaggle.com/arthurzucker/umt5-inference """ model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device) tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False) input_text = [ "Bonjour monsieur <extra_id_0> bien <extra_id_1>.", "No se como puedo <extra_id_0>.", "This is the reason why we <extra_id_0> them.", "The <extra_id_0> walks in <extra_id_1>, seats", "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.", ] input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids # fmt: off EXPECTED_IDS = torch.tensor( [ [ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1], ] ) # fmt: on torch.testing.assert_allclose(input_ids, EXPECTED_IDS) generated_ids = model.generate(input_ids.to(torch_device)) EXPECTED_FILLING = [ "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>", "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", ] filling = tokenizer.batch_decode(generated_ids) self.assertEqual(filling, EXPECTED_FILLING)
transformers/tests/models/umt5/test_modeling_umt5.py/0
{ "file_path": "transformers/tests/models/umt5/test_modeling_umt5.py", "repo_id": "transformers", "token_count": 15619 }
428
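# --- Minimal sketch (not part of the test file above) of the encoder-decoder forward
# pass that UMT5ModelTester.create_and_check_model exercises, using the same tiny
# hyperparameters as get_config(). Weights are randomly initialized, so no checkpoint
# download is needed; only the output shapes are meaningful.
import torch

from transformers import UMT5Config, UMT5Model

tiny_config = UMT5Config(
    vocab_size=99,
    d_model=32,
    d_kv=32 // 4,
    d_ff=37,
    num_layers=2,
    num_heads=4,
    relative_attention_num_buckets=8,
    eos_token_id=1,
    pad_token_id=0,
    decoder_start_token_id=0,
)

tiny_model = UMT5Model(tiny_config).eval()
input_ids = torch.randint(2, tiny_config.vocab_size, (13, 7))          # encoder tokens
decoder_input_ids = torch.randint(1, tiny_config.vocab_size, (13, 7))  # decoder tokens

with torch.no_grad():
    out = tiny_model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)

# Mirrors the shape checks in create_and_check_model.
assert out.last_hidden_state.shape == (13, 7, 32)
assert out.encoder_last_hidden_state.shape == (13, 7, 32)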
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the TensorFlow ViT model. """ from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel from transformers.modeling_tf_utils import keras if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class TFViTModelTester: def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return ViTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values, labels): model = TFViTModel(config=config) result = model(pixel_values, training=False) self.parent.assertEqual(result.last_hidden_state.shape, 
(self.batch_size, self.seq_length, self.hidden_size)) # Test with an image with different size than the one specified in config. image_size = self.image_size // 2 pixel_values = pixel_values[:, :, :image_size, :image_size] result = model(pixel_values, interpolate_pos_encoding=True, training=False) seq_length = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = TFViTForImageClassification(config) result = model(pixel_values, labels=labels, training=False) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) # Test with an image with different size than the one specified in config. image_size = self.image_size // 2 pixel_values = pixel_values[:, :, :image_size, :image_size] result = model(pixel_values, interpolate_pos_encoding=True, training=False) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) # test greyscale images config.num_channels = 1 model = TFViTForImageClassification(config) pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_tf_common.py, as ViT does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () pipeline_model_mapping = ( {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification} if is_tf_available() else {} ) test_resize_embeddings = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFViTModelTester(self) self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="ViT does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="ViT does not use inputs_embeds") def test_graph_mode_with_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (keras.layers.Layer)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, keras.layers.Layer)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model = TFViTModel.from_pretrained("google/vit-base-patch16-224") self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf @require_vision class TFViTModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None @slow def test_inference_image_classification_head(self): model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224") image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="tf") # forward pass outputs = model(**inputs) # verify the logits expected_shape = tf.TensorShape((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = tf.constant([-0.2744, 0.8215, -0.0836]) tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
transformers/tests/models/vit/test_modeling_tf_vit.py/0
{ "file_path": "transformers/tests/models/vit/test_modeling_tf_vit.py", "repo_id": "transformers", "token_count": 3920 }
429
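The TFViT tests above lean on the `interpolate_pos_encoding=True` flag, which resizes the pre-trained position embeddings so the model accepts images at a resolution other than the one in its config. Below is a minimal standalone sketch of that usage, assuming the `google/vit-base-patch16-224` checkpoint named in the tests and an arbitrary 384x384 input; the random tensor is only a shape stand-in, so the logit values themselves are meaningless.

import tensorflow as tf

from transformers import TFViTForImageClassification

model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

# Channels-first pixel values, the same layout the tests build with floats_tensor.
pixel_values = tf.random.uniform((1, 3, 384, 384))

# interpolate_pos_encoding=True lets the 224px checkpoint run on 384px inputs.
outputs = model(pixel_values, interpolate_pos_encoding=True, training=False)
print(outputs.logits.shape)  # (1, 1000)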
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import transformers from transformers import XGLMConfig, XGLMTokenizer, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, require_sentencepiece, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp import numpy as np from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.xglm.modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel if is_torch_available(): import torch @require_flax class FlaxXGLMModelTester: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, d_model=32, num_hidden_layers=2, num_attention_heads=4, ffn_dim=37, activation_function="gelu", activation_dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = d_model self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.ffn_dim = ffn_dim self.activation_function = activation_function self.activation_dropout = activation_dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = None self.bos_token_id = 0 self.eos_token_id = 2 self.pad_token_id = 1 def prepare_config_and_inputs(self): input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length], self.vocab_size), 3, self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) config = XGLMConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, ) return (config, input_ids, input_mask) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict def prepare_config_and_inputs_for_decoder(self): config, 
input_ids, attention_mask = self.prepare_config_and_inputs()

        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_sentencepiece
@require_flax
class FlaxXGLMModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxXGLMModel, FlaxXGLMForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxXGLMForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxXGLMModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )

    @slow
    def test_batch_generation(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        model.config.num_beams = 1
        model.config.do_sample = False

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(inputs["input_ids"], attention_mask=inputs["attention_mask"]).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of questions, but I'm not sure if I'm",
            "Hey, I'm a newbie to the forum and I'",
        ]

        self.assertListEqual(output_string, expected_string)

    # overwrite from common since `attention_mask` in combination
    # with `causal_mask` behaves slightly differently
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                # Flax models don't use the `use_cache` option and cache is not returned as a default.
                # So we disable `use_cache` here for PyTorch model.
                pt_model.config.use_cache = False

                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)

    # overwrite from common since `attention_mask` in combination
    # with `causal_mask` behaves slightly differently
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                pt_model.config.use_cache = False
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/xglm-564M")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
transformers/tests/models/xglm/test_modeling_flax_xglm.py/0
{ "file_path": "transformers/tests/models/xglm/test_modeling_flax_xglm.py", "repo_id": "transformers", "token_count": 7114 }
430
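The `test_batch_generation` test above shows the pattern for compiling Flax generation with `jax.jit`. Pulled out of the test harness, the same flow looks roughly like this; it assumes the `facebook/xglm-564M` checkpoint can be downloaded, and the prompts and greedy settings simply mirror the test.

import jax

from transformers import FlaxXGLMForCausalLM, XGLMTokenizer

tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M", padding_side="left")
model = FlaxXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
model.config.num_beams = 1
model.config.do_sample = False  # greedy decoding, as in the test

inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True)

# Compile generate once; later calls with the same input shapes reuse the compiled function.
jit_generate = jax.jit(model.generate)
sequences = jit_generate(inputs["input_ids"], attention_mask=inputs["attention_mask"]).sequences
print(tokenizer.batch_decode(sequences, skip_special_tokens=True))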
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, OPTForCausalLM from transformers.testing_utils import ( require_bitsandbytes, require_peft, require_torch, require_torch_gpu, slow, torch_device, ) from transformers.utils import is_torch_available if is_torch_available(): import torch @require_peft @require_torch class PeftTesterMixin: peft_test_model_ids = ("peft-internal-testing/tiny-OPTForCausalLM-lora",) transformers_test_model_ids = ("hf-internal-testing/tiny-random-OPTForCausalLM",) transformers_test_model_classes = (AutoModelForCausalLM, OPTForCausalLM) # TODO: run it with CI after PEFT release. @slow class PeftIntegrationTester(unittest.TestCase, PeftTesterMixin): """ A testing suite that makes sure that the PeftModel class is correctly integrated into the transformers library. """ def _check_lora_correctly_converted(self, model): """ Utility method to check if the model has correctly adapters injected on it. """ from peft.tuners.tuners_utils import BaseTunerLayer is_peft_loaded = False for _, m in model.named_modules(): if isinstance(m, BaseTunerLayer): is_peft_loaded = True break return is_peft_loaded def test_peft_from_pretrained(self): """ Simple test that tests the basic usage of PEFT model through `from_pretrained`. This checks if we pass a remote folder that contains an adapter config and adapter weights, it should correctly load a model that has adapters injected on it. """ for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id).to(torch_device) self.assertTrue(self._check_lora_correctly_converted(peft_model)) self.assertTrue(peft_model._hf_peft_config_loaded) # dummy generation _ = peft_model.generate(input_ids=torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device)) def test_peft_state_dict(self): """ Simple test that checks if the returned state dict of `get_adapter_state_dict()` method contains the expected keys. """ for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id).to(torch_device) state_dict = peft_model.get_adapter_state_dict() for key in state_dict.keys(): self.assertTrue("lora" in key) def test_peft_save_pretrained(self): """ Test that checks various combinations of `save_pretrained` with a model that has adapters loaded on it. This checks if the saved model contains the expected files (adapter weights and adapter config). 
""" for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id).to(torch_device) with tempfile.TemporaryDirectory() as tmpdirname: peft_model.save_pretrained(tmpdirname) self.assertTrue("adapter_model.safetensors" in os.listdir(tmpdirname)) self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) self.assertTrue("config.json" not in os.listdir(tmpdirname)) self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname)) self.assertTrue("model.safetensors" not in os.listdir(tmpdirname)) peft_model = transformers_class.from_pretrained(tmpdirname).to(torch_device) self.assertTrue(self._check_lora_correctly_converted(peft_model)) peft_model.save_pretrained(tmpdirname, safe_serialization=False) self.assertTrue("adapter_model.bin" in os.listdir(tmpdirname)) self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) peft_model = transformers_class.from_pretrained(tmpdirname).to(torch_device) self.assertTrue(self._check_lora_correctly_converted(peft_model)) def test_peft_enable_disable_adapters(self): """ A test that checks if `enable_adapters` and `disable_adapters` methods work as expected. """ from peft import LoraConfig dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device) for model_id in self.transformers_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig(init_lora_weights=False) peft_model.add_adapter(peft_config) peft_logits = peft_model(dummy_input).logits peft_model.disable_adapters() peft_logits_disabled = peft_model(dummy_input).logits peft_model.enable_adapters() peft_logits_enabled = peft_model(dummy_input).logits self.assertTrue(torch.allclose(peft_logits, peft_logits_enabled, atol=1e-12, rtol=1e-12)) self.assertFalse(torch.allclose(peft_logits_enabled, peft_logits_disabled, atol=1e-12, rtol=1e-12)) def test_peft_add_adapter(self): """ Simple test that tests if `add_adapter` works as expected """ from peft import LoraConfig for model_id in self.transformers_test_model_ids: for transformers_class in self.transformers_test_model_classes: model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig(init_lora_weights=False) model.add_adapter(peft_config) self.assertTrue(self._check_lora_correctly_converted(model)) # dummy generation _ = model.generate(input_ids=torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device)) def test_peft_add_adapter_from_pretrained(self): """ Simple test that tests if `add_adapter` works as expected """ from peft import LoraConfig for model_id in self.transformers_test_model_ids: for transformers_class in self.transformers_test_model_classes: model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig(init_lora_weights=False) model.add_adapter(peft_config) self.assertTrue(self._check_lora_correctly_converted(model)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_from_pretrained = transformers_class.from_pretrained(tmpdirname).to(torch_device) self.assertTrue(self._check_lora_correctly_converted(model_from_pretrained)) def test_peft_add_adapter_modules_to_save(self): """ Simple test that tests if `add_adapter` works as expected when training with modules to save. 
""" from peft import LoraConfig from peft.utils import ModulesToSaveWrapper for model_id in self.transformers_test_model_ids: for transformers_class in self.transformers_test_model_classes: dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device) model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig(init_lora_weights=False, modules_to_save=["lm_head"]) model.add_adapter(peft_config) self._check_lora_correctly_converted(model) _has_modules_to_save_wrapper = False for name, module in model.named_modules(): if isinstance(module, ModulesToSaveWrapper): _has_modules_to_save_wrapper = True self.assertTrue(module.modules_to_save.default.weight.requires_grad) self.assertTrue("lm_head" in name) break self.assertTrue(_has_modules_to_save_wrapper) state_dict = model.get_adapter_state_dict() self.assertTrue("lm_head.weight" in state_dict.keys()) logits = model(dummy_input).logits loss = logits.mean() loss.backward() for _, param in model.named_parameters(): if param.requires_grad: self.assertTrue(param.grad is not None) def test_peft_add_adapter_training_gradient_checkpointing(self): """ Simple test that tests if `add_adapter` works as expected when training with gradient checkpointing. """ from peft import LoraConfig for model_id in self.transformers_test_model_ids: for transformers_class in self.transformers_test_model_classes: model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig(init_lora_weights=False) model.add_adapter(peft_config) self.assertTrue(self._check_lora_correctly_converted(model)) # When attaching adapters the input embeddings will stay frozen, this will # lead to the output embedding having requires_grad=False. dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device) frozen_output = model.get_input_embeddings()(dummy_input) self.assertTrue(frozen_output.requires_grad is False) model.gradient_checkpointing_enable() # Since here we attached the hook, the input should have requires_grad to set # properly non_frozen_output = model.get_input_embeddings()(dummy_input) self.assertTrue(non_frozen_output.requires_grad is True) # To repro the Trainer issue dummy_input.requires_grad = False for name, param in model.named_parameters(): if "lora" in name.lower(): self.assertTrue(param.requires_grad) logits = model(dummy_input).logits loss = logits.mean() loss.backward() for name, param in model.named_parameters(): if param.requires_grad: self.assertTrue("lora" in name.lower()) self.assertTrue(param.grad is not None) def test_peft_add_multi_adapter(self): """ Simple test that tests the basic usage of PEFT model through `from_pretrained`. This test tests if add_adapter works as expected in multi-adapter setting. 
""" from peft import LoraConfig from peft.tuners.tuners_utils import BaseTunerLayer dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device) for model_id in self.transformers_test_model_ids: for transformers_class in self.transformers_test_model_classes: is_peft_loaded = False model = transformers_class.from_pretrained(model_id).to(torch_device) logits_original_model = model(dummy_input).logits peft_config = LoraConfig(init_lora_weights=False) model.add_adapter(peft_config) logits_adapter_1 = model(dummy_input) model.add_adapter(peft_config, adapter_name="adapter-2") logits_adapter_2 = model(dummy_input) for _, m in model.named_modules(): if isinstance(m, BaseTunerLayer): is_peft_loaded = True break self.assertTrue(is_peft_loaded) # dummy generation _ = model.generate(input_ids=dummy_input) model.set_adapter("default") self.assertTrue(model.active_adapters() == ["default"]) self.assertTrue(model.active_adapter() == "default") model.set_adapter("adapter-2") self.assertTrue(model.active_adapters() == ["adapter-2"]) self.assertTrue(model.active_adapter() == "adapter-2") # Logits comparison self.assertFalse( torch.allclose(logits_adapter_1.logits, logits_adapter_2.logits, atol=1e-6, rtol=1e-6) ) self.assertFalse(torch.allclose(logits_original_model, logits_adapter_2.logits, atol=1e-6, rtol=1e-6)) model.set_adapter(["adapter-2", "default"]) self.assertTrue(model.active_adapters() == ["adapter-2", "default"]) self.assertTrue(model.active_adapter() == "adapter-2") logits_adapter_mixed = model(dummy_input) self.assertFalse( torch.allclose(logits_adapter_1.logits, logits_adapter_mixed.logits, atol=1e-6, rtol=1e-6) ) self.assertFalse( torch.allclose(logits_adapter_2.logits, logits_adapter_mixed.logits, atol=1e-6, rtol=1e-6) ) # multi active adapter saving not supported with self.assertRaises(ValueError), tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) @require_torch_gpu @require_bitsandbytes def test_peft_from_pretrained_kwargs(self): """ Simple test that tests the basic usage of PEFT model through `from_pretrained` + additional kwargs and see if the integraiton behaves as expected. 
""" for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id, load_in_8bit=True, device_map="auto") module = peft_model.model.decoder.layers[0].self_attn.v_proj self.assertTrue(module.__class__.__name__ == "Linear8bitLt") self.assertTrue(peft_model.hf_device_map is not None) # dummy generation _ = peft_model.generate(input_ids=torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device)) @require_torch_gpu @require_bitsandbytes def test_peft_save_quantized(self): """ Simple test that tests the basic usage of PEFT model save_pretrained with quantized base models """ # 4bit for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id, load_in_4bit=True, device_map="auto") module = peft_model.model.decoder.layers[0].self_attn.v_proj self.assertTrue(module.__class__.__name__ == "Linear4bit") self.assertTrue(peft_model.hf_device_map is not None) with tempfile.TemporaryDirectory() as tmpdirname: peft_model.save_pretrained(tmpdirname) self.assertTrue("adapter_model.safetensors" in os.listdir(tmpdirname)) self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname)) self.assertTrue("model.safetensors" not in os.listdir(tmpdirname)) # 8-bit for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id, load_in_8bit=True, device_map="auto") module = peft_model.model.decoder.layers[0].self_attn.v_proj self.assertTrue(module.__class__.__name__ == "Linear8bitLt") self.assertTrue(peft_model.hf_device_map is not None) with tempfile.TemporaryDirectory() as tmpdirname: peft_model.save_pretrained(tmpdirname) self.assertTrue("adapter_model.safetensors" in os.listdir(tmpdirname)) self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname)) self.assertTrue("model.safetensors" not in os.listdir(tmpdirname)) @require_torch_gpu @require_bitsandbytes def test_peft_save_quantized_regression(self): """ Simple test that tests the basic usage of PEFT model save_pretrained with quantized base models Regression test to make sure everything works as expected before the safetensors integration. 
""" # 4bit for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id, load_in_4bit=True, device_map="auto") module = peft_model.model.decoder.layers[0].self_attn.v_proj self.assertTrue(module.__class__.__name__ == "Linear4bit") self.assertTrue(peft_model.hf_device_map is not None) with tempfile.TemporaryDirectory() as tmpdirname: peft_model.save_pretrained(tmpdirname, safe_serialization=False) self.assertTrue("adapter_model.bin" in os.listdir(tmpdirname)) self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname)) self.assertTrue("model.safetensors" not in os.listdir(tmpdirname)) # 8-bit for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id, load_in_8bit=True, device_map="auto") module = peft_model.model.decoder.layers[0].self_attn.v_proj self.assertTrue(module.__class__.__name__ == "Linear8bitLt") self.assertTrue(peft_model.hf_device_map is not None) with tempfile.TemporaryDirectory() as tmpdirname: peft_model.save_pretrained(tmpdirname, safe_serialization=False) self.assertTrue("adapter_model.bin" in os.listdir(tmpdirname)) self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname)) self.assertTrue("model.safetensors" not in os.listdir(tmpdirname)) def test_peft_pipeline(self): """ Simple test that tests the basic usage of PEFT model + pipeline """ from transformers import pipeline for model_id in self.peft_test_model_ids: pipe = pipeline("text-generation", model_id) _ = pipe("Hello") def test_peft_add_adapter_with_state_dict(self): """ Simple test that tests the basic usage of PEFT model through `from_pretrained`. This test tests if add_adapter works as expected with a state_dict being passed. 
""" from peft import LoraConfig dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device) for model_id, peft_model_id in zip(self.transformers_test_model_ids, self.peft_test_model_ids): for transformers_class in self.transformers_test_model_classes: model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig(init_lora_weights=False) with self.assertRaises(ValueError): model.load_adapter(peft_model_id=None) state_dict_path = hf_hub_download(peft_model_id, "adapter_model.bin") dummy_state_dict = torch.load(state_dict_path) model.load_adapter(adapter_state_dict=dummy_state_dict, peft_config=peft_config) with self.assertRaises(ValueError): model.load_adapter(model.load_adapter(adapter_state_dict=dummy_state_dict, peft_config=None)) self.assertTrue(self._check_lora_correctly_converted(model)) # dummy generation _ = model.generate(input_ids=dummy_input) def test_peft_from_pretrained_hub_kwargs(self): """ Tests different combinations of PEFT model + from_pretrained + hub kwargs """ peft_model_id = "peft-internal-testing/tiny-opt-lora-revision" # This should not work with self.assertRaises(OSError): _ = AutoModelForCausalLM.from_pretrained(peft_model_id) adapter_kwargs = {"revision": "test"} # This should work model = AutoModelForCausalLM.from_pretrained(peft_model_id, adapter_kwargs=adapter_kwargs) self.assertTrue(self._check_lora_correctly_converted(model)) model = OPTForCausalLM.from_pretrained(peft_model_id, adapter_kwargs=adapter_kwargs) self.assertTrue(self._check_lora_correctly_converted(model)) adapter_kwargs = {"revision": "main", "subfolder": "test_subfolder"} model = AutoModelForCausalLM.from_pretrained(peft_model_id, adapter_kwargs=adapter_kwargs) self.assertTrue(self._check_lora_correctly_converted(model)) model = OPTForCausalLM.from_pretrained(peft_model_id, adapter_kwargs=adapter_kwargs) self.assertTrue(self._check_lora_correctly_converted(model))
transformers/tests/peft_integration/test_peft_integration.py/0
{ "file_path": "transformers/tests/peft_integration/test_peft_integration.py", "repo_id": "transformers", "token_count": 10456 }
431
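The PEFT integration tests above revolve around a handful of methods that transformers exposes on a plain model once `peft` is installed: `add_adapter`, `enable_adapters`/`disable_adapters`, `set_adapter`/`active_adapters`, and `save_pretrained`, which then writes only the adapter weights and config. A condensed sketch of that flow, using the tiny test checkpoint from the suite and default LoRA settings:

import tempfile

from peft import LoraConfig
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-OPTForCausalLM")

# Inject LoRA layers into the base model; the first adapter is registered as "default".
model.add_adapter(LoraConfig(init_lora_weights=False))
print(model.active_adapters())  # ["default"]

model.disable_adapters()  # temporarily run the base weights only
model.enable_adapters()   # re-activate the LoRA layers

# Only adapter_model.safetensors and adapter_config.json are written, not the base model.
with tempfile.TemporaryDirectory() as tmpdirname:
    model.save_pretrained(tmpdirname)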
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_vision @require_timm @require_torch class ObjectDetectionPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING def get_test_pipeline(self, model, tokenizer, processor): object_detector = ObjectDetectionPipeline(model=model, image_processor=processor) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def run_pipeline_test(self, object_detector, examples): outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0) self.assertGreater(len(outputs), 0) for detected_object in outputs: self.assertEqual( detected_object, { "score": ANY(float), "label": ANY(str), "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)}, }, ) import datasets # we use revision="refs/pr/1" until the PR is merged # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1 dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1") batch = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["image"], # LA dataset[1]["image"], # L dataset[2]["image"], ] batch_outputs = object_detector(batch, threshold=0.0) self.assertEqual(len(batch), len(batch_outputs)) for outputs in batch_outputs: self.assertGreater(len(outputs), 0) for detected_object in outputs: self.assertEqual( detected_object, { "score": ANY(float), "label": ANY(str), "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)}, }, ) @require_tf @unittest.skip("Object detection not implemented in TF") def test_small_model_tf(self): pass @require_torch def test_small_model_pt(self): model_id = "hf-internal-testing/tiny-detr-mobilenetsv3" model = AutoModelForObjectDetection.from_pretrained(model_id) feature_extractor = AutoFeatureExtractor.from_pretrained(model_id) object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor) outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], ) outputs = object_detector( [ 
"http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], threshold=0.0, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], ], ) @require_torch @slow def test_large_model_pt(self): model_id = "facebook/detr-resnet-50" model = AutoModelForObjectDetection.from_pretrained(model_id) feature_extractor = AutoFeatureExtractor.from_pretrained(model_id) object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor) outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ) outputs = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ], ) @require_torch @slow def test_integration_torch_object_detection(self): model_id = "facebook/detr-resnet-50" object_detector = pipeline("object-detection", model=model_id) outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ) outputs = object_detector( [ 
"http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ], ) @require_torch @slow def test_threshold(self): threshold = 0.9985 model_id = "facebook/detr-resnet-50" object_detector = pipeline("object-detection", model=model_id) outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ) @require_torch @require_pytesseract @slow def test_layoutlm(self): model_id = "Narsil/layoutlmv3-finetuned-funsd" threshold = 0.9993 object_detector = pipeline("object-detection", model=model_id, threshold=threshold) outputs = object_detector( "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" ) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}}, {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}}, ], )
transformers/tests/pipelines/test_pipelines_object_detection.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_object_detection.py", "repo_id": "transformers", "token_count": 6048 }
432
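The pipeline tests above all follow the same calling convention; outside the test suite it reduces to a few lines. A short sketch using the `facebook/detr-resnet-50` checkpoint, COCO image URL, and threshold taken from the slow tests:

from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")

predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    threshold=0.9985,  # keep only very confident detections, as in test_threshold
)
for prediction in predictions:
    # Each entry has the shape asserted above: score, label and an xmin/ymin/xmax/ymax box.
    print(prediction["label"], round(prediction["score"], 4), prediction["box"])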
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import shutil import sys import tempfile import unittest from contextlib import contextmanager from pathlib import Path from git import Repo from transformers.testing_utils import CaptureStdout REPO_PATH = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(REPO_PATH, "utils")) import tests_fetcher # noqa: E402 from tests_fetcher import ( # noqa: E402 checkout_commit, clean_code, create_module_to_test_map, create_reverse_dependency_map, create_reverse_dependency_tree, diff_is_docstring_only, extract_imports, get_all_tests, get_diff, get_module_dependencies, get_tree_starting_at, infer_tests_to_run, init_test_examples_dependencies, parse_commit_message, print_tree_deps_of, ) BERT_MODELING_FILE = "src/transformers/models/bert/modeling_bert.py" BERT_MODEL_FILE = """from ...modeling_utils import PreTrainedModel from ...utils import is_torch_available from .configuration_bert import BertConfig class BertModel: ''' This is the docstring. ''' This is the code """ BERT_MODEL_FILE_NEW_DOCSTRING = """from ...modeling_utils import PreTrainedModel from ...utils import is_torch_available from .configuration_bert import BertConfig class BertModel: ''' This is the docstring. It has been updated. ''' This is the code """ BERT_MODEL_FILE_NEW_CODE = """from ...modeling_utils import PreTrainedModel from ...utils import is_torch_available from .configuration_bert import BertConfig class BertModel: ''' This is the docstring. ''' This is the code. It has been updated """ def create_tmp_repo(tmp_dir, models=None): """ Creates a repository in a temporary directory mimicking the structure of Transformers. Uses the list of models provided (which defaults to just `["bert"]`). 
""" tmp_dir = Path(tmp_dir) if tmp_dir.exists(): shutil.rmtree(tmp_dir) tmp_dir.mkdir(exist_ok=True) repo = Repo.init(tmp_dir) if models is None: models = ["bert"] class_names = [model[0].upper() + model[1:] for model in models] transformers_dir = tmp_dir / "src" / "transformers" transformers_dir.mkdir(parents=True, exist_ok=True) with open(transformers_dir / "__init__.py", "w") as f: init_lines = ["from .utils import cached_file, is_torch_available"] init_lines.extend( [f"from .models.{model} import {cls}Config, {cls}Model" for model, cls in zip(models, class_names)] ) f.write("\n".join(init_lines) + "\n") with open(transformers_dir / "configuration_utils.py", "w") as f: f.write("from .utils import cached_file\n\ncode") with open(transformers_dir / "modeling_utils.py", "w") as f: f.write("from .utils import cached_file\n\ncode") utils_dir = tmp_dir / "src" / "transformers" / "utils" utils_dir.mkdir(exist_ok=True) with open(utils_dir / "__init__.py", "w") as f: f.write("from .hub import cached_file\nfrom .imports import is_torch_available\n") with open(utils_dir / "hub.py", "w") as f: f.write("import huggingface_hub\n\ncode") with open(utils_dir / "imports.py", "w") as f: f.write("code") model_dir = tmp_dir / "src" / "transformers" / "models" model_dir.mkdir(parents=True, exist_ok=True) with open(model_dir / "__init__.py", "w") as f: f.write("\n".join([f"import {model}" for model in models])) for model, cls in zip(models, class_names): model_dir = tmp_dir / "src" / "transformers" / "models" / model model_dir.mkdir(parents=True, exist_ok=True) with open(model_dir / "__init__.py", "w") as f: f.write(f"from .configuration_{model} import {cls}Config\nfrom .modeling_{model} import {cls}Model\n") with open(model_dir / f"configuration_{model}.py", "w") as f: f.write("from ...configuration_utils import PretrainedConfig\ncode") with open(model_dir / f"modeling_{model}.py", "w") as f: modeling_code = BERT_MODEL_FILE.replace("bert", model).replace("Bert", cls) f.write(modeling_code) test_dir = tmp_dir / "tests" test_dir.mkdir(exist_ok=True) with open(test_dir / "test_modeling_common.py", "w") as f: f.write("from transformers.modeling_utils import PreTrainedModel\ncode") for model, cls in zip(models, class_names): test_model_dir = test_dir / "models" / model test_model_dir.mkdir(parents=True, exist_ok=True) (test_model_dir / "__init__.py").touch() with open(test_model_dir / f"test_modeling_{model}.py", "w") as f: f.write( f"from transformers import {cls}Config, {cls}Model\nfrom ...test_modeling_common import ModelTesterMixin\n\ncode" ) example_dir = tmp_dir / "examples" example_dir.mkdir(exist_ok=True) for framework in ["flax", "pytorch", "tensorflow"]: framework_dir = example_dir / framework framework_dir.mkdir(exist_ok=True) with open(framework_dir / f"test_{framework}_examples.py", "w") as f: f.write("""test_args = "run_glue.py"\n""") glue_dir = framework_dir / "text-classification" glue_dir.mkdir(exist_ok=True) with open(glue_dir / "run_glue.py", "w") as f: f.write("from transformers import BertModel\n\ncode") repo.index.add(["examples", "src", "tests"]) repo.index.commit("Initial commit") repo.create_head("main") repo.head.reference = repo.refs.main repo.delete_head("master") return repo @contextmanager def patch_transformer_repo_path(new_folder): """ Temporarily patches the variables defines in `tests_fetcher` to use a different location for the repo. 
""" old_repo_path = tests_fetcher.PATH_TO_REPO tests_fetcher.PATH_TO_REPO = Path(new_folder).resolve() tests_fetcher.PATH_TO_EXAMPLES = tests_fetcher.PATH_TO_REPO / "examples" tests_fetcher.PATH_TO_TRANFORMERS = tests_fetcher.PATH_TO_REPO / "src/transformers" tests_fetcher.PATH_TO_TESTS = tests_fetcher.PATH_TO_REPO / "tests" try: yield finally: tests_fetcher.PATH_TO_REPO = old_repo_path tests_fetcher.PATH_TO_EXAMPLES = tests_fetcher.PATH_TO_REPO / "examples" tests_fetcher.PATH_TO_TRANFORMERS = tests_fetcher.PATH_TO_REPO / "src/transformers" tests_fetcher.PATH_TO_TESTS = tests_fetcher.PATH_TO_REPO / "tests" def commit_changes(filenames, contents, repo, commit_message="Commit"): """ Commit new `contents` to `filenames` inside a given `repo`. """ if not isinstance(filenames, list): filenames = [filenames] if not isinstance(contents, list): contents = [contents] folder = Path(repo.working_dir) for filename, content in zip(filenames, contents): with open(folder / filename, "w") as f: f.write(content) repo.index.add(filenames) commit = repo.index.commit(commit_message) return commit.hexsha class TestFetcherTester(unittest.TestCase): def test_checkout_commit(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) repo = create_tmp_repo(tmp_folder) initial_sha = repo.head.commit.hexsha new_sha = commit_changes(BERT_MODELING_FILE, BERT_MODEL_FILE_NEW_DOCSTRING, repo) assert repo.head.commit.hexsha == new_sha with checkout_commit(repo, initial_sha): assert repo.head.commit.hexsha == initial_sha with open(tmp_folder / BERT_MODELING_FILE) as f: assert f.read() == BERT_MODEL_FILE assert repo.head.commit.hexsha == new_sha with open(tmp_folder / BERT_MODELING_FILE) as f: assert f.read() == BERT_MODEL_FILE_NEW_DOCSTRING def test_clean_code(self): # Clean code removes all strings in triple quotes assert clean_code('"""\nDocstring\n"""\ncode\n"""Long string"""\ncode\n') == "code\ncode" assert clean_code("'''\nDocstring\n'''\ncode\n'''Long string'''\ncode\n'''") == "code\ncode" # Clean code removes all comments assert clean_code("code\n# Comment\ncode") == "code\ncode" assert clean_code("code # inline comment\ncode") == "code \ncode" def test_get_all_tests(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): assert get_all_tests() == ["tests/models/bert", "tests/test_modeling_common.py"] def test_get_all_tests_on_full_repo(self): all_tests = get_all_tests() assert "tests/models/albert" in all_tests assert "tests/models/bert" in all_tests assert "tests/repo_utils" in all_tests assert "tests/test_pipeline_mixin.py" in all_tests assert "tests/models" not in all_tests assert "tests/__pycache__" not in all_tests assert "tests/models/albert/test_modeling_albert.py" not in all_tests assert "tests/repo_utils/test_tests_fetcher.py" not in all_tests def test_diff_is_docstring_only(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) repo = create_tmp_repo(tmp_folder) branching_point = repo.refs.main.commit bert_file = BERT_MODELING_FILE commit_changes(bert_file, BERT_MODEL_FILE_NEW_DOCSTRING, repo) assert diff_is_docstring_only(repo, branching_point, bert_file) commit_changes(bert_file, BERT_MODEL_FILE_NEW_CODE, repo) assert not diff_is_docstring_only(repo, branching_point, bert_file) def test_get_diff(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) repo = create_tmp_repo(tmp_folder) initial_commit = 
repo.refs.main.commit bert_file = BERT_MODELING_FILE commit_changes(bert_file, BERT_MODEL_FILE_NEW_DOCSTRING, repo) assert get_diff(repo, repo.head.commit, repo.head.commit.parents) == [] commit_changes(bert_file, BERT_MODEL_FILE_NEW_DOCSTRING + "\n# Adding a comment\n", repo) assert get_diff(repo, repo.head.commit, repo.head.commit.parents) == [] commit_changes(bert_file, BERT_MODEL_FILE_NEW_CODE, repo) assert get_diff(repo, repo.head.commit, repo.head.commit.parents) == [ "src/transformers/models/bert/modeling_bert.py" ] commit_changes("src/transformers/utils/hub.py", "import huggingface_hub\n\nnew code", repo) assert get_diff(repo, repo.head.commit, repo.head.commit.parents) == ["src/transformers/utils/hub.py"] assert get_diff(repo, repo.head.commit, [initial_commit]) == [ "src/transformers/models/bert/modeling_bert.py", "src/transformers/utils/hub.py", ] def test_extract_imports_relative(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) expected_bert_imports = [ ("src/transformers/modeling_utils.py", ["PreTrainedModel"]), ("src/transformers/utils/__init__.py", ["is_torch_available"]), ("src/transformers/models/bert/configuration_bert.py", ["BertConfig"]), ] expected_utils_imports = [ ("src/transformers/utils/hub.py", ["cached_file"]), ("src/transformers/utils/imports.py", ["is_torch_available"]), ] with patch_transformer_repo_path(tmp_folder): assert extract_imports(BERT_MODELING_FILE) == expected_bert_imports assert extract_imports("src/transformers/utils/__init__.py") == expected_utils_imports with open(tmp_folder / BERT_MODELING_FILE, "w") as f: f.write( "from ...utils import cached_file, is_torch_available\nfrom .configuration_bert import BertConfig\n" ) expected_bert_imports = [ ("src/transformers/utils/__init__.py", ["cached_file", "is_torch_available"]), ("src/transformers/models/bert/configuration_bert.py", ["BertConfig"]), ] with patch_transformer_repo_path(tmp_folder): assert extract_imports(BERT_MODELING_FILE) == expected_bert_imports # Test with multi-line imports with open(tmp_folder / BERT_MODELING_FILE, "w") as f: f.write( "from ...utils import (\n cached_file,\n is_torch_available\n)\nfrom .configuration_bert import BertConfig\n" ) expected_bert_imports = [ ("src/transformers/models/bert/configuration_bert.py", ["BertConfig"]), ("src/transformers/utils/__init__.py", ["cached_file", "is_torch_available"]), ] with patch_transformer_repo_path(tmp_folder): assert extract_imports(BERT_MODELING_FILE) == expected_bert_imports def test_extract_imports_absolute(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) with open(tmp_folder / BERT_MODELING_FILE, "w") as f: f.write( "from transformers.utils import cached_file, is_torch_available\nfrom transformers.models.bert.configuration_bert import BertConfig\n" ) expected_bert_imports = [ ("src/transformers/utils/__init__.py", ["cached_file", "is_torch_available"]), ("src/transformers/models/bert/configuration_bert.py", ["BertConfig"]), ] with patch_transformer_repo_path(tmp_folder): assert extract_imports(BERT_MODELING_FILE) == expected_bert_imports # Test with multi-line imports with open(tmp_folder / BERT_MODELING_FILE, "w") as f: f.write( "from transformers.utils import (\n cached_file,\n is_torch_available\n)\nfrom transformers.models.bert.configuration_bert import BertConfig\n" ) expected_bert_imports = [ ("src/transformers/models/bert/configuration_bert.py", ["BertConfig"]), 
("src/transformers/utils/__init__.py", ["cached_file", "is_torch_available"]), ] with patch_transformer_repo_path(tmp_folder): assert extract_imports(BERT_MODELING_FILE) == expected_bert_imports # Test with base imports with open(tmp_folder / BERT_MODELING_FILE, "w") as f: f.write( "from transformers.utils import (\n cached_file,\n is_torch_available\n)\nfrom transformers import BertConfig\n" ) expected_bert_imports = [ ("src/transformers/__init__.py", ["BertConfig"]), ("src/transformers/utils/__init__.py", ["cached_file", "is_torch_available"]), ] with patch_transformer_repo_path(tmp_folder): assert extract_imports(BERT_MODELING_FILE) == expected_bert_imports def test_get_module_dependencies(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) expected_bert_dependencies = [ "src/transformers/modeling_utils.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/utils/imports.py", ] with patch_transformer_repo_path(tmp_folder): assert get_module_dependencies(BERT_MODELING_FILE) == expected_bert_dependencies expected_test_bert_dependencies = [ "tests/test_modeling_common.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", ] with patch_transformer_repo_path(tmp_folder): assert ( get_module_dependencies("tests/models/bert/test_modeling_bert.py") == expected_test_bert_dependencies ) # Test with a submodule (tmp_folder / "src/transformers/utils/logging.py").touch() with open(tmp_folder / BERT_MODELING_FILE, "a") as f: f.write("from ...utils import logging\n") expected_bert_dependencies = [ "src/transformers/modeling_utils.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/utils/logging.py", "src/transformers/utils/imports.py", ] with patch_transformer_repo_path(tmp_folder): assert get_module_dependencies(BERT_MODELING_FILE) == expected_bert_dependencies # Test with an object non-imported in the init create_tmp_repo(tmp_folder) with open(tmp_folder / BERT_MODELING_FILE, "a") as f: f.write("from ...utils import CONSTANT\n") expected_bert_dependencies = [ "src/transformers/modeling_utils.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/utils/__init__.py", "src/transformers/utils/imports.py", ] with patch_transformer_repo_path(tmp_folder): assert get_module_dependencies(BERT_MODELING_FILE) == expected_bert_dependencies # Test with an example create_tmp_repo(tmp_folder) expected_example_dependencies = ["src/transformers/models/bert/modeling_bert.py"] with patch_transformer_repo_path(tmp_folder): assert ( get_module_dependencies("examples/pytorch/text-classification/run_glue.py") == expected_example_dependencies ) def test_create_reverse_dependency_tree(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): tree = create_reverse_dependency_tree() init_edges = [ "src/transformers/utils/hub.py", "src/transformers/utils/imports.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", ] assert {f for f, g in tree if g == "src/transformers/__init__.py"} == set(init_edges) bert_edges = [ "src/transformers/modeling_utils.py", "src/transformers/utils/imports.py", "src/transformers/models/bert/configuration_bert.py", ] assert {f for f, g in tree if g == "src/transformers/models/bert/modeling_bert.py"} == set(bert_edges) test_bert_edges = [ 
"tests/test_modeling_common.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", ] assert {f for f, g in tree if g == "tests/models/bert/test_modeling_bert.py"} == set(test_bert_edges) def test_get_tree_starting_at(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): edges = create_reverse_dependency_tree() bert_tree = get_tree_starting_at("src/transformers/models/bert/modeling_bert.py", edges) config_utils_tree = get_tree_starting_at("src/transformers/configuration_utils.py", edges) expected_bert_tree = [ "src/transformers/models/bert/modeling_bert.py", [("src/transformers/models/bert/modeling_bert.py", "tests/models/bert/test_modeling_bert.py")], ] assert bert_tree == expected_bert_tree expected_config_tree = [ "src/transformers/configuration_utils.py", [("src/transformers/configuration_utils.py", "src/transformers/models/bert/configuration_bert.py")], [ ("src/transformers/models/bert/configuration_bert.py", "tests/models/bert/test_modeling_bert.py"), ( "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", ), ], ] # Order of the edges is random assert [set(v) for v in config_utils_tree] == [set(v) for v in expected_config_tree] def test_print_tree_deps_of(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) # There are two possible outputs since the order of the last two lines is non-deterministic. expected_std_out = """src/transformers/models/bert/modeling_bert.py tests/models/bert/test_modeling_bert.py src/transformers/configuration_utils.py src/transformers/models/bert/configuration_bert.py src/transformers/models/bert/modeling_bert.py tests/models/bert/test_modeling_bert.py""" expected_std_out_2 = """src/transformers/models/bert/modeling_bert.py tests/models/bert/test_modeling_bert.py src/transformers/configuration_utils.py src/transformers/models/bert/configuration_bert.py tests/models/bert/test_modeling_bert.py src/transformers/models/bert/modeling_bert.py""" with patch_transformer_repo_path(tmp_folder), CaptureStdout() as cs: print_tree_deps_of("src/transformers/models/bert/modeling_bert.py") print_tree_deps_of("src/transformers/configuration_utils.py") assert cs.out.strip() in [expected_std_out, expected_std_out_2] def test_init_test_examples_dependencies(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) expected_example_deps = { "examples/flax/test_flax_examples.py": [ "examples/flax/text-classification/run_glue.py", "examples/flax/test_flax_examples.py", ], "examples/pytorch/test_pytorch_examples.py": [ "examples/pytorch/text-classification/run_glue.py", "examples/pytorch/test_pytorch_examples.py", ], "examples/tensorflow/test_tensorflow_examples.py": [ "examples/tensorflow/text-classification/run_glue.py", "examples/tensorflow/test_tensorflow_examples.py", ], } expected_examples = { "examples/flax/test_flax_examples.py", "examples/flax/text-classification/run_glue.py", "examples/pytorch/test_pytorch_examples.py", "examples/pytorch/text-classification/run_glue.py", "examples/tensorflow/test_tensorflow_examples.py", "examples/tensorflow/text-classification/run_glue.py", } with patch_transformer_repo_path(tmp_folder): example_deps, all_examples = init_test_examples_dependencies() assert example_deps == expected_example_deps assert 
{str(f.relative_to(tmp_folder)) for f in all_examples} == expected_examples def test_create_reverse_dependency_map(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): reverse_map = create_reverse_dependency_map() # impact of BERT modeling file (note that we stop at the inits and don't go down further) expected_bert_deps = { "src/transformers/__init__.py", "src/transformers/models/bert/__init__.py", "tests/models/bert/test_modeling_bert.py", "examples/flax/test_flax_examples.py", "examples/flax/text-classification/run_glue.py", "examples/pytorch/test_pytorch_examples.py", "examples/pytorch/text-classification/run_glue.py", "examples/tensorflow/test_tensorflow_examples.py", "examples/tensorflow/text-classification/run_glue.py", } assert set(reverse_map["src/transformers/models/bert/modeling_bert.py"]) == expected_bert_deps # init gets the direct deps (and their recursive deps) expected_init_deps = { "src/transformers/utils/__init__.py", "src/transformers/utils/hub.py", "src/transformers/utils/imports.py", "src/transformers/models/bert/__init__.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", "src/transformers/configuration_utils.py", "src/transformers/modeling_utils.py", "tests/test_modeling_common.py", "tests/models/bert/test_modeling_bert.py", "examples/flax/test_flax_examples.py", "examples/flax/text-classification/run_glue.py", "examples/pytorch/test_pytorch_examples.py", "examples/pytorch/text-classification/run_glue.py", "examples/tensorflow/test_tensorflow_examples.py", "examples/tensorflow/text-classification/run_glue.py", } assert set(reverse_map["src/transformers/__init__.py"]) == expected_init_deps expected_init_deps = { "src/transformers/__init__.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", "tests/models/bert/test_modeling_bert.py", "examples/flax/test_flax_examples.py", "examples/flax/text-classification/run_glue.py", "examples/pytorch/test_pytorch_examples.py", "examples/pytorch/text-classification/run_glue.py", "examples/tensorflow/test_tensorflow_examples.py", "examples/tensorflow/text-classification/run_glue.py", } assert set(reverse_map["src/transformers/models/bert/__init__.py"]) == expected_init_deps # Test that with more models init of bert only gets deps to bert. 
create_tmp_repo(tmp_folder, models=["bert", "gpt2"]) with patch_transformer_repo_path(tmp_folder): reverse_map = create_reverse_dependency_map() # init gets the direct deps (and their recursive deps) expected_init_deps = { "src/transformers/__init__.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", "tests/models/bert/test_modeling_bert.py", "examples/flax/test_flax_examples.py", "examples/flax/text-classification/run_glue.py", "examples/pytorch/test_pytorch_examples.py", "examples/pytorch/text-classification/run_glue.py", "examples/tensorflow/test_tensorflow_examples.py", "examples/tensorflow/text-classification/run_glue.py", } assert set(reverse_map["src/transformers/models/bert/__init__.py"]) == expected_init_deps def test_create_module_to_test_map(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) models = models = ["bert", "gpt2"] + [f"bert{i}" for i in range(10)] create_tmp_repo(tmp_folder, models=models) with patch_transformer_repo_path(tmp_folder): test_map = create_module_to_test_map(filter_models=True) expected_bert_tests = { "examples/flax/test_flax_examples.py", "examples/pytorch/test_pytorch_examples.py", "examples/tensorflow/test_tensorflow_examples.py", "tests/models/bert/test_modeling_bert.py", } for model in models: if model != "bert": assert test_map[f"src/transformers/models/{model}/modeling_{model}.py"] == [ f"tests/models/{model}/test_modeling_{model}.py" ] else: assert set(test_map[f"src/transformers/models/{model}/modeling_{model}.py"]) == expected_bert_tests # Init got filtered expected_init_tests = { "examples/flax/test_flax_examples.py", "examples/pytorch/test_pytorch_examples.py", "examples/tensorflow/test_tensorflow_examples.py", "tests/test_modeling_common.py", "tests/models/bert/test_modeling_bert.py", "tests/models/gpt2/test_modeling_gpt2.py", } assert set(test_map["src/transformers/__init__.py"]) == expected_init_tests def test_infer_tests_to_run(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) models = ["bert", "gpt2"] + [f"bert{i}" for i in range(10)] repo = create_tmp_repo(tmp_folder, models=models) commit_changes("src/transformers/models/bert/modeling_bert.py", BERT_MODEL_FILE_NEW_CODE, repo) example_tests = { "examples/flax/test_flax_examples.py", "examples/pytorch/test_pytorch_examples.py", "examples/tensorflow/test_tensorflow_examples.py", } with patch_transformer_repo_path(tmp_folder): infer_tests_to_run(tmp_folder / "test-output.txt", diff_with_last_commit=True) with open(tmp_folder / "test-output.txt", "r") as f: tests_to_run = f.read() with open(tmp_folder / "examples_test_list.txt", "r") as f: example_tests_to_run = f.read() assert tests_to_run == "tests/models/bert/test_modeling_bert.py" assert set(example_tests_to_run.split(" ")) == example_tests # Fake a new model addition repo = create_tmp_repo(tmp_folder, models=models) branch = repo.create_head("new_model") branch.checkout() with open(tmp_folder / "src/transformers/__init__.py", "a") as f: f.write("from .models.t5 import T5Config, T5Model\n") model_dir = tmp_folder / "src/transformers/models/t5" model_dir.mkdir(exist_ok=True) with open(model_dir / "__init__.py", "w") as f: f.write("from .configuration_t5 import T5Config\nfrom .modeling_t5 import T5Model\n") with open(model_dir / "configuration_t5.py", "w") as f: f.write("from ...configuration_utils import PretrainedConfig\ncode") with open(model_dir / "modeling_t5.py", "w") as f: modeling_code = 
BERT_MODEL_FILE.replace("bert", "t5").replace("Bert", "T5") f.write(modeling_code) test_dir = tmp_folder / "tests/models/t5" test_dir.mkdir(exist_ok=True) (test_dir / "__init__.py").touch() with open(test_dir / "test_modeling_t5.py", "w") as f: f.write( "from transformers import T5Config, T5Model\nfrom ...test_modeling_common import ModelTesterMixin\n\ncode" ) repo.index.add(["src", "tests"]) repo.index.commit("Add T5 model") with patch_transformer_repo_path(tmp_folder): infer_tests_to_run(tmp_folder / "test-output.txt") with open(tmp_folder / "test-output.txt", "r") as f: tests_to_run = f.read() with open(tmp_folder / "examples_test_list.txt", "r") as f: example_tests_to_run = f.read() expected_tests = { "tests/models/bert/test_modeling_bert.py", "tests/models/gpt2/test_modeling_gpt2.py", "tests/models/t5/test_modeling_t5.py", "tests/test_modeling_common.py", } assert set(tests_to_run.split(" ")) == expected_tests assert set(example_tests_to_run.split(" ")) == example_tests with patch_transformer_repo_path(tmp_folder): infer_tests_to_run(tmp_folder / "test-output.txt", filter_models=False) with open(tmp_folder / "test-output.txt", "r") as f: tests_to_run = f.read() with open(tmp_folder / "examples_test_list.txt", "r") as f: example_tests_to_run = f.read() expected_tests = [f"tests/models/{name}/test_modeling_{name}.py" for name in models + ["t5"]] expected_tests = set(expected_tests + ["tests/test_modeling_common.py"]) assert set(tests_to_run.split(" ")) == expected_tests assert set(example_tests_to_run.split(" ")) == example_tests def test_infer_tests_to_run_with_test_modifs(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) models = ["bert", "gpt2"] + [f"bert{i}" for i in range(10)] repo = create_tmp_repo(tmp_folder, models=models) commit_changes( "tests/models/bert/test_modeling_bert.py", "from transformers import BertConfig, BertModel\nfrom ...test_modeling_common import ModelTesterMixin\n\ncode1", repo, ) with patch_transformer_repo_path(tmp_folder): infer_tests_to_run(tmp_folder / "test-output.txt", diff_with_last_commit=True) with open(tmp_folder / "test-output.txt", "r") as f: tests_to_run = f.read() assert tests_to_run == "tests/models/bert/test_modeling_bert.py" def test_infer_tests_to_run_with_examples_modifs(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) models = ["bert", "gpt2"] repo = create_tmp_repo(tmp_folder, models=models) # Modification in one example trigger the corresponding test commit_changes( "examples/pytorch/text-classification/run_glue.py", "from transformers import BertModeln\n\ncode1", repo, ) with patch_transformer_repo_path(tmp_folder): infer_tests_to_run(tmp_folder / "test-output.txt", diff_with_last_commit=True) with open(tmp_folder / "examples_test_list.txt", "r") as f: example_tests_to_run = f.read() assert example_tests_to_run == "examples/pytorch/test_pytorch_examples.py" # Modification in one test example file trigger that test repo = create_tmp_repo(tmp_folder, models=models) commit_changes( "examples/pytorch/test_pytorch_examples.py", """test_args = "run_glue.py"\nmore_code""", repo, ) with patch_transformer_repo_path(tmp_folder): infer_tests_to_run(tmp_folder / "test-output.txt", diff_with_last_commit=True) with open(tmp_folder / "examples_test_list.txt", "r") as f: example_tests_to_run = f.read() assert example_tests_to_run == "examples/pytorch/test_pytorch_examples.py" def test_parse_commit_message(self): assert parse_commit_message("Normal commit") == {"skip": False, 
"no_filter": False, "test_all": False} assert parse_commit_message("[skip ci] commit") == {"skip": True, "no_filter": False, "test_all": False} assert parse_commit_message("[ci skip] commit") == {"skip": True, "no_filter": False, "test_all": False} assert parse_commit_message("[skip-ci] commit") == {"skip": True, "no_filter": False, "test_all": False} assert parse_commit_message("[skip_ci] commit") == {"skip": True, "no_filter": False, "test_all": False} assert parse_commit_message("[no filter] commit") == {"skip": False, "no_filter": True, "test_all": False} assert parse_commit_message("[no-filter] commit") == {"skip": False, "no_filter": True, "test_all": False} assert parse_commit_message("[no_filter] commit") == {"skip": False, "no_filter": True, "test_all": False} assert parse_commit_message("[filter-no] commit") == {"skip": False, "no_filter": True, "test_all": False} assert parse_commit_message("[test all] commit") == {"skip": False, "no_filter": False, "test_all": True} assert parse_commit_message("[all test] commit") == {"skip": False, "no_filter": False, "test_all": True} assert parse_commit_message("[test-all] commit") == {"skip": False, "no_filter": False, "test_all": True} assert parse_commit_message("[all_test] commit") == {"skip": False, "no_filter": False, "test_all": True}
transformers/tests/repo_utils/test_tests_fetcher.py/0
{ "file_path": "transformers/tests/repo_utils/test_tests_fetcher.py", "repo_id": "transformers", "token_count": 18132 }
433
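The `tests/repo_utils/test_tests_fetcher.py` entry above ends with `test_parse_commit_message`, which pins down how CI-control flags in commit messages are normalized. The sketch below is not the actual `utils/tests_fetcher.py` implementation — the bracket regex and the normalization steps are assumptions — but it reproduces the behavior those assertions describe:

```py
import re


def parse_commit_message_sketch(commit_message: str) -> dict:
    """Rough equivalent of the flag parsing exercised by `test_parse_commit_message`."""
    default = {"skip": False, "no_filter": False, "test_all": False}
    if not commit_message:
        return default

    # Assumed: the flags live in the first [...] group of the commit title.
    command_search = re.search(r"\[([^\]]*)\]", commit_message)
    if command_search is None:
        return default

    # Normalize "[skip-ci]", "[skip_ci]", "[ci skip]", ... into a bag of words.
    words = set(command_search.group(1).lower().replace("-", " ").replace("_", " ").split())
    return {
        "skip": words == {"skip", "ci"},
        "no_filter": words == {"no", "filter"},
        "test_all": words == {"test", "all"},
    }


assert parse_commit_message_sketch("Normal commit") == {"skip": False, "no_filter": False, "test_all": False}
assert parse_commit_message_sketch("[ci skip] commit")["skip"]
assert parse_commit_message_sketch("[filter-no] commit")["no_filter"]
assert parse_commit_message_sketch("[all_test] commit")["test_all"]
```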
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import shutil import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoConfig, BertConfig, GPT2Config from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import TOKEN, USER, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 config_common_kwargs = { "return_dict": False, "output_hidden_states": True, "output_attentions": True, "torchscript": True, "torch_dtype": "float16", "use_bfloat16": True, "tf_legacy_loss": True, "pruned_heads": {"a": 1}, "tie_word_embeddings": False, "is_decoder": True, "cross_attention_hidden_size": 128, "add_cross_attention": True, "tie_encoder_decoder": True, "max_length": 50, "min_length": 3, "do_sample": True, "early_stopping": True, "num_beams": 3, "num_beam_groups": 3, "diversity_penalty": 0.5, "temperature": 2.0, "top_k": 10, "top_p": 0.7, "typical_p": 0.2, "repetition_penalty": 0.8, "length_penalty": 0.8, "no_repeat_ngram_size": 5, "encoder_no_repeat_ngram_size": 5, "bad_words_ids": [1, 2, 3], "num_return_sequences": 3, "chunk_size_feed_forward": 5, "output_scores": True, "return_dict_in_generate": True, "forced_bos_token_id": 2, "forced_eos_token_id": 3, "remove_invalid_values": True, "architectures": ["BertModel"], "finetuning_task": "translation", "id2label": {0: "label"}, "label2id": {"label": "0"}, "tokenizer_class": "BertTokenizerFast", "prefix": "prefix", "bos_token_id": 6, "pad_token_id": 7, "eos_token_id": 8, "sep_token_id": 9, "decoder_start_token_id": 10, "exponential_decay_length_penalty": (5, 1.01), "suppress_tokens": [0, 1], "begin_suppress_tokens": 2, "task_specific_params": {"translation": "some_params"}, "problem_type": "regression", } @is_staging_test class ConfigPushToHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-config") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-config-org") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="test-dynamic-config") except HTTPError: pass def test_push_to_hub(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) config.push_to_hub("test-config", token=self._token) new_config = BertConfig.from_pretrained(f"{USER}/test-config") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) # Reset repo delete_repo(token=self._token, repo_id="test-config") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(tmp_dir, repo_id="test-config", 
push_to_hub=True, token=self._token) new_config = BertConfig.from_pretrained(f"{USER}/test-config") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) def test_push_to_hub_in_organization(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) config.push_to_hub("valid_org/test-config-org", token=self._token) new_config = BertConfig.from_pretrained("valid_org/test-config-org") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) # Reset repo delete_repo(token=self._token, repo_id="valid_org/test-config-org") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, token=self._token) new_config = BertConfig.from_pretrained("valid_org/test-config-org") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) def test_push_to_hub_dynamic_config(self): CustomConfig.register_for_auto_class() config = CustomConfig(attribute=42) config.push_to_hub("test-dynamic-config", token=self._token) # This has added the proper auto_map field to the config self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"}) new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True) # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module self.assertEqual(new_config.__class__.__name__, "CustomConfig") self.assertEqual(new_config.attribute, 42) class ConfigTestUtils(unittest.TestCase): def test_config_from_string(self): c = GPT2Config() # attempt to modify each of int/float/bool/str config records and verify they were updated n_embd = c.n_embd + 1 # int resid_pdrop = c.resid_pdrop + 1.0 # float scale_attn_weights = not c.scale_attn_weights # bool summary_type = c.summary_type + "foo" # str c.update_from_string( f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}" ) self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd") self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop") self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights") self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type") def test_config_common_kwargs_is_complete(self): base_config = PretrainedConfig() missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs] # If this part of the test fails, you have arguments to addin config_common_kwargs above. self.assertListEqual( missing_keys, [ "is_encoder_decoder", "_name_or_path", "_commit_hash", "_attn_implementation_internal", "transformers_version", ], ) keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)] if len(keys_with_defaults) > 0: raise ValueError( "The following keys are set with the default values in" " `test_configuration_common.config_common_kwargs` pick another value for them:" f" {', '.join(keys_with_defaults)}." 
) def test_nested_config_load_from_dict(self): config = AutoConfig.from_pretrained( "hf-internal-testing/tiny-random-CLIPModel", text_config={"num_hidden_layers": 2} ) self.assertNotIsInstance(config.text_config, dict) self.assertEqual(config.text_config.__class__.__name__, "CLIPTextConfig") def test_from_pretrained_subfolder(self): with self.assertRaises(OSError): # config is in subfolder, the following should not work without specifying the subfolder _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder") config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert") self.assertIsNotNone(config) def test_cached_files_are_used_when_internet_is_down(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} # Download this model to make sure it's in the cache. _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request", return_value=response_mock) as mock_head: _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") # This check we did call the fake head request mock_head.assert_called() def test_local_versioning(self): configuration = AutoConfig.from_pretrained("google-bert/bert-base-cased") configuration.configuration_files = ["config.4.0.0.json"] with tempfile.TemporaryDirectory() as tmp_dir: configuration.save_pretrained(tmp_dir) configuration.hidden_size = 2 json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w")) # This should pick the new configuration file as the version of Transformers is > 4.0.0 new_configuration = AutoConfig.from_pretrained(tmp_dir) self.assertEqual(new_configuration.hidden_size, 2) # Will need to be adjusted if we reach v42 and this test is still here. # Should pick the old configuration file as the version of Transformers is < 4.42.0 configuration.configuration_files = ["config.42.0.0.json"] configuration.hidden_size = 768 configuration.save_pretrained(tmp_dir) shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json")) new_configuration = AutoConfig.from_pretrained(tmp_dir) self.assertEqual(new_configuration.hidden_size, 768) def test_repo_versioning_before(self): # This repo has two configuration files, one for v4.0.0 and above with a different hidden size. repo = "hf-internal-testing/test-two-configs" import transformers as new_transformers new_transformers.configuration_utils.__version__ = "v4.0.0" new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained( repo, return_unused_kwargs=True ) self.assertEqual(new_configuration.hidden_size, 2) # This checks `_configuration_file` ia not kept in the kwargs by mistake. self.assertDictEqual(kwargs, {}) # Testing an older version by monkey-patching the version in the module it's used. 
import transformers as old_transformers old_transformers.configuration_utils.__version__ = "v3.0.0" old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo) self.assertEqual(old_configuration.hidden_size, 768) def test_saving_config_with_custom_generation_kwargs_raises_warning(self): config = BertConfig(min_length=3) # `min_length = 3` is a non-default generation kwarg with tempfile.TemporaryDirectory() as tmp_dir: with self.assertLogs("transformers.configuration_utils", level="WARNING") as logs: config.save_pretrained(tmp_dir) self.assertEqual(len(logs.output), 1) self.assertIn("min_length", logs.output[0]) def test_has_non_default_generation_parameters(self): config = BertConfig() self.assertFalse(config._has_non_default_generation_parameters()) config = BertConfig(min_length=3) self.assertTrue(config._has_non_default_generation_parameters()) config = BertConfig(min_length=0) # `min_length = 0` is a default generation kwarg self.assertFalse(config._has_non_default_generation_parameters())
transformers/tests/test_configuration_utils.py/0
{ "file_path": "transformers/tests/test_configuration_utils.py", "repo_id": "transformers", "token_count": 5352 }
434
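`test_config_from_string` in the entry above relies on `update_from_string` coercing `"key=value"` pairs to the type of the existing attribute. A minimal sketch of that idea (not the real `PretrainedConfig.update_from_string`; the `TinyConfig` class and the exact coercion rules are assumptions for illustration):

```py
class TinyConfig:
    """A hypothetical config with one int, float, bool and str attribute."""

    def __init__(self):
        self.n_embd = 768
        self.resid_pdrop = 0.1
        self.scale_attn_weights = True
        self.summary_type = "cls_index"

    def update_from_string(self, update_str: str):
        for pair in update_str.split(","):
            key, value = pair.split("=")
            if not hasattr(self, key):
                raise ValueError(f"Key {key} isn't in the config")
            current = getattr(self, key)
            # Coerce the string to the type of the existing attribute
            # (bool must be checked before int, since bool is a subclass of int).
            if isinstance(current, bool):
                value = value.lower() in ("true", "1", "yes")
            elif isinstance(current, int):
                value = int(value)
            elif isinstance(current, float):
                value = float(value)
            setattr(self, key, value)


c = TinyConfig()
c.update_from_string("n_embd=769,resid_pdrop=1.1,scale_attn_weights=False,summary_type=cls_indexfoo")
assert (c.n_embd, c.resid_pdrop, c.scale_attn_weights, c.summary_type) == (769, 1.1, False, "cls_indexfoo")
```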
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPT2TokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class TokenizerUtilTester(unittest.TestCase): def test_cached_files_are_used_when_internet_is_down(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} # Download this model to make sure it's in the cache. _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert") # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("requests.Session.request", return_value=response_mock) as mock_head: _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert") # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def test_cached_files_are_used_when_internet_is_down_missing_files(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} # Download this model to make sure it's in the cache. _ = GPT2TokenizerFast.from_pretrained("openai-community/gpt2") # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("requests.Session.request", return_value=response_mock) as mock_head: _ = GPT2TokenizerFast.from_pretrained("openai-community/gpt2") # This check we did call the fake head request mock_head.assert_called() def test_legacy_load_from_one_file(self): # This test is for deprecated behavior and can be removed in v5 try: tmp_file = tempfile.mktemp() with open(tmp_file, "wb") as f: http_get("https://huggingface.co/albert/albert-base-v1/resolve/main/spiece.model", f) _ = AlbertTokenizer.from_pretrained(tmp_file) finally: os.remove(tmp_file) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile("tokenizer.json"): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. 
return try: with open("tokenizer.json", "wb") as f: http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") # The tiny random BERT has a vocab size of 1024, tiny openai-community/gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size, 1000) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. finally: os.remove("tokenizer.json") @is_staging_test class TokenizerPushToHubTester(unittest.TestCase): vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"] @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-tokenizer") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer") except HTTPError: pass def test_push_to_hub(self): with tempfile.TemporaryDirectory() as tmp_dir: vocab_file = os.path.join(tmp_dir, "vocab.txt") with open(vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) tokenizer = BertTokenizer(vocab_file) tokenizer.push_to_hub("test-tokenizer", token=self._token) new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer") self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab) # Reset repo delete_repo(token=self._token, repo_id="test-tokenizer") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, token=self._token) new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer") self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab) def test_push_to_hub_in_organization(self): with tempfile.TemporaryDirectory() as tmp_dir: vocab_file = os.path.join(tmp_dir, "vocab.txt") with open(vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) tokenizer = BertTokenizer(vocab_file) tokenizer.push_to_hub("valid_org/test-tokenizer-org", token=self._token) new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org") self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab) # Reset repo delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, token=self._token ) new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org") self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab) @require_tokenizers def test_push_to_hub_dynamic_tokenizer(self): CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: vocab_file = os.path.join(tmp_dir, "vocab.txt") with open(vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) tokenizer = CustomTokenizer(vocab_file) # No fast custom tokenizer tokenizer.push_to_hub("test-dynamic-tokenizer", token=self._token) tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module 
self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer") # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: vocab_file = os.path.join(tmp_dir, "vocab.txt") with open(vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir) bert_tokenizer.save_pretrained(tmp_dir) tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir) tokenizer.push_to_hub("test-dynamic-tokenizer", token=self._token) tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast") tokenizer = AutoTokenizer.from_pretrained( f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer") class TrieTest(unittest.TestCase): def test_trie(self): trie = Trie() trie.add("Hello 友達") self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}}) trie.add("Hello") trie.data self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}}) def test_trie_split(self): trie = Trie() self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"]) trie.add("[CLS]") trie.add("extra_id_1") trie.add("extra_id_100") self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"]) def test_trie_single(self): trie = Trie() trie.add("A") self.assertEqual(trie.split("ABC"), ["A", "BC"]) self.assertEqual(trie.split("BCA"), ["BC", "A"]) def test_trie_final(self): trie = Trie() trie.add("TOKEN]") trie.add("[SPECIAL_TOKEN]") self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"]) def test_trie_subtokens(self): trie = Trie() trie.add("A") trie.add("P") trie.add("[SPECIAL_TOKEN]") self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"]) def test_trie_suffix_tokens(self): trie = Trie() trie.add("AB") trie.add("B") trie.add("C") self.assertEqual(trie.split("ABC"), ["AB", "C"]) def test_trie_skip(self): trie = Trie() trie.add("ABC") trie.add("B") trie.add("CD") self.assertEqual(trie.split("ABCD"), ["ABC", "D"]) def test_cut_text_hardening(self): # Even if the offsets are wrong, we necessarily output correct string # parts. trie = Trie() parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3]) self.assertEqual(parts, ["AB", "C"])
transformers/tests/test_tokenization_utils.py/0
{ "file_path": "transformers/tests/test_tokenization_utils.py", "repo_id": "transformers", "token_count": 4920 }
435
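The `TrieTest` cases above describe a greedy, longest-match splitting behavior built on a nested-dict trie. The class below is a simplified re-implementation of that idea, not `transformers.tokenization_utils.Trie` itself (the real class resolves overlapping matches more carefully and also exposes `cut_text`), but it satisfies the `split` cases asserted above:

```py
class SimpleTrie:
    """A stripped-down trie with the same nested-dict layout the tests above inspect."""

    def __init__(self):
        self.data = {}

    def add(self, word: str):
        node = self.data
        for char in word:
            node = node.setdefault(char, {})
        node[""] = 1  # end-of-token marker, as in the `trie.data` assertions above

    def _longest_match(self, text: str, start: int):
        node, end = self.data, None
        for i in range(start, len(text)):
            if text[i] not in node:
                break
            node = node[text[i]]
            if "" in node:
                end = i + 1  # remember the longest added token ending here
        return end

    def split(self, text: str):
        parts, cursor, i = [], 0, 0
        while i < len(text):
            end = self._longest_match(text, i)
            if end is None:
                i += 1
                continue
            if i > cursor:
                parts.append(text[cursor:i])  # flush the unmatched prefix
            parts.append(text[i:end])
            cursor = i = end
        if cursor < len(text):
            parts.append(text[cursor:])
        return parts


trie = SimpleTrie()
assert trie.split("[CLS] This is a extra_id_100") == ["[CLS] This is a extra_id_100"]
trie.add("[CLS]")
trie.add("extra_id_1")
trie.add("extra_id_100")
assert trie.split("[CLS] This is a extra_id_100") == ["[CLS]", " This is a ", "extra_id_100"]
```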
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path from typing import List from transformers import is_torch_available, is_vision_available from transformers.testing_utils import get_tests_dir, is_tool_test from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText if is_torch_available(): import torch if is_vision_available(): from PIL import Image authorized_types = ["text", "image", "audio"] def create_inputs(input_types: List[str]): inputs = [] for input_type in input_types: if input_type == "text": inputs.append("Text input") elif input_type == "image": inputs.append( Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512)) ) elif input_type == "audio": inputs.append(torch.ones(3000)) elif isinstance(input_type, list): inputs.append(create_inputs(input_type)) else: raise ValueError(f"Invalid type requested: {input_type}") return inputs def output_types(outputs: List): output_types = [] for output in outputs: if isinstance(output, (str, AgentText)): output_types.append("text") elif isinstance(output, (Image.Image, AgentImage)): output_types.append("image") elif isinstance(output, (torch.Tensor, AgentAudio)): output_types.append("audio") else: raise ValueError(f"Invalid output: {output}") return output_types @is_tool_test class ToolTesterMixin: def test_inputs_outputs(self): self.assertTrue(hasattr(self.tool, "inputs")) self.assertTrue(hasattr(self.tool, "outputs")) inputs = self.tool.inputs for _input in inputs: if isinstance(_input, list): for __input in _input: self.assertTrue(__input in authorized_types) else: self.assertTrue(_input in authorized_types) outputs = self.tool.outputs for _output in outputs: self.assertTrue(_output in authorized_types) def test_call(self): inputs = create_inputs(self.tool.inputs) outputs = self.tool(*inputs) # There is a single output if len(self.tool.outputs) == 1: outputs = [outputs] self.assertListEqual(output_types(outputs), self.tool.outputs) def test_common_attributes(self): self.assertTrue(hasattr(self.tool, "description")) self.assertTrue(hasattr(self.tool, "default_checkpoint")) self.assertTrue(self.tool.description.startswith("This is a tool that")) def test_agent_types_outputs(self): inputs = create_inputs(self.tool.inputs) outputs = self.tool(*inputs) if not isinstance(outputs, list): outputs = [outputs] self.assertEqual(len(outputs), len(self.tool.outputs)) for output, output_type in zip(outputs, self.tool.outputs): agent_type = AGENT_TYPE_MAPPING[output_type] self.assertTrue(isinstance(output, agent_type)) def test_agent_types_inputs(self): inputs = create_inputs(self.tool.inputs) _inputs = [] for _input, input_type in zip(inputs, self.tool.inputs): if isinstance(input_type, list): _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type]) else: _inputs.append(AGENT_TYPE_MAPPING[input_type](_input)) # Should not raise an error outputs = self.tool(*inputs) if not 
isinstance(outputs, list): outputs = [outputs] self.assertEqual(len(outputs), len(self.tool.outputs))
transformers/tests/tools/test_tools_common.py/0
{ "file_path": "transformers/tests/tools/test_tools_common.py", "repo_id": "transformers", "token_count": 1790 }
436
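`ToolTesterMixin` above checks structure rather than behavior: `inputs`/`outputs` drawn from the authorized types, a `description` starting with "This is a tool that", a `default_checkpoint` attribute, and a callable returning one value per declared output. Below is a hypothetical object satisfying that contract; it does not inherit from the real `transformers.tools.Tool` base class, and every attribute value is made up for illustration:

```py
class UpperCaseTool:
    """A made-up, minimal tool-shaped object matching the structural checks above."""

    description = "This is a tool that upper-cases a piece of text."
    default_checkpoint = "none"  # the mixin only checks that the attribute exists
    inputs = ["text"]
    outputs = ["text"]

    def __call__(self, text: str) -> str:
        return text.upper()


tool = UpperCaseTool()
assert all(t in ["text", "image", "audio"] for t in tool.inputs + tool.outputs)
assert tool.description.startswith("This is a tool that")
assert tool("hello") == "HELLO"
```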
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import shutil import unittest from unittest.mock import patch from transformers.testing_utils import CaptureStd, is_pt_tf_cross_test, require_torch class CLITest(unittest.TestCase): @patch("sys.argv", ["fakeprogrampath", "env"]) def test_cli_env(self): # test transformers-cli env import transformers.commands.transformers_cli with CaptureStd() as cs: transformers.commands.transformers_cli.main() self.assertIn("Python version", cs.out) self.assertIn("Platform", cs.out) self.assertIn("Using distributed or parallel set-up in script?", cs.out) @is_pt_tf_cross_test @patch( "sys.argv", ["fakeprogrampath", "pt-to-tf", "--model-name", "hf-internal-testing/tiny-random-gptj", "--no-pr"] ) def test_cli_pt_to_tf(self): import transformers.commands.transformers_cli shutil.rmtree("/tmp/hf-internal-testing/tiny-random-gptj", ignore_errors=True) # cleans potential past runs transformers.commands.transformers_cli.main() self.assertTrue(os.path.exists("/tmp/hf-internal-testing/tiny-random-gptj/tf_model.h5")) @require_torch @patch("sys.argv", ["fakeprogrampath", "download", "hf-internal-testing/tiny-random-gptj", "--cache-dir", "/tmp"]) def test_cli_download(self): import transformers.commands.transformers_cli # # remove any previously downloaded model to start clean shutil.rmtree("/tmp/models--hf-internal-testing--tiny-random-gptj", ignore_errors=True) # run the command transformers.commands.transformers_cli.main() # check if the model files are downloaded correctly on /tmp/models--hf-internal-testing--tiny-random-gptj self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--tiny-random-gptj/blobs")) self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--tiny-random-gptj/refs")) self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--tiny-random-gptj/snapshots")) @require_torch @patch( "sys.argv", [ "fakeprogrampath", "download", "hf-internal-testing/test_dynamic_model_with_tokenizer", "--trust-remote-code", "--cache-dir", "/tmp", ], ) def test_cli_download_trust_remote(self): import transformers.commands.transformers_cli # # remove any previously downloaded model to start clean shutil.rmtree("/tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer", ignore_errors=True) # run the command transformers.commands.transformers_cli.main() # check if the model files are downloaded correctly on /tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer/blobs")) self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer/refs")) self.assertTrue( os.path.exists("/tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer/snapshots") )
transformers/tests/utils/test_cli.py/0
{ "file_path": "transformers/tests/utils/test_cli.py", "repo_id": "transformers", "token_count": 1489 }
437
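The CLI tests above all follow the same pattern: patch `sys.argv`, call the command-line entry point in-process, and inspect stdout or the filesystem. Here is that pattern in isolation, with a toy argparse program standing in for `transformers.commands.transformers_cli.main` (the `toy_cli_main` function is invented for illustration, not part of the library):

```py
import argparse
import io
import sys
from contextlib import redirect_stdout
from unittest.mock import patch


def toy_cli_main():
    """Stand-in for a real CLI entry point; reads sys.argv via argparse."""
    parser = argparse.ArgumentParser(prog="toy-cli")
    subparsers = parser.add_subparsers(dest="command")
    subparsers.add_parser("env")
    args = parser.parse_args()
    if args.command == "env":
        print(f"Python version: {sys.version.split()[0]}")
        print(f"Platform: {sys.platform}")


# Patch argv as in the tests above, run the CLI in-process, capture stdout.
with patch("sys.argv", ["fakeprogrampath", "env"]):
    buffer = io.StringIO()
    with redirect_stdout(buffer):
        toy_cli_main()

assert "Python version" in buffer.getvalue()
assert "Platform" in buffer.getvalue()
```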
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib.metadata import sys from transformers.testing_utils import TestCasePlus from transformers.utils.versions import require_version, require_version_core numpy_ver = importlib.metadata.version("numpy") python_ver = ".".join([str(x) for x in sys.version_info[:3]]) class DependencyVersionCheckTest(TestCasePlus): def test_core(self): # lt + different version strings require_version_core("numpy<1000.4.5") require_version_core("numpy<1000.4") require_version_core("numpy<1000") # le require_version_core("numpy<=1000.4.5") require_version_core(f"numpy<={numpy_ver}") # eq require_version_core(f"numpy=={numpy_ver}") # ne require_version_core("numpy!=1000.4.5") # ge require_version_core("numpy>=1.0") require_version_core("numpy>=1.0.0") require_version_core(f"numpy>={numpy_ver}") # gt require_version_core("numpy>1.0.0") # mix require_version_core("numpy>1.0.0,<1000") # requirement w/o version require_version_core("numpy") # unmet requirements due to version conflict for req in ["numpy==1.0.0", "numpy>=1000.0.0", f"numpy<{numpy_ver}"]: try: require_version_core(req) except ImportError as e: self.assertIn(f"{req} is required", str(e)) self.assertIn("but found", str(e)) # unmet requirements due to missing module for req in ["numpipypie>1", "numpipypie2"]: try: require_version_core(req) except importlib.metadata.PackageNotFoundError as e: self.assertIn(f"The '{req}' distribution was not found and is required by this application", str(e)) self.assertIn("Try: `pip install transformers -U`", str(e)) # bogus requirements formats: # 1. whole thing for req in ["numpy??1.0.0", "numpy1.0.0"]: try: require_version_core(req) except ValueError as e: self.assertIn("requirement needs to be in the pip package format", str(e)) # 2. only operators for req in ["numpy=1.0.0", "numpy == 1.00", "numpy<>1.0.0", "numpy><1.00", "numpy>>1.0.0"]: try: require_version_core(req) except ValueError as e: self.assertIn("need one of ", str(e)) def test_python(self): # matching requirement require_version("python>=3.6.0") # not matching requirements for req in ["python>9.9.9", "python<3.0.0"]: try: require_version_core(req) except ImportError as e: self.assertIn(f"{req} is required", str(e)) self.assertIn(f"but found python=={python_ver}", str(e))
transformers/tests/utils/test_versions_utils.py/0
{ "file_path": "transformers/tests/utils/test_versions_utils.py", "repo_id": "transformers", "token_count": 1539 }
438
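The version checks above revolve around parsing a pip-style requirement, looking up the installed version, and raising `ImportError` (or letting `PackageNotFoundError` surface) with a helpful message. A simplified sketch of that flow — not the actual `transformers.utils.versions` code, assuming `numpy` and the `packaging` library are installed, and skipping comma-separated constraints and the `python` pseudo-package:

```py
import importlib.metadata
import operator
import re

from packaging import version  # assumed available; widely installed alongside pip/setuptools

_OPS = {"<": operator.lt, "<=": operator.le, "==": operator.eq,
        "!=": operator.ne, ">=": operator.ge, ">": operator.gt}


def require_version_sketch(requirement: str):
    """Check a single `name<op>version` requirement against the installed package."""
    match = re.fullmatch(r"([A-Za-z0-9_.\-]+)(<=|>=|==|!=|<|>)([0-9][0-9.]*)", requirement)
    if match is None:
        raise ValueError(f"requirement needs to be in the pip package format, got {requirement!r}")
    name, op, wanted = match.groups()
    found = importlib.metadata.version(name)  # raises PackageNotFoundError if not installed
    if not _OPS[op](version.parse(found), version.parse(wanted)):
        raise ImportError(f"{requirement} is required, but found {name}=={found}")


require_version_sketch("numpy>=1.0")  # passes on any modern install
try:
    require_version_sketch("numpy>=1000.0.0")
except ImportError as e:
    print(e)  # e.g. "numpy>=1000.0.0 is required, but found numpy==<installed version>"
```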
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utility that checks the big table in the file docs/source/en/index.md and potentially updates it. Use from the root of the repo with: ```bash python utils/check_inits.py ``` for a check that will error in case of inconsistencies (used by `make repo-consistency`). To auto-fix issues run: ```bash python utils/check_inits.py --fix_and_overwrite ``` which is used by `make fix-copies`. """ import argparse import collections import os import re from typing import List from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py TRANSFORMERS_PATH = "src/transformers" PATH_TO_DOCS = "docs/source/en" REPO_PATH = "." def _find_text_in_file(filename: str, start_prompt: str, end_prompt: str) -> str: """ Find the text in filename between two prompts. Args: filename (`str`): The file to search into. start_prompt (`str`): A string to look for at the start of the content searched. end_prompt (`str`): A string that will mark the end of the content to look for. Returns: `str`: The content between the prompts. """ with open(filename, "r", encoding="utf-8", newline="\n") as f: lines = f.readlines() # Find the start prompt. start_index = 0 while not lines[start_index].startswith(start_prompt): start_index += 1 start_index += 1 # Now go until the end prompt. end_index = start_index while not lines[end_index].startswith(end_prompt): end_index += 1 end_index -= 1 while len(lines[start_index]) <= 1: start_index += 1 while len(lines[end_index]) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index]), start_index, end_index, lines # Regexes that match TF/Flax/PT model names. Add here suffixes that are used to identify models, separated by | _re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") _re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Will match any TF or Flax model too so need to be in an else branch after the two previous regexes. _re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # This is to make sure the transformers module imported is the one in the repo. transformers_module = direct_transformers_import(TRANSFORMERS_PATH) def camel_case_split(identifier: str) -> List[str]: """ Split a camel-cased name into words. Args: identifier (`str`): The camel-cased name to parse. Returns: `List[str]`: The list of words in the identifier (as seprated by capital letters). 
Example: ```py >>> camel_case_split("CamelCasedClass") ["Camel", "Cased", "Class"] ``` """ # Regex thanks to https://stackoverflow.com/questions/29916065/how-to-do-camelcase-split-in-python matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier) return [m.group(0) for m in matches] def _center_text(text: str, width: int) -> str: """ Utility that will add spaces on the left and right of a text to make it centered for a given width. Args: text (`str`): The text to center. width (`int`): The desired length of the result. Returns: `str`: A text of length `width` with the original `text` in the middle. """ text_length = 2 if text == "✅" or text == "❌" else len(text) left_indent = (width - text_length) // 2 right_indent = width - text_length - left_indent return " " * left_indent + text + " " * right_indent SPECIAL_MODEL_NAME_LINK_MAPPING = { "Data2VecAudio": "[Data2VecAudio](model_doc/data2vec)", "Data2VecText": "[Data2VecText](model_doc/data2vec)", "Data2VecVision": "[Data2VecVision](model_doc/data2vec)", "DonutSwin": "[DonutSwin](model_doc/donut)", } MODEL_NAMES_WITH_SAME_CONFIG = { "BARThez": "BART", "BARTpho": "BART", "BertJapanese": "BERT", "BERTweet": "BERT", "BORT": "BERT", "ByT5": "T5", "CPM": "OpenAI GPT-2", "DePlot": "Pix2Struct", "DialoGPT": "OpenAI GPT-2", "DiT": "BEiT", "FLAN-T5": "T5", "FLAN-UL2": "T5", "HerBERT": "BERT", "LayoutXLM": "LayoutLMv2", "Llama2": "LLaMA", "MADLAD-400": "T5", "MatCha": "Pix2Struct", "mBART-50": "mBART", "Megatron-GPT2": "OpenAI GPT-2", "mLUKE": "LUKE", "MMS": "Wav2Vec2", "NLLB": "M2M100", "PhoBERT": "BERT", "T5v1.1": "T5", "TAPEX": "BART", "UL2": "T5", "Wav2Vec2Phoneme": "Wav2Vec2", "XLM-V": "XLM-RoBERTa", "XLS-R": "Wav2Vec2", "XLSR-Wav2Vec2": "Wav2Vec2", } MODEL_NAMES_TO_IGNORE = ["CLIPVisionModel", "SiglipVisionModel", "ChineseCLIPVisionModel"] def get_model_table_from_auto_modules() -> str: """ Generates an up-to-date model table from the content of the auto modules. """ # Dictionary model names to config. config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES model_name_to_config = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax. pt_models = collections.defaultdict(bool) tf_models = collections.defaultdict(bool) flax_models = collections.defaultdict(bool) # Let's lookup through all transformers object (once). for attr_name in dir(transformers_module): lookup_dict = None if _re_tf_models.match(attr_name) is not None: lookup_dict = tf_models attr_name = _re_tf_models.match(attr_name).groups()[0] elif _re_flax_models.match(attr_name) is not None: lookup_dict = flax_models attr_name = _re_flax_models.match(attr_name).groups()[0] elif _re_pt_models.match(attr_name) is not None: lookup_dict = pt_models attr_name = _re_pt_models.match(attr_name).groups()[0] if lookup_dict is not None: while len(attr_name) > 0: if attr_name in model_name_to_prefix.values(): lookup_dict[attr_name] = True break # Try again after removing the last word in the name attr_name = "".join(camel_case_split(attr_name)[:-1]) # Let's build that table! 
model_names = list(model_name_to_config.keys()) + list(MODEL_NAMES_WITH_SAME_CONFIG.keys()) # model name to doc link mapping model_names_mapping = transformers_module.models.auto.configuration_auto.MODEL_NAMES_MAPPING model_name_to_link_mapping = {value: f"[{value}](model_doc/{key})" for key, value in model_names_mapping.items()} # update mapping with special model names model_name_to_link_mapping = { k: SPECIAL_MODEL_NAME_LINK_MAPPING[k] if k in SPECIAL_MODEL_NAME_LINK_MAPPING else v for k, v in model_name_to_link_mapping.items() } # MaskFormerSwin and TimmBackbone are backbones and so not meant to be loaded and used on their own. Instead, they define architectures which can be loaded using the AutoBackbone API. names_to_exclude = ["MaskFormerSwin", "TimmBackbone", "Speech2Text2"] model_names = [name for name in model_names if name not in names_to_exclude] model_names.sort(key=str.lower) columns = ["Model", "PyTorch support", "TensorFlow support", "Flax Support"] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). widths = [len(c) + 2 for c in columns] widths[0] = max([len(doc_link) for doc_link in model_name_to_link_mapping.values()]) + 2 # Build the table per se table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n" # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n" check = {True: "✅", False: "❌"} for name in model_names: if name in MODEL_NAMES_TO_IGNORE: continue if name in MODEL_NAMES_WITH_SAME_CONFIG.keys(): prefix = model_name_to_prefix[MODEL_NAMES_WITH_SAME_CONFIG[name]] else: prefix = model_name_to_prefix[name] line = [ model_name_to_link_mapping[name], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n" return table def check_model_table(overwrite=False): """ Check the model table in the index.md is consistent with the state of the lib and potentially fix it. Args: overwrite (`bool`, *optional*, defaults to `False`): Whether or not to overwrite the table when it's not up to date. """ current_table, start_index, end_index, lines = _find_text_in_file( filename=os.path.join(PATH_TO_DOCS, "index.md"), start_prompt="<!--This table is updated automatically from the auto modules", end_prompt="<!-- End table-->", ) new_table = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:]) else: raise ValueError( "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") args = parser.parse_args() check_model_table(args.fix_and_overwrite)
transformers/utils/check_table.py/0
{ "file_path": "transformers/utils/check_table.py", "repo_id": "transformers", "token_count": 4363 }
439
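`check_table.py` above boils down to: collect backend support per model, compute column widths, center every cell, and emit a markdown table with `:---:` separators and ✅/❌ flags. A toy, self-contained version of that table-building step (the support data here is made up rather than read from the auto modules):

```py
def center_text(text: str, width: int) -> str:
    # Emojis count as width 2, mirroring the `_center_text` helper above.
    text_length = 2 if text in ("✅", "❌") else len(text)
    left = (width - text_length) // 2
    return " " * left + text + " " * (width - text_length - left)


def build_support_table(support: dict) -> str:
    """Build a centered markdown table from {model_name: (pt, tf, flax)} flags."""
    columns = ["Model", "PyTorch support", "TensorFlow support", "Flax Support"]
    widths = [len(c) + 2 for c in columns]
    widths[0] = max(widths[0], max(len(name) for name in support) + 2)
    check = {True: "✅", False: "❌"}
    table = "|" + "|".join(center_text(c, w) for c, w in zip(columns, widths)) + "|\n"
    table += "|" + "|".join(":" + "-" * (w - 2) + ":" for w in widths) + "|\n"
    for name in sorted(support, key=str.lower):
        pt, tf, flax = support[name]
        row = [name, check[pt], check[tf], check[flax]]
        table += "|" + "|".join(center_text(v, w) for v, w in zip(row, widths)) + "|\n"
    return table


print(build_support_table({"BERT": (True, True, True), "Mamba": (True, False, False)}))
```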
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utility that updates the metadata of the Transformers library in the repository `huggingface/transformers-metadata`. Usage for an update (as used by the GitHub action `update_metadata`): ```bash python utils/update_metadata.py --token <token> --commit_sha <commit_sha> ``` Usage to check all pipelines are properly defined in the constant `PIPELINE_TAGS_AND_AUTO_MODELS` of this script, so that new pipelines are properly added as metadata (as used in `make repo-consistency`): ```bash python utils/update_metadata.py --check-only ``` """ import argparse import collections import os import re import tempfile from typing import Dict, List, Tuple import pandas as pd from datasets import Dataset from huggingface_hub import hf_hub_download, upload_folder from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/update_metadata.py TRANSFORMERS_PATH = "src/transformers" # This is to make sure the transformers module imported is the one in the repo. transformers_module = direct_transformers_import(TRANSFORMERS_PATH) # Regexes that match TF/Flax/PT model names. _re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") _re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Fill this with tuples (pipeline_tag, model_mapping, auto_model) PIPELINE_TAGS_AND_AUTO_MODELS = [ ("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"), ("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"), ("image-feature-extraction", "MODEL_FOR_IMAGE_MAPPING_NAMES", "AutoModel"), ("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"), ("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"), ("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"), ("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"), ("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"), ("image-to-image", "MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES", "AutoModelForImageToImage"), ("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"), ("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"), ( "zero-shot-object-detection", "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForZeroShotObjectDetection", ), ("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"), ("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"), ("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"), ("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"), ( "table-question-answering", "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForTableQuestionAnswering", ), ("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"), ("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"), ( "next-sentence-prediction", "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES", "AutoModelForNextSentencePrediction", ), ( "audio-frame-classification", "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioFrameClassification", ), ("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"), ( "document-question-answering", "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForDocumentQuestionAnswering", ), ( "visual-question-answering", "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForVisualQuestionAnswering", ), ("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"), ( "zero-shot-image-classification", "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForZeroShotImageClassification", ), ("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"), ("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"), ("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"), ("text-to-audio", "MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING_NAMES", "AutoModelForTextToSpectrogram"), ("text-to-audio", "MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING_NAMES", "AutoModelForTextToWaveform"), ] def camel_case_split(identifier: str) -> List[str]: """ Split a camel-cased name into words. Args: identifier (`str`): The camel-cased name to parse. 
Returns: `List[str]`: The list of words in the identifier (as seprated by capital letters). Example: ```py >>> camel_case_split("CamelCasedClass") ["Camel", "Cased", "Class"] ``` """ # Regex thanks to https://stackoverflow.com/questions/29916065/how-to-do-camelcase-split-in-python matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier) return [m.group(0) for m in matches] def get_frameworks_table() -> pd.DataFrame: """ Generates a dataframe containing the supported auto classes for each model type, using the content of the auto modules. """ # Dictionary model names to config. config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES model_prefix_to_model_type = { config.replace("Config", ""): model_type for model_type, config in config_maping_names.items() } # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax. pt_models = collections.defaultdict(bool) tf_models = collections.defaultdict(bool) flax_models = collections.defaultdict(bool) # Let's lookup through all transformers object (once) and find if models are supported by a given backend. for attr_name in dir(transformers_module): lookup_dict = None if _re_tf_models.match(attr_name) is not None: lookup_dict = tf_models attr_name = _re_tf_models.match(attr_name).groups()[0] elif _re_flax_models.match(attr_name) is not None: lookup_dict = flax_models attr_name = _re_flax_models.match(attr_name).groups()[0] elif _re_pt_models.match(attr_name) is not None: lookup_dict = pt_models attr_name = _re_pt_models.match(attr_name).groups()[0] if lookup_dict is not None: while len(attr_name) > 0: if attr_name in model_prefix_to_model_type: lookup_dict[model_prefix_to_model_type[attr_name]] = True break # Try again after removing the last word in the name attr_name = "".join(camel_case_split(attr_name)[:-1]) all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys())) all_models = list(all_models) all_models.sort() data = {"model_type": all_models} data["pytorch"] = [pt_models[t] for t in all_models] data["tensorflow"] = [tf_models[t] for t in all_models] data["flax"] = [flax_models[t] for t in all_models] # Now let's find the right processing class for each model. In order we check if there is a Processor, then a # Tokenizer, then a FeatureExtractor, then an ImageProcessor processors = {} for t in all_models: if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES: processors[t] = "AutoProcessor" elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES: processors[t] = "AutoTokenizer" elif t in transformers_module.models.auto.image_processing_auto.IMAGE_PROCESSOR_MAPPING_NAMES: processors[t] = "AutoImageProcessor" elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES: processors[t] = "AutoFeatureExtractor" else: # Default to AutoTokenizer if a model has nothing, for backward compatibility. processors[t] = "AutoTokenizer" data["processor"] = [processors[t] for t in all_models] return pd.DataFrame(data) def update_pipeline_and_auto_class_table(table: Dict[str, Tuple[str, str]]) -> Dict[str, Tuple[str, str]]: """ Update the table maping models to pipelines and auto classes without removing old keys if they don't exist anymore. Args: table (`Dict[str, Tuple[str, str]]`): The existing table mapping model names to a tuple containing the pipeline tag and the auto-class name with which they should be used. 
Returns: `Dict[str, Tuple[str, str]]`: The updated table in the same format. """ auto_modules = [ transformers_module.models.auto.modeling_auto, transformers_module.models.auto.modeling_tf_auto, transformers_module.models.auto.modeling_flax_auto, ] for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS: model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"] auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"] # Loop through all three frameworks for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings): # The type of pipeline may not exist in this framework if not hasattr(module, mapping): continue # First extract all model_names model_names = [] for name in getattr(module, mapping).values(): if isinstance(name, str): model_names.append(name) else: model_names.extend(list(name)) # Add pipeline tag and auto model class for those models table.update({model_name: (pipeline_tag, cls) for model_name in model_names}) return table def update_metadata(token: str, commit_sha: str): """ Update the metadata for the Transformers repo in `huggingface/transformers-metadata`. Args: token (`str`): A valid token giving write access to `huggingface/transformers-metadata`. commit_sha (`str`): The commit SHA on Transformers corresponding to this update. """ frameworks_table = get_frameworks_table() frameworks_dataset = Dataset.from_pandas(frameworks_table) resolved_tags_file = hf_hub_download( "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token ) tags_dataset = Dataset.from_json(resolved_tags_file) table = { tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"]) for i in range(len(tags_dataset)) } table = update_pipeline_and_auto_class_table(table) # Sort the model classes to avoid some nondeterministic updates to create false update commits. model_classes = sorted(table.keys()) tags_table = pd.DataFrame( { "model_class": model_classes, "pipeline_tag": [table[m][0] for m in model_classes], "auto_class": [table[m][1] for m in model_classes], } ) tags_dataset = Dataset.from_pandas(tags_table) with tempfile.TemporaryDirectory() as tmp_dir: frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json")) tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json")) if commit_sha is not None: commit_message = ( f"Update with commit {commit_sha}\n\nSee: " f"https://github.com/huggingface/transformers/commit/{commit_sha}" ) else: commit_message = "Update" upload_folder( repo_id="huggingface/transformers-metadata", folder_path=tmp_dir, repo_type="dataset", token=token, commit_message=commit_message, ) def check_pipeline_tags(): """ Check all pipeline tags are properly defined in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant of this script. """ in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS} pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS missing = [] for key in pipeline_tasks: if key not in in_table: model = pipeline_tasks[key]["pt"] if isinstance(model, (list, tuple)): model = model[0] model = model.__name__ if model not in in_table.values(): missing.append(key) if len(missing) > 0: msg = ", ".join(missing) raise ValueError( "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside " f"`utils/update_metadata.py`: {msg}. Please add them!" 
) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.") parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.") parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.") args = parser.parse_args() if args.check_only: check_pipeline_tags() else: update_metadata(args.token, args.commit_sha)
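# --- Illustrative sketch (not part of the original file) ---
# How the three regexes above split exported class names by framework: the TF and Flax patterns
# are tried first, and anything else ending in Model/Encoder/Decoder/ForConditionalGeneration
# falls through to the PyTorch bucket. A standalone rendition (re-declaring the same patterns
# so the snippet runs on its own):
import re

re_tf = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
re_flax = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
re_pt = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")


def classify(class_name: str):
    """Return (framework, model_prefix) for an exported class name, or (None, None)."""
    if re_tf.match(class_name) is not None:
        return "tensorflow", re_tf.match(class_name).groups()[0]
    if re_flax.match(class_name) is not None:
        return "flax", re_flax.match(class_name).groups()[0]
    if re_pt.match(class_name) is not None:
        return "pytorch", re_pt.match(class_name).groups()[0]
    return None, None


print(classify("TFBertModel"))                 # ('tensorflow', 'Bert')
print(classify("FlaxGPT2Model"))               # ('flax', 'GPT2')
print(classify("T5ForConditionalGeneration"))  # ('pytorch', 'T5')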
transformers/utils/update_metadata.py/0
{ "file_path": "transformers/utils/update_metadata.py", "repo_id": "transformers", "token_count": 5916 }
440
# hello world experiment python benchmark/benchmark.py \ --command "python examples/scripts/ppo.py --log_with wandb" \ --num-seeds 3 \ --start-seed 1 \ --workers 10 \ --slurm-nodes 1 \ --slurm-gpus-per-task 1 \ --slurm-ntasks 1 \ --slurm-total-cpus 12 \ --slurm-template-path benchmark/trl.slurm_template python benchmark/benchmark.py \ --command "python examples/scripts/dpo.py --model_name_or_path=gpt2 --per_device_train_batch_size 4 --max_steps 1000 --learning_rate 1e-3 --gradient_accumulation_steps 1 --logging_steps 10 --eval_steps 500 --output_dir="dpo_anthropic_hh" --optim adamw_torch --warmup_steps 150 --report_to wandb --bf16 --logging_first_step --no_remove_unused_columns" \ --num-seeds 3 \ --start-seed 1 \ --workers 10 \ --slurm-nodes 1 \ --slurm-gpus-per-task 1 \ --slurm-ntasks 1 \ --slurm-total-cpus 12 \ --slurm-template-path benchmark/trl.slurm_template python benchmark/benchmark.py \ --command "python examples/scripts/sft.py --model_name_or_path="facebook/opt-350m" --report_to="wandb" --learning_rate=1.41e-5 --per_device_train_batch_size=64 --gradient_accumulation_steps=16 --output_dir="sft_openassistant-guanaco" --logging_steps=1 --num_train_epochs=3 --max_steps=-1 --push_to_hub --gradient_checkpointing" \ --num-seeds 3 \ --start-seed 1 \ --workers 10 \ --slurm-nodes 1 \ --slurm-gpus-per-task 1 \ --slurm-ntasks 1 \ --slurm-total-cpus 12 \ --slurm-template-path benchmark/trl.slurm_template python benchmark/benchmark.py \ --command "python examples/scripts/reward_modeling.py --model_name_or_path=facebook/opt-350m --output_dir="reward_modeling_anthropic_hh" --per_device_train_batch_size=64 --num_train_epochs=1 --gradient_accumulation_steps=16 --gradient_checkpointing=True --learning_rate=1.41e-5 --report_to="wandb" --remove_unused_columns=False --optim="adamw_torch" --logging_steps=10 --evaluation_strategy="steps" --max_length=512" \ --num-seeds 3 \ --start-seed 1 \ --workers 10 \ --slurm-nodes 1 \ --slurm-gpus-per-task 1 \ --slurm-ntasks 1 \ --slurm-total-cpus 12 \ --slurm-template-path benchmark/trl.slurm_template
trl/benchmark/benchmark_level1.sh/0
{ "file_path": "trl/benchmark/benchmark_level1.sh", "repo_id": "trl", "token_count": 906 }
441
# Best of N sampling: Alternative ways to get better model output without RL-based fine-tuning

Within the extras module is the `best-of-n` sampler class, which serves as an alternative method of generating better model output.
For a comparison with RL-based fine-tuning, see the `examples` directory.

## Usage

To get started quickly, create an instance of the class with a model, a length sampler, a tokenizer, and a callable that serves as a proxy reward pipeline, outputting reward scores for input queries:

```python

from transformers import pipeline, AutoTokenizer

from trl import AutoModelForCausalLMWithValueHead
from trl.core import LengthSampler
from trl.extras import BestOfNSampler

ref_model = AutoModelForCausalLMWithValueHead.from_pretrained(ref_model_name)
reward_pipe = pipeline("sentiment-analysis", model=reward_model, device=device)
tokenizer = AutoTokenizer.from_pretrained(ref_model_name)
tokenizer.pad_token = tokenizer.eos_token

# randomly samples an output length from the given range for each query; adjust for your task
output_length_sampler = LengthSampler(2, 6)


# callable that takes a list of raw text and returns a list of corresponding reward scores
def queries_to_scores(list_of_strings):
    return [output["score"] for output in reward_pipe(list_of_strings)]


best_of_n = BestOfNSampler(ref_model, tokenizer, queries_to_scores, length_sampler=output_length_sampler)

```

Assuming you have a list (or tensor) of tokenized queries, you can generate better output by calling the `generate` method:

```python

best_of_n.generate(query_tensors, device=device, **gen_kwargs)

```

The default sample size is 4, but you can change it at the time of instance initialization like so:

```python

best_of_n = BestOfNSampler(ref_model, tokenizer, queries_to_scores, length_sampler=output_length_sampler, sample_size=8)

```

By default, the sampler returns the single highest-scored output for each query; to keep the top 2 (or more) candidates instead, pass the `n_candidates` argument when creating the instance:

```python

best_of_n = BestOfNSampler(ref_model, tokenizer, queries_to_scores, length_sampler=output_length_sampler, n_candidates=2)

```

You can also set the generation settings (like `temperature` and `pad_token_id`) at instance creation rather than when calling the `generate` method, by passing a `GenerationConfig` from the `transformers` library at initialization:

```python

from transformers import GenerationConfig

generation_config = GenerationConfig(min_length=-1, top_k=0.0, top_p=1.0, do_sample=True, pad_token_id=tokenizer.eos_token_id)

best_of_n = BestOfNSampler(ref_model, tokenizer, queries_to_scores, length_sampler=output_length_sampler, generation_config=generation_config)

best_of_n.generate(query_tensors, device=device)

```

Furthermore, at the time of initialization you can set the seed to control the repeatability of the generation process, as well as the number of samples to generate for each query.
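For instance, here is a minimal sketch that fixes both at initialization (this assumes the constructor exposes `seed` and `sample_size` keyword arguments, as described above):

```python

best_of_n = BestOfNSampler(
    ref_model,
    tokenizer,
    queries_to_scores,
    length_sampler=output_length_sampler,
    seed=0,          # fixed seed for repeatable sampling
    sample_size=8,   # number of candidate generations per query
)

```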
trl/docs/source/best_of_n.mdx/0
{ "file_path": "trl/docs/source/best_of_n.mdx", "repo_id": "trl", "token_count": 840 }
442
# Multi Adapter RL (MARL) - a single base model for everything

Here we present an approach that uses a single base model for the entire PPO algorithm - which includes retrieving the reference logits, computing the active logits and the rewards. This feature is experimental, as we have not tested the convergence of the approach. We encourage the community to let us know if they face any issues.

## Requirements

You just need to install `peft`, and optionally `bitsandbytes` if you want to use 8-bit base models for more memory-efficient fine-tuning.

## Summary

The approach consists of three stages, summarized as follows:

1- Train a base model on the target domain (e.g. the `imdb` dataset) - this is the Supervised Fine-Tuning stage - it can leverage the `SFTTrainer` from TRL.
2- Train a reward model using `peft`. This is required in order to re-use the adapter during the RL optimisation process (step 3 below). We show an example of leveraging the `RewardTrainer` from TRL in [this example](https://github.com/huggingface/trl/tree/main/examples/scripts/reward_modeling.py).
3- Fine-tune new adapters on the base model using PPO and the reward adapter ("0 abstraction RL").

Make sure to use the same base model (i.e. same architecture and same weights) for stages 2 & 3.

## Quickstart

Let us assume you have trained your reward adapter on the `llama-7b` model using `RewardTrainer` and pushed the weights to the Hub under `trl-lib/llama-7b-hh-rm-adapter`.
When doing PPO, before passing the model to `PPOTrainer`, create your model as follows:

```python
from peft import LoraConfig
from trl import AutoModelForCausalLMWithValueHead, PPOTrainer

model_name = "huggyllama/llama-7b"
rm_adapter_id = "trl-lib/llama-7b-hh-rm-adapter"

# PPO adapter
lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)

model = AutoModelForCausalLMWithValueHead.from_pretrained(
    model_name,
    peft_config=lora_config,
    reward_adapter=rm_adapter_id,
)

...
trainer = PPOTrainer(
    model=model,
    ...
)
...
```

Then, inside your PPO training loop, call the `compute_reward_score` method by accessing the `model` attribute of the `PPOTrainer`.

```python
rewards = trainer.model.compute_reward_score(**inputs)
```

## Advanced usage

### Control over the adapter name

If you are familiar with the `peft` library, you know that you can use multiple adapters inside the same model. What you can do is train multiple adapters on the same base model to fine-tune on different policies.
In this case, you want to be able to control which adapter to activate again after retrieving the reward. For that, simply pass the appropriate `adapter_name` as the `ppo_adapter_name` argument when calling `compute_reward_score`.

```python
adapter_name_policy_1 = "policy_1"
rewards = trainer.model.compute_reward_score(**inputs, ppo_adapter_name=adapter_name_policy_1)
...
```

### Using 4-bit and 8-bit base models

For more memory-efficient fine-tuning, you can load your base model in 8-bit or 4-bit while keeping the adapters in the default precision (float32).
Just pass the appropriate arguments (i.e.
`load_in_8bit=True` or `load_in_4bit=True`) to `AutoModelForCausalLMWithValueHead.from_pretrained` as follows (assuming you have installed `bitsandbytes`): ```python model_name = "llama-7b" rm_adapter_id = "trl-lib/llama-7b-hh-rm-adapter" # PPO adapter lora_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = AutoModelForCausalLMWithValueHead.from_pretrained( model_name, peft_config=lora_config, reward_adapter=rm_adapter_id, load_in_8bit=True, ) ... trainer = PPOTrainer( model=model, ... ) ... ```
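The same applies to 4-bit loading; a minimal variant of the snippet above that only swaps the quantization flag looks like this:

```python
model = AutoModelForCausalLMWithValueHead.from_pretrained(
    model_name,
    peft_config=lora_config,
    reward_adapter=rm_adapter_id,
    load_in_4bit=True,
)
```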
trl/docs/source/multi_adapter_rl.mdx/0
{ "file_path": "trl/docs/source/multi_adapter_rl.mdx", "repo_id": "trl", "token_count": 1207 }
443
# 0. imports import os from dataclasses import dataclass, field from typing import Dict, Optional import torch from accelerate import Accelerator from datasets import Dataset, load_dataset from peft import LoraConfig from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, TrainingArguments, set_seed from trl import DPOTrainer # Define and parse arguments. @dataclass class ScriptArguments: """ The arguments for the DPO training script. """ # data parameters beta: Optional[float] = field(default=0.1, metadata={"help": "the beta parameter for DPO loss"}) # training parameters model_name_or_path: Optional[str] = field( default="../sft/results/final_checkpoint", metadata={"help": "the location of the SFT model name or path"}, ) learning_rate: Optional[float] = field(default=5e-4, metadata={"help": "optimizer learning rate"}) lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "the lr scheduler type"}) warmup_steps: Optional[int] = field(default=100, metadata={"help": "the number of warmup steps"}) weight_decay: Optional[float] = field(default=0.05, metadata={"help": "the weight decay"}) optimizer_type: Optional[str] = field(default="paged_adamw_32bit", metadata={"help": "the optimizer type"}) per_device_train_batch_size: Optional[int] = field(default=4, metadata={"help": "train batch size per device"}) per_device_eval_batch_size: Optional[int] = field(default=1, metadata={"help": "eval batch size per device"}) gradient_accumulation_steps: Optional[int] = field( default=4, metadata={"help": "the number of gradient accumulation steps"} ) gradient_checkpointing: Optional[bool] = field( default=True, metadata={"help": "whether to use gradient checkpointing"} ) gradient_checkpointing_use_reentrant: Optional[bool] = field( default=False, metadata={"help": "whether to use reentrant for gradient checkpointing"} ) lora_alpha: Optional[float] = field(default=16, metadata={"help": "the lora alpha parameter"}) lora_dropout: Optional[float] = field(default=0.05, metadata={"help": "the lora dropout parameter"}) lora_r: Optional[int] = field(default=8, metadata={"help": "the lora r parameter"}) max_prompt_length: Optional[int] = field(default=512, metadata={"help": "the maximum prompt length"}) max_length: Optional[int] = field(default=1024, metadata={"help": "the maximum sequence length"}) max_steps: Optional[int] = field(default=1000, metadata={"help": "max number of training steps"}) logging_steps: Optional[int] = field(default=10, metadata={"help": "the logging frequency"}) save_steps: Optional[int] = field(default=100, metadata={"help": "the saving frequency"}) eval_steps: Optional[int] = field(default=100, metadata={"help": "the evaluation frequency"}) output_dir: Optional[str] = field(default="./results", metadata={"help": "the output directory"}) log_freq: Optional[int] = field(default=1, metadata={"help": "the logging frequency"}) load_in_4bit: Optional[bool] = field(default=True, metadata={"help": "whether to load the model in 4bit"}) model_dtype: Optional[str] = field( default="float16", metadata={"help": "model_dtype[float16, bfloat16, float] for loading."} ) # instrumentation sanity_check: Optional[bool] = field(default=False, metadata={"help": "only train on 1000 samples"}) report_to: Optional[str] = field( default="wandb", metadata={ "help": 'The list of integrations to report the results and logs to. Supported platforms are `"azure_ml"`,' '`"comet_ml"`, `"mlflow"`, `"neptune"`, `"tensorboard"`,`"clearml"` and `"wandb"`. 
' 'Use `"all"` to report to all integrations installed, `"none"` for no integrations.' }, ) # debug argument for distributed training ignore_bias_buffers: Optional[bool] = field( default=False, metadata={ "help": "fix for DDP issues with LM bias/mask buffers - invalid scalar type,`inplace operation. See" "https://github.com/huggingface/transformers/issues/22482#issuecomment-1595790992" }, ) seed: Optional[int] = field( default=0, metadata={"help": "Random seed that will be set at the beginning of training."} ) def get_stack_exchange_paired( data_dir: str = "data/rl", sanity_check: bool = False, cache_dir: Optional[str] = None, num_proc=24, ) -> Dataset: """Load the stack-exchange-paired dataset from Hugging Face and convert it to the necessary format. The dataset is converted to a dictionary with the following structure: { 'prompt': List[str], 'chosen': List[str], 'rejected': List[str], } Prompts are structured as follows: "Question: " + <prompt> + "\n\nAnswer: " """ dataset = load_dataset( "lvwerra/stack-exchange-paired", split="train", cache_dir=cache_dir, data_dir=data_dir, ) original_columns = dataset.column_names if sanity_check: dataset = dataset.select(range(min(len(dataset), 1000))) def return_prompt_and_responses(samples) -> Dict[str, str]: return { "prompt": ["Question: " + question + "\n\nAnswer: " for question in samples["question"]], "chosen": samples["response_j"], "rejected": samples["response_k"], } return dataset.map( return_prompt_and_responses, batched=True, num_proc=num_proc, remove_columns=original_columns, ) if __name__ == "__main__": parser = HfArgumentParser(ScriptArguments) script_args = parser.parse_args_into_dataclasses()[0] set_seed(script_args.seed) # 1. load a pretrained model torch_dtype = torch.float if script_args.model_dtype == "float16": torch_dtype = torch.float16 elif script_args.model_dtype == "bfloat16": torch_dtype = torch.bfloat16 model = AutoModelForCausalLM.from_pretrained( script_args.model_name_or_path, low_cpu_mem_usage=True, torch_dtype=torch_dtype, load_in_4bit=script_args.load_in_4bit, device_map={"": Accelerator().local_process_index}, ) model.config.use_cache = False if script_args.ignore_bias_buffers: # torch distributed hack model._ddp_params_and_buffers_to_ignore = [ name for name, buffer in model.named_buffers() if buffer.dtype == torch.bool ] tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf") tokenizer.pad_token = tokenizer.eos_token # 2. Load the Stack-exchange paired dataset train_dataset = get_stack_exchange_paired(data_dir="data/rl", sanity_check=script_args.sanity_check) train_dataset = train_dataset.filter( lambda x: len(x["prompt"]) + len(x["chosen"]) <= script_args.max_length and len(x["prompt"]) + len(x["rejected"]) <= script_args.max_length ) # 3. Load evaluation dataset eval_dataset = get_stack_exchange_paired(data_dir="data/evaluation", sanity_check=True) eval_dataset = eval_dataset.filter( lambda x: len(x["prompt"]) + len(x["chosen"]) <= script_args.max_length and len(x["prompt"]) + len(x["rejected"]) <= script_args.max_length ) # 4. 
initialize training arguments: training_args = TrainingArguments( per_device_train_batch_size=script_args.per_device_train_batch_size, per_device_eval_batch_size=script_args.per_device_eval_batch_size, max_steps=script_args.max_steps, logging_steps=script_args.logging_steps, save_steps=script_args.save_steps, gradient_accumulation_steps=script_args.gradient_accumulation_steps, gradient_checkpointing=script_args.gradient_checkpointing, learning_rate=script_args.learning_rate, evaluation_strategy="steps", eval_steps=script_args.eval_steps, output_dir=script_args.output_dir, report_to=script_args.report_to, lr_scheduler_type=script_args.lr_scheduler_type, warmup_steps=script_args.warmup_steps, optim=script_args.optimizer_type, bf16=True, remove_unused_columns=False, run_name="dpo_llama2", gradient_checkpointing_kwargs=dict(use_reentrant=script_args.gradient_checkpointing_use_reentrant), seed=script_args.seed, ) peft_config = LoraConfig( r=script_args.lora_r, lora_alpha=script_args.lora_alpha, lora_dropout=script_args.lora_dropout, target_modules=[ "q_proj", "v_proj", "k_proj", "out_proj", "fc_in", "fc_out", "wte", ], bias="none", task_type="CAUSAL_LM", ) # 5. initialize the DPO trainer dpo_trainer = DPOTrainer( model, ref_model=None, args=training_args, beta=script_args.beta, train_dataset=train_dataset, eval_dataset=eval_dataset, tokenizer=tokenizer, peft_config=peft_config, max_prompt_length=script_args.max_prompt_length, max_length=script_args.max_length, ) # 6. train dpo_trainer.train() dpo_trainer.save_model(script_args.output_dir) # 7. save output_dir = os.path.join(script_args.output_dir, "final_checkpoint") dpo_trainer.model.save_pretrained(output_dir)
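# --- Illustrative sketch (not part of the original script) ---
# `get_stack_exchange_paired` maps each raw record to the prompt/chosen/rejected format that
# `DPOTrainer` expects. Re-declaring the inner mapping function on a toy batch (the question and
# answers below are made up purely for illustration) shows the resulting structure:
def _example_return_prompt_and_responses(samples):
    return {
        "prompt": ["Question: " + question + "\n\nAnswer: " for question in samples["question"]],
        "chosen": samples["response_j"],    # preferred answer
        "rejected": samples["response_k"],  # rejected answer
    }


_toy_batch = {
    "question": ["How do I reverse a list in Python?"],
    "response_j": ["Use my_list[::-1] or list(reversed(my_list))."],
    "response_k": ["You cannot."],
}
print(_example_return_prompt_and_responses(_toy_batch)["prompt"][0])
# -> "Question: How do I reverse a list in Python?\n\nAnswer: "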
trl/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py/0
{ "file_path": "trl/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py", "repo_id": "trl", "token_count": 3863 }
444
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ python examples/scripts/reward_modeling.py \ --model_name_or_path=facebook/opt-350m \ --output_dir="reward_modeling_anthropic_hh" \ --per_device_train_batch_size=64 \ --num_train_epochs=1 \ --gradient_accumulation_steps=16 \ --gradient_checkpointing=True \ --learning_rate=1.41e-5 \ --report_to="wandb" \ --remove_unused_columns=False \ --optim="adamw_torch" \ --logging_steps=10 \ --evaluation_strategy="steps" \ --max_length=512 \ """ import warnings import torch from datasets import load_dataset from tqdm import tqdm from transformers import AutoModelForSequenceClassification, AutoTokenizer, HfArgumentParser from trl import ModelConfig, RewardConfig, RewardTrainer, get_kbit_device_map, get_peft_config, get_quantization_config tqdm.pandas() if __name__ == "__main__": parser = HfArgumentParser((RewardConfig, ModelConfig)) reward_config, model_config = parser.parse_args_into_dataclasses() reward_config.gradient_checkpointing_kwargs = dict(use_reentrant=False) ################ # Model & Tokenizer ################ torch_dtype = ( model_config.torch_dtype if model_config.torch_dtype in ["auto", None] else getattr(torch, model_config.torch_dtype) ) quantization_config = get_quantization_config(model_config) model_kwargs = dict( revision=model_config.model_revision, trust_remote_code=model_config.trust_remote_code, device_map=get_kbit_device_map() if quantization_config is not None else None, quantization_config=quantization_config, ) tokenizer = AutoTokenizer.from_pretrained(model_config.model_name_or_path, use_fast=True) model = AutoModelForSequenceClassification.from_pretrained( model_config.model_name_or_path, num_labels=1, **model_kwargs ) if model_config.lora_task_type != "SEQ_CLS": warnings.warn( "You are using a `task_type` that is different than `SEQ_CLS` for PEFT. This will lead to silent bugs" " Make sure to pass --lora_task_type SEQ_CLS when using this script." 
) ################ # Dataset ################ raw_datasets = load_dataset("Anthropic/hh-rlhf") # Tokenize chosen/rejected pairs of inputs # Adapt this section to your needs for custom datasets def preprocess_function(examples): new_examples = { "input_ids_chosen": [], "attention_mask_chosen": [], "input_ids_rejected": [], "attention_mask_rejected": [], } for chosen, rejected in zip(examples["chosen"], examples["rejected"]): tokenized_chosen = tokenizer(chosen) tokenized_rejected = tokenizer(rejected) new_examples["input_ids_chosen"].append(tokenized_chosen["input_ids"]) new_examples["attention_mask_chosen"].append(tokenized_chosen["attention_mask"]) new_examples["input_ids_rejected"].append(tokenized_rejected["input_ids"]) new_examples["attention_mask_rejected"].append(tokenized_rejected["attention_mask"]) return new_examples # Preprocess the dataset and filter out examples that are longer than args.max_length raw_datasets = raw_datasets.map( preprocess_function, batched=True, num_proc=4, ) raw_datasets = raw_datasets.filter( lambda x: len(x["input_ids_chosen"]) <= reward_config.max_length and len(x["input_ids_rejected"]) <= reward_config.max_length ) train_dataset = raw_datasets["train"] eval_dataset = raw_datasets["test"] ################ # Training ################ trainer = RewardTrainer( model=model, tokenizer=tokenizer, args=reward_config, train_dataset=train_dataset, eval_dataset=eval_dataset, peft_config=get_peft_config(model_config), ) trainer.train() trainer.save_model(reward_config.output_dir)
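# --- Illustrative sketch (not part of the original script) ---
# `preprocess_function` turns every (chosen, rejected) pair into two tokenized sequences that
# `RewardTrainer` scores jointly. A toy, self-contained version of the same idea; the "gpt2"
# tokenizer and the example texts are assumptions made purely for illustration:
from transformers import AutoTokenizer

_tok = AutoTokenizer.from_pretrained("gpt2")
_pair = {"chosen": "\n\nHuman: Hi\n\nAssistant: Hello!", "rejected": "\n\nHuman: Hi\n\nAssistant: Go away."}
_toy_example = {
    "input_ids_chosen": _tok(_pair["chosen"])["input_ids"],
    "attention_mask_chosen": _tok(_pair["chosen"])["attention_mask"],
    "input_ids_rejected": _tok(_pair["rejected"])["input_ids"],
    "attention_mask_rejected": _tok(_pair["rejected"])["attention_mask"],
}
print({key: len(value) for key, value in _toy_example.items()})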
trl/examples/scripts/reward_modeling.py/0
{ "file_path": "trl/examples/scripts/reward_modeling.py", "repo_id": "trl", "token_count": 1819 }
445
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from trl.core import masked_mean, masked_var, masked_whiten, whiten class CoreTester(unittest.TestCase): """ A wrapper class for testing core utils functions """ @classmethod def setUpClass(cls): cls.test_input = torch.Tensor([1, 2, 3, 4]) cls.test_mask = torch.Tensor([0, 1, 1, 0]) cls.test_input_unmasked = cls.test_input[1:3] def test_masked_mean(self): assert torch.mean(self.test_input_unmasked) == masked_mean(self.test_input, self.test_mask) def test_masked_var(self): assert torch.var(self.test_input_unmasked) == masked_var(self.test_input, self.test_mask) def test_masked_whiten(self): whiten_unmasked = whiten(self.test_input_unmasked) whiten_masked = masked_whiten(self.test_input, self.test_mask)[1:3] diffs = (whiten_unmasked - whiten_masked).sum() assert abs(diffs.item()) < 0.00001
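# --- Illustrative sketch (not part of the original test file) ---
# What the tests above assert, written out by hand: with mask [0, 1, 1, 0] only the two middle
# elements contribute, so the masked statistics must agree with the plain statistics of that
# slice. A standalone rendition of the masked mean under that assumption (torch is already
# imported above):
def _masked_mean_sketch(values: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
    # Average only over positions where the mask is 1.
    return (values * mask).sum() / mask.sum()


_x = torch.tensor([1.0, 2.0, 3.0, 4.0])
_m = torch.tensor([0.0, 1.0, 1.0, 0.0])
assert torch.isclose(_masked_mean_sketch(_x, _m), _x[1:3].mean())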
trl/tests/test_core.py/0
{ "file_path": "trl/tests/test_core.py", "repo_id": "trl", "token_count": 568 }
446
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from trl import ( is_bitsandbytes_available, is_diffusers_available, is_peft_available, is_wandb_available, is_xpu_available, ) def require_peft(test_case): """ Decorator marking a test that requires peft. Skips the test if peft is not available. """ if not is_peft_available(): test_case = unittest.skip("test requires peft")(test_case) return test_case def require_bitsandbytes(test_case): """ Decorator marking a test that requires bnb. Skips the test if bnb is not available. """ if not is_bitsandbytes_available(): test_case = unittest.skip("test requires bnb")(test_case) return test_case def require_diffusers(test_case): """ Decorator marking a test that requires diffusers. Skips the test if diffusers is not available. """ if not is_diffusers_available(): test_case = unittest.skip("test requires diffusers")(test_case) return test_case def require_wandb(test_case, required: bool = True): """ Decorator marking a test that requires wandb. Skips the test if wandb is not available. """ # XOR, i.e.: # skip if available and required = False and # skip if not available and required = True if is_wandb_available() ^ required: test_case = unittest.skip("test requires wandb")(test_case) return test_case def require_no_wandb(test_case): """ Decorator marking a test that requires no wandb. Skips the test if wandb is available. """ return require_wandb(test_case, required=False) def require_torch_multi_gpu(test_case): """ Decorator marking a test that requires multiple GPUs. Skips the test if there aren't enough GPUs. """ if torch.cuda.device_count() < 2: test_case = unittest.skip("test requires multiple GPUs")(test_case) return test_case def require_torch_gpu(test_case): """ Decorator marking a test that requires GPUs. Skips the test if there is no GPU. """ if not torch.cuda.is_available(): test_case = unittest.skip("test requires GPU")(test_case) return test_case def require_torch_multi_xpu(test_case): """ Decorator marking a test that requires multiple XPUs. Skips the test if there aren't enough XPUs. """ if torch.xpu.device_count() < 2 and is_xpu_available(): test_case = unittest.skip("test requires multiple XPUs")(test_case) return test_case
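# --- Illustrative usage sketch (not part of the original file) ---
# The decorators above are meant to be stacked on individual test cases so that a test is
# skipped cleanly whenever an optional dependency or accelerator is missing, e.g.:
class _ExampleTester(unittest.TestCase):
    @require_peft
    @require_torch_gpu
    def test_lora_on_gpu(self):
        # Runs only when peft is installed and at least one CUDA GPU is visible.
        self.assertTrue(True)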
trl/tests/testing_utils.py/0
{ "file_path": "trl/tests/testing_utils.py", "repo_id": "trl", "token_count": 1062 }
447
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ State dict utilities: utility methods for converting state dicts easily File copied from diffusers to avoid import issues and make TRL compatible with most of diffusers versions. """ import enum class StateDictType(enum.Enum): """ The mode to use when converting state dicts. """ DIFFUSERS_OLD = "diffusers_old" PEFT = "peft" PEFT_TO_DIFFUSERS = { ".q_proj.lora_B": ".q_proj.lora_linear_layer.up", ".q_proj.lora_A": ".q_proj.lora_linear_layer.down", ".k_proj.lora_B": ".k_proj.lora_linear_layer.up", ".k_proj.lora_A": ".k_proj.lora_linear_layer.down", ".v_proj.lora_B": ".v_proj.lora_linear_layer.up", ".v_proj.lora_A": ".v_proj.lora_linear_layer.down", ".out_proj.lora_B": ".out_proj.lora_linear_layer.up", ".out_proj.lora_A": ".out_proj.lora_linear_layer.down", "to_k.lora_A": "to_k.lora.down", "to_k.lora_B": "to_k.lora.up", "to_q.lora_A": "to_q.lora.down", "to_q.lora_B": "to_q.lora.up", "to_v.lora_A": "to_v.lora.down", "to_v.lora_B": "to_v.lora.up", "to_out.0.lora_A": "to_out.0.lora.down", "to_out.0.lora_B": "to_out.0.lora.up", } DIFFUSERS_OLD_TO_DIFFUSERS = { ".to_q_lora.up": ".q_proj.lora_linear_layer.up", ".to_q_lora.down": ".q_proj.lora_linear_layer.down", ".to_k_lora.up": ".k_proj.lora_linear_layer.up", ".to_k_lora.down": ".k_proj.lora_linear_layer.down", ".to_v_lora.up": ".v_proj.lora_linear_layer.up", ".to_v_lora.down": ".v_proj.lora_linear_layer.down", ".to_out_lora.up": ".out_proj.lora_linear_layer.up", ".to_out_lora.down": ".out_proj.lora_linear_layer.down", } DIFFUSERS_STATE_DICT_MAPPINGS = { StateDictType.DIFFUSERS_OLD: DIFFUSERS_OLD_TO_DIFFUSERS, StateDictType.PEFT: PEFT_TO_DIFFUSERS, } KEYS_TO_ALWAYS_REPLACE = { ".processor.": ".", } def convert_state_dict(state_dict, mapping): r""" Simply iterates over the state dict and replaces the patterns in `mapping` with the corresponding values. Args: state_dict (`dict[str, torch.Tensor]`): The state dict to convert. mapping (`dict[str, str]`): The mapping to use for conversion, the mapping should be a dictionary with the following structure: - key: the pattern to replace - value: the pattern to replace with Returns: converted_state_dict (`dict`) The converted state dict. """ converted_state_dict = {} for k, v in state_dict.items(): # First, filter out the keys that we always want to replace for pattern in KEYS_TO_ALWAYS_REPLACE.keys(): if pattern in k: new_pattern = KEYS_TO_ALWAYS_REPLACE[pattern] k = k.replace(pattern, new_pattern) for pattern in mapping.keys(): if pattern in k: new_pattern = mapping[pattern] k = k.replace(pattern, new_pattern) break converted_state_dict[k] = v return converted_state_dict def convert_state_dict_to_diffusers(state_dict, original_type=None, **kwargs): r""" Converts a state dict to new diffusers format. The state dict can be from previous diffusers format (`OLD_DIFFUSERS`), or PEFT format (`PEFT`) or new diffusers format (`DIFFUSERS`). In the last case the method will return the state dict as is. 
The method only supports the conversion from diffusers old, PEFT to diffusers new for now. Args: state_dict (`dict[str, torch.Tensor]`): The state dict to convert. original_type (`StateDictType`, *optional*): The original type of the state dict, if not provided, the method will try to infer it automatically. kwargs (`dict`, *args*): Additional arguments to pass to the method. - **adapter_name**: For example, in case of PEFT, some keys will be pre-pended with the adapter name, therefore needs a special handling. By default PEFT also takes care of that in `get_peft_model_state_dict` method: https://github.com/huggingface/peft/blob/ba0477f2985b1ba311b83459d29895c809404e99/src/peft/utils/save_and_load.py#L92 but we add it here in case we don't want to rely on that method. """ peft_adapter_name = kwargs.pop("adapter_name", None) if peft_adapter_name is not None: peft_adapter_name = "." + peft_adapter_name else: peft_adapter_name = "" if original_type is None: # Old diffusers to PEFT if any("to_out_lora" in k for k in state_dict.keys()): original_type = StateDictType.DIFFUSERS_OLD elif any(f".lora_A{peft_adapter_name}.weight" in k for k in state_dict.keys()): original_type = StateDictType.PEFT elif any("lora_linear_layer" in k for k in state_dict.keys()): # nothing to do return state_dict else: raise ValueError("Could not automatically infer state dict type") if original_type not in DIFFUSERS_STATE_DICT_MAPPINGS.keys(): raise ValueError(f"Original type {original_type} is not supported") mapping = DIFFUSERS_STATE_DICT_MAPPINGS[original_type] return convert_state_dict(state_dict, mapping)
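# --- Illustrative sketch (not part of the original file) ---
# How the key remapping behaves on a toy LoRA state dict: the ".processor." segment is always
# collapsed first (KEYS_TO_ALWAYS_REPLACE), then PEFT-style keys are renamed to the diffusers
# layout via PEFT_TO_DIFFUSERS. The key names and tensor shapes below are arbitrary placeholders.
import torch

_toy_state_dict = {
    "unet.up_blocks.0.attentions.0.processor.to_q.lora_A.weight": torch.zeros(4, 4),
    "unet.up_blocks.0.attentions.0.processor.to_q.lora_B.weight": torch.zeros(4, 4),
}
print(list(convert_state_dict(_toy_state_dict, PEFT_TO_DIFFUSERS).keys()))
# -> ['unet.up_blocks.0.attentions.0.to_q.lora.down.weight',
#     'unet.up_blocks.0.attentions.0.to_q.lora.up.weight']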
trl/trl/models/sd_utils.py/0
{ "file_path": "trl/trl/models/sd_utils.py", "repo_id": "trl", "token_count": 2507 }
448
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import warnings from collections import deque from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import torch from accelerate import PartialState from rich.console import Console, Group from rich.live import Live from rich.panel import Panel from rich.progress import Progress from torch.nn.utils.rnn import pad_sequence from torch.utils.data import IterableDataset from transformers import BitsAndBytesConfig, DataCollatorForLanguageModeling, PreTrainedTokenizerBase from transformers.trainer import TrainerCallback from transformers.trainer_utils import has_length from ..import_utils import is_peft_available, is_unsloth_available, is_xpu_available from ..trainer.model_config import ModelConfig if is_peft_available(): from peft import LoraConfig, PeftConfig class AdaptiveKLController: """ Adaptive KL controller described in the paper: https://arxiv.org/pdf/1909.08593.pdf """ def __init__(self, init_kl_coef, target, horizon): self.value = init_kl_coef self.target = target self.horizon = horizon def update(self, current, n_steps): target = self.target proportional_error = np.clip(current / target - 1, -0.2, 0.2) mult = 1 + proportional_error * n_steps / self.horizon self.value *= mult class FixedKLController: """Fixed KL controller.""" def __init__(self, kl_coef): self.value = kl_coef def update(self, current, n_steps): pass class DataCollatorForCompletionOnlyLM(DataCollatorForLanguageModeling): """ Data collator used for completion tasks. It ensures that all the tokens of the labels are set to an 'ignore_index' when they do not come from the assistant. This ensure that the loss is only calculated on the completion made by the assistant. Args: response_template (`Union[str, List[int]]`): the template form that indicates the start of the response, typically something like '### Response:\n'. It can also be passed as tokenized ids, which can be useful when using a tokenizer that encodes the response differently if it does not have proper context. instruction_template (`Union[str, List[int]]`): the template form that indicates the start of the human instruction, typically something like '### Human:\n'. Useful for assistant-style conversation datasets. It can also be passed as tokenized ids. mlm (`bool`, *optional*, defaults to `False`): Whether or not to use masked language modeling in the underlying `DataCollatorForLanguageModeling` class. Note that this option currently has no effect but is present for flexibility and backwards-compatibility. 
ignore_index (`int`, *optional*, defaults to `-100`): The index to use to ignore the initial tokens with """ def __init__( self, response_template: Union[str, List[int]], instruction_template: Optional[Union[str, List[int]]] = None, *args, mlm: bool = False, ignore_index: int = -100, **kwargs, ): super().__init__(*args, mlm=mlm, **kwargs) self.instruction_template = instruction_template if isinstance(instruction_template, str): # The user provides a string, must tokenize self.instruction_token_ids = self.tokenizer.encode(self.instruction_template, add_special_tokens=False) else: # The user already provides the token ids self.instruction_token_ids = instruction_template self.response_template = response_template if isinstance(response_template, str): # The user provides a string, must tokenize self.response_token_ids = self.tokenizer.encode(self.response_template, add_special_tokens=False) else: # The user already provides the token ids self.response_token_ids = response_template if not self.mlm and self.instruction_template and self.tokenizer.pad_token_id == self.tokenizer.eos_token_id: warnings.warn( "The pad_token_id and eos_token_id values of this tokenizer are identical. " "If you are planning for multi-turn training, " "it can result in the model continuously generating questions and answers without eos token. " "To avoid this, set the pad_token_id to a different value." ) self.ignore_index = ignore_index def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]: batch = super().torch_call(examples) if self.instruction_template is None: for i in range(len(examples)): response_token_ids_start_idx = None for idx in np.where(batch["labels"][i] == self.response_token_ids[0])[0]: # `response_token_ids` is `'### Response:\n'`, here we are just making sure that the token IDs match if ( self.response_token_ids == batch["labels"][i][idx : idx + len(self.response_token_ids)].tolist() ): response_token_ids_start_idx = idx if response_token_ids_start_idx is None: warnings.warn( f"Could not find response key `{self.response_template}` in the " f'following instance: {self.tokenizer.decode(batch["input_ids"][i])} ' f"This instance will be ignored in loss calculation. " f"Note, if this happens often, consider increasing the `max_seq_length`." ) batch["labels"][i, :] = self.ignore_index else: response_token_ids_end_idx = response_token_ids_start_idx + len(self.response_token_ids) # Make pytorch loss function ignore all tokens up through the end of the response key batch["labels"][i, :response_token_ids_end_idx] = self.ignore_index else: for i in range(len(examples)): response_token_ids_idxs = [] human_token_ids_idxs = [] for assistant_idx in np.where(batch["labels"][i] == self.response_token_ids[0])[0]: # find the indexes of the start of a response. if ( self.response_token_ids == batch["labels"][i][assistant_idx : assistant_idx + len(self.response_token_ids)].tolist() ): response_token_ids_idxs.append(assistant_idx + len(self.response_token_ids)) if len(response_token_ids_idxs) == 0: warnings.warn( f"Could not find response key `{self.response_template}` in the " f'following instance: {self.tokenizer.decode(batch["input_ids"][i])} ' f"This instance will be ignored in loss calculation. " f"Note, if this happens often, consider increasing the `max_seq_length`." 
) batch["labels"][i, :] = self.ignore_index human_token_ids = self.instruction_token_ids for human_idx in np.where(batch["labels"][i] == human_token_ids[0])[0]: # find the indexes of the start of a human answer. if human_token_ids == batch["labels"][i][human_idx : human_idx + len(human_token_ids)].tolist(): human_token_ids_idxs.append(human_idx) if len(human_token_ids_idxs) == 0: warnings.warn( f"Could not find instruction key `{self.instruction_template}` in the " f'following instance: {self.tokenizer.decode(batch["input_ids"][i])} ' f"This instance will be ignored in loss calculation. " f"Note, if this happens often, consider increasing the `max_seq_length`." ) batch["labels"][i, :] = self.ignore_index if ( len(human_token_ids_idxs) > 0 and len(response_token_ids_idxs) > 0 and human_token_ids_idxs[0] > response_token_ids_idxs[0] ): human_token_ids_idxs = [0] + human_token_ids_idxs for idx, (start, end) in enumerate(zip(human_token_ids_idxs, response_token_ids_idxs)): # Make pytorch loss function ignore all non response tokens if idx != 0: batch["labels"][i, start:end] = self.ignore_index else: batch["labels"][i, :end] = self.ignore_index if len(response_token_ids_idxs) < len(human_token_ids_idxs): batch["labels"][i, human_token_ids_idxs[-1] :] = self.ignore_index return batch @dataclass class RewardDataCollatorWithPadding: r""" Reward DataCollator class that pads the inputs to the maximum length of the batch. Args: tokenizer (`PreTrainedTokenizerBase`): The tokenizer used for encoding the data. padding (`Union[bool, str, `PaddingStrategy`]`, `optional`, defaults to `True`): padding_strategy to pass to the tokenizer. max_length (`Optional[int]`, `optional`, defaults to `None`): The maximum length of the sequence to be processed. pad_to_multiple_of (`Optional[int]`, `optional`, defaults to `None`): If set will pad the sequence to a multiple of the provided value. return_tensors (`str`, `optional`, defaults to `"pt"`): The tensor type to use. """ tokenizer: PreTrainedTokenizerBase padding: Union[bool, str] = True max_length: Optional[int] = None pad_to_multiple_of: Optional[int] = None return_tensors: str = "pt" def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]: features_chosen = [] features_rejected = [] margin = [] # check if we have a margin. 
If we do, we need to batch it as well has_margin = "margin" in features[0] for feature in features: # check if the keys are named as expected if ( "input_ids_chosen" not in feature or "input_ids_rejected" not in feature or "attention_mask_chosen" not in feature or "attention_mask_rejected" not in feature ): raise ValueError( "The features should include `input_ids_chosen`, `attention_mask_chosen`, `input_ids_rejected` and `attention_mask_rejected`" ) features_chosen.append( { "input_ids": feature["input_ids_chosen"], "attention_mask": feature["attention_mask_chosen"], } ) features_rejected.append( { "input_ids": feature["input_ids_rejected"], "attention_mask": feature["attention_mask_rejected"], } ) if has_margin: margin.append(feature["margin"]) batch_chosen = self.tokenizer.pad( features_chosen, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors=self.return_tensors, ) batch_rejected = self.tokenizer.pad( features_rejected, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors=self.return_tensors, ) batch = { "input_ids_chosen": batch_chosen["input_ids"], "attention_mask_chosen": batch_chosen["attention_mask"], "input_ids_rejected": batch_rejected["input_ids"], "attention_mask_rejected": batch_rejected["attention_mask"], "return_loss": True, } if has_margin: margin = torch.tensor(margin, dtype=torch.float) batch["margin"] = margin return batch @dataclass class DPODataCollatorWithPadding: r""" DPO DataCollator class that pads the tokenized inputs to the maximum length of the batch. Args: pad_token_id (`int` defaults to 0): The tokenizer's pad_token_id. label_pad_token_id (`int`, defaults to -100): The label used for masking. is_encoder_decoder (`Optional[bool]`, `optional`, defaults to `None`): Whether or not you model has an encoder_decoder architecture. """ pad_token_id: int = 0 label_pad_token_id: int = -100 is_encoder_decoder: Optional[bool] = False def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]: # first, pad everything to the same length padded_batch = {} for k in features[0].keys(): if k.endswith("_input_ids") or k.endswith("_attention_mask") or k.endswith("_labels"): if self.is_encoder_decoder: to_pad = [torch.LongTensor(ex[k]) for ex in features] if (k.startswith("prompt")) and (k.endswith("input_ids")): if self.pad_token_id is None: raise ValueError( "Padding is enabled, but the tokenizer is not configured with a padding token." " Explicitly set `tokenizer.pad_token` (e.g. `tokenizer.pad_token = tokenizer.eos_token`)" " before calling the trainer." ) padding_value = self.pad_token_id elif k.endswith("_attention_mask"): padding_value = 0 elif k.startswith(("chosen", "rejected", "completion")) or ("decoder" in k): padding_value = self.label_pad_token_id else: raise ValueError(f"Unexpected key in batch '{k}'") padded_batch[k] = pad_sequence(to_pad, batch_first=True, padding_value=padding_value) else: # adapted from https://stackoverflow.com/questions/73256206 if "prompt" in k: to_pad = [torch.LongTensor(ex[k][::-1]) for ex in features] else: to_pad = [torch.LongTensor(ex[k]) for ex in features] if k.endswith("_input_ids"): if self.pad_token_id is None: raise ValueError( "Padding is enabled, but the tokenizer is not configured with a padding token." " Explicitly set `tokenizer.pad_token` (e.g. `tokenizer.pad_token = tokenizer.eos_token`)" " before calling the trainer." 
) padding_value = self.pad_token_id elif k.endswith("_labels"): padding_value = self.label_pad_token_id elif k.endswith("_attention_mask"): padding_value = 0 else: raise ValueError(f"Unexpected key in batch '{k}'") padded_batch[k] = pad_sequence(to_pad, batch_first=True, padding_value=padding_value) # for the prompt, flip back so padding is on left side if "prompt" in k: padded_batch[k] = padded_batch[k].flip(dims=[1]) elif k.endswith("_logps"): # the cached reference model logprobs padded_batch[k] = torch.tensor([ex[k] for ex in features]) else: padded_batch[k] = [ex[k] for ex in features] return padded_batch class ConstantLengthDataset(IterableDataset): """ Iterable dataset that returns constant length chunks of tokens from stream of text files. The dataset also formats the text before tokenization with a specific format that is provided by the user. Args: tokenizer (`transformers.PreTrainedTokenizer`): The processor used for processing the data. dataset (`dataset.Dataset`): Dataset with text files. dataset_text_field (`str`, **optional**): Name of the field in the dataset that contains the text. Used only if `formatting_func` is `None`. formatting_func (`Callable`, **optional**): Function that formats the text before tokenization. Usually it is recommended to have follows a certain pattern such as `"### Question: {question} ### Answer: {answer}"` infinite (`bool`, *optional*, defaults to `False`): If True the iterator is reset after dataset reaches end else stops. seq_length (`int`, *optional*, defaults to `1024`): Length of token sequences to return. num_of_sequences (`int`, *optional*, defaults to `1024`): Number of token sequences to keep in buffer. chars_per_token (`int`, *optional*, defaults to `3.6`): Number of characters per token used to estimate number of tokens in text buffer. eos_token_id (`int`, *optional*, defaults to `0`): Id of the end of sequence token if the passed tokenizer does not have an EOS token. shuffle ('bool', *optional*, defaults to True) Shuffle the examples before they are returned append_concat_token ('bool', *optional*, defaults to True) If true, appends `eos_token_id` at the end of each sample being packed. add_special_tokens ('bool', *optional*, defaults to True) If true, tokenizers adds special tokens to each sample being packed. """ def __init__( self, tokenizer, dataset, dataset_text_field=None, formatting_func=None, infinite=False, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6, eos_token_id=0, shuffle=True, append_concat_token=True, add_special_tokens=True, ): self.tokenizer = tokenizer if tokenizer.eos_token_id is None: warnings.warn( "The passed tokenizer does not have an EOS token. We will use the passed eos_token_id instead which corresponds" f" to {eos_token_id}. If this is not the correct EOS token, make sure to pass the correct eos_token_id." ) self.concat_token_id = tokenizer.eos_token_id if tokenizer.eos_token_id else eos_token_id self.dataset = dataset self.seq_length = seq_length self.infinite = infinite self.current_size = 0 self.max_buffer_size = seq_length * chars_per_token * num_of_sequences self.shuffle = shuffle self.append_concat_token = append_concat_token self.add_special_tokens = add_special_tokens if formatting_func is None: self.formatting_func = lambda x: x[dataset_text_field] else: self.formatting_func = formatting_func if formatting_func is not None: if formatting_func.__code__.co_argcount > 1: warnings.warn( "The passed formatting_func has more than one argument. 
Usually that function should have a single argument `example`" " which corresponds to the dictionary returned by each element of the dataset. Make sure you know what you are doing." ) def __len__(self): return len(self.dataset) def __iter__(self): iterator = iter(self.dataset) more_examples = True while more_examples: buffer, buffer_len = [], 0 while True: if buffer_len >= self.max_buffer_size: break try: buffer.append(self.formatting_func(next(iterator))) buffer_len += len(buffer[-1]) except StopIteration: if self.infinite: iterator = iter(self.dataset) warnings.warn("The dataset reached end and the iterator is reset to the start.") else: more_examples = False break tokenized_inputs = self.tokenizer(buffer, add_special_tokens=self.add_special_tokens, truncation=False)[ "input_ids" ] all_token_ids = [] for tokenized_input in tokenized_inputs: if self.append_concat_token: tokenized_input = tokenized_input + [self.concat_token_id] all_token_ids.extend(tokenized_input) examples = [] for i in range(0, len(all_token_ids), self.seq_length): input_ids = all_token_ids[i : i + self.seq_length] if len(input_ids) == self.seq_length: examples.append(input_ids) if self.shuffle: random.shuffle(examples) for example in examples: self.current_size += 1 yield { "input_ids": torch.LongTensor(example), "labels": torch.LongTensor(example), } class RunningMoments: def __init__(self, accelerator): """ Calculates the running mean and standard deviation of a data stream. Reference: https://github.com/OpenLMLab/MOSS-RLHF/blob/40b91eb2f2b71b16919addede0341d2bef70825d/utils.py#L75 """ self.mean = 0 self.std = 1 self.var = 1 self.count = 1e-24 self.accelerator = accelerator @torch.no_grad() def update(self, xs: torch.Tensor) -> Tuple[float, float]: """ Updates running moments from batch's moments computed across ranks """ if self.accelerator.use_distributed: xs_mean, xs_var, xs_count = get_global_statistics(self.accelerator, xs) else: xs_count = xs.numel() xs_var, xs_mean = torch.var_mean(xs, unbiased=False) xs_mean, xs_var = xs_mean.float(), xs_var.float() delta = xs_mean - self.mean tot_count = self.count + xs_count new_sum = xs_var * xs_count # correct old_sum deviation accounting for the new mean old_sum = self.var * self.count + delta**2 * self.count * xs_count / tot_count tot_sum = old_sum + new_sum self.mean += delta * xs_count / tot_count self.var = tot_sum / tot_count self.std = (self.var * tot_count / (tot_count - 1)).float().sqrt() self.count = tot_count return xs_mean.item(), (xs_var * xs_count / (xs_count - 1)).float().sqrt().item() @torch.no_grad() def get_global_statistics(accelerator, xs: torch.Tensor, mask=None, device="cpu") -> Tuple[float, float, int]: """ Computes element-wise mean and variance of the tensor across processes. Reference: https://github.com/OpenLMLab/MOSS-RLHF/blob/40b91eb2f2b71b16919addede0341d2bef70825d/utils.py#L57C1-L73C75 """ xs = xs.to(accelerator.device) sum_and_count = torch.tensor([xs.sum(), (xs.numel() if mask is None else mask.sum())], device=xs.device) sum_and_count = accelerator.reduce(sum_and_count) global_sum, count = sum_and_count global_mean = global_sum / count sum_var = torch.sum(((xs - global_mean) ** 2).mul(1 if mask is None else mask)) sum_var = accelerator.reduce(sum_var) global_var = sum_var / count return global_mean.to(device), global_var.to(device), count.to(device) def compute_accuracy(eval_pred) -> Dict[str, float]: predictions, labels = eval_pred # Here, predictions is rewards_chosen and rewards_rejected. 
# We want to see how much of the time rewards_chosen > rewards_rejected. if np.array(predictions[:, 0] == predictions[:, 1], dtype=float).sum() > 0: warnings.warn( f"There are {np.array(predictions[:, 0] == predictions[:, 1]).sum()} out of {len(predictions[:, 0])} instances where the predictions for both options are equal. As a consequence the accuracy can be misleading." ) predictions = np.argmax(predictions, axis=1) accuracy = np.array(predictions == labels, dtype=float).mean().item() return {"accuracy": accuracy} def pad_to_length(tensor: torch.Tensor, length: int, pad_value: Union[int, float], dim: int = -1) -> torch.Tensor: if tensor.size(dim) >= length: return tensor else: pad_size = list(tensor.shape) pad_size[dim] = length - tensor.size(dim) return torch.cat( [ tensor, pad_value * torch.ones(*pad_size, dtype=tensor.dtype, device=tensor.device), ], dim=dim, ) def disable_dropout_in_model(model: torch.nn.Module) -> None: for module in model.modules(): if isinstance(module, torch.nn.Dropout): module.p = 0 def exact_div(a, b, a_str, b_str, custom_error_message=""): q = a // b if a != q * b: raise ValueError(f"{custom_error_message}, {a_str}={a}, {b_str}={b}, inexact division: {a} / {b} = {a / b}") return q # copied from https://github.com/kvablack/ddpo-pytorch/blob/main/ddpo_pytorch/stat_tracking.py#L5 class PerPromptStatTracker: r""" Class for tracking statistics per prompt. Mainly used to calculate advantage for the DPPO algorithm Args: buffer_size (`int`): Size of the buffer to keep for each prompt. min_count (`int`): Minimum number of samples to keep in the buffer before calculating the mean and std. """ def __init__(self, buffer_size, min_count): self.buffer_size = buffer_size self.min_count = min_count self.stats = {} def update(self, prompts, rewards): prompts = np.array(prompts) rewards = np.array(rewards) unique = np.unique(prompts) advantages = np.empty_like(rewards) for prompt in unique: prompt_rewards = rewards[prompts == prompt] if prompt not in self.stats: self.stats[prompt] = deque(maxlen=self.buffer_size) self.stats[prompt].extend(prompt_rewards) if len(self.stats[prompt]) < self.min_count: mean = np.mean(rewards) std = np.std(rewards) + 1e-6 else: mean = np.mean(self.stats[prompt]) std = np.std(self.stats[prompt]) + 1e-6 advantages[prompts == prompt] = (prompt_rewards - mean) / std return advantages def get_stats(self): return {k: {"mean": np.mean(v), "std": np.std(v), "count": len(v)} for k, v in self.stats.items()} def neftune_post_forward_hook(module, input, output): """ Implements the NEFTune forward pass for the model using forward hooks. Note this works only for torch.nn.Embedding layers. This method is slightly adapted from the original source code that can be found here: https://github.com/neelsjain/NEFTune Simply add it to your model as follows: ```python model = ... model.embed_tokens.neftune_noise_alpha = 0.1 model.embed_tokens.register_forward_hook(neftune_post_forward_hook) ``` Args: module (`torch.nn.Module`): The embedding module where the hook is attached. Note that you need to set `module.neftune_noise_alpha` to the desired noise alpha value. input (`torch.Tensor`): The input tensor to the model. output (`torch.Tensor`): The output tensor of the model (i.e. the embeddings). 
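    Returns:
        `torch.Tensor`: The embedding output, with uniform NEFTune noise added when the module is in
        training mode, and returned unchanged otherwise.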
""" if module.training: dims = torch.tensor(output.size(1) * output.size(2)) mag_norm = module.neftune_noise_alpha / torch.sqrt(dims) output = output + torch.zeros_like(output).uniform_(-mag_norm, mag_norm) return output def peft_module_casting_to_bf16(model): from peft.tuners.tuners_utils import BaseTunerLayer for name, module in model.named_modules(): if isinstance(module, BaseTunerLayer): module = module.to(torch.bfloat16) elif isinstance(module, torch.nn.LayerNorm) or "norm" in name: module = module.to(torch.float32) elif any(x in name for x in ["lm_head", "embed_tokens", "wte", "wpe"]): if hasattr(module, "weight"): if module.weight.dtype == torch.float32: module = module.to(torch.bfloat16) def trl_sanitze_kwargs_for_tagging(model, tag_names, kwargs=None): if is_unsloth_available(): # Unsloth adds a new attribute in the model config `unsloth_version` # to keep track of models that have been patched with unsloth. if hasattr(model, "config") and getattr(model.config, "unsloth_version", None) is not None: tag_names.append("unsloth") if kwargs is not None: if "tags" not in kwargs: kwargs["tags"] = tag_names elif "tags" in kwargs and isinstance(kwargs["tags"], list): kwargs["tags"].extend(tag_names) elif "tags" in kwargs and isinstance(kwargs["tags"], str): tag_names.append(kwargs["tags"]) kwargs["tags"] = tag_names return kwargs def get_quantization_config(model_config: ModelConfig) -> Optional[BitsAndBytesConfig]: if model_config.load_in_4bit: quantization_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_compute_dtype=model_config.torch_dtype, # For consistency with model weights, we use the same value as `torch_dtype` bnb_4bit_quant_type=model_config.bnb_4bit_quant_type, bnb_4bit_use_double_quant=model_config.use_bnb_nested_quant, ) elif model_config.load_in_8bit: quantization_config = BitsAndBytesConfig( load_in_8bit=True, ) else: quantization_config = None return quantization_config def get_kbit_device_map() -> Optional[Dict[str, int]]: if is_xpu_available(): return {"": f"xpu:{PartialState().local_process_index}"} elif torch.cuda.is_available(): return {"": PartialState().local_process_index} else: return None def get_peft_config(model_config: ModelConfig) -> "Optional[PeftConfig]": if model_config.use_peft is False: return None if not is_peft_available(): raise ValueError( "You need to have PEFT library installed in your environment, make sure to install `peft`. " "Make sure to run `pip install -U peft`." ) peft_config = LoraConfig( r=model_config.lora_r, lora_alpha=model_config.lora_alpha, lora_dropout=model_config.lora_dropout, bias="none", task_type=model_config.lora_task_type, target_modules=model_config.lora_target_modules, modules_to_save=model_config.lora_modules_to_save, ) return peft_config class RichProgressCallback(TrainerCallback): """ A [`TrainerCallback`] that displays the progress of training or evaluation using Rich. 
""" def __init__(self): self.training_bar = Progress() self.prediction_bar = Progress() self.training_task_id = None self.prediction_task_id = None self.rich_group = None self.rich_console = None def on_train_begin(self, args, state, control, **kwargs): if state.is_world_process_zero: self.rich_console = Console() self.training_status = self.rich_console.status("Nothing to log yet ...") self.rich_group = Live(Panel(Group(self.training_bar, self.prediction_bar, self.training_status))) self.rich_group.start() # self.training_bar.start() self.training_task_id = self.training_bar.add_task("[blue]Training the model", total=state.max_steps) self.current_step = 0 def on_step_end(self, args, state, control, **kwargs): if state.is_world_process_zero: self.training_bar.update(self.training_task_id, advance=state.global_step - self.current_step, update=True) self.current_step = state.global_step def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs): if state.is_world_process_zero and has_length(eval_dataloader): if self.prediction_bar is None: # self.prediction_bar.start() self.prediction_task_id = self.prediction_bar.add_task( "[blue]Predicting on the evaluation dataset", total=state.max_steps ) self.prediction_bar.update(self.prediction_task_id, advance=1, update=True) def on_evaluate(self, args, state, control, **kwargs): if state.is_world_process_zero: if self.prediction_bar is not None: self.prediction_bar.close() self.prediction_bar = None def on_predict(self, args, state, control, **kwargs): if state.is_world_process_zero: if self.prediction_bar is not None: self.prediction_bar.stop() self.prediction_bar.remove_task(self.prediction_task_id) self.prediction_bar = None def on_log(self, args, state, control, logs=None, **kwargs): if state.is_world_process_zero and self.training_bar is not None: _ = logs.pop("total_flos", None) self.training_status.update(f"[bold green]Status = {str(logs)}") def on_train_end(self, args, state, control, **kwargs): if state.is_world_process_zero: self.training_bar.stop() self.rich_group.stop() self.training_bar = None
trl/trl/trainer/utils.py/0
{ "file_path": "trl/trl/trainer/utils.py", "repo_id": "trl", "token_count": 15913 }
449
# Builds CPU-only Docker image of PyTorch # Uses multi-staged approach to reduce size # Stage 1 FROM python:3.8-slim as compile-image ARG DEBIAN_FRONTEND=noninteractive RUN apt update RUN apt-get install -y --no-install-recommends \ build-essential \ git \ gcc # Setup virtual environment for Docker ENV VIRTUAL_ENV=/opt/venv RUN python3 -m venv ${VIRTUAL_ENV} # Make sure we use the virtualenv ENV PATH="${VIRTUAL_ENV}/bin:$PATH" WORKDIR /workspace # Install specific CPU torch wheel to save on space RUN python3 -m pip install --upgrade --no-cache-dir pip RUN python3 -m pip install --no-cache-dir \ jupyter \ git+https://github.com/huggingface/accelerate#egg=accelerate[testing,test_trackers] \ --extra-index-url https://download.pytorch.org/whl/cpu # Stage 2 FROM python:3.8-slim AS build-image COPY --from=compile-image /opt/venv /opt/venv RUN useradd -ms /bin/bash user USER user # Make sure we use the virtualenv ENV PATH="/opt/venv/bin:$PATH" CMD ["/bin/bash"]
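# Example (assumed) commands to build and try the image, run from the repository root:
#   docker build -f docker/accelerate-cpu/Dockerfile -t accelerate-cpu .
#   docker run -it --rm accelerate-cpu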
accelerate/docker/accelerate-cpu/Dockerfile/0
{ "file_path": "accelerate/docker/accelerate-cpu/Dockerfile", "repo_id": "accelerate", "token_count": 380 }
0
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 🤗 Accelerate's internal mechanisms Internally, 🤗 Accelerate works by first analyzing the environment in which the script is launched to determine which kind of distributed setup is used, how many different processes there are and which one the current script is in. All that information is stored in the [`~AcceleratorState`]. This class is initialized the first time you instantiate an [`~Accelerator`] as well as performing any specific initialization your distributed setup needs. Its state is then uniquely shared through all instances of [`~state.AcceleratorState`]. (The same can also be done with the [`PartialState`], a more barebones version it inherits) Then, when calling [`~Accelerator.prepare`], the library: - wraps your model(s) in the container adapted for the distributed setup, - wraps your optimizer(s) in an [`~optimizer.AcceleratedOptimizer`], - wraps your scheduler(s) in an [`~scheduler.AcceleratedScheduler`] - creates a new version of your dataloader(s) in a [`~data_loader.DataLoaderShard`] or [`~data_loader.DataLoaderDispatcher`] While the model(s), optimizer(s), and scheduler(s) are just put in simple wrappers, the dataloader(s) are re-created. This is mostly because PyTorch does not let the user change the `batch_sampler` of a dataloader once it's been created and the library handles the sharding of your data between processes by changing that `batch_sampler` to yield every other `num_processes` batches (if enabled). The [`~data_loader.DataLoaderShard`] subclasses `DataLoader` to add the following functionality: - it synchronizes the appropriate random number generator of all processes at each new iteration, to ensure any randomization (like shuffling) is done the exact same way across processes. - it puts the batches on the proper device before yielding them (unless you have opted out of `device_placement=True`). The [`~data_loader.DataLoaderDispatcher`] subclasses differs from the [`~data_loader.DataLoaderShard`] in that when iterating through the `DataLoader`, the data is all starting from process 0 and *then* split and sent off to each process rather than it happening at the dataset level. The random number generator synchronization will by default synchronize: - the `generator` attribute of a given sampler (like the PyTorch `RandomSampler`) for PyTorch >= 1.6 - the main random number generator in PyTorch <=1.5.1 You can choose which random number generator(s) to synchronize with the `rng_types` argument of the main [`Accelerator`]. In PyTorch >= 1.6, it is recommended to rely on a local `generator` to avoid setting the same seed in the main random number generator in all processes. 
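For example, here is a minimal sketch (the tensor dataset and batch size are placeholders) of a dataloader that relies on a local `generator`, which 🤗 Accelerate can then synchronize when `rng_types=["generator"]` is used (the default for PyTorch >= 1.6):

```python
import torch
from torch.utils.data import DataLoader, RandomSampler

from accelerate import Accelerator

# Placeholder dataset: 128 samples of 10 features each
dataset = torch.randn(128, 10)

# Use a local generator for shuffling instead of seeding the global torch RNG
generator = torch.Generator().manual_seed(42)
sampler = RandomSampler(dataset, generator=generator)
dataloader = DataLoader(dataset, sampler=sampler, batch_size=16)

# Only the sampler's `generator` is synchronized across processes at each iteration
accelerator = Accelerator(rng_types=["generator"])
dataloader = accelerator.prepare(dataloader)
```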
<Tip warning={true}>

Synchronization of the main torch (or CUDA or XLA) random number generator will affect any other potential random artifacts you could have in your dataset (like random data augmentation), in the sense that all processes will get the same random numbers from the torch random modules (and therefore will apply the same random data augmentation if it is controlled by torch).

</Tip>

<Tip>

The randomization part of your custom sampler, batch sampler or iterable dataset should be done using a local `torch.Generator` object (in PyTorch >= 1.6); see the traditional `RandomSampler` as an example.

</Tip>

For more details about the internals, see the [Internals page](package_reference/torch_wrappers).
accelerate/docs/source/concept_guides/internal_mechanism.md/0
{ "file_path": "accelerate/docs/source/concept_guides/internal_mechanism.md", "repo_id": "accelerate", "token_count": 1096 }
1
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Megatron-LM [Megatron-LM](https://github.com/NVIDIA/Megatron-LM) enables training large transformer language models at scale. It provides efficient tensor, pipeline and sequence based model parallelism for pre-training transformer based Language Models such as [GPT](https://arxiv.org/abs/2005.14165) (Decoder Only), [BERT](https://arxiv.org/pdf/1810.04805.pdf) (Encoder Only) and [T5](https://arxiv.org/abs/1910.10683) (Encoder-Decoder). For detailed information and how things work behind the scene please refer the github [repo](https://github.com/NVIDIA/Megatron-LM). ## What is integrated? Accelerate integrates following feature of Megatron-LM to enable large scale pre-training/finetuning of BERT (Encoder), GPT (Decoder) or T5 models (Encoder and Decoder): a. **Tensor Parallelism (TP)**: Reduces memory footprint without much additional communication on intra-node ranks. Each tensor is split into multiple chunks with each shard residing on separate GPU. At each step, the same mini-batch of data is processed independently and in parallel by each shard followed by syncing across all GPUs (`all-reduce` operation). In a simple transformer layer, this leads to 2 `all-reduces` in the forward path and 2 in the backward path. For more details, please refer research paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/pdf/1909.08053.pdf) and this section of 🤗 blogpost [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#tensor-parallelism). b. **Pipeline Parallelism (PP)**: Reduces memory footprint and enables large scale training via inter-node parallelization. Reduces the bubble of naive PP via PipeDream-Flush schedule/1F1B schedule and Interleaved 1F1B schedule. Layers are distributed uniformly across PP stages. For example, if a model has `24` layers and we have `4` GPUs for pipeline parallelism, each GPU will have `6` layers (24/4). For more details on schedules to reduce the idle time of PP, please refer to the research paper [Efficient Large-Scale Language Model Training on GPU Clusters Using Megatron-LM](https://arxiv.org/pdf/2104.04473.pdf) and this section of 🤗 blogpost [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#pipeline-parallelism). c. **Sequence Parallelism (SP)**: Reduces memory footprint without any additional communication. Only applicable when using TP. It reduces activation memory required as it prevents the same copies to be on the tensor parallel ranks post `all-reduce` by replacing then with `reduce-scatter` and `no-op` operation would be replaced by `all-gather`. As `all-reduce = reduce-scatter + all-gather`, this saves a ton of activation memory at no added communication cost. 
To put it simply, it shards the outputs of each transformer layer along sequence dimension, e.g., if the sequence length is `1024` and the TP size is `4`, each GPU will have `256` tokens (1024/4) for each sample. This increases the batch size that can be supported for training. For more details, please refer to the research paper [Reducing Activation Recomputation in Large Transformer Models](https://arxiv.org/pdf/2205.05198.pdf). d. **Data Parallelism (DP)** via Distributed Optimizer: Reduces the memory footprint by sharding optimizer states and gradients across DP ranks (versus the traditional method of replicating the optimizer state across data parallel ranks). For example, when using Adam optimizer with mixed-precision training, each parameter accounts for 12 bytes of memory. This gets distributed equally across the GPUs, i.e., each parameter would account for 3 bytes (12/4) if we have 4 GPUs. For more details, please refer the research paper [ZeRO: Memory Optimizations Toward Training Trillion Parameter Models](https://arxiv.org/pdf/1910.02054.pdf) and following section of 🤗 blog [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#zero-data-parallelism). e. **Selective Activation Recomputation**: Reduces the memory footprint of activations significantly via smart activation checkpointing. It doesn't store activations occupying large memory while being fast to recompute thereby achieving great tradeoff between memory and recomputation. For example, for GPT-3, this leads to 70% reduction in required memory for activations at the expense of only 2.7% FLOPs overhead for recomputation of activations. For more details, please refer to the research paper [Reducing Activation Recomputation in Large Transformer Models](https://arxiv.org/pdf/2205.05198.pdf). f. **Fused Kernels**: Fused Softmax, Mixed Precision Fused Layer Norm and Fused gradient accumulation to weight gradient computation of linear layer. PyTorch JIT compiled Fused GeLU and Fused Bias+Dropout+Residual addition. g. **Support for Indexed datasets**: Efficient binary format of datasets for large scale training. Support for the `mmap`, `cached` index file and the `lazy` loader format. h. **Checkpoint reshaping and interoperability**: Utility for reshaping Megatron-LM checkpoints of variable tensor and pipeline parallel sizes to the beloved 🤗 Transformers sharded checkpoints as it has great support with plethora of tools such as 🤗 Accelerate Big Model Inference, Megatron-DeepSpeed Inference etc. Support is also available for converting 🤗 Transformers sharded checkpoints to Megatron-LM checkpoint of variable tensor and pipeline parallel sizes for large scale training. ## Pre-Requisites You will need to install the latest pytorch, cuda, nccl, and NVIDIA [APEX](https://github.com/NVIDIA/apex#quick-start) releases and the nltk library. See [documentation](https://github.com/NVIDIA/Megatron-LM#setup) for more details. Another way to setup the environment is to pull an NVIDIA PyTorch Container that comes with all the required installations from NGC. Below is a step-by-step method to set up the conda environment: 1. Create a virtual environment ``` conda create --name ml ``` 2. Assuming that the machine has CUDA 11.3 installed, installing the corresponding PyTorch GPU Version ``` conda install pytorch torchvision torchaudio cudatoolkit=11.3 -c pytorch ``` 3. 
Install Nvidia APEX ``` git clone https://github.com/NVIDIA/apex cd apex pip install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./ cd .. ``` 4. Installing Megatron-LM ``` pip install git+https://github.com/huggingface/Megatron-LM.git ``` ## Accelerate Megatron-LM Plugin Important features are directly supported via the `accelerate config` command. An example of the corresponding questions for using Megatron-LM features is shown below: ```bash :~$ accelerate config --config_file "megatron_gpt_config.yaml" In which compute environment are you running? ([0] This machine, [1] AWS (Amazon SageMaker)): 0 Which type of machine are you using? ([0] No distributed training, [1] multi-CPU, [2] multi-GPU, [3] TPU): 2 How many different machines will you use (use more than 1 for multi-node training)? [1]: Do you want to use DeepSpeed? [yes/NO]: Do you want to use FullyShardedDataParallel? [yes/NO]: Do you want to use Megatron-LM ? [yes/NO]: yes What is the Tensor Parallelism degree/size? [1]:2 Do you want to enable Sequence Parallelism? [YES/no]: What is the Pipeline Parallelism degree/size? [1]:2 What is the number of micro-batches? [1]:2 Do you want to enable selective activation recomputation? [YES/no]: Do you want to use distributed optimizer which shards optimizer state and gradients across data parallel ranks? [YES/no]: What is the gradient clipping value based on global L2 Norm (0 to disable)? [1.0]: How many GPU(s) should be used for distributed training? [1]:4 Do you wish to use FP16 or BF16 (mixed precision)? [NO/fp16/bf16]: bf16 ``` The resulting config is shown below: ``` ~$ cat megatron_gpt_config.yaml compute_environment: LOCAL_MACHINE deepspeed_config: {} distributed_type: MEGATRON_LM downcast_bf16: 'no' fsdp_config: {} machine_rank: 0 main_process_ip: null main_process_port: null main_training_function: main megatron_lm_config: megatron_lm_gradient_clipping: 1.0 megatron_lm_num_micro_batches: 2 megatron_lm_pp_degree: 2 megatron_lm_recompute_activations: true megatron_lm_sequence_parallelism: true megatron_lm_tp_degree: 2 megatron_lm_use_distributed_optimizer: true mixed_precision: bf16 num_machines: 1 num_processes: 4 rdzv_backend: static same_network: true use_cpu: false ``` We will take the example of GPT pre-training. The minimal changes required to the official `run_clm_no_trainer.py` to use Megatron-LM are as follows: 1. As Megatron-LM uses its own implementation of Optimizer, the corresponding scheduler compatible with it needs to be used. As such, support for only the Megatron-LM's scheduler is present. User will need to create `accelerate.utils.MegatronLMDummyScheduler`. Example is given below: ```python from accelerate.utils import MegatronLMDummyScheduler if accelerator.distributed_type == DistributedType.MEGATRON_LM: lr_scheduler = MegatronLMDummyScheduler( optimizer=optimizer, total_num_steps=args.max_train_steps, warmup_num_steps=args.num_warmup_steps, ) else: lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps * args.gradient_accumulation_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, ) ``` 2. Getting the details of the total batch size now needs to be cognization of tensor and pipeline parallel sizes. 
Example of getting the effective total batch size is shown below: ```python if accelerator.distributed_type == DistributedType.MEGATRON_LM: total_batch_size = accelerator.state.megatron_lm_plugin.global_batch_size else: total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps ``` 3. When using Megatron-LM, the losses are already averaged across the data parallel group ```python if accelerator.distributed_type == DistributedType.MEGATRON_LM: losses.append(loss) else: losses.append(accelerator.gather_for_metrics(loss.repeat(args.per_device_eval_batch_size))) if accelerator.distributed_type == DistributedType.MEGATRON_LM: losses = torch.tensor(losses) else: losses = torch.cat(losses) ``` 4. For Megatron-LM, we need to save the model using `accelerator.save_state` ```python if accelerator.distributed_type == DistributedType.MEGATRON_LM: accelerator.save_state(args.output_dir) else: unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save ) ``` That's it! We are good to go 🚀. Please find the example script in the examples folder at the path `accelerate/examples/by_feature/megatron_lm_gpt_pretraining.py`. Let's run it for `gpt-large` model architecture using 4 A100-80GB GPUs. ```bash accelerate launch --config_file megatron_gpt_config.yaml \ examples/by_feature/megatron_lm_gpt_pretraining.py \ --config_name "gpt2-large" \ --tokenizer_name "gpt2-large" \ --dataset_name wikitext \ --dataset_config_name wikitext-2-raw-v1 \ --block_size 1024 \ --learning_rate 5e-5 \ --per_device_train_batch_size 24 \ --per_device_eval_batch_size 24 \ --num_train_epochs 5 \ --with_tracking \ --report_to "wandb" \ --output_dir "awesome_model" ``` Below are some important excerpts from the output logs: ```bash Loading extension module fused_dense_cuda... >>> done with compiling and loading fused kernels. Compilation time: 3.569 seconds > padded vocab (size: 50257) with 175 dummy tokens (new size: 50432) Building gpt model in the pre-training mode. The Megatron LM model weights are initialized at random in `accelerator.prepare`. Please use `accelerator.load_checkpoint` to load a pre-trained checkpoint matching the distributed setup. Preparing dataloader Preparing dataloader Preparing model > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 210753280 > number of parameters on (tensor, pipeline) model parallel rank (1, 1): 209445120 > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 210753280 > number of parameters on (tensor, pipeline) model parallel rank (0, 1): 209445120 Preparing optimizer Preparing scheduler > learning rate decay style: linear 10/10/2022 22:57:22 - INFO - __main__ - ***** Running training ***** 10/10/2022 22:57:22 - INFO - __main__ - Num examples = 2318 10/10/2022 22:57:22 - INFO - __main__ - Num Epochs = 5 10/10/2022 22:57:22 - INFO - __main__ - Instantaneous batch size per device = 24 10/10/2022 22:57:22 - INFO - __main__ - Total train batch size (w. 
parallel, distributed & accumulation) = 48 10/10/2022 22:57:22 - INFO - __main__ - Gradient Accumulation steps = 1 10/10/2022 22:57:22 - INFO - __main__ - Total optimization steps = 245 20%|████████████▍ | 49/245 [01:04<04:09, 1.27s/it] 10/10/2022 22:58:29 - INFO - __main__ - epoch 0: perplexity: 1222.1594275215962 eval_loss: 7.10837459564209 40%|████████████████████████▊ | 98/245 [02:10<03:07, 1.28s/it] 10/10/2022 22:59:35 - INFO - __main__ - epoch 1: perplexity: 894.5236583794557 eval_loss: 6.796291351318359 60%|████████████████████████████████████▌ | 147/245 [03:16<02:05, 1.28s/it] 10/10/2022 23:00:40 - INFO - __main__ - epoch 2: perplexity: 702.8458788508042 eval_loss: 6.555137634277344 80%|████████████████████████████████████████████████▊ | 196/245 [04:22<01:02, 1.28s/it] 10/10/2022 23:01:46 - INFO - __main__ - epoch 3: perplexity: 600.3220028695281 eval_loss: 6.39746618270874 100%|█████████████████████████████████████████████████████████████| 245/245 [05:27<00:00, 1.28s/it] ``` There are a large number of other options/features that one can set using `accelerate.utils.MegatronLMPlugin`. ## Advanced features to leverage writing custom train step and Megatron-LM Indexed Datasets For leveraging more features, please go through below details. 1. Below is an example of changes required to customize the Train Step while using Megatron-LM. You will implement the `accelerate.utils.AbstractTrainStep` or inherit from their corresponding children `accelerate.utils.GPTTrainStep`, `accelerate.utils.BertTrainStep` or `accelerate.utils.T5TrainStep`. ```python from accelerate.utils import MegatronLMDummyScheduler, GPTTrainStep, avg_losses_across_data_parallel_group # Custom loss function for the Megatron model class GPTTrainStepWithCustomLoss(GPTTrainStep): def __init__(self, megatron_args, **kwargs): super().__init__(megatron_args) self.kwargs = kwargs def get_loss_func(self): def loss_func(inputs, loss_mask, output_tensor): batch_size, seq_length = output_tensor.shape losses = output_tensor.float() loss_mask = loss_mask.view(-1).float() loss = losses.view(-1) * loss_mask # Resize and average loss per sample loss_per_sample = loss.view(batch_size, seq_length).sum(axis=1) loss_mask_per_sample = loss_mask.view(batch_size, seq_length).sum(axis=1) loss_per_sample = loss_per_sample / loss_mask_per_sample # Calculate and scale weighting weights = torch.stack([(inputs == kt).float() for kt in self.kwargs["keytoken_ids"]]).sum(axis=[0, 2]) weights = 1.0 + self.kwargs["alpha"] * weights # Calculate weighted average weighted_loss = (loss_per_sample * weights).mean() # Reduce loss across data parallel groups averaged_loss = avg_losses_across_data_parallel_group([weighted_loss]) return weighted_loss, {"lm loss": averaged_loss[0]} return loss_func def get_forward_step_func(self): def forward_step(data_iterator, model): """Forward step.""" # Get the batch. 
tokens, labels, loss_mask, attention_mask, position_ids = self.get_batch(data_iterator) output_tensor = model(tokens, position_ids, attention_mask, labels=labels) return output_tensor, partial(self.loss_func, tokens, loss_mask) return forward_step def main(): # Custom loss function for the Megatron model keytoken_ids = [] keywords = ["plt", "pd", "sk", "fit", "predict", " plt", " pd", " sk", " fit", " predict"] for keyword in keywords: ids = tokenizer([keyword]).input_ids[0] if len(ids) == 1: keytoken_ids.append(ids[0]) accelerator.print(f"Keytoken ids: {keytoken_ids}") accelerator.state.megatron_lm_plugin.custom_train_step_class = GPTTrainStepWithCustomLoss accelerator.state.megatron_lm_plugin.custom_train_step_kwargs = { "keytoken_ids": keytoken_ids, "alpha": 0.25, } ``` 2. For using the Megatron-LM datasets, a few more changes are required. Dataloaders for these datasets are available only on rank 0 of each tensor parallel group. As such, there are rank where dataloader won't be available and this requires tweaks to the training loop. Being able to do all this shows how flexible and extensible 🤗 Accelerate is. The changes required are as follows. a. For Megatron-LM indexed datasets, we need to use `MegatronLMDummyDataLoader` and pass the required dataset args to it such as `data_path`, `seq_length` etc. See [here](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/arguments.py#L804) for the list of available args. ```python from accelerate.utils import MegatronLMDummyDataLoader megatron_dataloader_config = { "data_path": args.data_path, "splits_string": args.splits_string, "seq_length": args.block_size, "micro_batch_size": args.per_device_train_batch_size, } megatron_dataloader = MegatronLMDummyDataLoader(**megatron_dataloader_config) accelerator.state.megatron_lm_plugin.megatron_dataset_flag = True ``` b. `megatron_dataloader` is repeated 3 times to get training, validation and test dataloaders as per the `args.splits_string` proportions ```python model, optimizer, lr_scheduler, train_dataloader, eval_dataloader, _ = accelerator.prepare( model, optimizer, lr_scheduler, megatron_dataloader, megatron_dataloader, megatron_dataloader ) ``` c. Changes to training and evaluation loops as dataloader is only available on tensor parallel ranks 0 So, we need to iterate only if the dataloader isn't `None` else provide empty dict As such, we loop using `while` loop and break when `completed_steps` is equal to `args.max_train_steps` This is similar to the Megatron-LM setup wherein user has to provide `max_train_steps` when using Megaton-LM indexed datasets. This displays how flexible and extensible 🤗 Accelerate is. ```python while completed_steps < args.max_train_steps: model.train() batch = next(train_dataloader) if train_dataloader is not None else {} outputs = model(**batch) loss = outputs.loss ... if completed_steps % eval_interval == 0: eval_completed_steps = 0 losses = [] while eval_completed_steps < eval_iters: model.eval() with torch.no_grad(): batch = next(eval_dataloader) if eval_dataloader is not None else {} outputs = model(**batch) ``` ## Utility for Checkpoint reshaping and interoperability 1. The scripts for these are present in 🤗 Transformers library under respective models. Currently, it is available for GPT model [checkpoint_reshaping_and_interoperability.py](https://github.com/huggingface/transformers/blob/main/src/transformers/models/megatron_gpt2/checkpoint_reshaping_and_interoperability.py) 2. 
Below is an example of conversion of a checkpoint from Megatron-LM to a universal 🤗 Transformers sharded checkpoint.

```bash
python checkpoint_reshaping_and_interoperability.py \
--convert_checkpoint_from_megatron_to_transformers \
--load_path "gpt/iter_0005000" \
--save_path "gpt/trfs_checkpoint" \
--max_shard_size "200MB" \
--tokenizer_name "gpt2" \
--print-checkpoint-structure
```

3. Conversion of a checkpoint from Transformers to Megatron-LM with `tp_size=2`, `pp_size=2` and `dp_size=2`.

```bash
python checkpoint_utils/megatron_gpt2/checkpoint_reshaping_and_interoperability.py \
--load_path "gpt/trfs_checkpoint" \
--save_path "gpt/megatron_lm_checkpoint" \
--target_tensor_model_parallel_size 2 \
--target_pipeline_model_parallel_size 2 \
--target_data_parallel_size 2 \
--target_params_dtype "bf16" \
--make_vocab_size_divisible_by 128 \
--use_distributed_optimizer \
--print-checkpoint-structure
```

## Megatron-LM GPT models support returning logits and the `megatron_generate` function for text generation

1. Returning logits requires setting `return_logits=True` in `MegatronLMPlugin`, as shown below. The logits are available on the last stage of the pipeline.

```python
megatron_lm_plugin = MegatronLMPlugin(return_logits=True)
```

2. The `megatron_generate` method of a Megatron-LM GPT model uses tensor and pipeline parallelism to complete generations for a batch of inputs when using greedy decoding with/without top_k/top_p sampling, and for individual prompt inputs when using beam search decoding. Only a subset of the features of `transformers.generate` is supported. This helps with using large models for generation via tensor and pipeline parallelism (key-value caching and fused kernels are used by default). It requires the data parallel size to be 1, and sequence parallelism and activation checkpointing to be disabled. It also requires specifying the paths to the tokenizer's vocab file and merges file. The example below shows how to configure and use the `megatron_generate` method for a Megatron-LM GPT model.
```python # specifying tokenizer's vocab and merges file vocab_file = os.path.join(args.resume_from_checkpoint, "vocab.json") merge_file = os.path.join(args.resume_from_checkpoint, "merges.txt") other_megatron_args = {"vocab_file": vocab_file, "merge_file": merge_file} megatron_lm_plugin = MegatronLMPlugin(other_megatron_args=other_megatron_args) # inference using `megatron_generate` functionality tokenizer.pad_token = tokenizer.eos_token max_new_tokens = 64 batch_texts = [ "Are you human?", "The purpose of life is", "The arsenal was constructed at the request of", "How are you doing these days?", ] batch_encodings = tokenizer(batch_texts, return_tensors="pt", padding=True) # top-p sampling generated_tokens = model.megatron_generate( batch_encodings["input_ids"], batch_encodings["attention_mask"], max_new_tokens=max_new_tokens, top_p=0.8, top_p_decay=0.5, temperature=0.9, ) decoded_preds = tokenizer.batch_decode(generated_tokens.cpu().numpy()) accelerator.print(decoded_preds) # top-k sampling generated_tokens = model.megatron_generate( batch_encodings["input_ids"], batch_encodings["attention_mask"], max_new_tokens=max_new_tokens, top_k=50, temperature=0.9, ) decoded_preds = tokenizer.batch_decode(generated_tokens.cpu().numpy()) accelerator.print(decoded_preds) # adding `bos` token at the start generated_tokens = model.megatron_generate( batch_encodings["input_ids"], batch_encodings["attention_mask"], max_new_tokens=max_new_tokens, add_BOS=True ) decoded_preds = tokenizer.batch_decode(generated_tokens.cpu().numpy()) accelerator.print(decoded_preds) # beam search => only takes single prompt batch_texts = ["The purpose of life is"] batch_encodings = tokenizer(batch_texts, return_tensors="pt", padding=True) generated_tokens = model.megatron_generate( batch_encodings["input_ids"], batch_encodings["attention_mask"], max_new_tokens=max_new_tokens, num_beams=20, length_penalty=1.5, ) decoded_preds = tokenizer.batch_decode(generated_tokens.cpu().numpy()) accelerator.print(decoded_preds) ``` 3. An end-to-end example of using `megatron_generate` method for Megatron-LM GPT model is available at [megatron_gpt2_generation.py](https://github.com/pacman100/accelerate-megatron-test/blob/main/src/inference/megatron_gpt2_generation.py) with config file [megatron_lm_gpt_generate_config.yaml](https://github.com/pacman100/accelerate-megatron-test/blob/main/src/Configs/megatron_lm_gpt_generate_config.yaml). The bash script with accelerate launch command is available at [megatron_lm_gpt_generate.sh](https://github.com/pacman100/accelerate-megatron-test/blob/main/megatron_lm_gpt_generate.sh). The output logs of the script are available at [megatron_lm_gpt_generate.log](https://github.com/pacman100/accelerate-megatron-test/blob/main/output_logs/megatron_lm_gpt_generate.log). ## Support for ROPE and ALiBi Positional embeddings and Multi-Query Attention 1. For ROPE/ALiBi attention, pass `position_embedding_type` with `("absolute" | "rotary" | "alibi")` to `MegatronLMPlugin` as shown below. ```python other_megatron_args = {"position_embedding_type": "alibi"} megatron_lm_plugin = MegatronLMPlugin(other_megatron_args=other_megatron_args) ``` 2. For Multi-Query Attention, pass `attention_head_type` with `("multihead" | "multiquery")` to `MegatronLMPlugin` as shown below. ```python other_megatron_args = {"attention_head_type": "multiquery"} megatron_lm_plugin = MegatronLMPlugin(other_megatron_args=other_megatron_args) ``` ## Caveats 1. Supports Transformers GPT2, Megatron-BERT and T5 models. 
This covers Decoder only, Encoder only and Encoder-Decoder model classes.

2. Only the loss is returned from the model forward pass, as there is a quite complex interplay of pipeline, tensor and data parallelism behind the scenes. The `model(**batch_data)` call returns loss(es) averaged across the data parallel ranks. This is fine for most cases wherein pre-training jobs are run using Megatron-LM features, and you can easily compute the `perplexity` using the loss (see the short sketch after this list). For GPT models, returning logits in addition to loss(es) is supported. These logits aren't gathered across data parallel ranks; use `accelerator.utils.gather_across_data_parallel_groups` to gather logits across data parallel ranks. These logits, along with the labels, can be used for computing various performance metrics.

3. The main process is the last rank, as the losses/logits are available in the last stage of the pipeline. `accelerator.is_main_process` and `accelerator.is_local_main_process` return `True` for the last rank when using the Megatron-LM integration.

4. In the `accelerator.prepare` call, a Megatron-LM model corresponding to a given Transformers model is created with random weights. Please use `accelerator.load_state` to load the Megatron-LM checkpoint with matching TP, PP and DP partitions.

5. Currently, checkpoint reshaping and interoperability support is only available for GPT. Soon it will be extended to BERT and T5.

6. `gradient_accumulation_steps` needs to be 1. When using Megatron-LM, micro-batches in the pipeline parallelism setting are synonymous with gradient accumulation.

7. When using Megatron-LM, use `accelerator.save_state` and `accelerator.load_state` for saving and loading checkpoints.

8. Below is the mapping from Megatron-LM model architectures to the equivalent 🤗 transformers model architectures. Only these 🤗 transformers model architectures are supported.

a. Megatron-LM [BertModel](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/bert_model.py) : 🤗 transformers models with `megatron-bert` in config's model type, e.g., [MegatronBERT](https://huggingface.co/docs/transformers/model_doc/megatron-bert)

b. Megatron-LM [GPTModel](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py) : 🤗 transformers models with `gpt2` in config's model type, e.g., [OpenAI GPT2](https://huggingface.co/docs/transformers/model_doc/gpt2)

c. Megatron-LM [T5Model](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/t5_model.py) : 🤗 transformers models with `t5` in config's model type, e.g., [T5](https://huggingface.co/docs/transformers/model_doc/t5) and [MT5](https://huggingface.co/docs/transformers/model_doc/mt5)
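As a short illustration of caveat 2 above, here is a minimal sketch of computing perplexity from the returned loss (it assumes `model` and `batch` come from a training/evaluation loop prepared with the Megatron-LM plugin, as in the earlier examples):

```python
import math

import torch

with torch.no_grad():
    outputs = model(**batch)

# The loss is already averaged across the data parallel ranks
perplexity = math.exp(outputs.loss.item())
print(f"perplexity: {perplexity}")
```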
accelerate/docs/source/usage_guides/megatron_lm.md/0
{ "file_path": "accelerate/docs/source/usage_guides/megatron_lm.md", "repo_id": "accelerate", "token_count": 9557 }
2
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from transformers import AutoModelForCausalLM, AutoTokenizer from accelerate import PartialState, prepare_pippy # sdpa implementation which is the default torch>2.1.2 fails with the tracing + attention mask kwarg # with attn_implementation="eager" mode, the forward is very slow for some reason model = AutoModelForCausalLM.from_pretrained( "meta-llama/Llama-2-7b-chat-hf", low_cpu_mem_usage=True, attn_implementation="sdpa" ) model.eval() # Input configs # Create example inputs for the model tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf") prompts = ("I would like to", "I really like to", "The weather is") # bs = 3 tokenizer.pad_token = tokenizer.eos_token inputs = tokenizer(prompts, return_tensors="pt", padding=True) # Create a pipeline stage from the model # Using `auto` is equivalent to letting `device_map="auto"` figure # out device mapping and will also split the model according to the # number of total GPUs available if it fits on one GPU model = prepare_pippy(model, split_points="auto", example_args=inputs) # You can pass `gather_output=True` to have the output from the model # available on all GPUs # model = prepare_pippy(model, split_points="auto", example_args=(input,), gather_output=True) # currently we don't support `model.generate` # output = model.generate(**inputs, max_new_tokens=1) with torch.no_grad(): output = model(**inputs) # The outputs are only on the final process by default if PartialState().is_last_process: next_token_logits = output[0][:, -1, :] next_token = torch.argmax(next_token_logits, dim=-1) print(tokenizer.batch_decode(next_token))
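# Note: this script is intended to be launched with one process per GPU, e.g. via
# `accelerate launch` or `torchrun` (assumed invocation, adjust the process count to your hardware):
#   accelerate launch --num_processes 2 llama.py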
accelerate/examples/inference/llama.py/0
{ "file_path": "accelerate/examples/inference/llama.py", "repo_id": "accelerate", "token_count": 695 }
3
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import contextlib import functools import json import math import os import re import shutil import sys import warnings from collections import OrderedDict from contextlib import contextmanager from functools import partial from types import MethodType from typing import Any, Callable, Union import torch import torch.utils.hooks as hooks from .checkpointing import load_accelerator_state, load_custom_state, save_accelerator_state, save_custom_state from .data_loader import DataLoaderDispatcher, prepare_data_loader, skip_first_batches from .hooks import AlignDevicesHook from .logging import get_logger from .optimizer import AcceleratedOptimizer from .scheduler import AcceleratedScheduler from .state import AcceleratorState, GradientState, PartialState from .tracking import LOGGER_TYPE_TO_CLASS, GeneralTracker, filter_trackers from .utils import ( MODEL_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, AutocastKwargs, DataLoaderConfiguration, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FP8RecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, TorchDynamoPlugin, check_os_kernel, clean_state_dict_for_safetensors, compare_versions, convert_model, convert_outputs_to_fp32, extract_model_from_parallel, gather, gather_object, get_mixed_precision_context_manager, get_pretty_name, has_transformer_engine_layers, is_bf16_available, is_deepspeed_available, is_fp8_available, is_ipex_available, is_megatron_lm_available, is_mlu_available, is_msamp_available, is_npu_available, is_torch_version, is_torch_xla_available, is_xpu_available, load_fsdp_model, load_fsdp_optimizer, pad_across_processes, parse_choice_from_env, recursively_apply, reduce, release_memory, save, save_fsdp_model, save_fsdp_optimizer, shard_checkpoint, wait_for_everyone, ) from .utils.constants import FSDP_PYTORCH_VERSION from .utils.modeling import get_state_dict_offloaded_model from .utils.other import is_compiled_module if is_deepspeed_available(): from .utils import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, ) if is_fp8_available(): import transformer_engine.common.recipe as te_recipe from transformer_engine.pytorch import fp8_autocast if is_megatron_lm_available(): from .utils import ( MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, megatron_lm_initialize, megatron_lm_prepare_data_loader, megatron_lm_prepare_model, megatron_lm_prepare_optimizer, megatron_lm_prepare_scheduler, ) from torch.distributed.algorithms.join import Join if is_torch_xla_available(): import torch_xla.amp as xamp import torch_xla.core.xla_model as xm import 
torch_xla.distributed.xla_multiprocessing as xmp if is_npu_available(check_device=False): import torch_npu # noqa: F401 try: from torch.optim.lr_scheduler import LRScheduler except ImportError: from torch.optim.lr_scheduler import _LRScheduler as LRScheduler logger = get_logger(__name__) # Sentinel values for defaults _split_batches = object() _dispatch_batches = object() _even_batches = object() _use_seedable_sampler = object() class Accelerator: """ Creates an instance of an accelerator for distributed training (on multi-GPU, TPU) or mixed precision training. Args: device_placement (`bool`, *optional*, defaults to `True`): Whether or not the accelerator should put objects on device (tensors yielded by the dataloader, model, etc...). mixed_precision (`str`, *optional*): Whether or not to use mixed precision training. Choose from 'no','fp16','bf16 or 'fp8'. Will default to the value in the environment variable `ACCELERATE_MIXED_PRECISION`, which will use the default value in the accelerate config of the current system or the flag passed with the `accelerate.launch` command. 'fp8' requires the installation of transformers-engine. gradient_accumulation_steps (`int`, *optional*, default to 1): The number of steps that should pass before gradients are accumulated. A number > 1 should be combined with `Accelerator.accumulate`. If not passed, will default to the value in the environment variable `ACCELERATE_GRADIENT_ACCUMULATION_STEPS`. Can also be configured through a `GradientAccumulationPlugin`. cpu (`bool`, *optional*): Whether or not to force the script to execute on CPU. Will ignore GPU available if set to `True` and force the execution on one process only. dataloader_config (`DataLoaderConfiguration`, *optional*): A configuration for how the dataloaders should be handled in distributed scenarios. deepspeed_plugin ([`~utils.DeepSpeedPlugin`], *optional*): Tweak your DeepSpeed related args using this argument. This argument is optional and can be configured directly using *accelerate config* fsdp_plugin ([`~utils.FullyShardedDataParallelPlugin`], *optional*): Tweak your FSDP related args using this argument. This argument is optional and can be configured directly using *accelerate config* megatron_lm_plugin ([`~utils.MegatronLMPlugin`], *optional*): Tweak your MegatronLM related args using this argument. This argument is optional and can be configured directly using *accelerate config* rng_types (list of `str` or [`~utils.RNGType`]): The list of random number generators to synchronize at the beginning of each iteration in your prepared dataloaders. Should be one or several of: - `"torch"`: the base torch random number generator - `"cuda"`: the CUDA random number generator (GPU only) - `"xla"`: the XLA random number generator (TPU only) - `"generator"`: the `torch.Generator` of the sampler (or batch sampler if there is no sampler in your dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type. Will default to `["torch"]` for PyTorch versions <=1.5.1 and `["generator"]` for PyTorch versions >= 1.6. log_with (list of `str`, [`~utils.LoggerType`] or [`~tracking.GeneralTracker`], *optional*): A list of loggers to be setup for experiment tracking. Should be one or several of: - `"all"` - `"tensorboard"` - `"wandb"` - `"comet_ml"` If `"all"` is selected, will pick up all available trackers in the environment and initialize them. Can also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `"all"`. 
project_config ([`~utils.ProjectConfiguration`], *optional*): A configuration for how saving the state can be handled. project_dir (`str`, `os.PathLike`, *optional*): A path to a directory for storing data such as logs of locally-compatible loggers and potentially saved checkpoints. step_scheduler_with_optimizer (`bool`, *optional`, defaults to `True`): Set `True` if the learning rate scheduler is stepped at the same time as the optimizer, `False` if only done under certain circumstances (at the end of each epoch, for instance). kwargs_handlers (list of [`~utils.KwargsHandler`], *optional*) A list of [`~utils.KwargsHandler`] to customize how the objects related to distributed training or mixed precision are created. See [kwargs](kwargs) for more information. dynamo_backend (`str` or [`~utils.DynamoBackend`], *optional*, defaults to `"no"`): Set to one of the possible dynamo backends to optimize your training with torch dynamo. gradient_accumulation_plugin ([`~utils.GradientAccumulationPlugin`], *optional*): A configuration for how gradient accumulation should be handled, if more tweaking than just the `gradient_accumulation_steps` is needed. **Available attributes:** - **device** (`torch.device`) -- The device to use. - **distributed_type** ([`~utils.DistributedType`]) -- The distributed training configuration. - **local_process_index** (`int`) -- The process index on the current machine. - **mixed_precision** (`str`) -- The configured mixed precision mode. - **num_processes** (`int`) -- The total number of processes used for training. - **optimizer_step_was_skipped** (`bool`) -- Whether or not the optimizer update was skipped (because of gradient overflow in mixed precision), in which case the learning rate should not be changed. - **process_index** (`int`) -- The overall index of the current process among all processes. - **state** ([`~state.AcceleratorState`]) -- The distributed setup state. - **sync_gradients** (`bool`) -- Whether the gradients are currently being synced across all processes. - **use_distributed** (`bool`) -- Whether the current configuration is for distributed training. 
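    Example (a minimal sketch; the toy model, optimizer and dataloader below are placeholders for your own objects):

    ```python
    >>> import torch
    >>> from accelerate import Accelerator

    >>> accelerator = Accelerator()
    >>> model = torch.nn.Linear(2, 1)
    >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    >>> dataloader = torch.utils.data.DataLoader(torch.randn(8, 2), batch_size=4)
    >>> model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
    ```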
""" def __init__( self, device_placement: bool = True, split_batches: bool = _split_batches, mixed_precision: PrecisionType | str | None = None, gradient_accumulation_steps: int = 1, cpu: bool = False, dataloader_config: DataLoaderConfiguration | None = None, deepspeed_plugin: DeepSpeedPlugin | None = None, fsdp_plugin: FullyShardedDataParallelPlugin | None = None, megatron_lm_plugin: MegatronLMPlugin | None = None, rng_types: list[str | RNGType] | None = None, log_with: str | LoggerType | GeneralTracker | list[str | LoggerType | GeneralTracker] | None = None, project_dir: str | os.PathLike | None = None, project_config: ProjectConfiguration | None = None, gradient_accumulation_plugin: GradientAccumulationPlugin | None = None, dispatch_batches: bool | None = _dispatch_batches, even_batches: bool = _even_batches, use_seedable_sampler: bool = _use_seedable_sampler, step_scheduler_with_optimizer: bool = True, kwargs_handlers: list[KwargsHandler] | None = None, dynamo_backend: DynamoBackend | str | None = None, ): self.trackers = [] if project_config is not None: self.project_configuration = project_config else: self.project_configuration = ProjectConfiguration(project_dir=project_dir) if project_dir is not None and self.project_dir is None: self.project_configuration.set_directories(project_dir) if mixed_precision is not None: mixed_precision = str(mixed_precision) if mixed_precision not in PrecisionType: raise ValueError( f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}" ) dynamo_plugin = TorchDynamoPlugin() if dynamo_backend is None else TorchDynamoPlugin(backend=dynamo_backend) if deepspeed_plugin is None: # init from env variables deepspeed_plugin = ( DeepSpeedPlugin() if os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" else None ) else: assert isinstance( deepspeed_plugin, DeepSpeedPlugin ), "`deepspeed_plugin` must be an `accelerate.utils.DeepSpeedPlugin` object." os.environ["ACCELERATE_USE_DEEPSPEED"] = "true" # use DeepSpeed if plugin is provided if deepspeed_plugin: if not is_deepspeed_available(): raise ImportError("DeepSpeed is not installed => run `pip install deepspeed` or build it from source.") if is_mlu_available(): if compare_versions("deepspeed-mlu", "<", "0.10.1"): raise ImportError("DeepSpeed MLU version must be >= 0.10.1. Please update DeepSpeed MLU.") elif compare_versions("deepspeed", "<", "0.9.3"): raise ImportError("DeepSpeed version must be >= 0.9.3. 
Please update DeepSpeed.") mixed_precision = ( os.environ.get("ACCELERATE_MIXED_PRECISION", "no") if mixed_precision is None else mixed_precision ) deepspeed_plugin.set_mixed_precision(mixed_precision) deepspeed_plugin.set_deepspeed_weakref() if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true" or isinstance( fsdp_plugin, FullyShardedDataParallelPlugin ): if is_torch_version("<", FSDP_PYTORCH_VERSION): raise ValueError(f"FSDP requires PyTorch >= {FSDP_PYTORCH_VERSION}") if fsdp_plugin is None: # init from env variables fsdp_plugin = ( FullyShardedDataParallelPlugin() if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true" else None ) else: if not isinstance(fsdp_plugin, FullyShardedDataParallelPlugin): raise TypeError("`fsdp_plugin` must be a FullyShardedDataParallelPlugin object.") os.environ["ACCELERATE_USE_FSDP"] = "true" # use FSDP if plugin is provided if megatron_lm_plugin is None: # init from env variables megatron_lm_plugin = ( MegatronLMPlugin() if os.environ.get("ACCELERATE_USE_MEGATRON_LM", "false") == "true" else None ) else: if not isinstance(megatron_lm_plugin, MegatronLMPlugin): raise TypeError("`megatron_lm_plugin` must be a MegatronLMPlugin object.") os.environ["ACCELERATE_USE_MEGATRON_LM"] = "true" # use MegatronLM if plugin is provided if megatron_lm_plugin: if not is_megatron_lm_available(): raise ImportError("Megatron is not installed. please build it from source.") # Kwargs handlers self.ddp_handler = None self.scaler_handler = None self.init_handler = None self.fp8_recipe_handler = None self.autocast_handler = None if kwargs_handlers is not None: for handler in kwargs_handlers: assert isinstance( handler, KwargsHandler ), f"Unsupported kwargs handler passed: {handler}, must be one that inherits `accelerate.utils.KwargsHandler`." 
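                # Each supported `KwargsHandler` subclass maps to a dedicated attribute below; passing two
                # handlers of the same type is rejected so the resulting configuration stays unambiguous.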
                if isinstance(handler, DistributedDataParallelKwargs):
                    if self.ddp_handler is not None:
                        raise ValueError("You can only pass one `DistributedDataParallelKwargs` in `kwargs_handlers`.")
                    else:
                        self.ddp_handler = handler
                elif isinstance(handler, GradScalerKwargs):
                    if self.scaler_handler is not None:
                        raise ValueError("You can only pass one `GradScalerKwargs` in `kwargs_handlers`.")
                    else:
                        self.scaler_handler = handler
                elif isinstance(handler, InitProcessGroupKwargs):
                    if self.init_handler is not None:
                        raise ValueError("You can only pass one `InitProcessGroupKwargs` in `kwargs_handlers`.")
                    else:
                        self.init_handler = handler
                elif isinstance(handler, FP8RecipeKwargs):
                    if self.fp8_recipe_handler is not None:
                        raise ValueError("You can only pass one `FP8RecipeKwargs` in `kwargs_handlers`.")
                    else:
                        self.fp8_recipe_handler = handler
                elif isinstance(handler, AutocastKwargs):
                    if self.autocast_handler is not None:
                        raise ValueError("You can only pass one `AutocastKwargs` in `kwargs_handlers`.")
                    else:
                        self.autocast_handler = handler

        kwargs = self.init_handler.to_kwargs() if self.init_handler is not None else {}
        self.state = AcceleratorState(
            mixed_precision=mixed_precision,
            cpu=cpu,
            dynamo_plugin=dynamo_plugin,
            deepspeed_plugin=deepspeed_plugin,
            fsdp_plugin=fsdp_plugin,
            megatron_lm_plugin=megatron_lm_plugin,
            _from_accelerator=True,
            **kwargs,
        )

        if self.fp8_recipe_handler is None and self.state.mixed_precision == "fp8":
            self.fp8_recipe_handler = FP8RecipeKwargs(backend="MSAMP" if is_msamp_available() else "TE")

        trackers = filter_trackers(log_with, self.logging_dir)
        if len(trackers) < 1 and log_with is not None:
            warnings.warn(f"`log_with={log_with}` was passed but no supported trackers are currently installed.")
        self.log_with = trackers

        if (
            (mixed_precision != "bf16")
            and getattr(self.state, "downcast_bfloat", False)
            and (self.state.distributed_type != DistributedType.XLA)
        ):
            raise ValueError("Can only use `downcast_bf16` when using `mixed_precision='bf16'` and on a TPU")

        if gradient_accumulation_plugin is not None:
            if gradient_accumulation_steps != 1:
                raise ValueError(
                    "You can only pass one of `gradient_accumulation_steps` and `gradient_accumulation_plugin`. Please only pass in the created `GradientAccumulationPlugin` object."
) else: gradient_accumulation_steps = int( parse_choice_from_env("ACCELERATE_GRADIENT_ACCUMULATION_STEPS", gradient_accumulation_steps) ) gradient_accumulation_plugin = GradientAccumulationPlugin(num_steps=gradient_accumulation_steps) self.gradient_state = GradientState( gradient_accumulation_plugin=gradient_accumulation_plugin, ) self.device_placement = device_placement if dataloader_config is None: dataloader_config = DataLoaderConfiguration() self.dataloader_config = dataloader_config # Deal with deprecated args # TODO: Remove in v1.0.0 deprecated_dl_args = {} if dispatch_batches is not _dispatch_batches: deprecated_dl_args["dispatch_batches"] = dispatch_batches self.dataloader_config.dispatch_batches = dispatch_batches if split_batches is not _split_batches: deprecated_dl_args["split_batches"] = split_batches self.dataloader_config.split_batches = split_batches if even_batches is not _even_batches: deprecated_dl_args["even_batches"] = even_batches self.dataloader_config.even_batches = even_batches if use_seedable_sampler is not _use_seedable_sampler: deprecated_dl_args["use_seedable_sampler"] = use_seedable_sampler self.dataloader_config.use_seedable_sampler = use_seedable_sampler if len(deprecated_dl_args) > 0: values = ", ".join([f"{k}={v}" for k, v in deprecated_dl_args.items()]) warnings.warn( f"Passing the following arguments to `Accelerator` is deprecated and will be removed in version 1.0 of Accelerate: {deprecated_dl_args.keys()}. " "Please pass an `accelerate.DataLoaderConfiguration` instead: \n" f"dataloader_config = DataLoaderConfiguration({values})", FutureWarning, ) self.step_scheduler_with_optimizer = step_scheduler_with_optimizer # Mixed precision attributes self.scaler = None self.native_amp = False if ( self.state.mixed_precision == "fp16" and self.device.type != "cpu" and self.distributed_type not in (DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM) ): self.native_amp = True if self.device.type not in ("xpu", "cuda", "mps", "npu", "xla", "mlu") or is_torch_xla_available( check_is_tpu=True ): raise ValueError(f"fp16 mixed precision requires a GPU (not {self.device.type!r}).") kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {} if self.distributed_type == DistributedType.FSDP: from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler self.scaler = ShardedGradScaler(**kwargs) elif is_torch_xla_available(check_is_gpu=True): self.scaler = xamp.GradScaler(**kwargs) elif is_mlu_available(): self.scaler = torch.mlu.amp.GradScaler(**kwargs) elif is_npu_available(): self.scaler = torch.npu.amp.GradScaler(**kwargs) else: self.scaler = torch.cuda.amp.GradScaler(**kwargs) elif self.state.mixed_precision == "bf16" and self.distributed_type not in ( DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM, ): if self.device.type in ["cpu", "xpu"]: self.native_amp = True else: self.native_amp = is_bf16_available(True) if mixed_precision == "bf16" and not self.native_amp and not is_torch_xla_available(): raise ValueError("bf16 mixed precision requires PyTorch >= 1.10 and a supported device.") # Start of internal step tracking self.step = 0 # Internal references to the training objects self._optimizers = [] self._models = [] self._schedulers = [] self._dataloaders = [] self._custom_objects = [] # Hooks self._load_model_state_pre_hook = OrderedDict() self._save_model_state_pre_hook = OrderedDict() # RNG Types self.rng_types = rng_types if self.rng_types is None: self.rng_types = ["generator"] # Set a flag tensor for early stopping and 
other breakpoints self.flag_tensor = None check_os_kernel() @property def use_distributed(self): """ Whether the Accelerator is configured for distributed training """ return self.state.use_distributed @property def distributed_type(self): return self.state.distributed_type @property def num_processes(self): return self.state.num_processes @property def process_index(self): return self.state.process_index @property def local_process_index(self): return self.state.local_process_index @property def device(self): return self.state.device @property def split_batches(self): return self.dataloader_config.split_batches @property def dispatch_batches(self): return self.dataloader_config.dispatch_batches @property def even_batches(self): return self.dataloader_config.even_batches @even_batches.setter def even_batches(self, value: bool): self.dataloader_config.even_batches = value @property def use_seedable_sampler(self): return self.dataloader_config.use_seedable_sampler @property def project_dir(self): return self.project_configuration.project_dir @property def logging_dir(self): return self.project_configuration.logging_dir @property def save_iteration(self): return self.project_configuration.iteration @property def is_main_process(self): """True for one process only.""" return self.state.is_main_process @property def is_local_main_process(self): """True for one process per server.""" return self.state.is_local_main_process @property def use_fp16(self): warnings.warn( "The `use_fp16` property is deprecated and will be removed in version 1.0 of Accelerate use " "`Accelerator.mixed_precision == 'fp16'` instead.", FutureWarning, ) return self.mixed_precision != "no" @property def is_last_process(self): return self.process_index == self.num_processes - 1 @property def mixed_precision(self): return self.state.mixed_precision @contextmanager def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False): """ Splits `input` between `self.num_processes` quickly and can be then used on that process. Useful when doing distributed inference, such as with different prompts. Note that when using a `dict`, all keys need to have the same number of elements. Args: inputs (`list`, `tuple`, `torch.Tensor`, or `dict` of `list`/`tuple`/`torch.Tensor`): The input to split between processes. apply_padding (`bool`, `optional`, defaults to `False`): Whether to apply padding by repeating the last element of the input so that all processes have the same number of elements. Useful when trying to perform actions such as `Accelerator.gather()` on the outputs or passing in less inputs than there are processes. If so, just remember to drop the padded elements afterwards. Example: ```python # Assume there are two processes from accelerate import Accelerator accelerator = Accelerator() with accelerator.split_between_processes(["A", "B", "C"]) as inputs: print(inputs) # Process 0 ["A", "B"] # Process 1 ["C"] with accelerator.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs: print(inputs) # Process 0 ["A", "B"] # Process 1 ["C", "C"] ``` """ with PartialState().split_between_processes(inputs, apply_padding=apply_padding) as inputs: yield inputs def on_main_process(self, function: Callable[..., Any] = None): """ A decorator that will run the decorated function on the main process only. Can also be called using the `PartialState` class. Args: function (`Callable`): The function to decorate. 
Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> @accelerator.on_main_process ... def print_something(): ... print("This will be printed by process 0 only.") >>> print_something() "This will be printed by process 0 only" ``` """ # For times when the `Accelerator` object itself utilizes this decorator. if function is None: if "Accelerator." in self.__qualname__: function = self else: raise ValueError( "The `on_main_process` decorator must be called with a function on an instantiated `Accelerator` object." ) def _inner(*args, **kwargs): return PartialState().on_main_process(function)(*args, **kwargs) return _inner def on_local_main_process(self, function: Callable[..., Any] = None): """ A decorator that will run the decorated function on the local main process only. Can also be called using the `PartialState` class. Args: function (`Callable`): The function to decorate. Example: ```python # Assume we have 2 servers with 4 processes each. from accelerate import Accelerator accelerator = Accelerator() @accelerator.on_local_main_process def print_something(): print("This will be printed by process 0 only on each server.") print_something() # On server 1: "This will be printed by process 0 only" # On server 2: "This will be printed by process 0 only" ``` """ # For times when the `Accelerator` object itself utilizes this decorator. if function is None: if "Accelerator." in self.__qualname__: function = self else: raise ValueError( "The `on_local_main_process` decorator must be called with a function on an instantiated `Accelerator` object." ) def _inner(*args, **kwargs): return PartialState().on_local_main_process(function)(*args, **kwargs) return _inner def on_last_process(self, function: Callable[..., Any]): """ A decorator that will run the decorated function on the last process only. Can also be called using the `PartialState` class. Args: function (`Callable`): The function to decorate. Example: ```python # Assume we have 4 processes. from accelerate import Accelerator accelerator = Accelerator() @accelerator.on_last_process def print_something(): print(f"Printed on process {accelerator.process_index}") print_something() "Printed on process 3" ``` """ # For times when the `Accelerator` object itself utilizes this decorator. if function is None: if "Accelerator." in self.__qualname__: function = self else: raise ValueError( "The `on_last_process` decorator must be called with a function on an instantiated `Accelerator` object." ) def _inner(*args, **kwargs): return PartialState().on_last_process(function)(*args, **kwargs) return _inner def on_process(self, function: Callable[..., Any] = None, process_index: int = None): """ A decorator that will run the decorated function on a given process index only. Can also be called using the `PartialState` class. Args: function (`Callable`, `optional`): The function to decorate. process_index (`int`, `optional`): The index of the process on which to run the function. Example: ```python # Assume we have 4 processes. from accelerate import Accelerator accelerator = Accelerator() @accelerator.on_process(process_index=2) def print_something(): print(f"Printed on process {accelerator.process_index}") print_something() "Printed on process 2" ``` """ # Initial construction of the decorator. if (self is not None) and (process_index is not None) and (function is None): return partial(self.on_process, process_index=process_index) # For times when the `Accelerator` object itself utilizes this decorator. 
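        # (when the decorator is applied to `Accelerator`'s own methods at class-definition time, the decorated
        # function arrives as `self`, so it is recovered here via its qualified name)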
if function is None: if "Accelerator." in self.__qualname__: function = self else: raise ValueError( "The `on_main_process` decorator must be called with a function on an instantiated `Accelerator` object." ) def _inner(*args, **kwargs): return PartialState().on_process(function, process_index)(*args, **kwargs) return _inner def on_local_process(self, function: Callable[..., Any] = None, local_process_index: int = None): """ A decorator that will run the decorated function on a given local process index only. Can also be called using the `PartialState` class. Args: function (`Callable`, *optional*): The function to decorate. local_process_index (`int`, *optional*): The index of the local process on which to run the function. Example: ```python # Assume we have 2 servers with 4 processes each. from accelerate import Accelerator accelerator = Accelerator() @accelerator.on_local_process(local_process_index=2) def print_something(): print(f"Printed on process {accelerator.local_process_index}") print_something() # On server 1: "Printed on process 2" # On server 2: "Printed on process 2" ``` """ # Initial construction of the decorator. if (self is not None) and (local_process_index is not None) and (function is None): return partial(self.on_local_process, local_process_index=local_process_index) # For times when the `Accelerator` object itself utilizes this decorator. if function is None: if "Accelerator." in self.__qualname__: function = self else: raise ValueError( "The `on_main_process` decorator must be called with a function on an instantiated `Accelerator` object." ) def _inner(*args, **kwargs): return PartialState().on_local_process(function, local_process_index)(*args, **kwargs) return _inner @contextmanager def main_process_first(self): """ Lets the main process go first inside a with block. The other processes will enter the with block after the main process exits. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> with accelerator.main_process_first(): ... # This will be printed first by process 0 then in a seemingly ... # random order by the other processes. ... print(f"This will be printed by process {accelerator.process_index}") ``` """ with self.state.main_process_first(): yield @contextmanager def local_main_process_first(self): """ Lets the local main process go inside a with block. The other processes will enter the with block after the main process exits. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> with accelerator.local_main_process_first(): ... # This will be printed first by local process 0 then in a seemingly ... # random order by the other processes. ... print(f"This will be printed by process {accelerator.local_process_index}") ``` """ with self.state.local_main_process_first(): yield @contextmanager def no_sync(self, model): """ A context manager to disable gradient synchronizations across DDP processes by calling `torch.nn.parallel.DistributedDataParallel.no_sync`. If `model` is not in DDP, this context manager does nothing Args: model (`torch.nn.Module`): PyTorch Module that was prepared with `Accelerator.prepare` Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> dataloader, model, optimizer = accelerator.prepare(dataloader, model, optimizer) >>> input_a = next(iter(dataloader)) >>> input_b = next(iter(dataloader)) >>> with accelerator.no_sync(): ... outputs = model(input_a) ... loss = loss_func(outputs) ... 
accelerator.backward(loss) ... # No synchronization across processes, only accumulate gradients >>> outputs = model(input_b) >>> accelerator.backward(loss) >>> # Synchronization across all processes >>> optimizer.step() >>> optimizer.zero_grad() ``` """ context = contextlib.nullcontext if self.use_distributed: context = getattr(model, "no_sync", context) with context(): yield @staticmethod @contextmanager def trigger_sync_in_backward(model): """Trigger the sync of the gradients in the next backward pass of the model after multiple forward passes under `Accelerator.no_sync` (only applicable in multi-GPU scenarios). If the script is not launched in distributed mode, this context manager does nothing. Args: model (`torch.nn.Module`): The model for which to trigger the gradient synchronization. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> dataloader, model, optimizer = accelerator.prepare(dataloader, model, optimizer) >>> with accelerator.no_sync(): ... loss_a = loss_func(model(input_a)) # first forward pass ... loss_b = loss_func(model(input_b)) # second forward pass >>> accelerator.backward(loss_a) # No synchronization across processes, only accumulate gradients >>> with accelerator.trigger_sync_in_backward(model): ... accelerator.backward(loss_b) # Synchronization across all processes >>> optimizer.step() >>> optimizer.zero_grad() ``` """ if not isinstance(model, torch.nn.parallel.DistributedDataParallel): yield return old_require_backward_grad_sync = model.require_backward_grad_sync old_require_forward_param_sync = model.require_forward_param_sync # EXPERIMENTAL: This will force grad sync during `backward()`, but it is unknown if it breaks other DDP features. # https://github.com/pytorch/pytorch/blob/e1502c0cdbfd17548c612f25d5a65b1e4b86224d/torch/nn/parallel/distributed.py#L1453-L1466 model.require_backward_grad_sync = True model.require_forward_param_sync = True # https://github.com/pytorch/pytorch/blob/e1502c0cdbfd17548c612f25d5a65b1e4b86224d/torch/csrc/distributed/c10d/reducer.cpp#L1371-L1402 model.reducer.prepare_for_backward([]) try: yield finally: model.require_backward_grad_sync = old_require_backward_grad_sync model.require_forward_param_sync = old_require_forward_param_sync def _do_sync(self, force: bool = False): "Sets the right `sync_gradients` context and either resets or increases `self.step`" if self.gradient_state.sync_with_dataloader and self.gradient_state.end_of_dataloader: self.step = 0 self.gradient_state._set_sync_gradients(True) else: self.step += 1 self.gradient_state._set_sync_gradients(force or ((self.step % self.gradient_state.num_steps) == 0)) @property def sync_gradients(self): return self.gradient_state.sync_gradients @sync_gradients.setter def sync_gradients(self, sync_gradients): self.gradient_state.sync_gradients = sync_gradients @property def gradient_accumulation_steps(self): return self.gradient_state.num_steps @gradient_accumulation_steps.setter def gradient_accumulation_steps(self, gradient_accumulation_steps): self.gradient_state.plugin_kwargs.update({"num_steps": gradient_accumulation_steps}) @contextmanager def accumulate(self, *models): """ A context manager that will lightly wrap around and perform gradient accumulation automatically Args: *models (list of `torch.nn.Module`): PyTorch Modules that were prepared with `Accelerator.prepare`. 
Models passed to `accumulate()` will skip gradient syncing during backward pass in
                distributed training

        Example:

        ```python
        >>> from accelerate import Accelerator

        >>> accelerator = Accelerator(gradient_accumulation_steps=1)
        >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler)

        >>> for input, output in dataloader:
        ...     with accelerator.accumulate(model):
        ...         outputs = model(input)
        ...         loss = loss_func(outputs)
        ...         loss.backward()
        ...         optimizer.step()
        ...         scheduler.step()
        ...         optimizer.zero_grad()
        ```
        """
        # sync_each_batch=True will guarantee below that self.sync_gradients=True, therefore
        # resulting in the nullcontext always being selected.
        self._do_sync(force=self.gradient_state.plugin_kwargs.get("sync_each_batch", False))
        with contextlib.ExitStack() as cm_stack:
            for m in models:
                cm_stack.enter_context(contextlib.nullcontext() if self.sync_gradients else self.no_sync(m))
            yield

    @contextmanager
    def join_uneven_inputs(self, joinables, even_batches=None):
        """
        A context manager that facilitates distributed training or evaluation on uneven inputs, which acts as a
        wrapper around `torch.distributed.algorithms.join`. This is useful when the total batch size does not evenly
        divide the length of the dataset.

        Args:
            joinables (`list[torch.distributed.algorithms.Joinable]`):
                A list of models or optimizers that subclass `torch.distributed.algorithms.Joinable`. Most commonly, a
                PyTorch Module that was prepared with `Accelerator.prepare` for DistributedDataParallel training.
            even_batches (`bool`, *optional*):
                If set, this will override the value of `even_batches` set in the `Accelerator`. If it is not
                provided, the default `Accelerator` value will be used.

        <Tip warning={true}>

        `join_uneven_inputs` is only supported for Distributed Data Parallel training on multiple GPUs. For any other
        configuration, this method will have no effect.

        </Tip>

        <Tip warning={true}>

        Overriding `even_batches` will not affect iterable-style data loaders.

        </Tip>

        Example:

        ```python
        >>> from accelerate import Accelerator

        >>> accelerator = Accelerator(even_batches=True)
        >>> ddp_model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

        >>> with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
        ...     for input, output in dataloader:
        ...         outputs = model(input)
        ...         loss = loss_func(outputs)
        ...         loss.backward()
        ...         optimizer.step()
        ...
optimizer.zero_grad() ``` """ if self.distributed_type in ( DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_MLU, DistributedType.MULTI_XPU, ): dl_even_batches_values = [] if even_batches is not None: iterable_dl_seen = False # override value in batch sampler for map-style datasets for dl_idx, dl in enumerate(self._dataloaders): if isinstance(dl, DataLoaderDispatcher): iterable_dl_seen = True continue dl_even_batches_values.append((dl_idx, dl.batch_sampler.even_batches)) dl.batch_sampler.even_batches = even_batches if iterable_dl_seen: warnings.warn( "Overridding even_batches is only supported for map-style datasets, yet some dataloaders given were iterable" ) else: even_batches = self.even_batches enable_join = False if even_batches else True try: with Join(joinables, enable=enable_join, throw_on_early_termination=False): yield finally: # reset any batch samplers that have been modified for dl_idx, even_batches_value in dl_even_batches_values: self._dataloaders[dl_idx].batch_sampler.even_batches = even_batches_value else: # Even when disabled, Join expects models to subclass Joinable, so skip entirely for single process runs if self.distributed_type != DistributedType.NO: warnings.warn( "Joining uneven inputs is only supported for multi-GPU training, as a result `join_uneven_inputs` will have no effect." ) with contextlib.nullcontext(joinables): yield def print(self, *args, **kwargs): """ Drop in replacement of `print()` to only print once per server. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> accelerator.print("Hello world!") ``` """ self.state.print(*args, **kwargs) def _prepare_one(self, obj, first_pass=False, device_placement=None): # First pass of preparation: DataLoader, model, optimizer if first_pass: if isinstance(obj, torch.utils.data.DataLoader): return self.prepare_data_loader(obj, device_placement=device_placement) elif isinstance(obj, torch.nn.Module): return self.prepare_model(obj, device_placement=device_placement) elif isinstance(obj, torch.optim.Optimizer): optimizer = self.prepare_optimizer(obj, device_placement=device_placement) return optimizer # Second pass of preparation: LR scheduler (which need the full list of optimizers) elif isinstance(obj, LRScheduler): scheduler = self.prepare_scheduler(obj) return scheduler # Return the unprocessed object if previous criteria was not met return obj def prepare(self, *args, device_placement=None): """ Prepare all objects passed in `args` for distributed training and mixed precision, then return them in the same order. Args: *args (list of objects): Any of the following type of objects: - `torch.utils.data.DataLoader`: PyTorch Dataloader - `torch.nn.Module`: PyTorch Module - `torch.optim.Optimizer`: PyTorch Optimizer - `torch.optim.lr_scheduler.LRScheduler`: PyTorch LR Scheduler device_placement (`list[bool]`, *optional*): Used to customize whether automatic device placement should be performed for each object passed. Needs to be a list of the same length as `args`. Not compatible with DeepSpeed or FSDP. 
<Tip> You don't need to prepare a model if you only use it for inference without any kind of mixed precision </Tip> Examples: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> # Assume a model, optimizer, data_loader and scheduler are defined >>> model, optimizer, data_loader, scheduler = accelerator.prepare(model, optimizer, data_loader, scheduler) ``` ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> # Assume a model, optimizer, data_loader and scheduler are defined >>> device_placement = [True, True, False, False] >>> # Will place the first to items passed in automatically to the right device but not the last two. >>> model, optimizer, data_loader, scheduler = accelerator.prepare( ... model, optimizer, data_loader, scheduler, device_placement=device_placement ... ) ``` """ if device_placement is None: device_placement = [None for _ in args] elif self.distributed_type in (DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM): raise ValueError("You can't customize device placements with DeepSpeed or Megatron-LM.") elif len(device_placement) != len(args): raise ValueError( f"`device_placement` should be a list with {len(args)} elements (the number of objects passed)." ) for obj in args: # TODO: Look at enabling native TP training directly with a proper config if ( isinstance(obj, torch.nn.Module) and self.verify_device_map(obj) and self.distributed_type != DistributedType.NO and os.environ.get("ACCELERATE_BYPASS_DEVICE_MAP", "false") != "true" ): raise ValueError( "You can't train a model that has been loaded with `device_map='auto'` in any distributed mode." " Please rerun your script specifying `--num_processes=1` or by launching with `python {{myscript.py}}`." ) if self.distributed_type == DistributedType.DEEPSPEED: model_count = 0 for obj in args: if isinstance(obj, torch.nn.Module): model_count += 1 if model_count > 1: raise AssertionError( "You can't use same `Accelerator()` instance with multiple models when using DeepSpeed" ) # On TPUs, putting the model on the XLA device will create new parameters, so the corresponding optimizer will # have parameters disconnected from the model (so no training :-( ). # If the model and optimizer have parameters on different devices we raise an error. if self.distributed_type == DistributedType.XLA: model_device, optimizer_device = self._get_devices() if model_device is not None and optimizer_device is not None and model_device != optimizer_device: raise ValueError( "The model and the optimizer parameters are not on the same device, which probably means you " "created an optimizer around your model **before** putting on the device. Make sure the line " "model.to(device) is before the optimizer creation in your script or remove it entirely and use " "the flag default value for `device_placement` in your `Accelerator` to let it handle that " "part for you." ) # If we're dealing with device placement, this deals with that by... tpu_should_fix_optimizer = self.device_placement and self.distributed_type == DistributedType.XLA if tpu_should_fix_optimizer or (self.mixed_precision == "fp8" and self.fp8_recipe_handler.backend == "TE"): # 1. 
grabbing old model parameters old_named_params = self._get_named_parameters(*args) if self.distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.NO]: if self.device.type == "cpu" and self.state.use_ipex: args = self._prepare_ipex(*args) elif self.device.type == "xpu" and is_xpu_available(): args = self._prepare_ipex(*args) if self.distributed_type == DistributedType.DEEPSPEED: result = self._prepare_deepspeed(*args) elif self.distributed_type == DistributedType.MEGATRON_LM: result = self._prepare_megatron_lm(*args) else: if self.mixed_precision == "fp8" and self.fp8_recipe_handler.backend == "MSAMP": args = self._prepare_msamp(*args) # MS-AMP will handle the device placement device_placement = [False for _ in args] result = tuple( self._prepare_one(obj, first_pass=True, device_placement=d) for obj, d in zip(args, device_placement) ) result = tuple(self._prepare_one(obj, device_placement=d) for obj, d in zip(result, device_placement)) if tpu_should_fix_optimizer or (self.mixed_precision == "fp8" and self.fp8_recipe_handler.backend == "TE"): # 2. grabbing new model parameters new_named_params = self._get_named_parameters(*result) # 3. building a map from the first to the second mapping = {p: new_named_params[n] for n, p in old_named_params.items()} # 4. using that map to update the parameters of the optimizer for obj in result: if isinstance(obj, torch.optim.Optimizer): obj._switch_parameters(mapping) for item in result: if any( item in container for container in (self._dataloaders, self._models, self._optimizers, self._schedulers) ): item._is_accelerate_prepared = True return result if len(result) > 1 else result[0] def prepare_model(self, model: torch.nn.Module, device_placement: bool = None, evaluation_mode: bool = False): """ Prepares a PyTorch model for training in any distributed setup. It is recommended to use [`Accelerator.prepare`] instead. Args: model (`torch.nn.Module`): A PyTorch model to prepare. You don't need to prepare a model if it is used only for inference without any kind of mixed precision device_placement (`bool`, *optional*): Whether or not to place the model on the proper device. Will default to `self.device_placement`. evaluation_mode (`bool`, *optional*, defaults to `False`): Whether or not to set the model for evaluation only, by just applying mixed precision and `torch.compile` (if configured in the `Accelerator` object). Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> # Assume a model is defined >>> model = accelerator.prepare_model(model) ``` """ if device_placement is None: device_placement = self.device_placement and self.distributed_type != DistributedType.FSDP self._models.append(model) # TODO: Look at enabling native TP training directly with a proper config if ( self.verify_device_map(model) and self.distributed_type != DistributedType.NO and os.environ.get("ACCELERATE_BYPASS_DEVICE_MAP", "false") != "true" ): raise ValueError( "You can't train a model that has been loaded with `device_map='auto'` in any distributed mode." " Please rerun your script specifying `--num_processes=1` or by launching with `python {{myscript.py}}`." 
) if self.native_amp: model._original_forward = model.forward model_forward_func = model.forward.__func__ if hasattr(model.forward, "__func__") else model.forward autocast_context = get_mixed_precision_context_manager(self.native_amp, self.autocast_handler) new_forward = autocast_context(model_forward_func) if hasattr(model.forward, "__func__"): model.forward = MethodType(new_forward, model) model.forward = MethodType(convert_outputs_to_fp32(model.forward.__func__), model) else: model.forward = convert_outputs_to_fp32(new_forward) elif self.mixed_precision == "fp8" and self.fp8_recipe_handler.backend == "TE": if not has_transformer_engine_layers(model): with torch.no_grad(): convert_model(model) model._converted_to_transformer_engine = True model._original_forward = model.forward kwargs = self.fp8_recipe_handler.to_kwargs() if self.fp8_recipe_handler is not None else {} if "fp8_format" in kwargs: kwargs["fp8_format"] = getattr(te_recipe.Format, kwargs["fp8_format"]) fp8_recipe = te_recipe.DelayedScaling(**kwargs) model.forward = fp8_autocast(enabled=True, fp8_recipe=fp8_recipe)(model.forward) if (getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False)) and getattr( model, "hf_device_map", False ): model_devices = set(model.hf_device_map.values()) if len(model_devices) > 1 and self.distributed_type != DistributedType.NO: raise ValueError( "You can't train a model that has been loaded in 8-bit precision on multiple devices in any distributed mode." " In order to use 8-bit models that have been loaded across multiple GPUs the solution is to use Naive Pipeline Parallelism." " Therefore you should not specify that you are under any distributed regime in your accelerate config." ) current_device = list(model_devices)[0] current_device_index = current_device.index if isinstance(current_device, torch.device) else current_device if torch.device(current_device_index) != self.device: # if on the first device (GPU 0) we don't care if (self.device.index is not None) or (current_device_index != 0): raise ValueError( "You can't train a model that has been loaded in 8-bit precision on a different device than the one " "you're training on. Make sure you loaded the model on the correct device using for example `device_map={'':torch.cuda.current_device() or device_map={'':torch.xpu.current_device()}" ) if "cpu" in model_devices or "disk" in model_devices: raise ValueError( "You can't train a model that has been loaded in 8-bit precision with CPU or disk offload." 
) elif device_placement and not self.verify_device_map(model): model = model.to(self.device) if not evaluation_mode: if self.distributed_type in ( DistributedType.MULTI_GPU, DistributedType.MULTI_MLU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU, ): if any(p.requires_grad for p in model.parameters()): kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {} # TODO: Look at enabling native TP training directly with a proper config if os.environ.get("ACCELERATE_BYPASS_DEVICE_MAP", "false") != "true": device_ids, output_device = [self.local_process_index], self.local_process_index else: device_ids, output_device = None, None model = torch.nn.parallel.DistributedDataParallel( model, device_ids=device_ids, output_device=output_device, **kwargs ) elif self.distributed_type == DistributedType.FSDP: from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP # Check if the model is already a FSDP model due to `Manual Wrapping` and if so, # don't wrap it again # In case the model is already compiled using PyTorch 2.0 and the wrapped model in it # is a FSDP model, don't wrap it again is_type_fsdp = isinstance(model, FSDP) or ( is_compiled_module(model) and isinstance(model._orig_mod, FSDP) ) if not is_type_fsdp: self.state.fsdp_plugin.set_auto_wrap_policy(model) fsdp_plugin = self.state.fsdp_plugin kwargs = { "sharding_strategy": fsdp_plugin.sharding_strategy, "cpu_offload": fsdp_plugin.cpu_offload, "auto_wrap_policy": fsdp_plugin.auto_wrap_policy, "mixed_precision": fsdp_plugin.mixed_precision_policy, "sync_module_states": fsdp_plugin.sync_module_states, "backward_prefetch": fsdp_plugin.backward_prefetch, "forward_prefetch": fsdp_plugin.forward_prefetch, "use_orig_params": fsdp_plugin.use_orig_params, "param_init_fn": fsdp_plugin.param_init_fn, "ignored_modules": fsdp_plugin.ignored_modules, "limit_all_gathers": fsdp_plugin.limit_all_gathers, "device_id": self.device, } model = FSDP(model, **kwargs) if fsdp_plugin.activation_checkpointing: from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointImpl, apply_activation_checkpointing, checkpoint_wrapper, ) apply_activation_checkpointing( model, checkpoint_wrapper_fn=functools.partial( checkpoint_wrapper, checkpoint_impl=CheckpointImpl.NO_REENTRANT, ), auto_wrap_policy=fsdp_plugin.auto_wrap_policy, ) # if the previous and current models are same, delete the previous one if len(self._models) > 1 and (self._models[-2] is self._models[-1]): del self._models[-2] self._models[-1] = model elif self.distributed_type == DistributedType.MULTI_CPU: kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {} model = torch.nn.parallel.DistributedDataParallel(model, **kwargs) elif self.distributed_type == DistributedType.XLA and self.state.fork_launched: model = xmp.MpModelWrapper(model).to(self.device) # torch.compile should be called last and only if the model isn't already compiled. 
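        # The compile arguments (backend, mode, fullgraph, dynamic, ...) come from the `TorchDynamoPlugin`
        # stored on the state, built from `dynamo_backend` / the accelerate config during `__init__`.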
if self.state.dynamo_plugin.backend != DynamoBackend.NO and not is_compiled_module(model): if not is_torch_version(">=", "2.0"): raise ValueError("Using `torch.compile` requires PyTorch 2.0 or higher.") model = torch.compile(model, **self.state.dynamo_plugin.to_kwargs()) return model def _prepare_deepspeed(self, *args): import deepspeed deepspeed_plugin = self.state.deepspeed_plugin is_dataloader_present = any(isinstance(obj, torch.utils.data.DataLoader) for obj in args) result = [ self._prepare_one(obj, first_pass=True) if isinstance(obj, torch.utils.data.DataLoader) else obj for obj in args ] if deepspeed_plugin.is_auto("train_micro_batch_size_per_gpu"): if is_dataloader_present: batch_sizes = [obj.batch_size for obj in args if hasattr(obj, "batch_size")] if any(bs is None for bs in batch_sizes): raise ValueError( "At least one of the dataloaders passed to `accelerate.prepare()` has `None` as batch size. " "Please set an integer value in `train_micro_batch_size_per_gpu` in the deepspeed config file " "or assign integer value to `AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu']`." ) if self.split_batches: batch_sizes = [batch_size // self.num_processes for batch_size in batch_sizes] batch_size_per_device = min(batch_sizes) if deepspeed_plugin.is_train_batch_min else max(batch_sizes) if len(batch_sizes) > 1: logger.info( "Since you passed both train and evaluation dataloader, `is_train_batch_min` (here " f"{deepspeed_plugin.is_train_batch_min} will decide the `train_batch_size` ({batch_size_per_device})." ) else: raise ValueError( "When using DeepSpeed, `accelerate.prepare()` requires you to pass at least one of training or evaluation dataloaders " "with `batch_size` attribute returning an integer value " "or alternatively set an integer value in `train_micro_batch_size_per_gpu` in the deepspeed config file " "or assign integer value to `AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu']`." ) else: batch_size_per_device = deepspeed_plugin.get_value("train_micro_batch_size_per_gpu") # handle `gradient_accumulation_steps` when the value is `auto` deepspeed_plugin.fill_match( "gradient_accumulation_steps", must_match=False, gradient_accumulation_steps=self.gradient_accumulation_steps, ) config_kwargs = { "train_micro_batch_size_per_gpu": batch_size_per_device, "train_batch_size": batch_size_per_device * deepspeed_plugin.get_value("gradient_accumulation_steps") * self.num_processes, "gradient_clipping": 1.0, "zero_optimization.stage3_gather_16bit_weights_on_model_save": False, } model = None optimizer = None scheduler = None for obj in result: if isinstance(obj, torch.nn.Module): model = obj elif isinstance(obj, (torch.optim.Optimizer, DummyOptim)): optimizer = obj elif (isinstance(obj, (LRScheduler, DummyScheduler))) or ( type(obj).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES ): scheduler = obj if optimizer is not None: if "optimizer" in deepspeed_plugin.deepspeed_config and not isinstance(optimizer, (DummyOptim)): raise ValueError( "You cannot specify an optimizer in the config file and in the code at the same time. " "Please remove the optimizer from the config file or " "create `accelerate.utils.DummyOptim` in the code." ) elif "optimizer" not in deepspeed_plugin.deepspeed_config and isinstance(optimizer, (DummyOptim)): raise ValueError( "You cannot create a `DummyOptim` without specifying an optimizer in the config file." 
) if isinstance(optimizer, (torch.optim.Optimizer)): deepspeed_plugin.deepspeed_config["zero_allow_untested_optimizer"] = True if scheduler is not None: if "scheduler" in deepspeed_plugin.deepspeed_config and not isinstance(scheduler, (DummyScheduler)): raise ValueError( "You cannot specify a scheduler in the config file and in the code at the same time. " "Please remove the scheduler from the config file or " "create `accelerate.utils.DummyScheduler` in the code." ) elif ( "scheduler" not in deepspeed_plugin.deepspeed_config and isinstance(scheduler, (DummyScheduler)) and scheduler.lr_scheduler_callable is None ): raise ValueError( "Either specify a scheduler in the config file or " "pass in the `lr_scheduler_callable` parameter when using `accelerate.utils.DummyScheduler`." ) if optimizer is not None and scheduler is not None: if isinstance(optimizer, (DummyOptim)) and not isinstance(scheduler, (DummyScheduler)): raise ValueError( "You can only specify `accelerate.utils.DummyScheduler` in the code when using " "`accelerate.utils.DummyOptim`." ) if model is not None: # deal with config keys that use `auto` value and rely on model's hidden_size hidden_size_based_keys = [ "zero_optimization.reduce_bucket_size", "zero_optimization.stage3_prefetch_bucket_size", "zero_optimization.stage3_param_persistence_threshold", ] hidden_size_auto_keys = [x for x in hidden_size_based_keys if deepspeed_plugin.is_auto(x)] if len(hidden_size_auto_keys) > 0: reasoning = ( "therefore it's not possible to automatically fill out the following `auto` entries " + f"in the DeepSpeed config file: {hidden_size_auto_keys}. You can fix that by replacing " + "`auto` values for these keys with an integer value of your choice." ) if not hasattr(model, "config"): raise ValueError("Can't find `model.config` entry, " + reasoning) if hasattr(model.config, "hidden_size"): hidden_size = model.config.hidden_size elif hasattr(model.config, "hidden_sizes"): # if there are many hidden sizes pick the largest one hidden_size = max(model.config.hidden_sizes) else: raise ValueError( "Can find neither `model.config.hidden_size` nor `model.config.hidden_sizes`, " + reasoning ) config_kwargs.update( { "zero_optimization.reduce_bucket_size": hidden_size * hidden_size, "zero_optimization.stage3_prefetch_bucket_size": 0.9 * hidden_size * hidden_size, "zero_optimization.stage3_param_persistence_threshold": 10 * hidden_size, } ) if isinstance(optimizer, (DummyOptim)): config_kwargs.update( {"optimizer.params.lr": optimizer.lr, "optimizer.params.weight_decay": optimizer.weight_decay} ) if isinstance(scheduler, (DummyScheduler)) and scheduler.lr_scheduler_callable is None: max_lr = ( getattr(scheduler.optimizer, "lr", None) if getattr(scheduler.optimizer, "defaults", None) is None else scheduler.optimizer.defaults["lr"] ) config_kwargs.update( { "scheduler.params.warmup_min_lr": 0, "scheduler.params.warmup_max_lr": max_lr, "scheduler.params.warmup_num_steps": scheduler.warmup_num_steps, } ) if scheduler.total_num_steps is not None: config_kwargs["scheduler.params.total_num_steps"] = ( math.ceil(scheduler.total_num_steps / self.num_processes) if not self.split_batches else scheduler.total_num_steps ) deepspeed_plugin.deepspeed_config_process(must_match=False, **config_kwargs) self.deepspeed_config = deepspeed_plugin.deepspeed_config kwargs = dict(model=model, config_params=self.deepspeed_config) if optimizer is not None: if isinstance(optimizer, (DummyOptim)): kwargs["model_parameters"] = optimizer.params if isinstance(scheduler, 
(DummyScheduler)) and scheduler.lr_scheduler_callable is not None: kwargs["lr_scheduler"] = scheduler.lr_scheduler_callable else: if self.deepspeed_config["zero_optimization"].get("offload_optimizer", {}).get( "device", "none" ) != "none" and self.deepspeed_config.get("zero_force_ds_cpu_optimizer", True): from deepspeed.ops.adam import DeepSpeedCPUAdam defaults = {k: v for k, v in optimizer.defaults.items() if k in ["lr", "weight_decay"]} optimizer = DeepSpeedCPUAdam(optimizer.param_groups, **defaults) kwargs["optimizer"] = optimizer if scheduler is not None: if type(scheduler).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES: kwargs["lr_scheduler"] = scheduler engine, optimizer, _, lr_scheduler = deepspeed.initialize(**kwargs) if optimizer is not None: optimizer = DeepSpeedOptimizerWrapper(optimizer) if scheduler is not None: if lr_scheduler is None: scheduler = AcceleratedScheduler( scheduler, optimizer, step_with_optimizer=self.step_scheduler_with_optimizer, split_batches=self.split_batches, ) else: scheduler = DeepSpeedSchedulerWrapper(lr_scheduler, optimizer) for i in range(len(result)): if isinstance(result[i], torch.nn.Module): result[i] = engine elif isinstance(result[i], (torch.optim.Optimizer, DummyOptim)): result[i] = optimizer elif (isinstance(result[i], (LRScheduler, DummyScheduler))) or ( type(result[i]).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES ): result[i] = scheduler # pointing for deepspeed_engine_wrapped.backward() self.deepspeed_engine_wrapped = DeepSpeedEngineWrapper(engine) self._models.append(engine) if optimizer is not None: self._optimizers.append(optimizer) if scheduler is not None: self._schedulers.append(scheduler) if len(self._models) > 1: raise AssertionError( "You can't use same `Accelerator()` instance with multiple models when using DeepSpeed" ) return tuple(result) def _prepare_megatron_lm(self, *args): megatron_lm_plugin = self.state.megatron_lm_plugin if not megatron_lm_plugin.megatron_dataset_flag: batch_sizes = [obj.batch_size for obj in args if hasattr(obj, "batch_size")] if len(batch_sizes) == 0: raise ValueError( "You must specify a training or evaluation dataloader in `accelerate.prepare()` when using Megatron-LM." ) micro_batch_size = min(batch_sizes) if megatron_lm_plugin.is_train_batch_min else max(batch_sizes) if len(batch_sizes) > 1: logger.info( "Since you passed both train and evaluation dataloader, `is_train_batch_min` (here " f"{megatron_lm_plugin.is_train_batch_min} will decide the `train_batch_size` ({micro_batch_size})." 
) else: for obj in args: if isinstance(obj, MegatronLMDummyDataLoader): micro_batch_size = obj.dataset_args["micro_batch_size"] break dp_degree = self.num_processes // (megatron_lm_plugin.tp_degree * megatron_lm_plugin.pp_degree) megatron_lm_plugin.set_training_args(micro_batch_size, dp_degree) model = None optimizer = None scheduler = None is_dummy_scheduler = False batch_data = None for obj in args: if isinstance(obj, torch.utils.data.DataLoader) and batch_data is None: batch_data = next(iter(obj)) if isinstance(obj, torch.nn.Module): model = obj elif isinstance(obj, (torch.optim.Optimizer)): optimizer = obj elif isinstance(obj, (LRScheduler, MegatronLMDummyScheduler)): scheduler = obj if model is not None: megatron_lm_plugin.set_network_size_args(model, batch_data) if optimizer is not None: megatron_lm_plugin.set_optimizer_type(optimizer) if scheduler is not None: is_dummy_scheduler = isinstance(scheduler, MegatronLMDummyScheduler) if not is_dummy_scheduler: raise ValueError( "You can't use a custom scheduler with Megatron-LM. Please use the `accelerate.utils.MegatronLMDummyScheduler` instead." ) megatron_lm_plugin.set_scheduler_args(scheduler) # initialize megatron-lm megatron_lm_initialize(self, args_defaults=megatron_lm_plugin.megatron_lm_default_args) counter = 0 result = [] for obj in args: if isinstance(obj, torch.utils.data.DataLoader): result.append(megatron_lm_prepare_data_loader(self, obj)) counter += 1 elif isinstance(obj, MegatronLMDummyDataLoader): if counter == 0: obj.set_megatron_data_args() dataloaders = megatron_lm_prepare_data_loader(self, obj) result.append(dataloaders[counter]) counter += 1 else: result.append(obj) if model is not None: model = megatron_lm_prepare_model(self) if optimizer is not None: optimizer = megatron_lm_prepare_optimizer(self, model) if scheduler is not None: scheduler = megatron_lm_prepare_scheduler(self, optimizer, scheduler) if model is not None: model = MegatronEngine(self, model, optimizer, scheduler) if optimizer is not None: optimizer = MegatronLMOptimizerWrapper(optimizer) if scheduler is not None: scheduler = MegatronLMSchedulerWrapper(scheduler, optimizer) for i in range(len(result)): if isinstance(result[i], torch.nn.Module): result[i] = model elif isinstance(result[i], torch.optim.Optimizer): result[i] = optimizer elif isinstance(result[i], MegatronLMDummyScheduler): result[i] = scheduler if model is not None: self._models.append(model) if optimizer is not None: self._optimizers.append(optimizer) if scheduler is not None: self._schedulers.append(scheduler) if len(self._models) > 1: raise AssertionError( "You can't use same `Accelerator()` instance with multiple models when using Megatron-LM" ) return tuple(result) def _prepare_ipex(self, *args): if not is_ipex_available(): raise ImportError( "IPEX is not installed or IPEX's version does not match current PyTorch version. Please refer" " to https://github.com/intel/intel-extension-for-pytorch." 
) else: import intel_extension_for_pytorch as ipex model = None optimizer = None result = [obj for obj in args] for obj in result: if isinstance(obj, torch.nn.Module): model = obj model.train() elif isinstance(obj, (torch.optim.Optimizer)): optimizer = obj if optimizer is not None and model is not None: dtype = torch.bfloat16 if self.state.mixed_precision == "bf16" else None if self.device.type == "xpu" and is_xpu_available(): model = model.to(self.device) model, optimizer = torch.xpu.optimize( model, optimizer=optimizer, dtype=dtype, inplace=True, level="O1" ) else: model, optimizer = ipex.optimize(model, optimizer=optimizer, dtype=dtype, inplace=True, level="O1") for i in range(len(result)): if isinstance(result[i], torch.nn.Module): result[i] = model elif isinstance(result[i], (torch.optim.Optimizer)): result[i] = optimizer return tuple(result) def _prepare_msamp(self, *args): if not is_msamp_available(): raise ImportError( "MS-AMP was not found on your system. Please ensure that MS-AMP is available " " or choose `'te'` as the backend for FP8 mixed precision training." ) else: import msamp model, optimizer = None, None num_models, num_optimizers = 0, 0 result = [obj for obj in args] for obj in result: if isinstance(obj, torch.nn.Module): model = obj num_models += 1 elif isinstance(obj, (torch.optim.Optimizer)): optimizer = obj num_optimizers += 1 if optimizer is None or model is None: raise ValueError( "You must pass a model and an optimizer together to `accelerate.prepare()` when using MS-AMP." ) elif num_models > 1 or num_optimizers > 1: raise ValueError( f"You can't use multiple models ({num_models}) or optimizers {num_optimizers} with MS-AMP." ) else: model, optimizer = msamp.initialize(model, optimizer, opt_level=self.fp8_recipe_handler.opt_level) for i in range(len(result)): if isinstance(result[i], torch.nn.Module): result[i] = model elif isinstance(result[i], (torch.optim.Optimizer)): result[i] = optimizer return tuple(result) def prepare_data_loader( self, data_loader: torch.utils.data.DataLoader, device_placement=None, slice_fn_for_dispatch=None ): """ Prepares a PyTorch DataLoader for training in any distributed setup. It is recommended to use [`Accelerator.prepare`] instead. Args: data_loader (`torch.utils.data.DataLoader`): A vanilla PyTorch DataLoader to prepare device_placement (`bool`, *optional*): Whether or not to place the batches on the proper device in the prepared dataloader. Will default to `self.device_placement`. slice_fn_for_dispatch (`Callable`, *optional*`): If passed, this function will be used to slice tensors across `num_processes`. Will default to [`~utils.slice_tensors`]. This argument is used only when `dispatch_batches` is set to `True` and will be ignored otherwise. Example: ```python >>> import torch >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> data_loader = torch.utils.data.DataLoader(...) 
>>> data_loader = accelerator.prepare_data_loader(data_loader, device_placement=True) ``` """ # Ensure we can't double wrap a DataLoader due to `find_batch_size` if getattr(data_loader, "_is_accelerate_prepared", False): if data_loader not in self._dataloaders: self._dataloaders.append(data_loader) return data_loader if device_placement is None: device_placement = self.device_placement if self.distributed_type != DistributedType.XLA else False prepared_data_loader = prepare_data_loader( data_loader, self.device, num_processes=self.num_processes, process_index=self.process_index, split_batches=self.split_batches, put_on_device=device_placement, rng_types=self.rng_types.copy(), dispatch_batches=self.dispatch_batches, even_batches=self.even_batches, slice_fn_for_dispatch=slice_fn_for_dispatch, use_seedable_sampler=self.use_seedable_sampler, ) self._dataloaders.append(prepared_data_loader) return prepared_data_loader def prepare_optimizer(self, optimizer: torch.optim.Optimizer, device_placement=None): """ Prepares a PyTorch Optimizer for training in any distributed setup. It is recommended to use [`Accelerator.prepare`] instead. Args: optimizer (`torch.optim.Optimizer`): A vanilla PyTorch optimizer to prepare device_placement (`bool`, *optional*): Whether or not to place the optimizer on the proper device. Will default to `self.device_placement`. Example: ```python >>> import torch >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> optimizer = torch.optim.Adam(...) >>> optimizer = accelerator.prepare_optimizer(optimizer, device_placement=True) ``` """ # Ensure we can't double wrap an optimizer due to `find_batch_size` if getattr(optimizer, "_is_accelerate_prepared", False): if optimizer not in self._optimizers: self._optimizers.append(optimizer) return optimizer if device_placement is None: device_placement = self.device_placement optimizer = AcceleratedOptimizer(optimizer, device_placement=device_placement, scaler=self.scaler) self._optimizers.append(optimizer) return optimizer def prepare_scheduler(self, scheduler: LRScheduler): """ Prepares a PyTorch Scheduler for training in any distributed setup. It is recommended to use [`Accelerator.prepare`] instead. Args: scheduler (`torch.optim.lr_scheduler.LRScheduler`): A vanilla PyTorch scheduler to prepare Example: ```python >>> import torch >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> optimizer = torch.optim.Adam(...) >>> scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, ...) >>> scheduler = accelerator.prepare_scheduler(scheduler) ``` """ # Ensure we can't double wrap a scheduler due to `find_batch_size` if getattr(scheduler, "_is_accelerate_prepared", False): if scheduler not in self._schedulers: self._schedulers.append(scheduler) return scheduler # We try to find the optimizer associated with `scheduler`, the default is the full list. optimizer = self._optimizers for opt in self._optimizers: if getattr(scheduler, "optimizer", None) == opt.optimizer: optimizer = opt break scheduler = AcceleratedScheduler( scheduler, optimizer, step_with_optimizer=self.step_scheduler_with_optimizer, split_batches=self.split_batches, ) self._schedulers.append(scheduler) return scheduler def backward(self, loss, **kwargs): """ Scales the gradients in accordance to the `GradientAccumulationPlugin` and calls the correct `backward()` based on the configuration. Should be used in lieu of `loss.backward()`. 
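
        When gradient accumulation is enabled, the loss passed here is scaled by `1 / gradient_accumulation_steps`
        before the backward pass (except under DeepSpeed, which applies that scaling itself in its own `backward`).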
        Example:

        ```python
        >>> from accelerate import Accelerator

        >>> accelerator = Accelerator(gradient_accumulation_steps=2)
        >>> outputs = model(inputs)
        >>> loss = loss_fn(outputs, labels)
        >>> accelerator.backward(loss)
        ```
        """
        if self.distributed_type != DistributedType.DEEPSPEED:
            # deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`
            loss = loss / self.gradient_accumulation_steps
        if self.distributed_type == DistributedType.DEEPSPEED:
            self.deepspeed_engine_wrapped.backward(loss, **kwargs)
        elif self.distributed_type == DistributedType.MEGATRON_LM:
            return
        elif self.scaler is not None:
            self.scaler.scale(loss).backward(**kwargs)
        else:
            loss.backward(**kwargs)

    def set_trigger(self):
        """
        Sets the internal trigger tensor to 1 on the current process. A later check via
        [`Accelerator.check_trigger`] should follow, which will check across all processes.

        Note:
            Does not require `wait_for_everyone()`

        Example:

        ```python
        >>> from accelerate import Accelerator

        >>> accelerator = Accelerator()
        >>> # Assume later in the training script
        >>> # `should_do_breakpoint` is a custom function to monitor when to break,
        >>> # e.g. when the loss is NaN
        >>> if should_do_breakpoint(loss):
        ...     accelerator.set_trigger()
        >>> # Assume later in the training script
        >>> if accelerator.check_trigger():
        ...     break
        ```
        """
        self.flag_tensor = torch.tensor(1, device=self.device)

    def check_trigger(self):
        """
        Checks if the internal trigger tensor has been set to 1 in any of the processes. If so, will return `True`
        and reset the trigger tensor to 0.

        Note:
            Does not require `wait_for_everyone()`

        Example:

        ```python
        >>> from accelerate import Accelerator

        >>> accelerator = Accelerator()
        >>> # Assume later in the training script
        >>> # `should_do_breakpoint` is a custom function to monitor when to break,
        >>> # e.g. when the loss is NaN
        >>> if should_do_breakpoint(loss):
        ...     accelerator.set_trigger()
        >>> # Assume later in the training script
        >>> if accelerator.check_trigger():
        ...     break
        ```
        """
        # Now that we are outside `__init__`, we can initialize it if it is `None` on device
        if self.flag_tensor is None:
            self.flag_tensor = torch.tensor(0, device=self.device)
        flag_tensor = self.reduce(self.flag_tensor)
        if flag_tensor.item() >= 1:
            self.flag_tensor = torch.tensor(0, device=self.device)
            return True
        return False

    def unscale_gradients(self, optimizer=None):
        """
        Unscale the gradients in mixed precision training with AMP. This is a no-op in all other settings.

        Likely should be called through [`Accelerator.clip_grad_norm_`] or [`Accelerator.clip_grad_value_`]

        Args:
            optimizer (`torch.optim.Optimizer` or `list[torch.optim.Optimizer]`, *optional*):
                The optimizer(s) for which to unscale gradients. If not set, will unscale gradients on all optimizers
                that were passed to [`~Accelerator.prepare`].

        Example:

        ```python
        >>> from accelerate import Accelerator

        >>> accelerator = Accelerator()
        >>> model, optimizer = accelerator.prepare(model, optimizer)
        >>> outputs = model(inputs)
        >>> loss = loss_fn(outputs, labels)
        >>> accelerator.backward(loss)
        >>> accelerator.unscale_gradients(optimizer=optimizer)
        ```
        """
        if self.native_amp and self.mixed_precision == "fp16":
            if optimizer is None:
                # TODO: this unscales all optimizers where we should only unscale the one where parameters are.
optimizer = self._optimizers elif not isinstance(optimizer, (tuple, list)): optimizer = [optimizer] for opt in optimizer: while isinstance(opt, AcceleratedOptimizer): opt = opt.optimizer self.scaler.unscale_(opt) def clip_grad_norm_(self, parameters, max_norm, norm_type=2): """ Should be used in place of `torch.nn.utils.clip_grad_norm_`. Returns: `torch.Tensor`: Total norm of the parameter gradients (viewed as a single vector). Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator(gradient_accumulation_steps=2) >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler) >>> for input, target in dataloader: ... optimizer.zero_grad() ... output = model(input) ... loss = loss_func(output, target) ... accelerator.backward(loss) ... if accelerator.sync_gradients: ... accelerator.clip_grad_norm_(model.parameters(), max_grad_norm) ... optimizer.step() ``` """ if self.distributed_type == DistributedType.FSDP: self.unscale_gradients() parameters = [p for p in parameters] for model in self._models: if parameters == [p for p in model.parameters()]: return model.clip_grad_norm_(max_norm, norm_type) elif self.distributed_type == DistributedType.DEEPSPEED: # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed # We cannot return the gradient norm because DeepSpeed does it. return None elif self.distributed_type == DistributedType.XLA: # Reduce gradients first for XLA for acc_opt in self._optimizers: if not acc_opt.gradient_state.is_xla_gradients_synced: opt = acc_opt while isinstance(opt, AcceleratedOptimizer): opt = opt.optimizer gradients = xm._fetch_gradients(opt) # Use xm.all_reduce to perform an in-place all-reduce. Recusrsive all-reduce each tensor # one by one in self.reduce is non-inplace. xm.all_reduce("sum", gradients, scale=1.0 / self.num_processes) # Set is_xla_gradients_synced to True to avoid all-reduce twice in the AcceleratedOptimizer step. acc_opt.gradient_state.is_xla_gradients_synced = True self.unscale_gradients() return torch.nn.utils.clip_grad_norm_(parameters, max_norm, norm_type=norm_type) def clip_grad_value_(self, parameters, clip_value): """ Should be used in place of `torch.nn.utils.clip_grad_value_`. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator(gradient_accumulation_steps=2) >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler) >>> for input, target in dataloader: ... optimizer.zero_grad() ... output = model(input) ... loss = loss_func(output, target) ... accelerator.backward(loss) ... if accelerator.sync_gradients: ... accelerator.clip_grad_value_(model.parameters(), clip_value) ... optimizer.step() ``` """ if self.distributed_type in [DistributedType.DEEPSPEED, DistributedType.FSDP]: raise Exception("DeepSpeed and FSDP do not support `clip_grad_value_`. Use `clip_grad_norm_` instead.") self.unscale_gradients() torch.nn.utils.clip_grad_value_(parameters, clip_value) def gather(self, tensor): """ Gather the values in *tensor* across all processes and concatenate them on the first dimension. Useful to regroup the predictions from all processes when doing evaluation. Note: This gather happens in all processes. Args: tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`): The tensors to gather across all processes. Returns: `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`: The gathered tensor(s). 
Note that the first dimension of the result is *num_processes* multiplied by the first dimension of the input tensors. Example: ```python >>> # Assuming four processes >>> import torch >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> process_tensor = torch.tensor([accelerator.process_index]) >>> gathered_tensor = accelerator.gather(process_tensor) >>> gathered_tensor tensor([0, 1, 2, 3]) ``` """ return gather(tensor) def gather_for_metrics(self, input_data): """ Gathers `input_data` and potentially drops duplicates in the last batch if on a distributed system. Should be used for gathering the inputs and targets for metric calculation. Args: input (`torch.Tensor`, `object`, a nested tuple/list/dictionary of `torch.Tensor`, or a nested tuple/list/dictionary of `object`): The tensors or objects for calculating metrics across all processes Example: ```python >>> # Assuming two processes, with a batch size of 5 on a dataset with 9 samples >>> import torch >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> dataloader = torch.utils.data.DataLoader(range(9), batch_size=5) >>> dataloader = accelerator.prepare(dataloader) >>> batch = next(iter(dataloader)) >>> gathered_items = accelerator.gather_for_metrics(batch) >>> len(gathered_items) 9 ``` """ try: recursively_apply(lambda x: x, input_data, error_on_other_type=True) all_tensors = True except TypeError: all_tensors = False if not all_tensors: data = gather_object(input_data) else: data = self.gather(input_data) try: if self.gradient_state.end_of_dataloader: # at the end of a dataloader, `gather_for_metrics` regresses to # `gather` unless the dataset has a remainder so log. if self.gradient_state.remainder == -1: logger.info( "The used dataset had no length, returning gathered tensors. You should drop the remainder yourself." ) return data elif self.gradient_state.remainder > 0: # Last batch needs to be truncated on distributed systems as it contains additional samples def _adjust_samples(tensor): return tensor[: self.gradient_state.remainder] return recursively_apply(_adjust_samples, data) else: # remainder is 0 # no remainder even though at end of dataloader, so nothing to do. return data else: # Not at the end of the dataloader, no need to adjust the tensors return data except Exception: # Dataset had no length or raised an error return data def reduce(self, tensor, reduction="sum", scale=1.0): """ Reduce the values in *tensor* across all processes based on *reduction*. Note: All processes get the reduced value. Args: tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`): The tensors to reduce across all processes. reduction (`str`, *optional*, defaults to "sum"): A reduction type, can be one of 'sum', 'mean', or 'none'. If 'none', will not perform any operation. scale (`float`, *optional*, defaults to 1.0): A default scaling value to be applied after the reduce, only valied on XLA. Returns: `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`: The reduced tensor(s). 
Example: ```python >>> # Assuming two processes >>> import torch >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> process_tensor = torch.arange(accelerator.num_processes) + 1 + (2 * accelerator.process_index) >>> process_tensor = process_tensor.to(accelerator.device) >>> reduced_tensor = accelerator.reduce(process_tensor, reduction="sum") >>> reduced_tensor tensor([4, 6]) ``` """ return reduce(tensor, reduction, scale) def pad_across_processes(self, tensor, dim=0, pad_index=0, pad_first=False): """ Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so they can safely be gathered. Args: tensor (nested list/tuple/dictionary of `torch.Tensor`): The data to gather. dim (`int`, *optional*, defaults to 0): The dimension on which to pad. pad_index (`int`, *optional*, defaults to 0): The value with which to pad. pad_first (`bool`, *optional*, defaults to `False`): Whether to pad at the beginning or the end. Returns: `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`: The padded tensor(s). Example: ```python >>> # Assuming two processes, with the first processes having a tensor of size 1 and the second of size 2 >>> import torch >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> process_tensor = torch.arange(accelerator.process_index + 1).to(accelerator.device) >>> padded_tensor = accelerator.pad_across_processes(process_tensor) >>> padded_tensor.shape torch.Size([2]) ``` """ return pad_across_processes(tensor, dim=dim, pad_index=pad_index, pad_first=pad_first) def unwrap_model(self, model, keep_fp32_wrapper: bool = True): """ Unwraps the `model` from the additional layer possible added by [`~Accelerator.prepare`]. Useful before saving the model. Args: model (`torch.nn.Module`): The model to unwrap. keep_fp32_wrapper (`bool`, *optional*, defaults to `True`): Whether to not remove the mixed precision hook if it was added. Returns: `torch.nn.Module`: The unwrapped model. Example: ```python >>> # Assuming two GPU processes >>> from torch.nn.parallel import DistributedDataParallel >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> model = accelerator.prepare(MyModel()) >>> print(model.__class__.__name__) DistributedDataParallel >>> model = accelerator.unwrap_model(model) >>> print(model.__class__.__name__) MyModel ``` """ return extract_model_from_parallel(model, keep_fp32_wrapper) def wait_for_everyone(self): """ Will stop the execution of the current process until every other process has reached that point (so this does nothing when the script is only run in one process). Useful to do before saving a model. Example: ```python >>> # Assuming two GPU processes >>> import time >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> if accelerator.is_main_process: ... time.sleep(2) >>> else: ... print("I'm waiting for the main process to finish its sleep...") >>> accelerator.wait_for_everyone() >>> # Should print on every process at the same time >>> print("Everyone is here") ``` """ wait_for_everyone() @on_main_process def init_trackers(self, project_name: str, config: dict | None = None, init_kwargs: dict | None = {}): """ Initializes a run for all trackers stored in `self.log_with`, potentially with starting configurations Args: project_name (`str`): The name of the project. All trackers will save their data based on this config (`dict`, *optional*): Optional starting configuration to be logged. 
init_kwargs (`dict`, *optional*): A nested dictionary of kwargs to be passed to a specific tracker's `__init__` function. Should be formatted like so: ```python {"wandb": {"tags": ["tag_a", "tag_b"]}} ``` Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator(log_with="tensorboard") >>> accelerator.init_trackers( ... project_name="my_project", ... config={"learning_rate": 0.001, "batch_size": 32}, ... init_kwargs={"tensorboard": {"flush_secs": 60}}, ... ) ``` """ for tracker in self.log_with: if issubclass(type(tracker), GeneralTracker): # Custom trackers are already initialized self.trackers.append(tracker) else: tracker_init = LOGGER_TYPE_TO_CLASS[str(tracker)] if tracker_init.requires_logging_directory: # We can skip this check since it was done in `__init__` self.trackers.append( tracker_init(project_name, self.logging_dir, **init_kwargs.get(str(tracker), {})) ) else: self.trackers.append(tracker_init(project_name, **init_kwargs.get(str(tracker), {}))) if config is not None: for tracker in self.trackers: tracker.store_init_configuration(config) def get_tracker(self, name: str, unwrap: bool = False): """ Returns a `tracker` from `self.trackers` based on `name` on the main process only. Args: name (`str`): The name of a tracker, corresponding to the `.name` property. unwrap (`bool`): Whether to return the internal tracking mechanism or to return the wrapped tracker instead (recommended). Returns: `GeneralTracker`: The tracker corresponding to `name` if it exists. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator(log_with="tensorboard") >>> accelerator.init_trackers("my_project") >>> tensorboard_tracker = accelerator.get_tracker("tensorboard") ``` """ if len(self.trackers) > 0: for tracker in self.trackers: if tracker.name == name: return tracker.tracker if unwrap else tracker raise ValueError(f"{name} is not an available tracker stored inside the `Accelerator`.") # Handle tracker only made on main process return GeneralTracker(_blank=True) @on_main_process def log(self, values: dict, step: int | None = None, log_kwargs: dict | None = {}): """ Logs `values` to all stored trackers in `self.trackers` on the main process only. Args: values (`dict`): Values should be a dictionary-like object containing only types `int`, `float`, or `str`. step (`int`, *optional*): The run step. If included, the log will be affiliated with this step. log_kwargs (`dict`, *optional*): A nested dictionary of kwargs to be passed to a specific tracker's `log` function. Should be formatted like so: ```python {"wandb": {"tags": ["tag_a", "tag_b"]}} ``` Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator(log_with="tensorboard") >>> accelerator.init_trackers("my_project") >>> accelerator.log({"loss": 0.5, "accuracy": 0.9}) ``` """ for tracker in self.trackers: tracker.log(values, step=step, **log_kwargs.get(tracker.name, {})) @on_main_process def end_training(self): """ Runs any special end training behaviors, such as stopping trackers on the main process only. Should always be called at the end of your script if using experiment tracking. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator(log_with="tensorboard") >>> accelerator.init_trackers("my_project") >>> # Do training >>> accelerator.end_training() ``` """ for tracker in self.trackers: tracker.finish() def save(self, obj, f, safe_serialization=False): """ Save the object passed to disk once per machine. 
Use in place of `torch.save`. Args: obj (`object`): The object to save. f (`str` or `os.PathLike`): Where to save the content of `obj`. safe_serialization (`bool`, *optional*, defaults to `False`): Whether to save `obj` using `safetensors` Note: If `save_on_each_node` was passed in as a `ProjectConfiguration`, will save the object once per node, rather than only once on the main node. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> arr = [0, 1, 2, 3] >>> accelerator.save(arr, "array.pkl") ``` """ save( obj, f, save_on_each_node=self.project_configuration.save_on_each_node, safe_serialization=safe_serialization, ) def save_model( self, model: torch.nn.Module, save_directory: Union[str, os.PathLike], max_shard_size: Union[int, str] = "10GB", safe_serialization: bool = True, ): """ Save a model so that it can be re-loaded using load_checkpoint_in_model Arguments: model: (`torch.nn.Module`): Model to be saved. The model can be wrapped or unwraped. save_directory (`str` or `os.PathLike`): Directory to which to save. Will be created if it doesn't exist. max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`): The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`). <Tip warning={true}> If a single weight of the model is bigger than `max_shard_size`, it will be in its own checkpoint shard which will be bigger than `max_shard_size`. </Tip> safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> model = ... >>> accelerator.save_model(model, save_directory) ``` """ if os.path.isfile(save_directory): logger.error(f"Provided path ({save_directory}) should be a directory, not a file") return os.makedirs(save_directory, exist_ok=True) # get the state_dict of the model if any( [ module._hf_hook.offload for module in model.modules() if hasattr(module, "_hf_hook") and isinstance(module._hf_hook, AlignDevicesHook) ] ): state_dict = get_state_dict_offloaded_model(model) else: if any(param.device == torch.device("meta") for param in model.parameters()): raise RuntimeError("You can't save the model since some parameters are on the meta device.") state_dict = self.get_state_dict(model) if safe_serialization: state_dict = clean_state_dict_for_safetensors(state_dict) weights_name = SAFE_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME # Shard the model if it is too big. shards, index = shard_checkpoint(state_dict, max_shard_size=max_shard_size, weights_name=weights_name) # Clean the folder from a previous save for filename in os.listdir(save_directory): full_filename = os.path.join(save_directory, filename) # If we have a shard file that is not going to be replaced, we delete it, but only from the main process # in distributed settings to avoid race conditions. weights_no_suffix = weights_name.replace(".bin", "") # make sure that file to be deleted matches format of sharded file, e.g. 
pytorch_model-00001-of-00005 filename_no_suffix = filename.replace(".bin", "") reg = re.compile(r"(.*?)-\d{5}-of-\d{5}") if ( filename.startswith(weights_no_suffix) and os.path.isfile(full_filename) and filename not in shards.keys() and reg.fullmatch(filename_no_suffix) is not None and PartialState().is_main_process ): os.remove(full_filename) # Save the model for shard_file, shard in shards.items(): self.save(shard, os.path.join(save_directory, shard_file), safe_serialization=safe_serialization) if index is None: path_to_weights = os.path.join(save_directory, WEIGHTS_NAME) logger.info(f"Model weights saved in {path_to_weights}") else: save_index_file = SAFE_WEIGHTS_INDEX_NAME if safe_serialization else WEIGHTS_INDEX_NAME save_index_file = os.path.join(save_directory, save_index_file) # Save the index as well with open(save_index_file, "w", encoding="utf-8") as f: content = json.dumps(index, indent=2, sort_keys=True) + "\n" f.write(content) logger.info( f"The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be " f"split in {len(shards)} checkpoint shards. You can find where each parameters has been saved in the " f"index located at {save_index_file}." ) def register_save_state_pre_hook(self, hook: Callable[..., None]) -> hooks.RemovableHandle: """ Registers a pre hook to be run before `save_checkpoint` is called in [`Accelerator.save_state`]. Args: hook (`Callable`): A function to be called in [`Accelerator.save_state`] before `save_checkpoint`. The hook should have the following signature: `hook(models: list[torch.nn.Module], weights: list[dict[str, torch.Tensor]], input_dir: str) -> None` The `models` argument are the models as saved in the accelerator state under `accelerator._models`, `weigths` argument are the state dicts of the `models`, and the `input_dir` argument is the `input_dir` argument passed to [`Accelerator.load_state`]. <Tip> Should only be used in conjunction with [`Accelerator.register_load_state_pre_hook`]. Can be useful to save configurations in addition to model weights. Can also be used to overwrite model saving with a customized method. In this case, make sure to remove already loaded weights from the weights list. </Tip> Returns: `torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling `handle.remove()` """ handle = hooks.RemovableHandle(self._save_model_state_pre_hook) self._save_model_state_pre_hook[handle.id] = hook return handle def save_state(self, output_dir: str = None, safe_serialization: bool = True, **save_model_func_kwargs): """ Saves the current states of the model, optimizer, scaler, RNG generators, and registered objects to a folder. If a `ProjectConfiguration` was passed to the `Accelerator` object with `automatic_checkpoint_naming` enabled then checkpoints will be saved to `self.project_dir/checkpoints`. If the number of current saves is greater than `total_limit` then the oldest save is deleted. Each checkpoint is saved in seperate folders named `checkpoint_<iteration>`. Otherwise they are just saved to `output_dir`. <Tip> Should only be used when wanting to save a checkpoint during training and restoring the state in the same environment. </Tip> Args: output_dir (`str` or `os.PathLike`): The name of the folder to save all relevant weights and states. safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). 
save_model_func_kwargs (`dict`, *optional*): Additional keyword arguments for saving model which can be passed to the underlying save function, such as optional arguments for DeepSpeed's `save_checkpoint` function. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> model, optimizer, lr_scheduler = ... >>> model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler) >>> accelerator.save_state(output_dir="my_checkpoint") ``` """ if self.project_configuration.automatic_checkpoint_naming: output_dir = os.path.join(self.project_dir, "checkpoints") os.makedirs(output_dir, exist_ok=True) if self.project_configuration.automatic_checkpoint_naming: folders = [os.path.join(output_dir, folder) for folder in os.listdir(output_dir)] if ( self.project_configuration.total_limit is not None and (len(folders) + 1 > self.project_configuration.total_limit) and self.is_main_process ): def _inner(folder): return list(map(int, re.findall(r"[\/]?([0-9]+)(?=[^\/]*$)", folder)))[0] folders.sort(key=_inner) logger.warning( f"Deleting {len(folders) + 1 - self.project_configuration.total_limit} checkpoints to make room for new checkpoint." ) for folder in folders[: len(folders) + 1 - self.project_configuration.total_limit]: shutil.rmtree(folder) output_dir = os.path.join(output_dir, f"checkpoint_{self.save_iteration}") if os.path.exists(output_dir): raise ValueError( f"Checkpoint directory {output_dir} ({self.save_iteration}) already exists. Please manually override `self.save_iteration` with what iteration to start with." ) self.wait_for_everyone() os.makedirs(output_dir, exist_ok=True) logger.info(f"Saving current state to {output_dir}") if self.distributed_type == DistributedType.XLA: # Finish running the previous step before checkpointing xm.mark_step() # Save the models taking care of FSDP and DeepSpeed nuances weights = [] for i, model in enumerate(self._models): if self.distributed_type == DistributedType.FSDP: logger.info("Saving FSDP model") save_fsdp_model(self.state.fsdp_plugin, self, model, output_dir, i) logger.info(f"FSDP Model saved to output dir {output_dir}") elif self.distributed_type == DistributedType.DEEPSPEED: logger.info("Saving DeepSpeed Model and Optimizer") ckpt_id = f"{MODEL_NAME}" if i == 0 else f"{MODEL_NAME}_{i}" model.save_checkpoint(output_dir, ckpt_id, **save_model_func_kwargs) logger.info(f"DeepSpeed Model and Optimizer saved to output dir {os.path.join(output_dir, ckpt_id)}") elif self.distributed_type == DistributedType.MEGATRON_LM: logger.info("Saving Megatron-LM Model, Optimizer and Scheduler") model.save_checkpoint(output_dir) logger.info(f"Megatron-LM Model , Optimizer and Scheduler saved to output dir {output_dir}") else: weights.append(self.get_state_dict(model, unwrap=False)) # Save the optimizers taking care of FSDP and DeepSpeed nuances optimizers = [] if self.distributed_type == DistributedType.FSDP: for i, opt in enumerate(self._optimizers): logger.info("Saving FSDP Optimizer") save_fsdp_optimizer(self.state.fsdp_plugin, self, opt, self._models[i], output_dir, i) logger.info(f"FSDP Optimizer saved to output dir {output_dir}") elif self.distributed_type not in [DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]: optimizers = self._optimizers # Save the lr schedulers taking care of DeepSpeed nuances schedulers = [] if self.distributed_type == DistributedType.DEEPSPEED: for i, scheduler in enumerate(self._schedulers): if isinstance(scheduler, DeepSpeedSchedulerWrapper): continue 
schedulers.append(scheduler) elif self.distributed_type not in [DistributedType.MEGATRON_LM]: schedulers = self._schedulers # Save the samplers of the dataloaders dataloaders = self._dataloaders # Call model loading hooks that might have been registered with # accelerator.register_model_state_hook for hook in self._save_model_state_pre_hook.values(): hook(self._models, weights, output_dir) save_location = save_accelerator_state( output_dir, weights, optimizers, schedulers, dataloaders, self.state.process_index, self.scaler, save_on_each_node=self.project_configuration.save_on_each_node, safe_serialization=safe_serialization, ) for i, obj in enumerate(self._custom_objects): save_custom_state(obj, output_dir, i, save_on_each_node=self.project_configuration.save_on_each_node) self.project_configuration.iteration += 1 return save_location def register_load_state_pre_hook(self, hook: Callable[..., None]) -> hooks.RemovableHandle: """ Registers a pre hook to be run before [`load_checkpoint`] is called in [`Accelerator.load_state`]. Args: hook (`Callable`): A function to be called in [`Accelerator.load_state`] before `load_checkpoint`. The hook should have the following signature: `hook(models: list[torch.nn.Module], input_dir: str) -> None` The `models` argument are the models as saved in the accelerator state under `accelerator._models`, and the `input_dir` argument is the `input_dir` argument passed to [`Accelerator.load_state`]. <Tip> Should only be used in conjunction with [`Accelerator.register_save_state_pre_hook`]. Can be useful to load configurations in addition to model weights. Can also be used to overwrite model loading with a customized method. In this case, make sure to remove already loaded models from the models list. </Tip> Returns: `torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling `handle.remove()` """ handle = hooks.RemovableHandle(self._load_model_state_pre_hook) self._load_model_state_pre_hook[handle.id] = hook return handle def load_state(self, input_dir: str = None, **load_model_func_kwargs): """ Loads the current states of the model, optimizer, scaler, RNG generators, and registered objects. <Tip> Should only be used in conjunction with [`Accelerator.save_state`]. If a file is not registered for checkpointing, it will not be loaded if stored in the directory. </Tip> Args: input_dir (`str` or `os.PathLike`): The name of the folder all relevant weights and states were saved in. Can be `None` if `automatic_checkpoint_naming` is used, and will pick up from the latest checkpoint. load_model_func_kwargs (`dict`, *optional*): Additional keyword arguments for loading model which can be passed to the underlying load function, such as optional arguments for DeepSpeed's `load_checkpoint` function or a `map_location` to load the model and optimizer on. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> model, optimizer, lr_scheduler = ... 
>>> model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler) >>> accelerator.load_state("my_checkpoint") ``` """ if input_dir is not None: # Check if folder exists input_dir = os.path.expanduser(input_dir) if not os.path.isdir(input_dir): raise ValueError(f"Tried to find {input_dir} but folder does not exist") elif self.project_configuration.automatic_checkpoint_naming: # Pick up from automatic checkpoint naming input_dir = os.path.join(self.project_dir, "checkpoints") folders = [os.path.join(input_dir, folder) for folder in os.listdir(input_dir)] def _inner(folder): return list(map(int, re.findall(r"[\/]?([0-9]+)(?=[^\/]*$)", folder)))[0] folders.sort(key=_inner) input_dir = folders[-1] else: raise ValueError("No input_dir provided and automatic checkpoint naming is disabled.") logger.info(f"Loading states from {input_dir}") # Load the models taking care of FSDP and DeepSpeed nuances models = [] for i, model in enumerate(self._models): if self.distributed_type == DistributedType.FSDP: logger.info("Loading FSDP model") load_fsdp_model(self.state.fsdp_plugin, self, model, input_dir, i) logger.info(f"FSDP Model loaded from input dir {input_dir}") elif self.distributed_type == DistributedType.DEEPSPEED: logger.info("Loading DeepSpeed Model and Optimizer") ckpt_id = f"{MODEL_NAME}" if i == 0 else f"{MODEL_NAME}_{i}" model.load_checkpoint(input_dir, ckpt_id, **load_model_func_kwargs) logger.info(f"DeepSpeed Model and Optimizer loaded from input dir {os.path.join(input_dir, ckpt_id)}") elif self.distributed_type == DistributedType.MEGATRON_LM: logger.info("Loading Megatron-LM Model, Optimizer and Scheduler") model.load_checkpoint(input_dir) logger.info(f"Megatron-LM Model , Optimizer and Scheduler loaded from input dir {input_dir}") else: models.append(model) # Load the optimizers taking care of FSDP and DeepSpeed nuances optimizers = [] if self.distributed_type == DistributedType.FSDP: for i, opt in enumerate(self._optimizers): logger.info("Loading FSDP Optimizer") load_fsdp_optimizer(self.state.fsdp_plugin, self, opt, self._models[i], input_dir, i) logger.info(f"FSDP Optimizer loaded from input dir {input_dir}") elif self.distributed_type not in [DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]: optimizers = self._optimizers # Load the lr schedulers taking care of DeepSpeed nuances schedulers = [] if self.distributed_type == DistributedType.DEEPSPEED: for i, scheduler in enumerate(self._schedulers): if isinstance(scheduler, DeepSpeedSchedulerWrapper): continue schedulers.append(scheduler) elif self.distributed_type not in [DistributedType.MEGATRON_LM]: schedulers = self._schedulers dataloaders = self._dataloaders # Call model loading hooks that might have been registered with # accelerator.register_model_state_hook for hook in self._load_model_state_pre_hook.values(): hook(models, input_dir) map_location = load_model_func_kwargs.pop("map_location", None) if map_location is None: if self.num_processes > 1 and self.distributed_type in ( DistributedType.MULTI_GPU, DistributedType.MULTI_MLU, DistributedType.MULTI_NPU, ): map_location = "on_device" else: map_location = "cpu" load_accelerator_state( input_dir, models, optimizers, schedulers, dataloaders, self.state.process_index, self.scaler, map_location, **load_model_func_kwargs, ) custom_checkpoints = [ f for f in os.listdir(input_dir) if re.search(r"^custom_checkpoint_\d+\.pkl$", f) is not None ] if len(custom_checkpoints) != len(self._custom_objects): err = "Number of custom checkpoints in folder {input_dir} 
does not match the number of registered objects:" err += f"\n\tFound checkpoints: {len(custom_checkpoints)}" err += f"\n\tRegistered objects: {len(self._custom_objects)}\n" err += "Please make sure to only load checkpoints from folders that were created with the same set of registered objects," err += "or avoid using `custom_checkpoint` in the filename for files in that same directory and load them in manually." raise RuntimeError(err) else: logger.info(f"Loading in {len(custom_checkpoints)} custom states") for index, obj in enumerate(self._custom_objects): load_custom_state(obj, input_dir, index) def free_memory(self): """ Will release all references to the internal objects stored and call the garbage collector. You should call this method between two trainings with different models/optimizers. Also will reset `Accelerator.step` to 0. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> model, optimizer, scheduler = ... >>> model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler) >>> accelerator.free_memory() >>> del model, optimizer, scheduler ``` """ self._schedulers = [] self._optimizers = [] self._models = [] self._dataloaders = [] self.deepspeed_engine_wrapped = None self.step = 0 release_memory() def clear(self): """ Alias for [`Accelerate.free_memory`], releases all references to the internal objects stored and call the garbage collector. You should call this method between two trainings with different models/optimizers. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> model, optimizer, scheduler = ... >>> model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler) >>> accelerator.free_memory() >>> del model, optimizer, scheduler ``` """ self.free_memory() def _get_named_parameters(self, *args): named_parameters = {} for obj in args: if isinstance(obj, torch.nn.Module): obj = extract_model_from_parallel(obj) named_parameters.update({n: p for n, p in obj.named_parameters()}) return named_parameters def _get_devices(self, *args): model_device = None optimizer_device = None for obj in args: # Loop through model parameters and stop at the first once we have its device. if isinstance(obj, torch.nn.Module): for param in obj.parameters(): model_device = param.device break # Loop through optimizer parameters groups and stop at the first once we have its device. if isinstance(obj, torch.optim.Optimizer): for param_group in obj.param_groups: if len(param_group["params"]) > 0: optimizer_device = param_group["params"][0].device break return (model_device, optimizer_device) def get_state_dict(self, model, unwrap=True): """ Returns the state dictionary of a model sent through [`Accelerator.prepare`] potentially without full precision. Args: model (`torch.nn.Module`): A PyTorch model sent through [`Accelerator.prepare`] unwrap (`bool`, *optional*, defaults to `True`): Whether to return the original underlying state_dict of `model` or to return the wrapped state_dict Returns: `dict`: The state dictionary of the model potentially without full precision. 
Example: ```python >>> import torch >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> net = torch.nn.Linear(2, 2) >>> net = accelerator.prepare(net) >>> state_dict = accelerator.get_state_dict(net) ``` """ if self.distributed_type == DistributedType.DEEPSPEED: if self.deepspeed_config["zero_optimization"]["stage"] == 3: if model.zero_gather_16bit_weights_on_model_save(): state_dict = model._zero3_consolidated_16bit_state_dict() else: raise ValueError( "Cannot get 16bit model weights because `stage3_gather_16bit_weights_on_model_save` in DeepSpeed config is False. " "To save the model weights in 16bit, set `stage3_gather_16bit_weights_on_model_save` to True in DeepSpeed config file or " "set `zero3_save_16bit_model` to True when using `accelerate config`. " "To save the full checkpoint, run `model.save_checkpoint(save_dir)` and use `zero_to_fp32.py` to recover weights." ) else: from deepspeed.checkpoint.utils import clone_tensors_for_torch_save state_dict = clone_tensors_for_torch_save(self.unwrap_model(model).state_dict()) elif self.distributed_type == DistributedType.FSDP: from torch.distributed.fsdp import FullStateDictConfig, StateDictType from torch.distributed.fsdp import FullyShardedDataParallel as FSDP full_state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True) with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, full_state_dict_config): state_dict = model.state_dict() else: if unwrap: model = self.unwrap_model(model) state_dict = model.state_dict() return state_dict def register_for_checkpointing(self, *objects): """ Makes note of `objects` and will save or load them in during `save_state` or `load_state`. These should be utilized when the state is being loaded or saved in the same script. It is not designed to be used in different scripts. <Tip> Every `object` must have a `load_state_dict` and `state_dict` function to be stored. </Tip> Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> # Assume `CustomObject` has a `state_dict` and `load_state_dict` function. >>> obj = CustomObject() >>> accelerator.register_for_checkpointing(obj) >>> accelerator.save_state("checkpoint.pt") ``` """ invalid_objects = [] for obj in objects: if not hasattr(obj, "state_dict") or not hasattr(obj, "load_state_dict"): invalid_objects.append(obj) if len(invalid_objects) > 0: err = "All `objects` must include a `state_dict` and `load_state_dict` function to be stored. The following inputs are invalid:" for index, obj in enumerate(invalid_objects): err += f"\n\t- Item at index {index}, `{get_pretty_name(obj)}`" raise ValueError(err) self._custom_objects.extend(objects) @contextmanager def autocast(self, cache_enabled: bool = False, autocast_handler: AutocastKwargs = None): """ Will apply automatic mixed-precision inside the block inside this context manager, if it is enabled. Nothing different will happen otherwise. A different `autocast_handler` can be passed in to override the one set in the `Accelerator` object. This is useful in blocks under `autocast` where you want to revert to fp32. Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator(mixed_precision="fp16") >>> with accelerator.autocast(): ... train() ``` """ if cache_enabled: warnings.warn( "Passing `cache_enabled=True` to `accelerator.autocast` is deprecated and will be removed in v0.23.0. 
" "Please use the `AutocastKwargs` class instead and pass it to the `Accelerator` as a `kwarg_handler`.", FutureWarning, ) if self.autocast_handler is not None: self.autocast_handler.cache_enabled = True else: self.autocast_handler = AutocastKwargs(cache_enabled=True) if autocast_handler is None: autocast_handler = self.autocast_handler autocast_context = get_mixed_precision_context_manager(self.native_amp, autocast_handler) autocast_context.__enter__() # TODO: should the `yield` be in a try/finally block? yield autocast_context.__exit__(*sys.exc_info()) @property def optimizer_step_was_skipped(self): """ Whether or not the optimizer update was skipped (because of gradient overflow in mixed precision), in which case the learning rate should not be changed. """ for optimizer in self._optimizers: if optimizer.step_was_skipped: return True return False def skip_first_batches(self, dataloader, num_batches: int = 0): """ Creates a new `torch.utils.data.DataLoader` that will efficiently skip the first `num_batches`. Args: dataloader (`torch.utils.data.DataLoader`): The data loader in which to skip batches. num_batches (`int`, *optional*, defaults to 0): The number of batches to skip Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler) >>> skipped_dataloader = accelerator.skip_first_batches(dataloader, num_batches=2) >>> # for the first epoch only >>> for input, target in skipped_dataloader: ... optimizer.zero_grad() ... output = model(input) ... loss = loss_func(output, target) ... accelerator.backward(loss) ... optimizer.step() >>> # subsequent epochs >>> for input, target in dataloader: ... optimizer.zero_grad() ... ... ``` """ return skip_first_batches(dataloader, num_batches=num_batches) def __deepcopy__(self, memo): logger.info("Deep copying the `Accelerator` object, note that this will point to the same original object.") return self def verify_device_map(self, model: torch.nn.Module) -> bool: """ Verifies that `model` has not been prepared with big model inference with a device-map resembling `auto`. """ # Checks if any of the child modules has the attribute `hf_device_map` and this map has more than one entry. for m in model.modules(): if hasattr(m, "hf_device_map") and len(m.hf_device_map) > 1: return True return False
accelerate/src/accelerate/accelerator.py/0
{ "file_path": "accelerate/src/accelerate/accelerator.py", "repo_id": "accelerate", "token_count": 63109 }
4
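The `accelerator.py` entry above documents several training-loop helpers (`prepare`, `backward`, `clip_grad_norm_`, `set_trigger`/`check_trigger`, `save_state`/`load_state`). The following is a minimal sketch of how these calls are typically combined; the toy model, random data, and hyper-parameters are illustrative assumptions and are not taken from the file.

```python
# Illustrative sketch only: the Linear model, random data, and learning rate
# are assumptions, not part of the accelerate source above.
import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator

accelerator = Accelerator()

dataset = TensorDataset(torch.randn(64, 2), torch.randn(64, 1))
dataloader = DataLoader(dataset, batch_size=8, shuffle=True)
model = torch.nn.Linear(2, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for inputs, targets in dataloader:
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(inputs), targets)
    accelerator.backward(loss)  # used in place of loss.backward()
    if torch.isnan(loss):
        accelerator.set_trigger()  # flag this process; others see it via check_trigger()
    if accelerator.sync_gradients:
        accelerator.clip_grad_norm_(model.parameters(), max_norm=1.0)
    optimizer.step()
    if accelerator.check_trigger():
        break

# Checkpoint the full training state and restore it from the same folder.
accelerator.save_state(output_dir="my_checkpoint")
accelerator.load_state("my_checkpoint")
```

On a single CPU process this runs as-is; under `accelerate launch` the same code handles device placement, gradient scaling, and cross-process triggering without modification.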
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import warnings import torch from .state import AcceleratorState, GradientState from .utils import DistributedType, honor_type, is_torch_xla_available if is_torch_xla_available(): import torch_xla.core.xla_model as xm def move_to_device(state, device): if isinstance(state, (list, tuple)): return honor_type(state, (move_to_device(t, device) for t in state)) elif isinstance(state, dict): return type(state)({k: move_to_device(v, device) for k, v in state.items()}) elif isinstance(state, torch.Tensor): return state.to(device) return state class AcceleratedOptimizer(torch.optim.Optimizer): """ Internal wrapper around a torch optimizer. Conditionally will perform `step` and `zero_grad` if gradients should be synchronized when performing gradient accumulation. Args: optimizer (`torch.optim.optimizer.Optimizer`): The optimizer to wrap. device_placement (`bool`, *optional*, defaults to `True`): Whether or not the optimizer should handle device placement. If so, it will place the state dictionary of `optimizer` on the right device. scaler (`torch.cuda.amp.grad_scaler.GradScaler`, *optional*): The scaler to use in the step function if training with mixed precision. 
""" def __init__(self, optimizer, device_placement=True, scaler=None): self.optimizer = optimizer self.scaler = scaler self.accelerator_state = AcceleratorState() self.gradient_state = GradientState() self.device_placement = device_placement self._is_overflow = False if self.scaler is not None: self._accelerate_step_called = False self._optimizer_original_step_method = self.optimizer.step self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step) # Handle device placement if device_placement: state_dict = self.optimizer.state_dict() if self.accelerator_state.distributed_type == DistributedType.XLA: xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device) else: state_dict = move_to_device(state_dict, self.accelerator_state.device) self.optimizer.load_state_dict(state_dict) @property def state(self): return self.optimizer.state @state.setter def state(self, state): self.optimizer.state = state @property def param_groups(self): return self.optimizer.param_groups @param_groups.setter def param_groups(self, param_groups): self.optimizer.param_groups = param_groups @property def defaults(self): return self.optimizer.defaults @defaults.setter def defaults(self, defaults): self.optimizer.defaults = defaults def add_param_group(self, param_group): self.optimizer.add_param_group(param_group) def load_state_dict(self, state_dict): if self.accelerator_state.distributed_type == DistributedType.XLA and self.device_placement: xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device) self.optimizer.load_state_dict(state_dict) def state_dict(self): return self.optimizer.state_dict() def zero_grad(self, set_to_none=None): if self.gradient_state.sync_gradients: accept_arg = "set_to_none" in inspect.signature(self.optimizer.zero_grad).parameters if accept_arg: if set_to_none is None: set_to_none = True self.optimizer.zero_grad(set_to_none=set_to_none) else: if set_to_none is not None: raise ValueError("`set_to_none` for Optimizer.zero_grad` is not supported by this optimizer.") self.optimizer.zero_grad() def step(self, closure=None): if ( not self.gradient_state.is_xla_gradients_synced and self.accelerator_state.distributed_type == DistributedType.XLA ): gradients = xm._fetch_gradients(self.optimizer) xm.all_reduce("sum", gradients, scale=1.0 / xm.xrt_world_size()) self.gradient_state.is_xla_gradients_synced = True if self.gradient_state.sync_gradients: if self.scaler is not None: self.optimizer.step = self._optimizer_patched_step_method self.scaler.step(self.optimizer, closure) self.scaler.update() if not self._accelerate_step_called: # If the optimizer step was skipped, gradient overflow was detected. 
self._is_overflow = True else: self._is_overflow = False # Reset the step method to the original one self.optimizer.step = self._optimizer_original_step_method # Reset the indicator self._accelerate_step_called = False else: self.optimizer.step(closure) if self.accelerator_state.distributed_type == DistributedType.XLA: self.gradient_state.is_xla_gradients_synced = False def _switch_parameters(self, parameters_map): for param_group in self.optimizer.param_groups: param_group["params"] = [parameters_map.get(p, p) for p in param_group["params"]] @property def is_overflow(self): """Whether or not the optimizer step was done, or skipped because of gradient overflow.""" warnings.warn( "The `is_overflow` property is deprecated and will be removed in version 1.0 of Accelerate use " "`optimizer.step_was_skipped` instead.", FutureWarning, ) return self._is_overflow @property def step_was_skipped(self): """Whether or not the optimizer step was skipped.""" return self._is_overflow def __getstate__(self): _ignored_keys = [ "_accelerate_step_called", "_optimizer_original_step_method", "_optimizer_patched_step_method", ] return {k: v for k, v in self.__dict__.items() if k not in _ignored_keys} def __setstate__(self, state): self.__dict__.update(state) if self.scaler is not None: self._accelerate_step_called = False self._optimizer_original_step_method = self.optimizer.step self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step) def patch_optimizer_step(accelerated_optimizer: AcceleratedOptimizer, method): def patched_step(*args, **kwargs): accelerated_optimizer._accelerate_step_called = True return method(*args, **kwargs) return patched_step
accelerate/src/accelerate/optimizer.py/0
{ "file_path": "accelerate/src/accelerate/optimizer.py", "repo_id": "accelerate", "token_count": 3103 }
5
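The `optimizer.py` entry above defines `AcceleratedOptimizer`, which wraps a regular optimizer so that `step`/`zero_grad` respect gradient synchronization and mixed-precision scaling. Below is a hedged sketch of how the wrapper is normally obtained and inspected, assuming a single-process CPU run; the direct import is shown only to make the wrapping explicit, since in practice the wrapper is created for you by `Accelerator.prepare`.

```python
# Sketch under the assumption of a single-process CPU run; the Linear model
# and SGD settings are illustrative only.
import torch

from accelerate import Accelerator
from accelerate.optimizer import AcceleratedOptimizer

accelerator = Accelerator()
model = torch.nn.Linear(2, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# prepare() hands back the same optimizer wrapped in AcceleratedOptimizer.
model, optimizer = accelerator.prepare(model, optimizer)
assert isinstance(optimizer, AcceleratedOptimizer)

loss = model(torch.randn(4, 2)).sum()
accelerator.backward(loss)
optimizer.step()       # delegated to the inner optimizer (and the scaler, if any)
optimizer.zero_grad()  # automatically skipped while gradients are being accumulated

print(optimizer.step_was_skipped)  # True only if a mixed-precision step overflowed
```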
#!/usr/bin/env python # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import contextlib import io import math import time from copy import deepcopy from pathlib import Path import numpy as np import torch from torch.utils.data import DataLoader, Dataset from accelerate import Accelerator from accelerate.data_loader import SeedableRandomSampler, prepare_data_loader from accelerate.state import AcceleratorState from accelerate.test_utils import RegressionDataset, are_the_same_tensors from accelerate.utils import ( DataLoaderConfiguration, DistributedType, gather, is_bf16_available, is_datasets_available, is_ipex_available, is_mlu_available, is_npu_available, is_xpu_available, set_seed, synchronize_rng_states, ) # TODO: remove RegressionModel4XPU once ccl support empty buffer in broadcasting. if is_xpu_available(): from accelerate.test_utils import RegressionModel4XPU as RegressionModel else: from accelerate.test_utils import RegressionModel def generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler=False): "Creates a dataloader that can also use the `SeedableRandomSampler`" if use_seedable_sampler: # The SeedableRandomSampler is needed during distributed setups # for full reproducability across processes with the `DataLoader` sampler = SeedableRandomSampler( generator=generator, data_source=train_set, num_samples=len(train_set), ) return DataLoader(train_set, batch_size=batch_size, sampler=sampler) else: return DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator) def print_main(state): print(f"Printing from the main process {state.process_index}") def print_local_main(state): print(f"Printing from the local main process {state.local_process_index}") def print_last(state): print(f"Printing from the last process {state.process_index}") def print_on(state, process_idx): print(f"Printing from process {process_idx}: {state.process_index}") def process_execution_check(): accelerator = Accelerator() num_processes = accelerator.num_processes # Test main_process_first context manager path = Path("check_main_process_first.txt") with accelerator.main_process_first(): if accelerator.is_main_process: time.sleep(0.1) # ensure main process takes longest with open(path, "a+") as f: f.write("Currently in the main process\n") else: with open(path, "a+") as f: f.write("Now on another process\n") accelerator.wait_for_everyone() if accelerator.is_main_process: with open(path) as f: text = "".join(f.readlines()) try: assert text.startswith("Currently in the main process\n"), "Main process was not first" if num_processes > 1: assert text.endswith("Now on another process\n"), "Main process was not first" assert ( text.count("Now on another process\n") == accelerator.num_processes - 1 ), f"Only wrote to file {text.count('Now on another process') + 1} times, not {accelerator.num_processes}" except AssertionError: path.unlink() raise if accelerator.is_main_process and path.exists(): path.unlink() 
accelerator.wait_for_everyone() # Test the decorators f = io.StringIO() with contextlib.redirect_stdout(f): accelerator.on_main_process(print_main)(accelerator.state) result = f.getvalue().rstrip() if accelerator.is_main_process: assert result == "Printing from the main process 0", f"{result} != Printing from the main process 0" else: assert f.getvalue().rstrip() == "", f'{result} != ""' f.truncate(0) f.seek(0) with contextlib.redirect_stdout(f): accelerator.on_local_main_process(print_local_main)(accelerator.state) if accelerator.is_local_main_process: assert f.getvalue().rstrip() == "Printing from the local main process 0" else: assert f.getvalue().rstrip() == "" f.truncate(0) f.seek(0) with contextlib.redirect_stdout(f): accelerator.on_last_process(print_last)(accelerator.state) if accelerator.is_last_process: assert f.getvalue().rstrip() == f"Printing from the last process {accelerator.state.num_processes - 1}" else: assert f.getvalue().rstrip() == "" f.truncate(0) f.seek(0) for process_idx in range(num_processes): with contextlib.redirect_stdout(f): accelerator.on_process(print_on, process_index=process_idx)(accelerator.state, process_idx) if accelerator.process_index == process_idx: assert f.getvalue().rstrip() == f"Printing from process {process_idx}: {accelerator.process_index}" else: assert f.getvalue().rstrip() == "" f.truncate(0) f.seek(0) def init_state_check(): # Test we can instantiate this twice in a row. state = AcceleratorState() if state.local_process_index == 0: print("Testing, testing. 1, 2, 3.") print(state) def rng_sync_check(): state = AcceleratorState() synchronize_rng_states(["torch"]) assert are_the_same_tensors(torch.get_rng_state()), "RNG states improperly synchronized on CPU." if state.distributed_type == DistributedType.MULTI_GPU: synchronize_rng_states(["cuda"]) assert are_the_same_tensors(torch.cuda.get_rng_state()), "RNG states improperly synchronized on GPU." elif state.distributed_type == DistributedType.MULTI_XPU: synchronize_rng_states(["xpu"]) assert are_the_same_tensors(torch.xpu.get_rng_state()), "RNG states improperly synchronized on XPU." generator = torch.Generator() synchronize_rng_states(["generator"], generator=generator) assert are_the_same_tensors(generator.get_state()), "RNG states improperly synchronized in generator." if state.local_process_index == 0: print("All rng are properly synched.") def dl_preparation_check(): state = AcceleratorState() length = 32 * state.num_processes dl = DataLoader(range(length), batch_size=8) dl = prepare_data_loader(dl, state.device, state.num_processes, state.process_index, put_on_device=True) result = [] for batch in dl: result.append(gather(batch)) result = torch.cat(result) print(state.process_index, result, type(dl)) assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result." dl = DataLoader(range(length), batch_size=8) dl = prepare_data_loader( dl, state.device, state.num_processes, state.process_index, put_on_device=True, split_batches=True, ) result = [] for batch in dl: result.append(gather(batch)) result = torch.cat(result) assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result." 
if state.process_index == 0: print("Non-shuffled dataloader passing.") dl = DataLoader(range(length), batch_size=8, shuffle=True) dl = prepare_data_loader(dl, state.device, state.num_processes, state.process_index, put_on_device=True) result = [] for batch in dl: result.append(gather(batch)) result = torch.cat(result).tolist() result.sort() assert result == list(range(length)), "Wrong shuffled dataloader result." dl = DataLoader(range(length), batch_size=8, shuffle=True) dl = prepare_data_loader( dl, state.device, state.num_processes, state.process_index, put_on_device=True, split_batches=True, ) result = [] for batch in dl: result.append(gather(batch)) result = torch.cat(result).tolist() result.sort() assert result == list(range(length)), "Wrong shuffled dataloader result." if state.local_process_index == 0: print("Shuffled dataloader passing.") def central_dl_preparation_check(): state = AcceleratorState() length = 32 * state.num_processes dl = DataLoader(range(length), batch_size=8) dl = prepare_data_loader( dl, state.device, state.num_processes, state.process_index, put_on_device=True, dispatch_batches=True ) result = [] for batch in dl: result.append(gather(batch)) result = torch.cat(result) assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result." dl = DataLoader(range(length), batch_size=8) dl = prepare_data_loader( dl, state.device, state.num_processes, state.process_index, put_on_device=True, split_batches=True, dispatch_batches=True, ) result = [] for batch in dl: result.append(gather(batch)) result = torch.cat(result) assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result." if state.process_index == 0: print("Non-shuffled central dataloader passing.") dl = DataLoader(range(length), batch_size=8, shuffle=True) dl = prepare_data_loader( dl, state.device, state.num_processes, state.process_index, put_on_device=True, dispatch_batches=True ) result = [] for batch in dl: result.append(gather(batch)) result = torch.cat(result).tolist() result.sort() assert result == list(range(length)), "Wrong shuffled dataloader result." dl = DataLoader(range(length), batch_size=8, shuffle=True) dl = prepare_data_loader( dl, state.device, state.num_processes, state.process_index, put_on_device=True, split_batches=True, dispatch_batches=True, ) result = [] for batch in dl: result.append(gather(batch)) result = torch.cat(result).tolist() result.sort() assert result == list(range(length)), "Wrong shuffled dataloader result." 
if state.local_process_index == 0: print("Shuffled central dataloader passing.") def custom_sampler_check(): state = AcceleratorState() class CustomDataset(Dataset): def __init__(self, data): self.data = data def __len__(self): return len(self.data) def __getitem__(self, index): return self.data[index] class CustomBatchSampler: def __init__(self, dataset_length: int, batch_size: int, shuffle: bool = True): self.batch_size = batch_size self.data_index = np.arange(dataset_length) self.shuffle = shuffle def __iter__(self): num_batches = len(self) if self.shuffle: index = np.random.permutation(self.data_index) else: index = self.data_index output = np.array_split(index, num_batches) yield from output def __len__(self): return math.ceil(len(self.data_index) / self.batch_size) dataset = CustomDataset(range(32 * state.num_processes)) sampler = CustomBatchSampler(len(dataset), batch_size=8) dl = DataLoader(dataset, batch_sampler=sampler) dl = prepare_data_loader(dl, state.device, state.num_processes, state.process_index) # We need just ensure that `dl.batch_sampler` (or `dl.batch_sampler.batch_sampler` is indeed the old batch sampler if hasattr(dl.batch_sampler, "batch_sampler"): assert isinstance( dl.batch_sampler.batch_sampler, CustomBatchSampler ), "Custom sampler was changed after calling `prepare_data_loader`" else: assert isinstance( dl.batch_sampler, CustomBatchSampler ), "Custom sampler was changed after calling `prepare_data_loader`" def check_seedable_sampler(): # Set seed set_seed(42) train_set = RegressionDataset(length=10, seed=42) train_dl = DataLoader(train_set, batch_size=2, shuffle=True) config = DataLoaderConfiguration(use_seedable_sampler=True) accelerator = Accelerator(dataloader_config=config) train_dl = accelerator.prepare(train_dl) original_items = [] for _ in range(3): for batch in train_dl: original_items.append(batch["x"]) original_items = torch.cat(original_items) # Set seed again and the epoch set_seed(42) train_dl.set_epoch(0) new_items = [] for _ in range(3): for batch in train_dl: new_items.append(batch["x"]) new_items = torch.cat(new_items) assert torch.allclose(original_items, new_items), "Did not obtain the same items with the same seed and epoch." def mock_training(length, batch_size, generator, use_seedable_sampler=False): set_seed(42) generator.manual_seed(42) train_set = RegressionDataset(length=length, seed=42) train_dl = generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler) model = RegressionModel() optimizer = torch.optim.SGD(model.parameters(), lr=0.1) for epoch in range(3): for batch in train_dl: model.zero_grad() output = model(batch["x"]) loss = torch.nn.functional.mse_loss(output, batch["y"]) loss.backward() optimizer.step() return train_set, model def training_check(use_seedable_sampler=False): state = AcceleratorState() generator = torch.Generator() batch_size = 8 length = batch_size * 4 * state.num_processes train_set, old_model = mock_training(length, batch_size * state.num_processes, generator, use_seedable_sampler) assert are_the_same_tensors(old_model.a), "Did not obtain the same model on both processes." assert are_the_same_tensors(old_model.b), "Did not obtain the same model on both processes." 
accelerator = Accelerator() train_dl = generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler) model = RegressionModel() optimizer = torch.optim.SGD(model.parameters(), lr=0.1) train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) set_seed(42) generator.manual_seed(42) for _ in range(3): for batch in train_dl: model.zero_grad() output = model(batch["x"]) loss = torch.nn.functional.mse_loss(output, batch["y"]) accelerator.backward(loss) optimizer.step() model = accelerator.unwrap_model(model).cpu() assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training." assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training." accelerator.print("Training yielded the same results on one CPU or distributed setup with no batch split.") dataloader_config = DataLoaderConfiguration(split_batches=True, use_seedable_sampler=use_seedable_sampler) accelerator = Accelerator(dataloader_config=dataloader_config) train_dl = generate_baseline_dataloader( train_set, generator, batch_size * state.num_processes, use_seedable_sampler ) model = RegressionModel() optimizer = torch.optim.SGD(model.parameters(), lr=0.1) train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) set_seed(42) generator.manual_seed(42) for _ in range(3): for batch in train_dl: model.zero_grad() output = model(batch["x"]) loss = torch.nn.functional.mse_loss(output, batch["y"]) accelerator.backward(loss) optimizer.step() model = accelerator.unwrap_model(model).cpu() assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training." assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training." accelerator.print("Training yielded the same results on one CPU or distributes setup with batch split.") if torch.cuda.is_available() or is_npu_available() or is_mlu_available(): # Mostly a test that FP16 doesn't crash as the operation inside the model is not converted to FP16 print("FP16 training check.") AcceleratorState._reset_state() dataloader_config = DataLoaderConfiguration(use_seedable_sampler=use_seedable_sampler) accelerator = Accelerator(mixed_precision="fp16", dataloader_config=dataloader_config) train_dl = generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler) model = RegressionModel() optimizer = torch.optim.SGD(model.parameters(), lr=0.1) train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) set_seed(42) generator.manual_seed(42) for _ in range(3): for batch in train_dl: model.zero_grad() output = model(batch["x"]) loss = torch.nn.functional.mse_loss(output, batch["y"]) accelerator.backward(loss) optimizer.step() model = accelerator.unwrap_model(model).cpu() assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training." assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training." if torch.cuda.is_available(): # Mostly a test that model.forward will have autocast when running unwrap_model(model, keep_fp32_wrapper=True) print("Keep fp32 wrapper check.") AcceleratorState._reset_state() accelerator = Accelerator(mixed_precision="fp16") model = torch.nn.Linear(2, 4) model = accelerator.prepare(model) model_with_fp32_wrapper = accelerator.unwrap_model(model, keep_fp32_wrapper=True) # Run forward with fp16 as input. 
# When the model is with mixed precision wrapper, no error will be raised. input_tensor = torch.Tensor([1, 2]).to(dtype=torch.float16, device=accelerator.device) output = model_with_fp32_wrapper(input_tensor) # BF16 support is only for CPU + TPU, and some GPU if is_bf16_available(): # Mostly a test that BF16 doesn't crash as the operation inside the model is not converted to BF16 print("BF16 training check.") AcceleratorState._reset_state() dataloader_config = DataLoaderConfiguration(use_seedable_sampler=use_seedable_sampler) accelerator = Accelerator(mixed_precision="bf16", dataloader_config=dataloader_config) train_dl = generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler) model = RegressionModel() optimizer = torch.optim.SGD(model.parameters(), lr=0.1) train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) set_seed(42) generator.manual_seed(42) for _ in range(3): for batch in train_dl: model.zero_grad() output = model(batch["x"]) loss = torch.nn.functional.mse_loss(output, batch["y"]) accelerator.backward(loss) optimizer.step() model = accelerator.unwrap_model(model).cpu() assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training." assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training." # IPEX support is only for CPU if is_ipex_available(): print("ipex BF16 training check.") AcceleratorState._reset_state() dataloader_config = DataLoaderConfiguration(use_seedable_sampler=use_seedable_sampler) accelerator = Accelerator(mixed_precision="bf16", cpu=True, dataloader_config=dataloader_config) train_dl = generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler) model = RegressionModel() optimizer = torch.optim.SGD(model.parameters(), lr=0.1) train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) set_seed(42) generator.manual_seed(42) for _ in range(3): for batch in train_dl: model.zero_grad() output = model(batch["x"]) loss = torch.nn.functional.mse_loss(output, batch["y"]) accelerator.backward(loss) optimizer.step() model = accelerator.unwrap_model(model).cpu() assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training." assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training." # XPU support is only for XPU if is_xpu_available(): print("xpu BF16 training check.") AcceleratorState._reset_state() dataloader_config = DataLoaderConfiguration(use_seedable_sampler=use_seedable_sampler) accelerator = Accelerator(mixed_precision="bf16", cpu=False, dataloader_config=dataloader_config) train_dl = generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler) model = RegressionModel() optimizer = torch.optim.SGD(model.parameters(), lr=0.1) train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) set_seed(42) generator.manual_seed(42) for _ in range(3): for batch in train_dl: model.zero_grad() output = model(batch["x"]) loss = torch.nn.functional.mse_loss(output, batch["y"]) accelerator.backward(loss) optimizer.step() model = accelerator.unwrap_model(model).cpu() assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on XPU or distributed training." assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on XPU or distributed training." 
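# Illustrative sketch (added for context, not part of the original test script): the checks below
# exercise `split_between_processes`, which shards an input across processes. The helper name
# `_example_split_between_processes` is hypothetical and is never called by the tests.
def _example_split_between_processes(state, items):
    # Each process receives its own contiguous shard of `items`. With `apply_padding=True`,
    # the last shard is padded so every process ends up with the same number of elements,
    # which keeps collective ops such as `gather` aligned across ranks.
    with state.split_between_processes(items, apply_padding=True) as shard:
        return list(shard)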
def test_split_between_processes_dataset(datasets_Dataset): state = AcceleratorState() data = datasets_Dataset.from_list([dict(k=v) for v in range(2 * state.num_processes)]) with state.split_between_processes(data, apply_padding=False) as results: assert ( len(results) == 2 ), f"Each process did not have two items. Process index: {state.process_index}; Length: {len(results)}" data = datasets_Dataset.from_list([dict(k=v) for v in range(2 * state.num_processes - 1)]) with state.split_between_processes(data, apply_padding=False) as results: if state.is_last_process: assert ( len(results) == 1 ), f"Last process did not receive a single item. Process index: {state.process_index}; Length: {len(results)}" else: assert ( len(results) == 2 ), f"One of the intermediate processes did not receive two items. Process index: {state.process_index}; Length: {len(results)}" data = datasets_Dataset.from_list([dict(k=v) for v in range(2 * state.num_processes - 1)]) with state.split_between_processes(data, apply_padding=True) as results: if state.num_processes == 1: assert ( len(results) == 1 ), f"Single process did not receive a single item. Process index: {state.process_index}; Length: {len(results)}" else: assert ( len(results) == 2 ), f"Each process did not have two items. Process index: {state.process_index}; Length: {len(results)}" state.wait_for_everyone() def test_split_between_processes_list(): state = AcceleratorState() data = list(range(0, 2 * state.num_processes)) with state.split_between_processes(data) as results: assert ( len(results) == 2 ), f"Each process did not have two items. Process index: {state.process_index}; Length: {len(results)}" data = list(range(0, (3 * state.num_processes) - 1)) with state.split_between_processes(data, apply_padding=True) as results: if state.is_last_process: # Test that the last process gets the extra item(s) num_samples_per_device = math.ceil(len(data) / state.num_processes) assert ( len(results) == num_samples_per_device ), f"Last process did not get the extra item(s). 
Process index: {state.process_index}; Length: {len(results)}" state.wait_for_everyone() def test_split_between_processes_nested_dict(): state = AcceleratorState() a = [1, 2, 3, 4, 5, 6, 7, 8] b = ["a", "b", "c", "d", "e", "f", "g", "h"] c = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8]) if state.num_processes in (1, 2, 4): data = {"a": a, "b": b, "c": c} data_copy = deepcopy(data) with state.split_between_processes(data) as results: if state.process_index == 0: assert results["a"] == data_copy["a"][: 8 // state.num_processes] elif state.num_processes == 2: assert results["a"] == data_copy["a"][4:] elif state.process_index == 3: # We return a list each time assert results["a"] == data_copy["a"][-2:], f'Expected: {data_copy["a"][-2]}, Actual: {results["a"]}' if state.process_index == 0: assert results["b"] == data_copy["b"][: 8 // state.num_processes] elif state.num_processes == 2: assert results["b"] == data_copy["b"][4:] elif state.process_index == 3: assert results["b"] == data_copy["b"][-2:] if state.process_index == 0: assert torch.allclose( results["c"], data_copy["c"][: 8 // state.num_processes] ), f"Did not obtain expected values on process 0, expected `{data['c'][:8 // state.num_processes]}`, received: {results['c']}" elif state.num_processes == 2: assert torch.allclose( results["c"], data_copy["c"][4:] ), f"Did not obtain expected values on process 2, expected `{data['c'][4:]}`, received: {results['c']}" elif state.process_index == 3: assert torch.allclose( results["c"], data_copy["c"][-2:] ), f"Did not obtain expected values on process 4, expected `{data['c'][-2:]}`, received: {results['c']}" state.wait_for_everyone() def test_split_between_processes_tensor(): state = AcceleratorState() if state.num_processes > 1: data = torch.tensor([[0, 1, 2, 3], [4, 5, 6, 7]]).to(state.device) with state.split_between_processes(data) as results: if state.process_index == 0: assert torch.allclose(results, torch.tensor([0, 1, 2, 3]).to(state.device)) else: assert torch.allclose(results, torch.tensor([4, 5, 6, 7]).to(state.device)) state.wait_for_everyone() def test_trigger(): accelerator = Accelerator() # should start with being false assert accelerator.check_trigger() is False # set a breakpoint on the main process if accelerator.is_main_process: accelerator.set_trigger() # check it's been activated across all processes # calls `all_reduce` and triggers a sync assert accelerator.check_trigger() is True # check it's been reset after the sync assert accelerator.check_trigger() is False def main(): accelerator = Accelerator() state = accelerator.state if state.local_process_index == 0: print("**Initialization**") init_state_check() state.wait_for_everyone() if state.distributed_type == DistributedType.MULTI_GPU: num_processes_per_node = torch.cuda.device_count() else: num_processes_per_node = state.num_processes # We only run this test on non-multinode if num_processes_per_node == state.num_processes: if state.process_index == 0: print("\n**Test process execution**") process_execution_check() if state.process_index == 0: print("\n**Test split between processes as a list**") test_split_between_processes_list() if state.process_index == 0: print("\n**Test split between processes as a dict**") test_split_between_processes_nested_dict() if state.process_index == 0: print("\n**Test split between processes as a tensor**") test_split_between_processes_tensor() if state.process_index == 0: print("\n**Test split between processes as a datasets.Dataset**") if is_datasets_available(): from datasets import Dataset as 
datasets_Dataset test_split_between_processes_dataset(datasets_Dataset) else: print("Skipped because Hugging Face datasets is not available") if state.local_process_index == 0: print("\n**Test random number generator synchronization**") rng_sync_check() if state.local_process_index == 0: print("\n**DataLoader integration test**") dl_preparation_check() if state.distributed_type != DistributedType.XLA: central_dl_preparation_check() custom_sampler_check() check_seedable_sampler() # Trainings are not exactly the same in DeepSpeed and CPU mode if state.distributed_type == DistributedType.DEEPSPEED: return if state.local_process_index == 0: print("\n**Training integration test**") training_check(use_seedable_sampler=False) training_check(use_seedable_sampler=True) if state.local_process_index == 0: print("\n**Breakpoint trigger test**") test_trigger() if __name__ == "__main__": main()
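# Usage note (added for clarity, not part of the original file): this script is meant to be run
# under the Accelerate launcher so that `AcceleratorState` sees multiple processes, e.g.:
#
#   accelerate launch src/accelerate/test_utils/scripts/test_script.py
#
# Running it with plain `python` also works, but only the single-process code paths are exercised.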
accelerate/src/accelerate/test_utils/scripts/test_script.py (repo_id: accelerate, token_count: 12418)
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import contextlib import gc import importlib import inspect import json import logging import os import re import shutil import tempfile import warnings from collections import OrderedDict, defaultdict from typing import Dict, List, Optional, Tuple, Union import packaging import torch import torch.nn as nn from ..state import AcceleratorState from .constants import SAFE_WEIGHTS_NAME, WEIGHTS_NAME from .dataclasses import AutocastKwargs, CustomDtype, DistributedType from .imports import ( is_mlu_available, is_mps_available, is_npu_available, is_peft_available, is_torch_xla_available, is_xpu_available, ) from .offload import load_offloaded_weight, offload_weight, save_offload_index from .tqdm import is_tqdm_available, tqdm from .versions import compare_versions if is_npu_available(check_device=False): import torch_npu # noqa: F401 if is_mlu_available(check_device=False): import torch_mlu # noqa: F401 from safetensors import safe_open from safetensors.torch import load_file as safe_load_file WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json" logger = logging.getLogger(__name__) def is_peft_model(model): from .other import extract_model_from_parallel if is_peft_available(): from peft import PeftModel return is_peft_available() and isinstance(extract_model_from_parallel(model), PeftModel) def check_device_same(first_device, second_device): """ Utility method to check if two `torch` devices are similar. When dealing with CUDA devices, torch throws `False` for `torch.device("cuda") == torch.device("cuda:0")` whereas they should be the same Args: first_device (`torch.device`): First device to check second_device (`torch.device`): Second device to check """ if first_device.type != second_device.type: return False if first_device.type == "cuda" and first_device.index is None: # In case the first_device is a cuda device and have # the index attribute set to `None`, default it to `0` first_device = torch.device("cuda", index=0) if second_device.type == "cuda" and second_device.index is None: # In case the second_device is a cuda device and have # the index attribute set to `None`, default it to `0` second_device = torch.device("cuda", index=0) return first_device == second_device def convert_file_size_to_int(size: Union[int, str]): """ Converts a size expressed as a string with digits an unit (like `"5MB"`) to an integer (in bytes). Args: size (`int` or `str`): The size to convert. Will be directly returned if an `int`. Example: ```py >>> convert_file_size_to_int("1MiB") 1048576 ``` """ mem_size = -1 err_msg = ( f"`size` {size} is not in a valid format. Use an integer for bytes, or a string with an unit (like '5.0GB')." 
) try: if isinstance(size, int): mem_size = size elif size.upper().endswith("GIB"): mem_size = int(float(size[:-3]) * (2**30)) elif size.upper().endswith("MIB"): mem_size = int(float(size[:-3]) * (2**20)) elif size.upper().endswith("KIB"): mem_size = int(float(size[:-3]) * (2**10)) elif size.upper().endswith("GB"): int_size = int(float(size[:-2]) * (10**9)) mem_size = int_size // 8 if size.endswith("b") else int_size elif size.upper().endswith("MB"): int_size = int(float(size[:-2]) * (10**6)) mem_size = int_size // 8 if size.endswith("b") else int_size elif size.upper().endswith("KB"): int_size = int(float(size[:-2]) * (10**3)) mem_size = int_size // 8 if size.endswith("b") else int_size except ValueError: raise ValueError(err_msg) if mem_size < 0: raise ValueError(err_msg) return mem_size def dtype_byte_size(dtype: torch.dtype): """ Returns the size (in bytes) occupied by one parameter of type `dtype`. Example: ```py >>> dtype_byte_size(torch.float32) 4 ``` """ if dtype == torch.bool: return 1 / 8 elif dtype == CustomDtype.INT2: return 1 / 4 elif dtype == CustomDtype.INT4: return 1 / 2 elif dtype == CustomDtype.FP8: return 1 bit_search = re.search(r"[^\d](\d+)$", str(dtype)) if bit_search is None: raise ValueError(f"`dtype` is not a valid dtype: {dtype}.") bit_size = int(bit_search.groups()[0]) return bit_size // 8 def id_tensor_storage(tensor: torch.Tensor) -> Tuple[torch.device, int, int]: """ Unique identifier to a tensor storage. Multiple different tensors can share the same underlying storage. For example, "meta" tensors all share the same storage, and thus their identifier will all be equal. This identifier is guaranteed to be unique and constant for this tensor's storage during its lifetime. Two tensor storages with non-overlapping lifetimes may have the same id. """ _SIZE = { torch.int64: 8, torch.float32: 4, torch.int32: 4, torch.bfloat16: 2, torch.float16: 2, torch.int16: 2, torch.uint8: 1, torch.int8: 1, torch.bool: 1, torch.float64: 8, } try: storage_ptr = tensor.untyped_storage().data_ptr() storage_size = tensor.untyped_storage().nbytes() except Exception: # Fallback for torch==1.10 try: storage_ptr = tensor.storage().data_ptr() storage_size = tensor.storage().size() * _SIZE[tensor.dtype] except NotImplementedError: # Fallback for meta storage storage_ptr = 0 # On torch >=2.0 this is the tensor size storage_size = tensor.nelement() * _SIZE[tensor.dtype] return tensor.device, storage_ptr, storage_size def shard_checkpoint( state_dict: Dict[str, torch.Tensor], max_shard_size: Union[int, str] = "10GB", weights_name: str = WEIGHTS_NAME ): """ Splits a model state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a given size. The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For example, if the limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB]. <Tip warning={true}> If one of the model's weight is bigger that `max_sahrd_size`, it will end up in its own sub-checkpoint which will have a size greater than `max_shard_size`. </Tip> Args: state_dict (`Dict[str, torch.Tensor]`): The state dictionary of a model to save. max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`): The maximum size of each sub-checkpoint. 
If expressed as a string, needs to be digits followed by a unit (like `"5MB"`). weights_name (`str`, *optional*, defaults to `"pytorch_model.bin"`): The name of the model save file. """ max_shard_size = convert_file_size_to_int(max_shard_size) sharded_state_dicts = [{}] last_block_size = 0 total_size = 0 storage_id_to_block = {} for key, weight in state_dict.items(): # when bnb serialization is used the weights in the state dict can be strings # check: https://github.com/huggingface/transformers/pull/24416 for more details if isinstance(weight, str): continue else: storage_id = id_tensor_storage(weight) # If a `weight` shares the same underlying storage as another tensor, we put `weight` in the same `block` if storage_id in storage_id_to_block: block_id = storage_id_to_block[storage_id] sharded_state_dicts[block_id][key] = weight continue weight_size = weight.numel() * dtype_byte_size(weight.dtype) # If this weight is going to tip up over the maximal size, we split. if last_block_size + weight_size > max_shard_size: sharded_state_dicts.append({}) last_block_size = 0 sharded_state_dicts[-1][key] = weight last_block_size += weight_size total_size += weight_size storage_id_to_block[storage_id] = len(sharded_state_dicts) - 1 # If we only have one shard, we return it if len(sharded_state_dicts) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index weight_map = {} shards = {} for idx, shard in enumerate(sharded_state_dicts): shard_file = weights_name.replace(".bin", f"-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.bin") shard_file = shard_file.replace( ".safetensors", f"-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.safetensors" ) shards[shard_file] = shard for key in shard.keys(): weight_map[key] = shard_file # Add the metadata metadata = {"total_size": total_size} index = {"metadata": metadata, "weight_map": weight_map} return shards, index def set_module_tensor_to_device( module: nn.Module, tensor_name: str, device: Union[int, str, torch.device], value: Optional[torch.Tensor] = None, dtype: Optional[Union[str, torch.dtype]] = None, fp16_statistics: Optional[torch.HalfTensor] = None, tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None, ): """ A helper function to set a given tensor (parameter of buffer) of a module on a specific device (note that doing `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function). Args: module (`torch.nn.Module`): The module in which the tensor we want to move lives. tensor_name (`str`): The full name of the parameter/buffer. device (`int`, `str` or `torch.device`): The device on which to set the tensor. value (`torch.Tensor`, *optional*): The value of the tensor (useful when going from the meta device to any other device). dtype (`torch.dtype`, *optional*): If passed along the value of the parameter will be cast to this `dtype`. Otherwise, `value` will be cast to the dtype of the existing parameter in the model. fp16_statistics (`torch.HalfTensor`, *optional*): The list of fp16 statistics to set on the module, used for 8 bit model serialization. tied_params_map (Dict[int, Dict[torch.device, torch.Tensor]], *optional*, defaults to `None`): A map of current data pointers to dictionaries of devices to already dispatched tied weights. For a given execution device, this parameter is useful to reuse the first available pointer of a shared weight on the device for all others, instead of duplicating memory. """ # Recurse if needed if "." 
in tensor_name: splits = tensor_name.split(".") for split in splits[:-1]: new_module = getattr(module, split) if new_module is None: raise ValueError(f"{module} has no attribute {split}.") module = new_module tensor_name = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.") is_buffer = tensor_name in module._buffers old_value = getattr(module, tensor_name) # Treat the case where old_value (or a custom `value`, typically offloaded to RAM/disk) belongs to a tied group, and one of the weight # in the tied group has already been dispatched to the device, by avoiding reallocating memory on the device and just copying the pointer. if ( value is not None and tied_params_map is not None and value.data_ptr() in tied_params_map and device in tied_params_map[value.data_ptr()] ): module._parameters[tensor_name] = tied_params_map[value.data_ptr()][device] return elif ( tied_params_map is not None and old_value.data_ptr() in tied_params_map and device in tied_params_map[old_value.data_ptr()] ): module._parameters[tensor_name] = tied_params_map[old_value.data_ptr()][device] return if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None: raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.") if value is not None: if old_value.shape != value.shape: raise ValueError( f'Trying to set a tensor of shape {value.shape} in "{tensor_name}" (which has shape {old_value.shape}), this look incorrect.' ) if dtype is None: # For compatibility with PyTorch load_state_dict which converts state dict dtype to existing dtype in model value = value.to(old_value.dtype) elif not str(value.dtype).startswith(("torch.uint", "torch.int", "torch.bool")): value = value.to(dtype) param = module._parameters[tensor_name] if tensor_name in module._parameters else None param_cls = type(param) device_quantization = None with torch.no_grad(): # leave it on cpu first before moving them to cuda # # fix the case where the device is meta, we don't want to put it on cpu because there is no data =0 if ( param is not None and param.device.type != "cuda" and torch.device(device).type == "cuda" and param_cls.__name__ in ["Int8Params", "FP4Params", "Params4bit"] ): device_quantization = device device = "cpu" # `torch.Tensor.to(<int num>)` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)). 
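# For the same reason, integer device indices are normalized below to backend-specific device strings such as "npu:0", "mlu:0" or "xpu:0" before the tensor is moved.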
if is_npu_available() and isinstance(device, int): device = f"npu:{device}" elif is_mlu_available() and isinstance(device, int): device = f"mlu:{device}" if is_xpu_available() and isinstance(device, int): device = f"xpu:{device}" if value is None: new_value = old_value.to(device) if dtype is not None and device in ["meta", torch.device("meta")]: if not str(old_value.dtype).startswith(("torch.uint", "torch.int", "torch.bool")): new_value = new_value.to(dtype) if not is_buffer: module._parameters[tensor_name] = param_cls(new_value, requires_grad=old_value.requires_grad) elif isinstance(value, torch.Tensor): new_value = value.to(device) else: new_value = torch.tensor(value, device=device) if device_quantization is not None: device = device_quantization if is_buffer: module._buffers[tensor_name] = new_value elif value is not None or not check_device_same(torch.device(device), module._parameters[tensor_name].device): param_cls = type(module._parameters[tensor_name]) kwargs = module._parameters[tensor_name].__dict__ if param_cls.__name__ in ["Int8Params", "FP4Params"]: if param_cls.__name__ == "Int8Params" and new_value.dtype == torch.float32: # downcast to fp16 if any - needed for 8bit serialization new_value = new_value.to(torch.float16) # quantize module that are going to stay on the cpu so that we offload quantized weights if device == "cpu" and param_cls.__name__ == "Int8Params": new_value = param_cls(new_value, requires_grad=old_value.requires_grad, **kwargs).to(0).to("cpu") new_value.CB = new_value.CB.to("cpu") new_value.SCB = new_value.SCB.to("cpu") else: new_value = param_cls(new_value, requires_grad=old_value.requires_grad, **kwargs).to(device) elif param_cls.__name__ in ["QTensor", "QBitsTensor"]: new_value = torch.nn.Parameter(new_value, requires_grad=old_value.requires_grad).to(device) else: new_value = param_cls(new_value, requires_grad=old_value.requires_grad).to(device) module._parameters[tensor_name] = new_value if fp16_statistics is not None: module._parameters[tensor_name].SCB = fp16_statistics.to(device) del fp16_statistics # as we put the weight to meta, it doesn't have SCB attr anymore. make sure that it is not a meta weight if ( module.__class__.__name__ == "Linear8bitLt" and getattr(module.weight, "SCB", None) is None and str(module.weight.device) != "meta" ): # quantize only if necessary device_index = torch.device(device).index if torch.device(device).type == "cuda" else None if not getattr(module.weight, "SCB", None) and device_index is not None: if module.bias is not None and module.bias.device.type != "meta": # if a bias exists, we need to wait until the bias is set on the correct device module = module.cuda(device_index) elif module.bias is None: # if no bias exists, we can quantize right away module = module.cuda(device_index) elif module.__class__.__name__ == "Linear4bit" and getattr(module.weight, "quant_state", None) is None: # quantize only if necessary device_index = torch.device(device).index if torch.device(device).type == "cuda" else None if not getattr(module.weight, "quant_state", None) and device_index is not None: module.weight = module.weight.cuda(device_index) # clean pre and post foward hook if is_npu_available(): torch.npu.empty_cache() elif is_mlu_available(): torch.mlu.empty_cache() elif is_xpu_available(): torch.xpu.empty_cache() else: torch.cuda.empty_cache() # When handling tied weights, we update tied_params_map to keep track of the tied weights that have already been allocated on the device in # order to avoid duplicating memory, see above. 
if ( tied_params_map is not None and old_value.data_ptr() in tied_params_map and device not in tied_params_map[old_value.data_ptr()] ): tied_params_map[old_value.data_ptr()][device] = new_value elif ( value is not None and tied_params_map is not None and value.data_ptr() in tied_params_map and device not in tied_params_map[value.data_ptr()] ): tied_params_map[value.data_ptr()][device] = new_value def named_module_tensors( module: nn.Module, include_buffers: bool = True, recurse: bool = False, remove_non_persistent: bool = False ): """ A helper function that gathers all the tensors (parameters + buffers) of a given module. If `include_buffers=True` it's the same as doing `module.named_parameters(recurse=recurse) + module.named_buffers(recurse=recurse)`. Args: module (`torch.nn.Module`): The module we want the tensors on. include_buffer (`bool`, *optional*, defaults to `True`): Whether or not to include the buffers in the result. recurse (`bool`, *optional`, defaults to `False`): Whether or not to go look in every submodule or just return the direct parameters and buffers. remove_non_persistent (`bool`, *optional*, defaults to `False`): Whether or not to remove the non persistent buffer from the buffers. Useful only when include_buffers = True """ yield from module.named_parameters(recurse=recurse) if include_buffers: non_persistent_buffers = set() if remove_non_persistent: non_persistent_buffers = get_non_persistent_buffers(module, recurse=recurse) for named_buffer in module.named_buffers(recurse=recurse): name, _ = named_buffer if name not in non_persistent_buffers: yield named_buffer def get_non_persistent_buffers(module: nn.Module, recurse: bool = False): """ Gather all non persistent buffers of a given modules into a set Args: module (`nn.Module`): The module we want the non persistent buffers on. recurse (`bool`, *optional*, defaults to `False`): Whether or not to go look in every submodule or just return the direct non persistent buffers. """ non_persistent_buffers_set = module._non_persistent_buffers_set if recurse: for _, m in module.named_modules(): non_persistent_buffers_set |= m._non_persistent_buffers_set return non_persistent_buffers_set class FindTiedParametersResult(list): """ This is a subclass of a list to handle backward compatibility for Transformers. Do not rely on the fact this is not a list or on the `values` method as in the future this will be removed. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def values(self): # TODO: at the next Transformers release (4.28.0) issue a deprecation warning here. return sum([x[1:] for x in self], []) def check_tied_parameters_in_config(model: nn.Module): """ Check if there is any indication in the given model that some weights should be tied. 
Args: model (`torch.nn.Module`): The model to inspect Returns: bool: True if the model needs to have tied weights """ # based on model.tie_weights() method has_tied_word_embedding = False has_tied_encoder_decoder = False has_tied_module = False if "PreTrainedModel" in [c.__name__ for c in inspect.getmro(model.__class__)]: has_tied_word_embedding = ( hasattr(model, "config") and getattr(model.config, "tie_word_embeddings", False) and model.get_output_embeddings() ) has_tied_encoder_decoder = ( hasattr(model, "config") and getattr(model.config, "is_encoder_decoder", False) and getattr(model.config, "tie_encoder_decoder", False) ) has_tied_module = any(hasattr(module, "_tie_weights") for module in model.modules()) return any([has_tied_word_embedding, has_tied_encoder_decoder, has_tied_module]) def _get_param_device(param, device_map): if param in device_map: return device_map[param] parent_param = ".".join(param.split(".")[:-1]) if parent_param == param: raise ValueError(f"The `device_map` does not contain the module {param}.") else: return _get_param_device(parent_param, device_map) def check_tied_parameters_on_same_device(tied_params, device_map): """ Check if tied parameters are on the same device Args: tied_params (`List[List[str]]`): A list of lists of parameter names being all tied together. device_map (`Dict[str, Union[int, str, torch.device]]`): A map that specifies where each submodule should go. """ for tie_param in tied_params: tie_param_devices = {} for param in tie_param: tie_param_devices[param] = _get_param_device(param, device_map) if len(set(tie_param_devices.values())) > 1: logger.warn( f"Tied parameters are on different devices: {tie_param_devices}. " "Please modify your custom device map or set `device_map='auto'`. " ) def find_tied_parameters(model: nn.Module, **kwargs): """ Find the tied parameters in a given model. <Tip warning={true}> The signature accepts keyword arguments, but they are for the recursive part of this function and you should ignore them. </Tip> Args: model (`torch.nn.Module`): The model to inspect. Returns: List[List[str]]: A list of lists of parameter names being all tied together. Example: ```py >>> from collections import OrderedDict >>> import torch.nn as nn >>> model = nn.Sequential(OrderedDict([("linear1", nn.Linear(4, 4)), ("linear2", nn.Linear(4, 4))])) >>> model.linear2.weight = model.linear1.weight >>> find_tied_parameters(model) [['linear1.weight', 'linear2.weight']] ``` """ # Initialize result and named_parameters before recursing. named_parameters = kwargs.get("named_parameters", None) prefix = kwargs.get("prefix", "") result = kwargs.get("result", {}) if named_parameters is None: named_parameters = {n: p for n, p in model.named_parameters()} else: # A tied parameter will not be in the full `named_parameters` seen above but will be in the `named_parameters` # of the submodule it belongs to. So while recursing we track the names that are not in the initial # `named_parameters`. for name, parameter in model.named_parameters(): full_name = name if prefix == "" else f"{prefix}.{name}" if full_name not in named_parameters: # When we find one, it has to be one of the existing parameters. for new_name, new_param in named_parameters.items(): if new_param is parameter: if new_name not in result: result[new_name] = [] result[new_name].append(full_name) # Once we have treated direct parameters, we move to the child modules. 
for name, child in model.named_children(): child_name = name if prefix == "" else f"{prefix}.{name}" find_tied_parameters(child, named_parameters=named_parameters, prefix=child_name, result=result) return FindTiedParametersResult([sorted([weight] + list(set(tied))) for weight, tied in result.items()]) def retie_parameters(model, tied_params): """ Reties tied parameters in a given model if the link was broken (for instance when adding hooks). Args: model (`torch.nn.Module`): The model in which to retie parameters. tied_params (`List[List[str]]`): A mapping parameter name to tied parameter name as obtained by `find_tied_parameters`. """ for tied_group in tied_params: param_to_tie = None # two loops : the first one to set param_to_tie , the second one to change the values of tied_group for param_name in tied_group: module = model splits = param_name.split(".") for split in splits[:-1]: module = getattr(module, split) param = getattr(module, splits[-1]) if param_to_tie is None and param.device != torch.device("meta"): param_to_tie = param break if param_to_tie is not None: for param_name in tied_group: module = model splits = param_name.split(".") for split in splits[:-1]: module = getattr(module, split) setattr(module, splits[-1], param_to_tie) def _get_proper_dtype(dtype: Union[str, torch.device]) -> torch.dtype: """ Just does torch.dtype(dtype) if necessary. """ if isinstance(dtype, str): # We accept "torch.float16" or just "float16" dtype = dtype.replace("torch.", "") dtype = getattr(torch, dtype) return dtype def compute_module_sizes( model: nn.Module, dtype: Optional[Union[str, torch.device]] = None, special_dtypes: Optional[Dict[str, Union[str, torch.device]]] = None, buffers_only: bool = False, ): """ Compute the size of each submodule of a given model. """ if dtype is not None: dtype = _get_proper_dtype(dtype) dtype_size = dtype_byte_size(dtype) if special_dtypes is not None: special_dtypes = {key: _get_proper_dtype(dtyp) for key, dtyp in special_dtypes.items()} special_dtypes_size = {key: dtype_byte_size(dtyp) for key, dtyp in special_dtypes.items()} module_sizes = defaultdict(int) module_list = [] if not buffers_only: module_list = named_module_tensors(model, recurse=True) else: module_list = model.named_buffers(recurse=True) for name, tensor in module_list: if special_dtypes is not None and name in special_dtypes: size = tensor.numel() * special_dtypes_size[name] elif dtype is None: size = tensor.numel() * dtype_byte_size(tensor.dtype) elif str(tensor.dtype).startswith(("torch.uint", "torch.int", "torch.bool")): # According to the code in set_module_tensor_to_device, these types won't be converted # so use their original size here size = tensor.numel() * dtype_byte_size(tensor.dtype) else: size = tensor.numel() * min(dtype_size, dtype_byte_size(tensor.dtype)) name_parts = name.split(".") for idx in range(len(name_parts) + 1): module_sizes[".".join(name_parts[:idx])] += size return module_sizes def compute_module_total_buffer_size( model: nn.Module, dtype: Optional[Union[str, torch.device]] = None, special_dtypes: Optional[Dict[str, Union[str, torch.device]]] = None, ): """ Compute the total size of buffers in each submodule of a given model. 
""" module_sizes = compute_module_sizes(model, dtype=dtype, special_dtypes=special_dtypes, buffers_only=True) return module_sizes.get("", 0) def get_max_layer_size( modules: List[Tuple[str, torch.nn.Module]], module_sizes: Dict[str, int], no_split_module_classes: List[str] ): """ Utility function that will scan a list of named modules and return the maximum size used by one full layer. The definition of a layer being: - a module with no direct children (just parameters and buffers) - a module whose class name is in the list `no_split_module_classes` Args: modules (`List[Tuple[str, torch.nn.Module]]`): The list of named modules where we want to determine the maximum layer size. module_sizes (`Dict[str, int]`): A dictionary mapping each layer name to its size (as generated by `compute_module_sizes`). no_split_module_classes (`List[str]`): A list of class names for layers we don't want to be split. Returns: `Tuple[int, List[str]]`: The maximum size of a layer with the list of layer names realizing that maximum size. """ max_size = 0 layer_names = [] modules_to_treat = modules.copy() while len(modules_to_treat) > 0: module_name, module = modules_to_treat.pop(0) modules_children = list(module.named_children()) if isinstance(module, torch.nn.Module) else [] if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes: # No splitting this one so we compare to the max_size size = module_sizes[module_name] if size > max_size: max_size = size layer_names = [module_name] elif size == max_size: layer_names.append(module_name) else: modules_to_treat = [(f"{module_name}.{n}", v) for n, v in modules_children] + modules_to_treat return max_size, layer_names def get_max_memory(max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None): """ Get the maximum memory available if nothing is passed, converts string to int otherwise. """ import psutil if max_memory is None: if not (torch.cuda.is_available() or is_npu_available() or is_mlu_available() or is_xpu_available()): max_memory = {} else: # Make sure CUDA is initialized on each GPU to have the right memory info. if is_npu_available(): for i in range(torch.npu.device_count()): _ = torch.tensor(0, device=torch.device("npu", i)) max_memory = {i: torch.npu.mem_get_info(i)[0] for i in range(torch.npu.device_count())} elif is_mlu_available(): for i in range(torch.mlu.device_count()): _ = torch.tensor(0, device=torch.device("mlu", i)) max_memory = {i: torch.mlu.mem_get_info(i)[0] for i in range(torch.mlu.device_count())} elif is_xpu_available(): for i in range(torch.xpu.device_count()): _ = torch.tensor(0, device=torch.device("xpu", i)) max_memory = {i: torch.xpu.max_memory_allocated(i) for i in range(torch.xpu.device_count())} else: for i in range(torch.cuda.device_count()): _ = torch.tensor([0], device=i) max_memory = {i: torch.cuda.mem_get_info(i)[0] for i in range(torch.cuda.device_count())} # allocate everything in the mps device as the RAM is shared if is_mps_available(): max_memory["mps"] = psutil.virtual_memory().available else: max_memory["cpu"] = psutil.virtual_memory().available return max_memory for key in max_memory: if isinstance(max_memory[key], str): max_memory[key] = convert_file_size_to_int(max_memory[key]) # Need to sort the device by type to make sure that we allocate the gpu first. # As gpu/npu/xpu are represented by int, we need to sort them first. 
gpu_devices = [k for k in max_memory.keys() if isinstance(k, int)] gpu_devices.sort() # check if gpu/npu/xpu devices are available and if not, throw a warning if is_npu_available(): num_devices = torch.npu.device_count() elif is_mlu_available(): num_devices = torch.mlu.device_count() elif is_xpu_available(): num_devices = torch.xpu.device_count() else: num_devices = torch.cuda.device_count() for device in gpu_devices: if device >= num_devices or device < 0: logger.warning(f"Device {device} is not available, available devices are {list(range(num_devices))}") # Add the other devices in the preset order if they are available all_devices = gpu_devices + [k for k in ["mps", "cpu", "disk"] if k in max_memory.keys()] # Raise an error if a device is not recognized for k in max_memory.keys(): if k not in all_devices: raise ValueError( f"Device {k} is not recognized, available devices are integers(for GPU/XPU), 'mps', 'cpu' and 'disk'" ) max_memory = {k: max_memory[k] for k in all_devices} return max_memory def clean_device_map(device_map: Dict[str, Union[int, str, torch.device]], module_name: str = ""): """ Cleans a device_map by grouping all submodules that go on the same device together. """ # Get the value of the current module and if there is only one split across several keys, regroup it. prefix = "" if module_name == "" else f"{module_name}." values = [v for k, v in device_map.items() if k.startswith(prefix)] if len(set(values)) == 1 and len(values) > 1: for k in [k for k in device_map if k.startswith(prefix)]: del device_map[k] device_map[module_name] = values[0] # Recurse over the children children_modules = [k for k in device_map.keys() if k.startswith(prefix) and len(k) > len(module_name)] idx = len(module_name.split(".")) + 1 if len(module_name) > 0 else 1 children_modules = set(".".join(k.split(".")[:idx]) for k in children_modules) for child in children_modules: clean_device_map(device_map, module_name=child) return device_map def load_offloaded_weights(model, index, offload_folder): """ Loads the weights from the offload folder into the model. Args: model (`torch.nn.Module`): The model to load the weights into. index (`dict`): A dictionary containing the parameter name and its metadata for each parameter that was offloaded from the model. offload_folder (`str`): The folder where the offloaded weights are stored. """ if index is None or len(index) == 0: # Nothing to do return for param_name, metadata in index.items(): if "SCB" in param_name: continue fp16_statistics = None if "weight" in param_name and param_name.replace("weight", "SCB") in index.keys(): weight_name = param_name.replace("weight", "SCB") fp16_statistics = load_offloaded_weight( os.path.join(offload_folder, f"{weight_name}.dat"), index[weight_name] ) tensor_file = os.path.join(offload_folder, f"{param_name}.dat") weight = load_offloaded_weight(tensor_file, metadata) set_module_tensor_to_device(model, param_name, "cpu", value=weight, fp16_statistics=fp16_statistics) def get_balanced_memory( model: nn.Module, max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None, no_split_module_classes: Optional[List[str]] = None, dtype: Optional[Union[str, torch.dtype]] = None, special_dtypes: Optional[Dict[str, Union[str, torch.device]]] = None, low_zero: bool = False, ): """ Compute a `max_memory` dictionary for [`infer_auto_device_map`] that will balance the use of each available GPU. <Tip> All computation is done analyzing sizes and dtypes of the model parameters. 
As a result, the model can be on the meta device (as it would if initialized within the `init_empty_weights` context manager). </Tip> Args: model (`torch.nn.Module`): The model to analyze. max_memory (`Dict`, *optional*): A dictionary device identifier to maximum memory. Will default to the maximum memory available if unset. Example: `max_memory={0: "1GB"}`. no_split_module_classes (`List[str]`, *optional*): A list of layer class names that should never be split across device (for instance any layer that has a residual connection). dtype (`str` or `torch.dtype`, *optional*): If provided, the weights will be converted to that type when loaded. special_dtypes (`Dict[str, Union[str, torch.device]]`, *optional*): If provided, special dtypes to consider for some specific weights (will override dtype used as default for all weights). low_zero (`bool`, *optional*): Minimizes the number of weights on GPU 0, which is convenient when it's used for other operations (like the Transformers generate function). """ # Get default / clean up max_memory user_not_set_max_memory = max_memory is None max_memory = get_max_memory(max_memory) if is_npu_available(): num_devices = len([d for d in max_memory if torch.device(d).type == "npu" and max_memory[d] > 0]) elif is_mlu_available(): num_devices = len([d for d in max_memory if torch.device(d).type == "mlu" and max_memory[d] > 0]) elif is_xpu_available(): num_devices = len( [ d for d in max_memory if ( d != "cpu" and (torch.device(d).type == "xpu" or torch.xpu.get_device_properties(d).dev_type == "gpu") ) and max_memory[d] > 0 ] ) else: num_devices = len([d for d in max_memory if torch.device(d).type == "cuda" and max_memory[d] > 0]) if num_devices == 0: return max_memory if num_devices == 1: # We cannot do low_zero on just one GPU, but we will still reserve some memory for the buffer low_zero = False # If user just asked us to handle memory usage, we should avoid OOM if user_not_set_max_memory: for key in max_memory.keys(): if isinstance(key, int): max_memory[key] *= 0.9 # 90% is a good compromise logger.info( f"We will use 90% of the memory on device {key} for storing the model, and 10% for the buffer to avoid OOM. " "You can set `max_memory` in to a higher value to use more memory (at your own risk)." ) break # only one device module_sizes = compute_module_sizes(model, dtype=dtype, special_dtypes=special_dtypes) per_gpu = module_sizes[""] // (num_devices - 1 if low_zero else num_devices) # We can't just set the memory to model_size // num_devices as it will end being too small: each GPU will get # slightly less layers and some layers will end up offload at the end. 
So this function computes a buffer size to # add which is the biggest of: # - the size of no split block (if applicable) # - the mean of the layer sizes if no_split_module_classes is None: no_split_module_classes = [] elif not isinstance(no_split_module_classes, (list, tuple)): no_split_module_classes = [no_split_module_classes] # Identify the size of the no_split_block modules if len(no_split_module_classes) > 0: no_split_children = {} for name, size in module_sizes.items(): if name == "": continue submodule = model for submodule_name in name.split("."): submodule = getattr(submodule, submodule_name) class_name = submodule.__class__.__name__ if class_name in no_split_module_classes and class_name not in no_split_children: no_split_children[class_name] = size if set(no_split_children.keys()) == set(no_split_module_classes): break buffer = max(no_split_children.values()) if len(no_split_children) > 0 else 0 else: buffer = 0 # Compute mean of final modules. In the first dict of module sizes, leaves are the parameters leaves = [n for n in module_sizes if len([p for p in module_sizes if n == "" or p.startswith(n + ".")]) == 0] module_sizes = {n: v for n, v in module_sizes.items() if n not in leaves} # Once removed, leaves are the final modules. leaves = [n for n in module_sizes if len([p for p in module_sizes if n == "" or p.startswith(n + ".")]) == 0] mean_leaves = int(sum([module_sizes[n] for n in leaves]) / max(len(leaves), 1)) buffer = int(1.25 * max(buffer, mean_leaves)) per_gpu += buffer # Sorted list of GPUs id (we may have some gpu ids not included in the our max_memory list - let's ignore them) gpus_idx_list = list( sorted( device_id for device_id, device_mem in max_memory.items() if isinstance(device_id, int) and device_mem > 0 ) ) # The last device is left with max_memory just in case the buffer is not enough. for idx in gpus_idx_list[:-1]: max_memory[idx] = min(max_memory[0] if low_zero and idx == 0 else per_gpu, max_memory[idx]) if low_zero: min_zero = max(0, module_sizes[""] - sum([max_memory[i] for i in range(1, num_devices)])) max_memory[0] = min(min_zero, max_memory[0]) return max_memory def calculate_maximum_sizes(model: torch.nn.Module): "Computes the total size of the model and its largest layer" sizes = compute_module_sizes(model) # `transformers` models store this information for us no_split_modules = getattr(model, "_no_split_modules", None) if no_split_modules is None: no_split_modules = [] modules_to_treat = ( list(model.named_parameters(recurse=False)) + list(model.named_children()) + list(model.named_buffers(recurse=False)) ) largest_layer = get_max_layer_size(modules_to_treat, sizes, no_split_modules) total_size = sizes[""] return total_size, largest_layer def infer_auto_device_map( model: nn.Module, max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None, no_split_module_classes: Optional[List[str]] = None, dtype: Optional[Union[str, torch.dtype]] = None, special_dtypes: Optional[Dict[str, Union[str, torch.dtype]]] = None, verbose: bool = False, clean_result: bool = True, offload_buffers: bool = False, ): """ Compute a device map for a given model giving priority to GPUs, then offload on CPU and finally offload to disk, such that: - we don't exceed the memory available of any of the GPU. - if offload to the CPU is needed, there is always room left on GPU 0 to put back the layer offloaded on CPU that has the largest size. - if offload to the CPU is needed,we don't exceed the RAM available on the CPU. 
- if offload to the disk is needed, there is always room left on the CPU to put back the layer offloaded on disk that has the largest size. <Tip> All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the meta device (as it would if initialized within the `init_empty_weights` context manager). </Tip> Args: model (`torch.nn.Module`): The model to analyze. max_memory (`Dict`, *optional*): A dictionary device identifier to maximum memory. Will default to the maximum memory available if unset. Example: `max_memory={0: "1GB"}`. no_split_module_classes (`List[str]`, *optional*): A list of layer class names that should never be split across device (for instance any layer that has a residual connection). dtype (`str` or `torch.dtype`, *optional*): If provided, the weights will be converted to that type when loaded. special_dtypes (`Dict[str, Union[str, torch.device]]`, *optional*): If provided, special dtypes to consider for some specific weights (will override dtype used as default for all weights). verbose (`bool`, *optional*, defaults to `False`): Whether or not to provide debugging statements as the function builds the device_map. clean_result (`bool`, *optional*, defaults to `True`): Clean the resulting device_map by grouping all submodules that go on the same device together. offload_buffers (`bool`, *optional*, defaults to `False`): In the layers that are offloaded on the CPU or the hard drive, whether or not to offload the buffers as well as the parameters. """ # Get default / clean up max_memory max_memory = get_max_memory(max_memory) if no_split_module_classes is None: no_split_module_classes = [] elif not isinstance(no_split_module_classes, (list, tuple)): no_split_module_classes = [no_split_module_classes] devices = list(max_memory.keys()) if "disk" not in devices: devices.append("disk") gpus = [device for device in devices if device not in ["cpu", "disk"]] # Devices that need to keep space for a potential offloaded layer. if "mps" in gpus: main_devices = ["mps"] elif len(gpus) > 0: main_devices = [gpus[0], "cpu"] else: main_devices = ["cpu"] module_sizes = compute_module_sizes(model, dtype=dtype, special_dtypes=special_dtypes) tied_parameters = find_tied_parameters(model) if check_tied_parameters_in_config(model) and len(tied_parameters) == 0: logger.warn( "The model weights are not tied. Please use the `tie_weights` method before using the `infer_auto_device` function." ) device_map = OrderedDict() current_device = 0 current_memory_used = 0 device_memory_used = {} device_buffer_sizes = {} # Direct submodules and parameters modules_to_treat = ( list(model.named_parameters(recurse=False)) + list(model.named_children()) + list(model.named_buffers(recurse=False)) ) # Initialize maximum largest layer, to know which space to keep in memory max_layer_size, max_layer_names = get_max_layer_size(modules_to_treat, module_sizes, no_split_module_classes) # Ready ? This is going to be a bit messy. while len(modules_to_treat) > 0: name, module = modules_to_treat.pop(0) if verbose: print(f"\nTreating module {name}.") # Max size in the remaining layers may have changed since we took one, so we maybe update it. 
max_layer_names = [n for n in max_layer_names if n != name and not n.startswith(name + ".")] if len(max_layer_names) == 0: max_layer_size, max_layer_names = get_max_layer_size( [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)], module_sizes, no_split_module_classes, ) # Assess size needed module_size = module_sizes[name] # We keep relevant tied parameters only: one of the tied parameters in the group is inside the current module # and the other is not. # Note: If we are currently processing the name `compute.weight`, an other parameter named e.g. `compute.weight_submodule.parameter` # needs to be considered outside the current module, hence the check with additional dots. tied_param_goups = [ tied_group for tied_group in tied_parameters if any(name + "." in k + "." for k in tied_group) and not all(name + "." in k + "." for k in tied_group) ] if verbose and len(tied_param_goups) > 0: print(f" Found the relevant tied param groups {tied_param_goups}") # Then we keep track of all the parameters that are tied to the current module, but not in the current module tied_params = sum( [[p for p in tied_group if name + "." not in p + "."] for tied_group in tied_param_goups], [] ) if verbose and len(tied_params) > 0: print(f" So those parameters need to be taken into account {tied_params}") device = devices[current_device] current_max_size = max_memory[device] if device != "disk" else None current_memory_reserved = 0 # Reduce max size available by the largest layer. if devices[current_device] in main_devices: current_max_size = current_max_size - max_layer_size current_memory_reserved = max_layer_size # Case 1 -> We're too big! if current_max_size is not None and current_memory_used + module_size > current_max_size: # Split or not split? modules_children = ( [] if isinstance(module, nn.Parameter) or isinstance(module, torch.Tensor) else list(module.named_children()) ) if verbose: print( f"Not enough space on {devices[current_device]} to put {name} (space available " f"{current_max_size - current_memory_used}, module size {module_size})." ) if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes: # -> no split, we go to the next device if verbose: print("This module cannot be split, going to the next device.") device_memory_used[device] = current_memory_used + current_memory_reserved current_device += 1 modules_to_treat = [(name, module)] + modules_to_treat current_memory_used = 0 else: # -> split, we replace the module studied by its children + parameters if verbose: print(f"Splitting {name}.") modules_children = list(module.named_parameters(recurse=False)) + modules_children modules_to_treat = [(f"{name}.{n}", v) for n, v in modules_children] + modules_to_treat # Update the max layer size. max_layer_size, max_layer_names = get_max_layer_size( [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)], module_sizes, no_split_module_classes, ) # Case 2, it fits! We're not entirely out of the wood though, because we may have some tied parameters. 
elif len(tied_params) > 0: # First locate all tied modules tied_module_names = [] tied_modules = [] for tied_param in tied_params: tied_module_index = [i for i, (n, _) in enumerate(modules_to_treat) if n in tied_param][0] tied_module_names.append(modules_to_treat[tied_module_index][0]) tied_modules.append(modules_to_treat[tied_module_index][1]) if verbose: print( f" It looks like {name} is going to fit on {devices[current_device]} but we have tied " f"parameters to account for.\n - Names {tied_params}\n - Module names {tied_module_names}" ) # Let's see if it all fits first module_size_with_ties = module_size for tied_param, tied_module_name in zip(tied_params, tied_module_names): module_size_with_ties += module_sizes[tied_module_name] - module_sizes[tied_param] if current_max_size is None or current_memory_used + module_size_with_ties <= current_max_size: # We really really fit! if verbose: print(f"Putting {name} and {tied_module_names} on {devices[current_device]}.") current_memory_used += module_size_with_ties device_map[name] = devices[current_device] for tied_module_name in tied_module_names: if tied_module_name in [m[0] for m in modules_to_treat]: # The module may have been removed by a previous iteration of this loop. tied_module_index = [i for i, (n, _) in enumerate(modules_to_treat) if n == tied_module_name][ 0 ] modules_to_treat.pop(tied_module_index) device_map[tied_module_name] = devices[current_device] if not offload_buffers and isinstance(module, nn.Module): current_buffer_size = compute_module_total_buffer_size( module, dtype=dtype, special_dtypes=special_dtypes ) device_buffer_sizes[device] = device_buffer_sizes.get(device, 0) + current_buffer_size else: # We don't fit with the tied modules. Next question is: can we split one of the tied modules to make it # smaller or do we need to go on the next device? if verbose: print( f"Not enough space on {devices[current_device]} to put {name} and {tied_module_names} (space " f"available {current_max_size - current_memory_used}, needed size {module_size_with_ties})." ) split_happened = False for tied_module_name, tied_module in zip(tied_module_names, tied_modules): tied_module_children = list(tied_module.named_children()) if len(tied_module_children) == 0 or tied_module.__class__.__name__ in no_split_module_classes: # can't break this one. continue if verbose: print(f"Splitting {tied_module_name}.") tied_module_children = list(tied_module.named_parameters(recurse=False)) + tied_module_children tied_module_children = [(f"{tied_module_name}.{n}", v) for n, v in tied_module_children] tied_module_index = [i for i, (n, _) in enumerate(modules_to_treat) if n == tied_module_name][0] modules_to_treat = ( [(name, module)] + modules_to_treat[:tied_module_index] + tied_module_children + modules_to_treat[tied_module_index + 1 :] ) # Update the max layer size. 
max_layer_size, max_layer_names = get_max_layer_size( [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)], module_sizes, no_split_module_classes, ) split_happened = True break if not split_happened: # If the tied module is not split, we go to the next device if verbose: print("None of the tied module can be split, going to the next device.") device_memory_used[device] = current_memory_used + current_memory_reserved current_device += 1 modules_to_treat = [(name, module)] + modules_to_treat current_memory_used = 0 else: if verbose: if current_max_size is None: print(f"Putting {name} (size={module_size}) on {devices[current_device]}.") else: print( f"Putting {name} (size={module_size}) on {devices[current_device]} " f"(available={current_max_size - current_memory_used})." ) current_memory_used += module_size device_memory_used[device] = current_memory_used + current_memory_reserved device_map[name] = devices[current_device] if not offload_buffers and isinstance(module, nn.Module): current_buffer_size = compute_module_total_buffer_size( module, dtype=dtype, special_dtypes=special_dtypes ) device_buffer_sizes[device] = device_buffer_sizes.get(device, 0) + current_buffer_size if clean_result: device_map = clean_device_map(device_map) non_gpu_buffer_size = device_buffer_sizes.get("cpu", 0) + device_buffer_sizes.get("disk", 0) if non_gpu_buffer_size > 0 and not offload_buffers: is_buffer_fit_any_gpu = False for gpu_device, gpu_max_memory in max_memory.items(): if gpu_device == "cpu" or gpu_device == "disk": continue if not is_buffer_fit_any_gpu: gpu_memory_used = device_memory_used.get(gpu_device, 0) if gpu_max_memory >= non_gpu_buffer_size + gpu_memory_used: is_buffer_fit_any_gpu = True if len(gpus) > 0 and not is_buffer_fit_any_gpu: warnings.warn( f"Current model requires {non_gpu_buffer_size} bytes of buffer for offloaded layers, which seems does " f"not fit any GPU's remaining memory. If you are experiencing a OOM later, please consider using " f"offload_buffers=True." ) return device_map def check_device_map(model: nn.Module, device_map: Dict[str, Union[int, str, torch.device]]): """ Checks a device map covers everything in a given model. Args: model (`torch.nn.Module`): The model to check the device map against. device_map (`Dict[str, Union[int, str, torch.device]]`): The device map to check. """ all_model_tensors = [name for name, _ in model.state_dict().items()] for module_name in device_map.keys(): if module_name == "": all_model_tensors.clear() break else: all_model_tensors = [ name for name in all_model_tensors if not name == module_name and not name.startswith(module_name + ".") ] if len(all_model_tensors) > 0: non_covered_params = ", ".join(all_model_tensors) raise ValueError( f"The device_map provided does not give any device for the following parameters: {non_covered_params}" ) def load_state_dict(checkpoint_file, device_map=None): """ Load a checkpoint from a given file. If the checkpoint is in the safetensors format and a device map is passed, the weights can be fast-loaded directly on the GPU. Args: checkpoint_file (`str`): The path to the checkpoint to load. device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device. 
""" if checkpoint_file.endswith(".safetensors"): with safe_open(checkpoint_file, framework="pt") as f: metadata = f.metadata() weight_names = f.keys() if metadata is None: logger.warn( f"The safetensors archive passed at {checkpoint_file} does not contain metadata. " "Make sure to save your model with the `save_pretrained` method. Defaulting to 'pt' metadata." ) metadata = {"format": "pt"} if metadata.get("format") not in ["pt", "tf", "flax"]: raise OSError( f"The safetensors archive passed at {checkpoint_file} does not contain the valid metadata. Make sure " "you save your model with the `save_pretrained` method." ) elif metadata["format"] != "pt": raise ValueError(f"The checkpoint passed was saved with {metadata['format']}, we need a the pt format.") if device_map is None: return safe_load_file(checkpoint_file) else: # if we only have one device we can load everything directly if len(set(device_map.values())) == 1: return safe_load_file(checkpoint_file, device=list(device_map.values())[0]) devices = list(set(device_map.values()) - {"disk"}) # cpu device should always exist as fallback option if "cpu" not in devices: devices.append("cpu") # For each device, get the weights that go there device_weights = {device: [] for device in devices} for module_name, device in device_map.items(): if device in devices: device_weights[device].extend( [k for k in weight_names if k == module_name or k.startswith(module_name + ".")] ) # all weights that haven't defined a device should be loaded on CPU device_weights["cpu"].extend([k for k in weight_names if k not in sum(device_weights.values(), [])]) tensors = {} if is_tqdm_available(): progress_bar = tqdm( main_process_only=False, total=sum([len(device_weights[device]) for device in devices]), unit="w", smoothing=0, leave=False, ) else: progress_bar = None for device in devices: target_device = device if is_xpu_available(): current_safetensors_version = packaging.version.parse(importlib.metadata.version("safetensors")) if compare_versions(current_safetensors_version, "<", "0.4.2"): raise ModuleNotFoundError( f"You need at least safetensors 0.4.2 for Intel GPU, while you have {current_safetensors_version}" ) if isinstance(device, int): target_device = f"xpu:{device}" with safe_open(checkpoint_file, framework="pt", device=target_device) as f: for key in device_weights[device]: if progress_bar is not None: progress_bar.set_postfix(dev=device, refresh=False) progress_bar.set_description(key) tensors[key] = f.get_tensor(key) if progress_bar is not None: progress_bar.update() if progress_bar is not None: progress_bar.close() return tensors else: return torch.load(checkpoint_file, map_location=torch.device("cpu")) def get_state_dict_offloaded_model(model: nn.Module): """ Returns the state dictionary for an offloaded model via iterative onloading Args: model (`torch.nn.Module`): The offloaded model we want to save """ from ..hooks import AlignDevicesHook state_dict = {} placeholders = set() for name, module in model.named_modules(): if name == "": continue if hasattr(module, "_hf_hook") and isinstance(module._hf_hook, AlignDevicesHook) and module._hf_hook.offload: original_device = module._hf_hook.execution_device # assign hook execution device to cpu module._hf_hook.execution_device = "cpu" # onload meta tensors to execution device try: module._hf_hook.pre_forward(module) except MemoryError: raise MemoryError("Offloaded module must fit in CPU memory to call save_model!") from None module_state_dict = module.state_dict() # offload meta tensors from cpu 
module._hf_hook.post_forward(module, torch.tensor([])) # re-assign hook to original execution device module._hf_hook.execution_device = original_device else: module_state_dict = module.state_dict() for key in module_state_dict: # ignore placeholder parameters that are still on the meta device if module_state_dict[key].device == torch.device("meta"): placeholders.add(name + f".{key}") continue params = module_state_dict[key] state_dict[name + f".{key}"] = params for key in placeholders.copy(): if key in state_dict: placeholders.remove(key) if placeholders: logger.warning(f"The following tensors were not saved because they were still on meta device: {placeholders}") return state_dict def load_checkpoint_in_model( model: nn.Module, checkpoint: Union[str, os.PathLike], device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None, offload_folder: Optional[Union[str, os.PathLike]] = None, dtype: Optional[Union[str, torch.dtype]] = None, offload_state_dict: bool = False, offload_buffers: bool = False, keep_in_fp32_modules: List[str] = None, offload_8bit_bnb: bool = False, ): """ Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are loaded. <Tip warning={true}> Once loaded across devices, you still need to call [`dispatch_model`] on your model to make it able to run. To group the checkpoint loading and dispatch in one single call, use [`load_checkpoint_and_dispatch`]. </Tip> Args: model (`torch.nn.Module`): The model in which we want to load a checkpoint. checkpoint (`str` or `os.PathLike`): The folder checkpoint to load. It can be: - a path to a file containing a whole model state dict - a path to a `.json` file containing the index to a sharded checkpoint - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint. - a path to a folder containing a unique pytorch_model.bin or a model.safetensors file. device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device. offload_folder (`str` or `os.PathLike`, *optional*): If the `device_map` contains any value `"disk"`, the folder where we will offload weights. dtype (`str` or `torch.dtype`, *optional*): If provided, the weights will be converted to that type when loaded. offload_state_dict (`bool`, *optional*, defaults to `False`): If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if the weight of the CPU state dict + the biggest shard does not fit. offload_buffers (`bool`, *optional*, defaults to `False`): Whether or not to include the buffers in the weights offloaded to disk. keep_in_fp32_modules(`List[str]`, *optional*): A list of the modules that we keep in `torch.float32` dtype. offload_8bit_bnb (`bool`, *optional*): Whether or not to enable offload of 8-bit modules on cpu/disk. """ if offload_8bit_bnb: from .bnb import quantize_and_offload_8bit tied_params = find_tied_parameters(model) if check_tied_parameters_in_config(model) and len(tied_params) == 0: logger.warn( "The model weights are not tied. Please use the `tie_weights` method before using the `infer_auto_device` function." 
) if device_map is not None: check_tied_parameters_on_same_device(tied_params, device_map) if offload_folder is None and device_map is not None and "disk" in device_map.values(): raise ValueError( "At least one of the model submodule will be offloaded to disk, please pass along an `offload_folder`." ) elif offload_folder is not None and device_map is not None and "disk" in device_map.values(): os.makedirs(offload_folder, exist_ok=True) if isinstance(dtype, str): # We accept "torch.float16" or just "float16" dtype = dtype.replace("torch.", "") dtype = getattr(torch, dtype) checkpoint_files = None index_filename = None if os.path.isfile(checkpoint): if str(checkpoint).endswith(".json"): index_filename = checkpoint else: checkpoint_files = [checkpoint] elif os.path.isdir(checkpoint): # check if the whole state dict is present potential_state_bin = [f for f in os.listdir(checkpoint) if f == WEIGHTS_NAME] potential_state_safetensor = [f for f in os.listdir(checkpoint) if f == SAFE_WEIGHTS_NAME] if len(potential_state_bin) == 1: checkpoint_files = [os.path.join(checkpoint, potential_state_bin[0])] elif len(potential_state_safetensor) == 1: checkpoint_files = [os.path.join(checkpoint, potential_state_safetensor[0])] else: # otherwise check for sharded checkpoints potential_index = [f for f in os.listdir(checkpoint) if f.endswith(".index.json")] if len(potential_index) == 0: raise ValueError( f"{checkpoint} is not a folder containing a `.index.json` file or a {WEIGHTS_NAME} or a {SAFE_WEIGHTS_NAME} file" ) elif len(potential_index) == 1: index_filename = os.path.join(checkpoint, potential_index[0]) else: raise ValueError( f"{checkpoint} containing more than one `.index.json` file, delete the irrelevant ones." ) else: raise ValueError( "`checkpoint` should be the path to a file containing a whole state dict, or the index of a sharded " f"checkpoint, or a folder containing a sharded checkpoint or the whole state dict, but got {checkpoint}." ) if index_filename is not None: checkpoint_folder = os.path.split(index_filename)[0] with open(index_filename) as f: index = json.loads(f.read()) if "weight_map" in index: index = index["weight_map"] checkpoint_files = sorted(list(set(index.values()))) checkpoint_files = [os.path.join(checkpoint_folder, f) for f in checkpoint_files] # Logic for missing/unexepected keys goes here. offload_index = {} if offload_state_dict: state_dict_folder = tempfile.mkdtemp() state_dict_index = {} buffer_names = [name for name, _ in model.named_buffers()] for checkpoint_file in checkpoint_files: checkpoint = load_state_dict(checkpoint_file, device_map=device_map) if device_map is None: model.load_state_dict(checkpoint, strict=False) else: for param_name, param in checkpoint.items(): # skip SCB parameter (for 8-bit serialization) if "SCB" in param_name: continue module_name = param_name while len(module_name) > 0 and module_name not in device_map: module_name = ".".join(module_name.split(".")[:-1]) if module_name == "" and "" not in device_map: # TODO: group all errors and raise at the end. raise ValueError(f"{param_name} doesn't have any device set.") param_device = device_map[module_name] new_dtype = dtype if dtype is not None and torch.is_floating_point(param): if keep_in_fp32_modules is not None and dtype == torch.float16: proceed = False for key in keep_in_fp32_modules: if ((key in param_name) and (key + "." 
in param_name)) or key == param_name: proceed = True break if proceed: new_dtype = torch.float32 if "weight" in param_name and param_name.replace("weight", "SCB") in checkpoint.keys(): if param.dtype == torch.int8: fp16_statistics = checkpoint[param_name.replace("weight", "SCB")] else: fp16_statistics = None if param_device == "disk": if offload_buffers or param_name not in buffer_names: if new_dtype is None: new_dtype = param.dtype if offload_8bit_bnb: quantize_and_offload_8bit( model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics ) continue else: set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype) offload_weight(param, param_name, offload_folder, index=offload_index) elif param_device == "cpu" and offload_state_dict: if new_dtype is None: new_dtype = param.dtype if offload_8bit_bnb: quantize_and_offload_8bit( model, param, param_name, new_dtype, state_dict_folder, state_dict_index, fp16_statistics ) else: set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype) offload_weight(param, param_name, state_dict_folder, index=state_dict_index) else: set_module_tensor_to_device( model, param_name, param_device, value=param, dtype=new_dtype, fp16_statistics=fp16_statistics, ) # Force Python to clean up. del checkpoint gc.collect() save_offload_index(offload_index, offload_folder) # Load back offloaded state dict on CPU if offload_state_dict: load_offloaded_weights(model, state_dict_index, state_dict_folder) shutil.rmtree(state_dict_folder) retie_parameters(model, tied_params) def get_mixed_precision_context_manager(native_amp: bool = False, autocast_kwargs: AutocastKwargs = None): """ Return a context manager for autocasting mixed precision Args: native_amp (`bool`, *optional*, defaults to False): Whether mixed precision is actually enabled. cache_enabled (`bool`, *optional*, defaults to True): Whether the weight cache inside autocast should be enabled. """ state = AcceleratorState() if autocast_kwargs is None: autocast_kwargs = {} else: autocast_kwargs = autocast_kwargs.to_kwargs() if native_amp: device_type = ( "cuda" if (state.distributed_type == DistributedType.XLA and is_torch_xla_available(check_is_gpu=True)) else state.device.type ) if state.mixed_precision == "fp16": return torch.autocast(device_type=device_type, dtype=torch.float16, **autocast_kwargs) elif state.mixed_precision == "bf16" and state.distributed_type in [ DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU, DistributedType.MULTI_MLU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU, DistributedType.FSDP, DistributedType.XLA, ]: return torch.autocast(device_type=device_type, dtype=torch.bfloat16, **autocast_kwargs) else: return torch.autocast(device_type=device_type, **autocast_kwargs) else: return contextlib.nullcontext()
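# ---------------------------------------------------------------------------
# Illustrative usage sketch (kept as a comment so it does not alter the module
# at import time). It shows one common way the helpers defined above are
# combined; `MyModel`, the memory budgets and the checkpoint path are
# hypothetical placeholders, not values taken from this library.
#
#     import torch
#     from accelerate import init_empty_weights
#
#     class MyModel(torch.nn.Module):
#         def __init__(self):
#             super().__init__()
#             self.block1 = torch.nn.Linear(1024, 1024)
#             self.block2 = torch.nn.Linear(1024, 1024)
#
#         def forward(self, x):
#             return self.block2(self.block1(x))
#
#     # Build the skeleton on the meta device (no real memory is allocated),
#     # then let `infer_auto_device_map` split it given per-device budgets.
#     with init_empty_weights():
#         model = MyModel()
#     device_map = infer_auto_device_map(
#         model,
#         max_memory={0: "2GiB", "cpu": "8GiB"},
#         no_split_module_classes=["Linear"],
#     )
#     # Materialize the weights shard by shard on their target devices.
#     load_checkpoint_in_model(model, "path/to/checkpoint", device_map=device_map)
# ---------------------------------------------------------------------------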
accelerate/src/accelerate/utils/modeling.py/0
{ "file_path": "accelerate/src/accelerate/utils/modeling.py", "repo_id": "accelerate", "token_count": 34335 }
7
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import unittest from pathlib import Path from unittest.mock import patch import torch from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError from accelerate.commands.config.config_args import BaseConfig, ClusterConfig, SageMakerConfig from accelerate.commands.estimate import estimate_command, estimate_command_parser, gather_data from accelerate.commands.launch import _validate_launch_command, launch_command_parser from accelerate.test_utils import execute_subprocess_async from accelerate.test_utils.testing import ( DEFAULT_LAUNCH_COMMAND, get_launch_command, path_in_accelerate_package, require_multi_device, require_timm, require_transformers, run_command, ) from accelerate.utils import patch_environment from accelerate.utils.launch import prepare_simple_launcher_cmd_env class AccelerateLauncherTester(unittest.TestCase): """ Test case for verifying the `accelerate launch` CLI operates correctly. If a `default_config.yaml` file is located in the cache it will temporarily move it for the duration of the tests. """ test_file_path = path_in_accelerate_package("test_utils", "scripts", "test_cli.py") notebook_launcher_path = path_in_accelerate_package("test_utils", "scripts", "test_notebook.py") config_folder = Path.home() / ".cache/huggingface/accelerate" config_file = "default_config.yaml" config_path = config_folder / config_file changed_path = config_folder / "_default_config.yaml" test_config_path = Path("tests/test_configs") @classmethod def setUpClass(cls): if cls.config_path.is_file(): cls.config_path.rename(cls.changed_path) @classmethod def tearDownClass(cls): if cls.changed_path.is_file(): cls.changed_path.rename(cls.config_path) def test_no_config(self): if torch.cuda.is_available() and (torch.cuda.device_count() > 1): cmd = get_launch_command(multi_gpu=True) else: cmd = DEFAULT_LAUNCH_COMMAND cmd.append(self.test_file_path) execute_subprocess_async(cmd, env=os.environ.copy()) def test_config_compatibility(self): for config in sorted(self.test_config_path.glob("**/*.yaml")): if "invalid" in str(config) or "mpi" in str(config): continue with self.subTest(config_file=config): cmd = get_launch_command(config_file=config) + [self.test_file_path] execute_subprocess_async(cmd) def test_invalid_keys(self): config_path = self.test_config_path / "invalid_keys.yaml" with self.assertRaises( RuntimeError, msg="The config file at 'invalid_keys.yaml' had unknown keys ('another_invalid_key', 'invalid_key')", ): cmd = get_launch_command(config_file=config_path) + [self.test_file_path] execute_subprocess_async(cmd) def test_accelerate_test(self): execute_subprocess_async(["accelerate", "test"]) @require_multi_device def test_notebook_launcher(self): """ This test checks a variety of situations and scenarios with the `notebook_launcher` """ cmd = ["python", self.notebook_launcher_path] with patch_environment(omp_num_threads=1, accelerate_num_processes=2): run_command(cmd) def 
test_mpi_multicpu_config_cmd(self): """ Parses a launch command with a test file and the 0_28_0_mpi.yaml config. Tests getting the command and environment vars and verifies the mpirun command arg values. """ mpi_config_path = str(self.test_config_path / "0_28_0_mpi.yaml") test_file_arg = "--cpu" with patch("sys.argv", ["accelerate", str(self.test_file_path), test_file_arg]): parser = launch_command_parser() args = parser.parse_args() args.config_file = mpi_config_path args, _, _ = _validate_launch_command(args) # Mock out the check for mpirun version to simulate Intel MPI with patch("accelerate.utils.launch.which", return_value=True): with patch("accelerate.utils.launch.subprocess.check_output", return_value=b"Intel MPI"): cmd, _ = prepare_simple_launcher_cmd_env(args) # Verify the mpirun command args expected_mpirun_cmd = ["mpirun", "-f", "/home/user/hostfile", "-ppn", "4", "-n", "16"] self.assertGreater(len(cmd), len(expected_mpirun_cmd)) generated_mpirun_cmd = cmd[0 : len(expected_mpirun_cmd)] self.assertEqual(expected_mpirun_cmd, generated_mpirun_cmd) # Verify the python script and args in the mpirun command python_script_cmd = cmd[len(expected_mpirun_cmd) :] self.assertEqual(len(python_script_cmd), 3) self.assertEqual(python_script_cmd[1], str(self.test_file_path)) self.assertEqual(python_script_cmd[2], test_file_arg) class LaunchArgTester(unittest.TestCase): """ Test cases revolving around the CLI wrappers """ parser = launch_command_parser() def test_hyphen(self): # Try a little from each cluster args = ["--config-file", "test.yaml", "test.py"] result = self.parser.parse_args(args) assert result.config_file == "test.yaml" assert result.multi_gpu is False args = ["--multi-gpu", "--num-processes", "4", "test.py"] result = self.parser.parse_args(args) assert result.multi_gpu is True assert result.num_processes == 4 # And use a mix args = ["--multi-gpu", "--use-deepspeed", "--use-fsdp", "--num_processes", "4", "test.py"] result = self.parser.parse_args(args) assert result.multi_gpu is True assert result.use_deepspeed is True assert result.use_fsdp is True assert result.num_processes == 4 def test_underscore(self): # Try a little from each cluster args = ["--config_file", "test.yaml", "test.py"] result = self.parser.parse_args(args) assert result.config_file == "test.yaml" args = ["--multi_gpu", "--num_processes", "4", "test.py"] result = self.parser.parse_args(args) assert result.multi_gpu is True assert result.num_processes == 4 # And use a mix args = ["--multi_gpu", "--use_deepspeed", "--use_fsdp", "--num-processes", "4", "test.py"] result = self.parser.parse_args(args) assert result.multi_gpu is True assert result.use_deepspeed is True assert result.use_fsdp is True assert result.num_processes == 4 def test_duplicate_entities(self): help_return = self.parser.format_help() args = self.parser.parse_args(["test.py"]) for arg in args.__dict__: if "_" in arg: bad_arg = f'--{arg.replace("_", "-")}' # Need an exception for `num-processes` since it's in the docstring if bad_arg == "--num-processes": assert help_return.count(bad_arg) == 1, f"Found {bad_arg} in `accelerate launch -h`" else: assert bad_arg not in help_return, f"Found {bad_arg} in `accelerate launch -h`" class ClusterConfigTester(unittest.TestCase): """ Test case for verifying the config dataclasses work """ def test_base_config(self): # Tests that all the dataclasses can be initialized config = BaseConfig( compute_environment="LOCAL_MACHINE", distributed_type="NO", mixed_precision="fp16", debug=False, use_cpu=False, ) assert 
config.compute_environment == "LOCAL_MACHINE" assert config.distributed_type == "NO" assert config.mixed_precision == "fp16" assert config.debug is False def test_cluster_config(self): # First normally config = ClusterConfig( compute_environment="LOCAL_MACHINE", distributed_type="NO", mixed_precision="fp16", num_processes=2, debug=False, use_cpu=False, ) assert config.compute_environment == "LOCAL_MACHINE" assert config.distributed_type == "NO" assert config.mixed_precision == "fp16" assert config.debug is False # Then check with other compute environments config = ClusterConfig( compute_environment="LOCAL_MACHINE", distributed_type="MULTI_GPU", mixed_precision="fp16", debug=False, num_processes=2, enable_cpu_affinity=True, use_cpu=False, ) assert config.distributed_type == "MULTI_GPU" assert config.num_processes == 2 assert config.enable_cpu_affinity is True def test_sagemaker_config(self): config = SageMakerConfig( compute_environment="AMAZON_SAGEMAKER", distributed_type="NO", mixed_precision="fp16", debug=False, use_cpu=False, ec2_instance_type="MY_TYPE", iam_role_name="MY_ROLE", ) assert config.compute_environment == "AMAZON_SAGEMAKER" assert config.ec2_instance_type == "MY_TYPE" assert config.iam_role_name == "MY_ROLE" class TpuConfigTester(unittest.TestCase): """ Test case for verifying the `accelerate tpu-config` CLI passes the right `gcloud` command. """ tpu_name = "test-tpu" tpu_zone = "us-central1-a" command = "ls" cmd = ["accelerate", "tpu-config"] base_output = "cd /usr/share" command_file = "tests/test_samples/test_command_file.sh" gcloud = "Running gcloud compute tpus tpu-vm ssh" def test_base(self): output = run_command( self.cmd + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"], return_stdout=True, ) assert f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" in output def test_base_backward_compatibility(self): output = run_command( self.cmd + [ "--config_file", "tests/test_configs/0_12_0.yaml", "--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug", ], return_stdout=True, ) assert f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" in output def test_with_config_file(self): output = run_command( self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True ) assert ( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' in output ) def test_with_config_file_and_command(self): output = run_command( self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"], return_stdout=True, ) assert f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" in output def test_with_config_file_and_multiple_command(self): output = run_command( self.cmd + [ "--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--command", 'echo "Hello World"', "--debug", ], return_stdout=True, ) assert ( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all' in output ) def test_with_config_file_and_command_file(self): output = run_command( self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"], return_stdout=True, ) assert ( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo 
"hello world"; echo "this is a second command" --worker all' in output ) def test_with_config_file_and_command_file_backward_compatibility(self): output = run_command( self.cmd + [ "--config_file", "tests/test_configs/0_12_0.yaml", "--command_file", self.command_file, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug", ], return_stdout=True, ) assert ( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' in output ) def test_accelerate_install(self): output = run_command( self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"], return_stdout=True, ) assert ( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all' in output ) def test_accelerate_install_version(self): output = run_command( self.cmd + [ "--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--accelerate_version", "12.0.0", "--debug", ], return_stdout=True, ) assert ( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all' in output ) class ModelEstimatorTester(unittest.TestCase): """ Test case for checking the output of `accelerate estimate-memory` is correct. - Uses `estimate_command` when trying to catch raised errors - Uses `gather_data` when just verifying the calculations are correct """ parser = estimate_command_parser() def test_invalid_model_name(self): with self.assertRaises( RepositoryNotFoundError, msg="Repo for model `somebrokenname` does not exist on the Hub" ): args = self.parser.parse_args(["somebrokenname"]) estimate_command(args) @require_timm def test_invalid_model_name_timm(self): with self.assertRaises(RuntimeError, msg="Tried to load `muellerzr/dummy` with `timm` but"): args = self.parser.parse_args(["muellerzr/dummy", "--library_name", "timm"]) estimate_command(args) @require_transformers def test_invalid_model_name_transformers(self): with self.assertRaises(RuntimeError, msg="Tried to load `muellerzr/dummy` with `transformers` but"): args = self.parser.parse_args(["muellerzr/dummy", "--library_name", "transformers"]) estimate_command(args) def test_no_metadata(self): with self.assertRaises( ValueError, msg="Model `muellerzr/dummy` does not have any library metadata on the Hub" ): args = self.parser.parse_args(["muellerzr/dummy"]) estimate_command(args) def test_gated(self): with self.assertRaises(GatedRepoError, msg="Repo for model `meta-llama/Llama-2-7b-hf` is gated"): args = self.parser.parse_args(["meta-llama/Llama-2-7b-hf"]) with patch_environment(hf_hub_disable_implicit_token="1"): estimate_command(args) @require_transformers def test_remote_code(self): # Also tests that custom `Auto` classes work args = self.parser.parse_args(["hf-internal-testing/test_dynamic_model"]) with self.assertRaises(ValueError, msg="--trust_remote_code"): gather_data(args) # Verify it works with the flag args = self.parser.parse_args(["hf-internal-testing/test_dynamic_model", "--trust_remote_code"]) gather_data(args) @require_transformers def test_explicit_dtypes(self): args = self.parser.parse_args(["bert-base-cased", "--dtypes", "float32", "float16"]) output = gather_data(args) # The largest layer and total size of the model in bytes largest_layer, total_size = 89075712, 433249280 # Check that full precision -> int4 is 
calculating correctly assert len(output) == 2, f"Output was missing a precision, expected 2 but received {len(output)}" for i, factor in enumerate([1, 2]): precision = 32 // factor precision_str = f"float{precision}" largest_layer_estimate = largest_layer / factor total_size_estimate = total_size / factor total_training_size_estimate = total_size_estimate * 4 assert precision_str == output[i][0], f"Output is missing precision `{precision_str}`" assert ( largest_layer_estimate == output[i][1] ), f"Calculation for largest layer size in `{precision_str}` is incorrect." assert ( total_size_estimate == output[i][2] ), f"Calculation for total size in `{precision_str}` is incorrect." assert ( total_training_size_estimate == output[i][3] ), f"Calculation for total training size in `{precision_str}` is incorrect." @require_transformers def test_transformers_model(self): args = self.parser.parse_args(["bert-base-cased", "--dtypes", "float32"]) output = gather_data(args) # The largest layer and total size of the model in bytes largest_layer, total_size = 89075712, 433249280 assert ( largest_layer == output[0][1] ), f"Calculation for largest layer size in `fp32` is incorrect, expected {largest_layer} but received {output[0][1]}" assert ( total_size == output[0][2] ), f"Calculation for total size in `fp32` is incorrect, expected {total_size} but received {output[0][2]}" @require_transformers def test_no_split_modules(self): # idefics-80b-instruct has ["IdeficsDecoderLayer", "IdeficsGatedCrossAttentionLayer"] args = self.parser.parse_args(["HuggingFaceM4/idefics-80b-instruct", "--dtypes", "float32"]) output = gather_data(args) # without factoring in `no_split` modules, the largest layer is 721420288 bytes assert output[0][1] != 721420288, "Largest layer calculation incorrect, did not factor in `no_split` modules." # the real answer is 3240165632 bytes assert output[0][1] == 3240165632 @require_timm def test_timm_model(self): args = self.parser.parse_args(["timm/resnet50.a1_in1k", "--library_name", "timm"]) output = gather_data(args) # The largest layer and total size of the model in bytes largest_layer, total_size = 9437184, 102441032 assert ( largest_layer == output[0][1] ), f"Calculation for largest layer size in `fp32` is incorrect, expected {largest_layer} but received {output[0][1]}" assert ( total_size == output[0][2] ), f"Calculation for total size in `fp32` is incorrect, expected {total_size} but received {output[0][2]}"
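# ---------------------------------------------------------------------------
# Note on the expected values used above (a sketch, not additional test
# logic): the estimator exercised through `gather_data` is the same one that
# backs the CLI, e.g.
#
#     accelerate estimate-memory bert-base-cased --dtypes float32 float16
#
# and the arithmetic in `test_explicit_dtypes` follows from byte widths:
# float32 weights take 4 bytes per parameter, float16 half of that, while the
# training-size column is taken as roughly four times the model size (a
# common rule of thumb covering weights, gradients and Adam optimizer states).
# ---------------------------------------------------------------------------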
accelerate/tests/test_cli.py/0
{ "file_path": "accelerate/tests/test_cli.py", "repo_id": "accelerate", "token_count": 9037 }
8
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import unittest import torch from accelerate import Accelerator from accelerate.big_modeling import dispatch_model from accelerate.test_utils import ( DEFAULT_LAUNCH_COMMAND, assert_exception, device_count, execute_subprocess_async, get_launch_command, path_in_accelerate_package, require_huggingface_suite, require_multi_device, require_multi_gpu, require_non_torch_xla, require_pippy, ) from accelerate.utils import patch_environment class MultiDeviceTester(unittest.TestCase): test_file_path = path_in_accelerate_package("test_utils", "scripts", "test_script.py") data_loop_file_path = path_in_accelerate_package("test_utils", "scripts", "test_distributed_data_loop.py") operation_file_path = path_in_accelerate_package("test_utils", "scripts", "test_ops.py") pippy_file_path = path_in_accelerate_package("test_utils", "scripts", "external_deps", "test_pippy.py") @require_multi_device def test_multi_device(self): print(f"Found {device_count} devices.") cmd = DEFAULT_LAUNCH_COMMAND + [self.test_file_path] with patch_environment(omp_num_threads=1): execute_subprocess_async(cmd) @require_multi_device def test_multi_device_ops(self): print(f"Found {device_count} devices.") cmd = DEFAULT_LAUNCH_COMMAND + [self.operation_file_path] with patch_environment(omp_num_threads=1): execute_subprocess_async(cmd) @require_multi_device def test_pad_across_processes(self): print(f"Found {device_count} devices.") cmd = DEFAULT_LAUNCH_COMMAND + [inspect.getfile(self.__class__)] with patch_environment(omp_num_threads=1): execute_subprocess_async(cmd) @require_non_torch_xla @require_multi_gpu def test_distributed_data_loop(self): """ This TestCase checks the behaviour that occurs during distributed training or evaluation, when the batch size does not evenly divide the dataset size. """ print(f"Found {device_count} devices, using 2 devices only") cmd = get_launch_command(num_processes=2) + [self.data_loop_file_path] with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"): execute_subprocess_async(cmd) @require_multi_gpu @require_pippy @require_huggingface_suite def test_pippy(self): """ Checks the integration with the pippy framework """ print(f"Found {device_count} devices") cmd = get_launch_command(multi_gpu=True, num_processes=device_count) + [self.pippy_file_path] with patch_environment(omp_num_threads=1): execute_subprocess_async(cmd) if __name__ == "__main__": accelerator = Accelerator() shape = (accelerator.state.process_index + 2, 10) tensor = torch.randint(0, 10, shape).to(accelerator.device) error_msg = "" tensor1 = accelerator.pad_across_processes(tensor) if tensor1.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." 
if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." tensor2 = accelerator.pad_across_processes(tensor, pad_first=True) if tensor2.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0." index = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensor2[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensor2[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg) # Check device_map accelerator.print("Test `device_map` cannot be prepared.") class ModelForTest(torch.nn.Module): def __init__(self): super().__init__() self.linear1 = torch.nn.Linear(3, 4) self.batchnorm = torch.nn.BatchNorm1d(4) self.linear2 = torch.nn.Linear(4, 5) def forward(self, x): return self.linear2(self.batchnorm(self.linear1(x))) device_map = {"linear1": 0, "batchnorm": "cpu", "linear2": 1} model = ModelForTest() dispatch_model(model, device_map=device_map) with assert_exception(ValueError, "You can't train a model that has been loaded with"): model = accelerator.prepare_model(model)
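# ---------------------------------------------------------------------------
# Worked example of what the padding checks above verify (hypothetical
# two-process run): rank 0 builds a tensor of shape (2, 10) and rank 1 a
# tensor of shape (3, 10), because `shape = (process_index + 2, 10)`.
# `pad_across_processes` pads every rank up to the largest first dimension,
# so both ranks end with shape (3, 10); by default the extra rows are filled
# with 0 and appended at the end, while `pad_first=True` prepends them, which
# is what the index arithmetic above accounts for.
# ---------------------------------------------------------------------------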
accelerate/tests/test_multigpu.py/0
{ "file_path": "accelerate/tests/test_multigpu.py", "repo_id": "accelerate", "token_count": 2091 }
9
#!/bin/bash #SBATCH --ntasks-per-node=1 #SBATCH --exclusive #SBATCH --gres=gpu:8 #SBATCH --partition=hopper-prod # Adjust this for your cluster #SBATCH --output=/fsx/h4/logs/%x-%j.out # Adjust this for your cluster #SBATCH --err=/fsx/h4/logs/%x-%j.err # Adjust this for your cluster set -x -e source ~/.bashrc conda activate handbook echo "START TIME: $(date)" MODEL=$1 TASK=$2 PRECISION=$3 ACCELERATOR=$4 OPTIONAL_ARGS=$5 # Training setup NUM_NODES=$SLURM_NNODES GPUS_PER_NODE=8 WORLD_SIZE=$(($NUM_NODES*$GPUS_PER_NODE)) # Due to conflicts between Accelerate's DeepSpeed configs and Transformers' TrainingArguments, we need to parse the gradient accumulation steps from the config file to ensure they match CONFIG_FILE=recipes/$MODEL/$TASK/config_$PRECISION.yaml GRAD_ACC_STEPS=$(grep 'gradient_accumulation_steps' $CONFIG_FILE | awk '{print $2}') # Split the string into individual arguments IFS=' ' read -ra ARGS <<< "$OPTIONAL_ARGS" # Loop through the arguments and find the one with "--gradient_accumulation_steps" for arg in "${ARGS[@]}"; do if [[ "$arg" == "--gradient_accumulation_steps="* ]]; then # Extract the value after the equals sign GRAD_ACC_STEPS="${arg#*=}" break # Exit the loop once we find the desired argument fi done echo "Gradient accumulation steps: $GRAD_ACC_STEPS" # so processes know who to talk to MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) MASTER_PORT=6000 export CMD=" \ scripts/run_$TASK.py $CONFIG_FILE $OPTIONAL_ARGS " export LAUNCHER="HF_HUB_ENABLE_HF_TRANSFER=1 ACCELERATE_LOG_LEVEL=info TRANSFORMERS_VERBOSITY=info accelerate launch \ --config_file recipes/accelerate_configs/$ACCELERATOR.yaml \ --gradient_accumulation_steps $GRAD_ACC_STEPS \ --num_machines $NUM_NODES \ --num_processes $WORLD_SIZE \ --main_process_ip $MASTER_ADDR \ --main_process_port $MASTER_PORT \ --machine_rank \$SLURM_PROCID \ --rdzv_conf "rdzv_backend=c10d,rdzv_endpoint=$MASTER_ADDR:$MASTER_PORT" \ --max_restarts 1 \ --role \$(hostname -s): \ --tee 3 \ " # force crashing on nccl issues like hanging broadcast export NCCL_ASYNC_ERROR_HANDLING=1 # export NCCL_DEBUG=INFO # export NCCL_DEBUG_SUBSYS=COLL # export NCCL_SOCKET_NTHREADS=1 # export NCCL_NSOCKS_PERTHREAD=1 # export CUDA_LAUNCH_BLOCKING=1 # Specific configuration optimized for the Hugging Face Compute Cluster # Be ye warned this may not work on other clusters! module load cuda/12.1 # srun error handling: # --wait=60: wait 60 sec after the first task terminates before terminating all remaining tasks # --kill-on-bad-exit=1: terminate a step if any task exits with a non-zero exit code SRUN_ARGS=" \ --wait=60 \ --kill-on-bad-exit=1 \ " clear; srun $SRUN_ARGS --jobid $SLURM_JOB_ID bash -c "$LAUNCHER --role \$SLURMD_NODENAME: $CMD" 2>&1 echo "END TIME: $(date)"
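# ---------------------------------------------------------------------------
# Example submission (illustrative; the recipe and accelerate config names
# must exist under recipes/ for the job to run). The five positional
# arguments map to MODEL, TASK, PRECISION, ACCELERATOR and OPTIONAL_ARGS
# above; OPTIONAL_ARGS is a single quoted string of extra hyperparameters
# that is appended to the training command (and parsed above to forward
# --gradient_accumulation_steps to accelerate):
#
#   sbatch --job-name=handbook_sft --nodes=1 recipes/launch.slurm \
#       zephyr-7b-beta sft full deepspeed_zero3 \
#       '--per_device_train_batch_size=8 --gradient_accumulation_steps=4'
# ---------------------------------------------------------------------------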
alignment-handbook/recipes/launch.slurm/0
{ "file_path": "alignment-handbook/recipes/launch.slurm", "repo_id": "alignment-handbook", "token_count": 1135 }
10
# Scripts to Train and Evaluate Chat Models ## Fine-tuning In the handbook, we provide three main ways to align LLMs for chat: - Full fine-tuning on a multi-GPU machine with DeepSpeed ZeRO-3 (tested on an 8 x A100 (80GB) node). - LoRA or QLoRA fine-tuning on a single consumer 24GB GPU (tested on an RTX 4090). - LoRA fine-tuning on a multi-GPU machine with DeepSpeed ZeRO-3 (tested on a 2 x A100s (80GB)). In practice, we find comparable performance for both full and QLoRA fine-tuning, with the latter having the advantage of producing small adapter weights that are fast to upload and download from the Hugging Face Hub. Here are the general commands to fine-tune your models: ```shell # Full training with ZeRO-3 on 8 GPUs ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/deepspeed_zero3.yaml scripts/run_{task}.py recipes/{model_name}/{task}/config_full.yaml # QLoRA 4-bit training on a single GPU ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/multi_gpu.yaml --num_processes=1 scripts/run_{task}.py recipes/{model_name}/{task}/config_qlora.yaml # LoRA training on a single GPU ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/multi_gpu.yaml --num_processes=1 scripts/run_{task}.py recipes/{model_name}/{task}/config_qlora.yaml --load_in_4bit=false # LoRA training with ZeRO-3 on two or more GPUs ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/deepspeed_zero3.yaml --num_processes={num_gpus} scripts/run_{task}.py recipes/{model_name}/{task}/config_qlora.yaml --load_in_4bit=false ``` Here `{task}` refers to the type of training you wish to run. Currently the following tasks are supported: continued pretraining `cpt`, supervised finetuning `sft`, and direct preference optimisation `dpo`. Note that `cpt` is only present in the `gpt-nl` example recipe. {model_name}` refers to the choice of a recipe in the `recipes` directory. For example, to replicate Zephyr-7B-β you can run: ```shell # Step 1 - train SFT policy ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/deepspeed_zero3.yaml scripts/run_sft.py recipes/zephyr-7b-beta/sft/config_full.yaml # Step 2 - align with DPO ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/deepspeed_zero3.yaml scripts/run_dpo.py recipes/zephyr-7b-beta/dpo/config_full.yaml ``` **💡 Tip:** If you scale up/down the number of GPUs, we recommend also scaling up the per-device batch size or number of gradient accumulation steps to keep the global batch size constant (and thus replicate our results). By default, these scripts will push each model to your Hugging Face Hub username, i.e. `{username}/{model_name}-{task}`. You can override the parameters in each YAML config by appending them to the command as follows: ```shell # Change batch size, number of epochs etc ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/deepspeed_zero3.yaml scripts/run_{task}.py recipes/{model_name}/{task}/config_full.yaml --per_device_train_batch_size=42 --num_train_epochs=5 ``` ## Logging with Weights and Biases By default all training metrics are logged with TensorBoard. If you have a [Weights and Biases](https://wandb.ai/site) account and are logged in, you can view the training metrics by appending `--report_to=wandb`, e.g. 
```shell ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/deepspeed_zero3.yaml scripts/run_{task}.py recipes/{model_name}/{task}/config_full.yaml --report_to=wandb ``` ## Launching jobs on a Slurm cluster If you have access to a Slurm cluster, we provide a `recipes/launch.slurm` script that will automatically queue training jobs for you. Here's how you can use it: ```shell sbatch --job-name=handbook_{task} --nodes=1 recipes/launch.slurm {model_name} {task} {precision} {accelerator} ``` Here `{model_name}` and `{task}` are defined as above, while `{precision}` refers to the type of training (`full` vs `qlora`) and `{accelerator}` refers to the choice of 🤗 Accelerate config in `recipes/accelerate_configs`. If you wish to override the default config parameters, you can provide them by appending a space-separated string like `'--arg1=value1 --arg2=value2'. Here's a concrete example to run SFT on 1 node of 8 GPUs: ```shell # Launch on Slurm and override default hyperparameters sbatch --job-name=handbook_sft --nodes=1 recipes/launch.slurm zephyr-7b-beta sft full deepspeed_zero3 '--per_device_train_batch_size=42 --num_train_epochs=5' ``` You can scale the number of nodes by increasing the `--nodes` flag. **⚠️ Note:** the configuration in `recipes/launch.slurm` is optimised for the Hugging Face Compute Cluster and may require tweaking to be adapted to your own compute nodes. ## Fine-tuning on your datasets Under the hood, each training script uses the `get_datasets()` function which allows one to easily combine multiple datasets with varying proportions. For instance, this is how one can specify multiple datasets and which splits to combine in one of the YAML configs: ```yaml datasets_mixer: dataset_1: 0.5 # Use 50% of the training examples dataset_2: 0.66 # Use 66% of the training examples dataset_3: 0.10 # Use 10% of the training examples dataset_splits: - train_xxx # The training splits to mix - test_xxx # The test splits to mix ``` If you want to fine-tune on your datasets, the main thing to keep in mind is how the chat templates are applied to the dataset blend. Since each task (SFT, DPO, etc), requires a different format, we assume the datasets have the following columns: **SFT** * `messages`: A list of `dicts` in the form `{"role": "{role}", "content": {content}}`. * See [ultrachat_200k](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k) for an example. **DPO** * `chosen`: A list of `dicts` in the form `{"role": "{role}", "content": {content}}` corresponding to the preferred dialogue. * `rejected`: A list of `dicts` in the form `{"role": "{role}", "content": {content}}` corresponding to the dispreferred dialogue. * See [ultrafeedback_binarized](https://huggingface.co/datasets/HuggingFaceH4/ultrafeedback_binarized) for an example. We also find it useful to include dedicated splits per task in our datasets, so e.g. we have: * `{train,test}_sft`: Splits for SFT training. * `{train,test}_gen`: Splits for generation ranking like rejection sampling or PPO. * `{train,test}_prefs`: Splits for preference modelling, like reward modelling or DPO. If you format your dataset in the same way, our training scripts should work out of the box! ## Evaluating chat models We recommend benchmarking chat models on: * [MT-Bench](https://huggingface.co/spaces/lmsys/mt-bench): a multi-turn benchmark spanning 80 dialogues and 10 domains. 
* [AlpacaEval](https://github.com/tatsu-lab/alpaca_eval): a single-turn benchmark which evaluates the helpfulness of chat and instruct models against `text-davinci-003`. For both benchmarks, we have added support for the [Zephyr chat template](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full/blob/ac6e600eefcce74f5e8bae1035d4f66019e93190/tokenizer_config.json#L30) (which is the default produced by our scripts), so you can evaluate models produced by our scripts as follows: **MT-Bench** * Follow the installation instructions [here](https://github.com/lm-sys/FastChat/tree/main/fastchat/llm_judge) * Make sure the word `zephyr` exists in the `--model-path` argument when generating the model responses [here](https://github.com/lm-sys/FastChat/tree/main/fastchat/llm_judge#step-1-generate-model-answers-to-mt-bench-questions). This will ensure the correct chat template is loaded. For example, the following model name is valid: `--model-path {hub_username}/my-baby-zephyr` * Generate the model responses and GPT-4 rankings. **AlpacaEval** * Follow the installation instructions [here](https://github.com/tatsu-lab/alpaca_eval#quick-start) * Copy-paste the [config](https://github.com/tatsu-lab/alpaca_eval/blob/main/src/alpaca_eval/models_configs/zephyr-7b-beta/configs.yaml) for `zephyr-7b-beta` and place it in the `model_configs` directory under `{your_zephyr_model}`. * Next, update the [config name](https://github.com/tatsu-lab/alpaca_eval/blob/2daa6e11b194653043ca74f735728dc068e04aae/src/alpaca_eval/models_configs/zephyr-7b-beta/configs.yaml#L1) and [Hub model ID](https://github.com/tatsu-lab/alpaca_eval/blob/2daa6e11b194653043ca74f735728dc068e04aae/src/alpaca_eval/models_configs/zephyr-7b-beta/configs.yaml#L5) to match your model name. * Follow the steps to evaluate your model [here](https://github.com/tatsu-lab/alpaca_eval/tree/main#evaluating-a-model). Note that MT-Bench and AlpacaEval rely on LLMs like GPT-4 to judge the quality of the model responses, and thus the ranking exhibit various biases including a preference for models distilled from GPTs. For that reason, we also recommend submitting your best models for human evaluation in: * [Chatbot Arena](https://chat.lmsys.org): a live, human evaluation of chat models in head-to-head comparisons.
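Returning to the `get_datasets()` helper described in the fine-tuning section above, here is a minimal programmatic sketch. It mixes two of the small public testing datasets used in the handbook's own test suite; the fractions follow the same semantics as the mixer block of a recipe config, and the dataset IDs are interchangeable placeholders:

```python
from alignment import DataArguments, get_datasets

# Mix 50% of one dataset's training split with 30% of another's; test splits
# are always concatenated in full.
data_args = DataArguments(
    dataset_mixer={
        "HuggingFaceH4/testing_alpaca_small": 0.5,
        "HuggingFaceH4/testing_self_instruct_small": 0.3,
    }
)
raw_datasets = get_datasets(data_args)
print({split: len(ds) for split, ds in raw_datasets.items()})
```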
alignment-handbook/scripts/README.md/0
{ "file_path": "alignment-handbook/scripts/README.md", "repo_id": "alignment-handbook", "token_count": 2967 }
11
# coding=utf-8 # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from copy import deepcopy import pytest from datasets import Dataset from transformers import AutoTokenizer from alignment import DataArguments, ModelArguments, apply_chat_template, get_datasets, get_tokenizer from alignment.data import maybe_insert_system_message class GetDatasetsTest(unittest.TestCase): """Each of these test datasets has 100 examples""" def test_loading_data_args(self): dataset_mixer = { "HuggingFaceH4/testing_alpaca_small": 0.5, "HuggingFaceH4/testing_self_instruct_small": 0.3, "HuggingFaceH4/testing_codealpaca_small": 0.2, } data_args = DataArguments(dataset_mixer=dataset_mixer) datasets = get_datasets(data_args) self.assertEqual(len(datasets["train"]), 100) self.assertEqual(len(datasets["test"]), 300) def test_loading_data_dict(self): dataset_mixer = { "HuggingFaceH4/testing_alpaca_small": 0.5, "HuggingFaceH4/testing_self_instruct_small": 0.3, "HuggingFaceH4/testing_codealpaca_small": 0.2, } datasets = get_datasets(dataset_mixer) self.assertEqual(len(datasets["train"]), 100) self.assertEqual(len(datasets["test"]), 300) def test_loading_with_unit_fractions(self): dataset_mixer = { "HuggingFaceH4/testing_alpaca_small": 1.0, "HuggingFaceH4/testing_self_instruct_small": 1.0, "HuggingFaceH4/testing_codealpaca_small": 1.0, } datasets = get_datasets(dataset_mixer) self.assertEqual(len(datasets["train"]), 300) self.assertEqual(len(datasets["test"]), 300) def test_loading_with_fractions_greater_than_unity(self): dataset_mixer = { "HuggingFaceH4/testing_alpaca_small": 0.7, "HuggingFaceH4/testing_self_instruct_small": 0.4, } datasets = get_datasets(dataset_mixer) self.assertEqual(len(datasets["train"]), 70 + 40) self.assertEqual(len(datasets["test"]), 200) def test_loading_fails_with_negative_fractions(self): dataset_mixer = { "HuggingFaceH4/testing_alpaca_small": 0.7, "HuggingFaceH4/testing_self_instruct_small": -0.3, } with pytest.raises(ValueError, match=r"Dataset fractions cannot be negative."): get_datasets(dataset_mixer) def test_loading_single_split_with_unit_fractions(self): dataset_mixer = { "HuggingFaceH4/testing_alpaca_small": 1.0, } datasets = get_datasets(dataset_mixer, splits=["test"]) self.assertEqual(len(datasets["test"]), 100) self.assertRaises(KeyError, lambda: datasets["train"]) class ApplyChatTemplateTest(unittest.TestCase): def setUp(self): model_args = ModelArguments(model_name_or_path="HuggingFaceH4/zephyr-7b-alpha") data_args = DataArguments() self.tokenizer = get_tokenizer(model_args, data_args) self.dataset = Dataset.from_dict( { "prompt": ["Hello!"], "messages": [ [ {"role": "system", "content": "You are a happy chatbot"}, {"role": "user", "content": "Hello!"}, {"role": "assistant", "content": "Bonjour!"}, {"role": "user", "content": "How are you?"}, {"role": "assistant", "content": "I am doing well, thanks!"}, ] ], "chosen": [ [ {"role": "system", "content": "You are a happy chatbot"}, {"role": "user", "content": "Hello!"}, 
{"role": "assistant", "content": "Bonjour!"}, {"role": "user", "content": "How are you?"}, {"role": "assistant", "content": "I am doing well, thanks!"}, ] ], "rejected": [ [ {"role": "system", "content": "You are a happy chatbot"}, {"role": "user", "content": "Hello!"}, {"role": "assistant", "content": "Bonjour!"}, {"role": "user", "content": "How are you?"}, {"role": "assistant", "content": "Not so good tbh"}, ] ], } ) def test_maybe_insert_system_message(self): # does not accept system prompt mistral_tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2") # accepts system prompt. use codellama since it has no HF token reqiurement llama_tokenizer = AutoTokenizer.from_pretrained("codellama/CodeLlama-7b-hf") messages_sys_excl = [{"role": "user", "content": "Tell me a joke."}] messages_sys_incl = [{"role": "system", "content": ""}, {"role": "user", "content": "Tell me a joke."}] mistral_messages = deepcopy(messages_sys_excl) llama_messages = deepcopy(messages_sys_excl) maybe_insert_system_message(mistral_messages, mistral_tokenizer) maybe_insert_system_message(llama_messages, llama_tokenizer) # output from mistral should not have a system message, output from llama should self.assertEqual(mistral_messages, messages_sys_excl) self.assertEqual(llama_messages, messages_sys_incl) def test_sft(self): dataset = self.dataset.map( apply_chat_template, fn_kwargs={"tokenizer": self.tokenizer, "task": "sft"}, remove_columns=self.dataset.column_names, ) self.assertDictEqual( dataset[0], { "text": "<|system|>\nYou are a happy chatbot</s>\n<|user|>\nHello!</s>\n<|assistant|>\nBonjour!</s>\n<|user|>\nHow are you?</s>\n<|assistant|>\nI am doing well, thanks!</s>\n" }, ) def test_generation(self): # Remove last turn from messages dataset = self.dataset.map(lambda x: {"messages": x["messages"][:-1]}) dataset = dataset.map( apply_chat_template, fn_kwargs={"tokenizer": self.tokenizer, "task": "generation"}, remove_columns=self.dataset.column_names, ) self.assertDictEqual( dataset[0], { "text": "<|system|>\nYou are a happy chatbot</s>\n<|user|>\nHello!</s>\n<|assistant|>\nBonjour!</s>\n<|user|>\nHow are you?</s>\n<|assistant|>\n" }, ) def test_rm(self): dataset = self.dataset.map( apply_chat_template, fn_kwargs={"tokenizer": self.tokenizer, "task": "rm"}, remove_columns=self.dataset.column_names, ) self.assertDictEqual( dataset[0], { "text_chosen": "<|system|>\nYou are a happy chatbot</s>\n<|user|>\nHello!</s>\n<|assistant|>\nBonjour!</s>\n<|user|>\nHow are you?</s>\n<|assistant|>\nI am doing well, thanks!</s>\n", "text_rejected": "<|system|>\nYou are a happy chatbot</s>\n<|user|>\nHello!</s>\n<|assistant|>\nBonjour!</s>\n<|user|>\nHow are you?</s>\n<|assistant|>\nNot so good tbh</s>\n", }, ) def test_dpo(self): dataset = self.dataset.map( apply_chat_template, fn_kwargs={"tokenizer": self.tokenizer, "task": "dpo"}, remove_columns=self.dataset.column_names, ) self.assertDictEqual( dataset[0], { "text_prompt": "<|system|>\nYou are a happy chatbot</s>\n<|user|>\nHello!</s>\n<|assistant|>\nBonjour!</s>\n<|user|>\nHow are you?</s>\n", "text_chosen": "<|assistant|>\nI am doing well, thanks!</s>\n", "text_rejected": "<|assistant|>\nNot so good tbh</s>\n", }, )
alignment-handbook/tests/test_data.py/0
{ "file_path": "alignment-handbook/tests/test_data.py", "repo_id": "alignment-handbook", "token_count": 4201 }
12
[package] name = "candle-book" version.workspace = true edition.workspace = true description.workspace = true repository.workspace = true keywords.workspace = true categories.workspace = true license.workspace = true readme = "README.md" [dependencies] accelerate-src = { workspace = true, optional = true } candle = { workspace = true } candle-datasets = { workspace = true } candle-nn = { workspace = true } candle-transformers = { workspace = true } candle-flash-attn = { workspace = true, optional = true } safetensors = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } num-traits = { workspace = true } intel-mkl-src = { workspace = true, optional = true } cudarc = { workspace = true, optional = true } half = { workspace = true, optional = true } image = { workspace = true, optional = true } anyhow = { workspace = true } tokio = "1.29.1" [dev-dependencies] byteorder = { workspace = true } hf-hub = { workspace = true, features=["tokio"]} clap = { workspace = true } memmap2 = { workspace = true } rand = { workspace = true } tokenizers = { workspace = true, features = ["onig"] } tracing = { workspace = true } tracing-chrome = { workspace = true } tracing-subscriber = { workspace = true } wav = { workspace = true } # Necessary to disambiguate with tokio in wasm examples which are 1.28.1 parquet = { workspace = true } image = { workspace = true } [build-dependencies] anyhow = { workspace = true } [features] default = []
candle/candle-book/Cargo.toml/0
{ "file_path": "candle/candle-book/Cargo.toml", "repo_id": "candle", "token_count": 467 }
13
# Installation

**With Cuda support**:

1. First, make sure that Cuda is correctly installed.
- `nvcc --version` should print information about your Cuda compiler driver.
- `nvidia-smi --query-gpu=compute_cap --format=csv` should print your GPU's compute capability, e.g. something like:

```bash
compute_cap
8.9
```

You can also compile the Cuda kernels for a specific compute capability using the `CUDA_COMPUTE_CAP=<compute cap>` environment variable.

If any of the above commands errors out, please make sure to update your Cuda version.

2. Create a new app and add [`candle-core`](https://github.com/huggingface/candle/tree/main/candle-core) with Cuda support.

Start by creating a new cargo project:

```bash
cargo new myapp
cd myapp
```

Make sure to add the `candle-core` crate with the cuda feature:

```bash
cargo add --git https://github.com/huggingface/candle.git candle-core --features "cuda"
```

Run `cargo build` to make sure everything can be correctly built.

```bash
cargo build
```

**Without Cuda support**:

Create a new app and add [`candle-core`](https://github.com/huggingface/candle/tree/main/candle-core) as follows:

```bash
cargo new myapp
cd myapp
cargo add --git https://github.com/huggingface/candle.git candle-core
```

Finally, run `cargo build` to make sure everything can be correctly built.

```bash
cargo build
```

**With mkl support**

You can also enable the `mkl` feature, which can give faster inference on CPU. See [Using mkl](./advanced/mkl.md).
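As a quick sanity check that the dependency is wired up correctly, you can replace the generated `src/main.rs` with a minimal program. This is only a sketch based on the `candle-core` tensor API shown elsewhere in the book, not an official part of this guide:

```rust
use candle_core::{Device, Error, Tensor};

fn main() -> Result<(), Error> {
    // Build two small matrices on the CPU and multiply them.
    let a = Tensor::arange(0f32, 6f32, &Device::Cpu)?.reshape((2, 3))?;
    let b = Tensor::arange(0f32, 12f32, &Device::Cpu)?.reshape((3, 4))?;
    let c = a.matmul(&b)?;
    println!("{c}");
    Ok(())
}
```

If `cargo run` prints a 2x4 matrix, the installation is working.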
candle/candle-book/src/guide/installation.md/0
{ "file_path": "candle/candle-book/src/guide/installation.md", "repo_id": "candle", "token_count": 487 }
14
mod benchmarks;

use criterion::criterion_main;

criterion_main!(
    benchmarks::affine::benches,
    benchmarks::matmul::benches,
    benchmarks::random::benches,
    benchmarks::where_cond::benches
);
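Each `benches` group referenced above is produced by a `criterion_group!` invocation inside the corresponding module. As a rough illustration only (this is a hypothetical stand-in, not the repository's actual affine benchmark), such a module has roughly this shape:

```rust
use criterion::{black_box, criterion_group, Criterion};

// Stand-in benchmark body; the real modules benchmark candle tensor ops.
fn bench_affine(c: &mut Criterion) {
    c.bench_function("affine_scalar", |b| b.iter(|| black_box(2.0f32) * 3.0 + 1.0));
}

criterion_group!(benches, bench_affine);
```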
candle/candle-core/benches/bench_main.rs/0
{ "file_path": "candle/candle-core/benches/bench_main.rs", "repo_id": "candle", "token_count": 71 }
15
#![allow(clippy::excessive_precision)] // Code taken from https://github.com/statrs-dev/statrs //! Provides the [error](https://en.wikipedia.org/wiki/Error_function) and //! related functions mod evaluate { //! Provides functions that don't have a numerical solution and must //! be solved computationally (e.g. evaluation of a polynomial) /// evaluates a polynomial at `z` where `coeff` are the coeffecients /// to a polynomial of order `k` where `k` is the length of `coeff` and the /// coeffecient /// to the `k`th power is the `k`th element in coeff. E.g. [3,-1,2] equates to /// `2z^2 - z + 3` /// /// # Remarks /// /// Returns 0 for a 0 length coefficient slice pub fn polynomial(z: f64, coeff: &[f64]) -> f64 { let n = coeff.len(); if n == 0 { return 0.0; } let mut sum = *coeff.last().unwrap(); for c in coeff[0..n - 1].iter().rev() { sum = *c + z * sum; } sum } } use std::f64; /// `erf` calculates the error function at `x`. pub fn erf(x: f64) -> f64 { if x.is_nan() { f64::NAN } else if x >= 0.0 && x.is_infinite() { 1.0 } else if x <= 0.0 && x.is_infinite() { -1.0 } else if x == 0. { 0.0 } else { erf_impl(x, false) } } /// `erf_inv` calculates the inverse error function /// at `x`. pub fn erf_inv(x: f64) -> f64 { if x == 0.0 { 0.0 } else if x >= 1.0 { f64::INFINITY } else if x <= -1.0 { f64::NEG_INFINITY } else if x < 0.0 { erf_inv_impl(-x, 1.0 + x, -1.0) } else { erf_inv_impl(x, 1.0 - x, 1.0) } } /// `erfc` calculates the complementary error function /// at `x`. pub fn erfc(x: f64) -> f64 { if x.is_nan() { f64::NAN } else if x == f64::INFINITY { 0.0 } else if x == f64::NEG_INFINITY { 2.0 } else { erf_impl(x, true) } } /// `erfc_inv` calculates the complementary inverse /// error function at `x`. pub fn erfc_inv(x: f64) -> f64 { if x <= 0.0 { f64::INFINITY } else if x >= 2.0 { f64::NEG_INFINITY } else if x > 1.0 { erf_inv_impl(-1.0 + x, 2.0 - x, -1.0) } else { erf_inv_impl(1.0 - x, x, 1.0) } } // ********************************************************** // ********** Coefficients for erf_impl polynomial ********** // ********************************************************** /// Polynomial coefficients for a numerator of `erf_impl` /// in the interval [1e-10, 0.5]. const ERF_IMPL_AN: &[f64] = &[ 0.00337916709551257388990745, -0.00073695653048167948530905, -0.374732337392919607868241, 0.0817442448733587196071743, -0.0421089319936548595203468, 0.0070165709512095756344528, -0.00495091255982435110337458, 0.000871646599037922480317225, ]; /// Polynomial coefficients for a denominator of `erf_impl` /// in the interval [1e-10, 0.5] const ERF_IMPL_AD: &[f64] = &[ 1.0, -0.218088218087924645390535, 0.412542972725442099083918, -0.0841891147873106755410271, 0.0655338856400241519690695, -0.0120019604454941768171266, 0.00408165558926174048329689, -0.000615900721557769691924509, ]; /// Polynomial coefficients for a numerator in `erf_impl` /// in the interval [0.5, 0.75]. const ERF_IMPL_BN: &[f64] = &[ -0.0361790390718262471360258, 0.292251883444882683221149, 0.281447041797604512774415, 0.125610208862766947294894, 0.0274135028268930549240776, 0.00250839672168065762786937, ]; /// Polynomial coefficients for a denominator in `erf_impl` /// in the interval [0.5, 0.75]. const ERF_IMPL_BD: &[f64] = &[ 1.0, 1.8545005897903486499845, 1.43575803037831418074962, 0.582827658753036572454135, 0.124810476932949746447682, 0.0113724176546353285778481, ]; /// Polynomial coefficients for a numerator in `erf_impl` /// in the interval [0.75, 1.25]. 
const ERF_IMPL_CN: &[f64] = &[ -0.0397876892611136856954425, 0.153165212467878293257683, 0.191260295600936245503129, 0.10276327061989304213645, 0.029637090615738836726027, 0.0046093486780275489468812, 0.000307607820348680180548455, ]; /// Polynomial coefficients for a denominator in `erf_impl` /// in the interval [0.75, 1.25]. const ERF_IMPL_CD: &[f64] = &[ 1.0, 1.95520072987627704987886, 1.64762317199384860109595, 0.768238607022126250082483, 0.209793185936509782784315, 0.0319569316899913392596356, 0.00213363160895785378615014, ]; /// Polynomial coefficients for a numerator in `erf_impl` /// in the interval [1.25, 2.25]. const ERF_IMPL_DN: &[f64] = &[ -0.0300838560557949717328341, 0.0538578829844454508530552, 0.0726211541651914182692959, 0.0367628469888049348429018, 0.00964629015572527529605267, 0.00133453480075291076745275, 0.778087599782504251917881e-4, ]; /// Polynomial coefficients for a denominator in `erf_impl` /// in the interval [1.25, 2.25]. const ERF_IMPL_DD: &[f64] = &[ 1.0, 1.75967098147167528287343, 1.32883571437961120556307, 0.552528596508757581287907, 0.133793056941332861912279, 0.0179509645176280768640766, 0.00104712440019937356634038, -0.106640381820357337177643e-7, ]; /// Polynomial coefficients for a numerator in `erf_impl` /// in the interval [2.25, 3.5]. const ERF_IMPL_EN: &[f64] = &[ -0.0117907570137227847827732, 0.014262132090538809896674, 0.0202234435902960820020765, 0.00930668299990432009042239, 0.00213357802422065994322516, 0.00025022987386460102395382, 0.120534912219588189822126e-4, ]; /// Polynomial coefficients for a denominator in `erf_impl` /// in the interval [2.25, 3.5]. const ERF_IMPL_ED: &[f64] = &[ 1.0, 1.50376225203620482047419, 0.965397786204462896346934, 0.339265230476796681555511, 0.0689740649541569716897427, 0.00771060262491768307365526, 0.000371421101531069302990367, ]; /// Polynomial coefficients for a numerator in `erf_impl` /// in the interval [3.5, 5.25]. const ERF_IMPL_FN: &[f64] = &[ -0.00546954795538729307482955, 0.00404190278731707110245394, 0.0054963369553161170521356, 0.00212616472603945399437862, 0.000394984014495083900689956, 0.365565477064442377259271e-4, 0.135485897109932323253786e-5, ]; /// Polynomial coefficients for a denominator in `erf_impl` /// in the interval [3.5, 5.25]. const ERF_IMPL_FD: &[f64] = &[ 1.0, 1.21019697773630784832251, 0.620914668221143886601045, 0.173038430661142762569515, 0.0276550813773432047594539, 0.00240625974424309709745382, 0.891811817251336577241006e-4, -0.465528836283382684461025e-11, ]; /// Polynomial coefficients for a numerator in `erf_impl` /// in the interval [5.25, 8]. const ERF_IMPL_GN: &[f64] = &[ -0.00270722535905778347999196, 0.0013187563425029400461378, 0.00119925933261002333923989, 0.00027849619811344664248235, 0.267822988218331849989363e-4, 0.923043672315028197865066e-6, ]; /// Polynomial coefficients for a denominator in `erf_impl` /// in the interval [5.25, 8]. const ERF_IMPL_GD: &[f64] = &[ 1.0, 0.814632808543141591118279, 0.268901665856299542168425, 0.0449877216103041118694989, 0.00381759663320248459168994, 0.000131571897888596914350697, 0.404815359675764138445257e-11, ]; /// Polynomial coefficients for a numerator in `erf_impl` /// in the interval [8, 11.5]. const ERF_IMPL_HN: &[f64] = &[ -0.00109946720691742196814323, 0.000406425442750422675169153, 0.000274499489416900707787024, 0.465293770646659383436343e-4, 0.320955425395767463401993e-5, 0.778286018145020892261936e-7, ]; /// Polynomial coefficients for a denominator in `erf_impl` /// in the interval [8, 11.5]. 
const ERF_IMPL_HD: &[f64] = &[ 1.0, 0.588173710611846046373373, 0.139363331289409746077541, 0.0166329340417083678763028, 0.00100023921310234908642639, 0.24254837521587225125068e-4, ]; /// Polynomial coefficients for a numerator in `erf_impl` /// in the interval [11.5, 17]. const ERF_IMPL_IN: &[f64] = &[ -0.00056907993601094962855594, 0.000169498540373762264416984, 0.518472354581100890120501e-4, 0.382819312231928859704678e-5, 0.824989931281894431781794e-7, ]; /// Polynomial coefficients for a denominator in `erf_impl` /// in the interval [11.5, 17]. const ERF_IMPL_ID: &[f64] = &[ 1.0, 0.339637250051139347430323, 0.043472647870310663055044, 0.00248549335224637114641629, 0.535633305337152900549536e-4, -0.117490944405459578783846e-12, ]; /// Polynomial coefficients for a numerator in `erf_impl` /// in the interval [17, 24]. const ERF_IMPL_JN: &[f64] = &[ -0.000241313599483991337479091, 0.574224975202501512365975e-4, 0.115998962927383778460557e-4, 0.581762134402593739370875e-6, 0.853971555085673614607418e-8, ]; /// Polynomial coefficients for a denominator in `erf_impl` /// in the interval [17, 24]. const ERF_IMPL_JD: &[f64] = &[ 1.0, 0.233044138299687841018015, 0.0204186940546440312625597, 0.000797185647564398289151125, 0.117019281670172327758019e-4, ]; /// Polynomial coefficients for a numerator in `erf_impl` /// in the interval [24, 38]. const ERF_IMPL_KN: &[f64] = &[ -0.000146674699277760365803642, 0.162666552112280519955647e-4, 0.269116248509165239294897e-5, 0.979584479468091935086972e-7, 0.101994647625723465722285e-8, ]; /// Polynomial coefficients for a denominator in `erf_impl` /// in the interval [24, 38]. const ERF_IMPL_KD: &[f64] = &[ 1.0, 0.165907812944847226546036, 0.0103361716191505884359634, 0.000286593026373868366935721, 0.298401570840900340874568e-5, ]; /// Polynomial coefficients for a numerator in `erf_impl` /// in the interval [38, 60]. const ERF_IMPL_LN: &[f64] = &[ -0.583905797629771786720406e-4, 0.412510325105496173512992e-5, 0.431790922420250949096906e-6, 0.993365155590013193345569e-8, 0.653480510020104699270084e-10, ]; /// Polynomial coefficients for a denominator in `erf_impl` /// in the interval [38, 60]. const ERF_IMPL_LD: &[f64] = &[ 1.0, 0.105077086072039915406159, 0.00414278428675475620830226, 0.726338754644523769144108e-4, 0.477818471047398785369849e-6, ]; /// Polynomial coefficients for a numerator in `erf_impl` /// in the interval [60, 85]. const ERF_IMPL_MN: &[f64] = &[ -0.196457797609229579459841e-4, 0.157243887666800692441195e-5, 0.543902511192700878690335e-7, 0.317472492369117710852685e-9, ]; /// Polynomial coefficients for a denominator in `erf_impl` /// in the interval [60, 85]. const ERF_IMPL_MD: &[f64] = &[ 1.0, 0.052803989240957632204885, 0.000926876069151753290378112, 0.541011723226630257077328e-5, 0.535093845803642394908747e-15, ]; /// Polynomial coefficients for a numerator in `erf_impl` /// in the interval [85, 110]. const ERF_IMPL_NN: &[f64] = &[ -0.789224703978722689089794e-5, 0.622088451660986955124162e-6, 0.145728445676882396797184e-7, 0.603715505542715364529243e-10, ]; /// Polynomial coefficients for a denominator in `erf_impl` /// in the interval [85, 110]. 
const ERF_IMPL_ND: &[f64] = &[ 1.0, 0.0375328846356293715248719, 0.000467919535974625308126054, 0.193847039275845656900547e-5, ]; // ********************************************************** // ********** Coefficients for erf_inv_impl polynomial ****** // ********************************************************** /// Polynomial coefficients for a numerator of `erf_inv_impl` /// in the interval [0, 0.5]. const ERF_INV_IMPL_AN: &[f64] = &[ -0.000508781949658280665617, -0.00836874819741736770379, 0.0334806625409744615033, -0.0126926147662974029034, -0.0365637971411762664006, 0.0219878681111168899165, 0.00822687874676915743155, -0.00538772965071242932965, ]; /// Polynomial coefficients for a denominator of `erf_inv_impl` /// in the interval [0, 0.5]. const ERF_INV_IMPL_AD: &[f64] = &[ 1.0, -0.970005043303290640362, -1.56574558234175846809, 1.56221558398423026363, 0.662328840472002992063, -0.71228902341542847553, -0.0527396382340099713954, 0.0795283687341571680018, -0.00233393759374190016776, 0.000886216390456424707504, ]; /// Polynomial coefficients for a numerator of `erf_inv_impl` /// in the interval [0.5, 0.75]. const ERF_INV_IMPL_BN: &[f64] = &[ -0.202433508355938759655, 0.105264680699391713268, 8.37050328343119927838, 17.6447298408374015486, -18.8510648058714251895, -44.6382324441786960818, 17.445385985570866523, 21.1294655448340526258, -3.67192254707729348546, ]; /// Polynomial coefficients for a denominator of `erf_inv_impl` /// in the interval [0.5, 0.75]. const ERF_INV_IMPL_BD: &[f64] = &[ 1.0, 6.24264124854247537712, 3.9713437953343869095, -28.6608180499800029974, -20.1432634680485188801, 48.5609213108739935468, 10.8268667355460159008, -22.6436933413139721736, 1.72114765761200282724, ]; /// Polynomial coefficients for a numerator of `erf_inv_impl` /// in the interval [0.75, 1] with x less than 3. const ERF_INV_IMPL_CN: &[f64] = &[ -0.131102781679951906451, -0.163794047193317060787, 0.117030156341995252019, 0.387079738972604337464, 0.337785538912035898924, 0.142869534408157156766, 0.0290157910005329060432, 0.00214558995388805277169, -0.679465575181126350155e-6, 0.285225331782217055858e-7, -0.681149956853776992068e-9, ]; /// Polynomial coefficients for a denominator of `erf_inv_impl` /// in the interval [0.75, 1] with x less than 3. const ERF_INV_IMPL_CD: &[f64] = &[ 1.0, 3.46625407242567245975, 5.38168345707006855425, 4.77846592945843778382, 2.59301921623620271374, 0.848854343457902036425, 0.152264338295331783612, 0.01105924229346489121, ]; /// Polynomial coefficients for a numerator of `erf_inv_impl` /// in the interval [0.75, 1] with x between 3 and 6. const ERF_INV_IMPL_DN: &[f64] = &[ -0.0350353787183177984712, -0.00222426529213447927281, 0.0185573306514231072324, 0.00950804701325919603619, 0.00187123492819559223345, 0.000157544617424960554631, 0.460469890584317994083e-5, -0.230404776911882601748e-9, 0.266339227425782031962e-11, ]; /// Polynomial coefficients for a denominator of `erf_inv_impl` /// in the interval [0.75, 1] with x between 3 and 6. const ERF_INV_IMPL_DD: &[f64] = &[ 1.0, 1.3653349817554063097, 0.762059164553623404043, 0.220091105764131249824, 0.0341589143670947727934, 0.00263861676657015992959, 0.764675292302794483503e-4, ]; /// Polynomial coefficients for a numerator of `erf_inv_impl` /// in the interval [0.75, 1] with x between 6 and 18. 
const ERF_INV_IMPL_EN: &[f64] = &[ -0.0167431005076633737133, -0.00112951438745580278863, 0.00105628862152492910091, 0.000209386317487588078668, 0.149624783758342370182e-4, 0.449696789927706453732e-6, 0.462596163522878599135e-8, -0.281128735628831791805e-13, 0.99055709973310326855e-16, ]; /// Polynomial coefficients for a denominator of `erf_inv_impl` /// in the interval [0.75, 1] with x between 6 and 18. const ERF_INV_IMPL_ED: &[f64] = &[ 1.0, 0.591429344886417493481, 0.138151865749083321638, 0.0160746087093676504695, 0.000964011807005165528527, 0.275335474764726041141e-4, 0.282243172016108031869e-6, ]; /// Polynomial coefficients for a numerator of `erf_inv_impl` /// in the interval [0.75, 1] with x between 18 and 44. const ERF_INV_IMPL_FN: &[f64] = &[ -0.0024978212791898131227, -0.779190719229053954292e-5, 0.254723037413027451751e-4, 0.162397777342510920873e-5, 0.396341011304801168516e-7, 0.411632831190944208473e-9, 0.145596286718675035587e-11, -0.116765012397184275695e-17, ]; /// Polynomial coefficients for a denominator of `erf_inv_impl` /// in the interval [0.75, 1] with x between 18 and 44. const ERF_INV_IMPL_FD: &[f64] = &[ 1.0, 0.207123112214422517181, 0.0169410838120975906478, 0.000690538265622684595676, 0.145007359818232637924e-4, 0.144437756628144157666e-6, 0.509761276599778486139e-9, ]; /// Polynomial coefficients for a numerator of `erf_inv_impl` /// in the interval [0.75, 1] with x greater than 44. const ERF_INV_IMPL_GN: &[f64] = &[ -0.000539042911019078575891, -0.28398759004727721098e-6, 0.899465114892291446442e-6, 0.229345859265920864296e-7, 0.225561444863500149219e-9, 0.947846627503022684216e-12, 0.135880130108924861008e-14, -0.348890393399948882918e-21, ]; /// Polynomial coefficients for a denominator of `erf_inv_impl` /// in the interval [0.75, 1] with x greater than 44. const ERF_INV_IMPL_GD: &[f64] = &[ 1.0, 0.0845746234001899436914, 0.00282092984726264681981, 0.468292921940894236786e-4, 0.399968812193862100054e-6, 0.161809290887904476097e-8, 0.231558608310259605225e-11, ]; /// `erf_impl` computes the error function at `z`. 
/// If `inv` is true, `1 - erf` is calculated as opposed to `erf` fn erf_impl(z: f64, inv: bool) -> f64 { if z < 0.0 { if !inv { return -erf_impl(-z, false); } if z < -0.5 { return 2.0 - erf_impl(-z, true); } return 1.0 + erf_impl(-z, false); } let result = if z < 0.5 { if z < 1e-10 { z * 1.125 + z * 0.003379167095512573896158903121545171688 } else { z * 1.125 + z * evaluate::polynomial(z, ERF_IMPL_AN) / evaluate::polynomial(z, ERF_IMPL_AD) } } else if z < 110.0 { let (r, b) = if z < 0.75 { ( evaluate::polynomial(z - 0.5, ERF_IMPL_BN) / evaluate::polynomial(z - 0.5, ERF_IMPL_BD), 0.3440242112, ) } else if z < 1.25 { ( evaluate::polynomial(z - 0.75, ERF_IMPL_CN) / evaluate::polynomial(z - 0.75, ERF_IMPL_CD), 0.419990927, ) } else if z < 2.25 { ( evaluate::polynomial(z - 1.25, ERF_IMPL_DN) / evaluate::polynomial(z - 1.25, ERF_IMPL_DD), 0.4898625016, ) } else if z < 3.5 { ( evaluate::polynomial(z - 2.25, ERF_IMPL_EN) / evaluate::polynomial(z - 2.25, ERF_IMPL_ED), 0.5317370892, ) } else if z < 5.25 { ( evaluate::polynomial(z - 3.5, ERF_IMPL_FN) / evaluate::polynomial(z - 3.5, ERF_IMPL_FD), 0.5489973426, ) } else if z < 8.0 { ( evaluate::polynomial(z - 5.25, ERF_IMPL_GN) / evaluate::polynomial(z - 5.25, ERF_IMPL_GD), 0.5571740866, ) } else if z < 11.5 { ( evaluate::polynomial(z - 8.0, ERF_IMPL_HN) / evaluate::polynomial(z - 8.0, ERF_IMPL_HD), 0.5609807968, ) } else if z < 17.0 { ( evaluate::polynomial(z - 11.5, ERF_IMPL_IN) / evaluate::polynomial(z - 11.5, ERF_IMPL_ID), 0.5626493692, ) } else if z < 24.0 { ( evaluate::polynomial(z - 17.0, ERF_IMPL_JN) / evaluate::polynomial(z - 17.0, ERF_IMPL_JD), 0.5634598136, ) } else if z < 38.0 { ( evaluate::polynomial(z - 24.0, ERF_IMPL_KN) / evaluate::polynomial(z - 24.0, ERF_IMPL_KD), 0.5638477802, ) } else if z < 60.0 { ( evaluate::polynomial(z - 38.0, ERF_IMPL_LN) / evaluate::polynomial(z - 38.0, ERF_IMPL_LD), 0.5640528202, ) } else if z < 85.0 { ( evaluate::polynomial(z - 60.0, ERF_IMPL_MN) / evaluate::polynomial(z - 60.0, ERF_IMPL_MD), 0.5641309023, ) } else { ( evaluate::polynomial(z - 85.0, ERF_IMPL_NN) / evaluate::polynomial(z - 85.0, ERF_IMPL_ND), 0.5641584396, ) }; let g = (-z * z).exp() / z; g * b + g * r } else { 0.0 }; if inv && z >= 0.5 { result } else if z >= 0.5 || inv { 1.0 - result } else { result } } // `erf_inv_impl` computes the inverse error function where // `p`,`q`, and `s` are the first, second, and third intermediate // parameters respectively fn erf_inv_impl(p: f64, q: f64, s: f64) -> f64 { let result = if p <= 0.5 { let y = 0.0891314744949340820313; let g = p * (p + 10.0); let r = evaluate::polynomial(p, ERF_INV_IMPL_AN) / evaluate::polynomial(p, ERF_INV_IMPL_AD); g * y + g * r } else if q >= 0.25 { let y = 2.249481201171875; let g = (-2.0 * q.ln()).sqrt(); let xs = q - 0.25; let r = evaluate::polynomial(xs, ERF_INV_IMPL_BN) / evaluate::polynomial(xs, ERF_INV_IMPL_BD); g / (y + r) } else { let x = (-q.ln()).sqrt(); if x < 3.0 { let y = 0.807220458984375; let xs = x - 1.125; let r = evaluate::polynomial(xs, ERF_INV_IMPL_CN) / evaluate::polynomial(xs, ERF_INV_IMPL_CD); y * x + r * x } else if x < 6.0 { let y = 0.93995571136474609375; let xs = x - 3.0; let r = evaluate::polynomial(xs, ERF_INV_IMPL_DN) / evaluate::polynomial(xs, ERF_INV_IMPL_DD); y * x + r * x } else if x < 18.0 { let y = 0.98362827301025390625; let xs = x - 6.0; let r = evaluate::polynomial(xs, ERF_INV_IMPL_EN) / evaluate::polynomial(xs, ERF_INV_IMPL_ED); y * x + r * x } else if x < 44.0 { let y = 0.99714565277099609375; let xs = x - 18.0; let r = 
evaluate::polynomial(xs, ERF_INV_IMPL_FN) / evaluate::polynomial(xs, ERF_INV_IMPL_FD); y * x + r * x } else { let y = 0.99941349029541015625; let xs = x - 44.0; let r = evaluate::polynomial(xs, ERF_INV_IMPL_GN) / evaluate::polynomial(xs, ERF_INV_IMPL_GD); y * x + r * x } }; s * result }
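As a rough illustration of how the pieces above fit together, the doc-comment example for `polynomial` and the `erf`/`erf_inv` pair can be exercised with a small test module. This sketch is not part of the original file:

```rust
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn polynomial_and_round_trip() {
        // [3, -1, 2] encodes 2z^2 - z + 3, so evaluating at z = 2 gives 9.
        assert_eq!(evaluate::polynomial(2.0, &[3.0, -1.0, 2.0]), 9.0);

        // erf_inv should invert erf away from the tails.
        for &x in &[0.1f64, 0.5, 0.9] {
            assert!((erf(erf_inv(x)) - x).abs() < 1e-6);
        }
    }
}
```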
candle/candle-core/src/cpu/erf.rs/0
{ "file_path": "candle/candle-core/src/cpu/erf.rs", "repo_id": "candle", "token_count": 11974 }
16
//! ML framework for Rust
//!
//! ```rust
//! use candle_core::{Tensor, DType, Device};
//! # use candle_core::Error;
//! # fn main() -> Result<(), Error>{
//!
//! let a = Tensor::arange(0f32, 6f32, &Device::Cpu)?.reshape((2, 3))?;
//! let b = Tensor::arange(0f32, 12f32, &Device::Cpu)?.reshape((3, 4))?;
//!
//! let c = a.matmul(&b)?;
//! # Ok(())}
//! ```
//!
//! ## Features
//!
//! - Simple syntax (it looks and feels like PyTorch)
//! - CPU and Cuda backends (and M1 support)
//! - Enable serverless (CPU) small and fast deployments
//! - Model training
//! - Distributed computing (NCCL).
//! - Models out of the box (Llama, Whisper, Falcon, ...)
//!
//! ## FAQ
//!
//! - Why Candle?
//!
//! Candle stems from the need to reduce binary size in order to *enable serverless*
//! deployments, made possible by keeping the whole engine far smaller than PyTorch's
//! very large library volume.
//!
//! And simply *removing Python* from production workloads.
//! Python can really add overhead in more complex workflows and the
//! [GIL](https://www.backblaze.com/blog/the-python-gil-past-present-and-future/) is a notorious source of headaches.
//!
//! Rust is cool, and a lot of the HF ecosystem already has Rust crates, such as
//! [safetensors](https://github.com/huggingface/safetensors) and
//! [tokenizers](https://github.com/huggingface/tokenizers).

#[cfg(feature = "accelerate")]
mod accelerate;
pub mod backend;
pub mod backprop;
mod conv;
mod convert;
pub mod cpu;
pub mod cpu_backend;
#[cfg(feature = "cuda")]
pub mod cuda_backend;
#[cfg(feature = "cudnn")]
pub mod cudnn;
mod device;
pub mod display;
mod dtype;
mod dummy_cuda_backend;
mod dummy_metal_backend;
pub mod error;
mod indexer;
pub mod layout;
#[cfg(feature = "metal")]
pub mod metal_backend;
#[cfg(feature = "mkl")]
mod mkl;
pub mod npy;
mod op;
pub mod pickle;
pub mod quantized;
pub mod safetensors;
pub mod scalar;
pub mod shape;
mod storage;
mod strided_index;
mod tensor;
mod tensor_cat;
pub mod test_utils;
pub mod utils;
mod variable;

pub use cpu_backend::CpuStorage;
pub use device::{Device, DeviceLocation, NdArray};
pub use dtype::{DType, FloatDType, IntDType, WithDType};
pub use error::{Error, Result};
pub use indexer::IndexOp;
pub use layout::Layout;
pub use op::{CustomOp1, CustomOp2, CustomOp3};
pub use shape::{Shape, D};
pub use storage::Storage;
pub use strided_index::{StridedBlocks, StridedIndex};
pub use tensor::{Tensor, TensorId};
pub use variable::Var;

#[cfg(feature = "cuda")]
pub use cuda_backend::{CudaDevice, CudaStorage};

#[cfg(not(feature = "cuda"))]
pub use dummy_cuda_backend::{CudaDevice, CudaStorage};

#[cfg(feature = "metal")]
pub use metal_backend::{MetalDevice, MetalError, MetalStorage};

#[cfg(not(feature = "metal"))]
pub use dummy_metal_backend::{MetalDevice, MetalError, MetalStorage};

#[cfg(feature = "mkl")]
extern crate intel_mkl_src;

#[cfg(feature = "accelerate")]
extern crate accelerate_src;

pub trait ToUsize2 {
    fn to_usize2(self) -> (usize, usize);
}

impl ToUsize2 for usize {
    fn to_usize2(self) -> (usize, usize) {
        (self, self)
    }
}

impl ToUsize2 for (usize, usize) {
    fn to_usize2(self) -> (usize, usize) {
        self
    }
}

// A simple trait defining a module with a forward method using a single argument.
pub trait Module {
    fn forward(&self, xs: &Tensor) -> Result<Tensor>;
}

impl<T: Fn(&Tensor) -> Result<Tensor>> Module for T {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        self(xs)
    }
}

impl<M: Module> Module for Option<&M> {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        match self {
            None => Ok(xs.clone()),
            Some(m) => m.forward(xs),
        }
    }
}

// A trait defining a module with forward method using a single tensor argument and a flag to
// separate the training and evaluation behaviors.
pub trait ModuleT {
    fn forward_t(&self, xs: &Tensor, train: bool) -> Result<Tensor>;
}

impl<M: Module> ModuleT for M {
    fn forward_t(&self, xs: &Tensor, _train: bool) -> Result<Tensor> {
        self.forward(xs)
    }
}
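To make the `Module` definitions above concrete, here is a small usage sketch from a downstream crate's point of view. The `Double` struct is hypothetical and introduced only for illustration:

```rust
use candle_core::{Device, Module, Result, Tensor};

// A toy module that doubles its input.
struct Double;

impl Module for Double {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        xs * 2.0
    }
}

fn main() -> Result<()> {
    let xs = Tensor::arange(0f32, 4f32, &Device::Cpu)?;
    // Structs implementing `Module` are called through `forward`...
    let ys = Double.forward(&xs)?;
    // ...and so are plain closures, thanks to the blanket impl for
    // `Fn(&Tensor) -> Result<Tensor>`.
    let relu = |t: &Tensor| t.relu();
    let zs = relu.forward(&ys)?;
    println!("{ys}\n{zs}");
    Ok(())
}
```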
candle/candle-core/src/lib.rs/0
{ "file_path": "candle/candle-core/src/lib.rs", "repo_id": "candle", "token_count": 1521 }
17
use super::k_quants::{BlockQ2K, BlockQ4K, BlockQ4_0, BlockQ6K, BlockQ8K, BlockQ8_0, QK8_0, QK_K}; use crate::Result; use byteorder::{ByteOrder, LittleEndian}; use half::f16; use core::arch::wasm32::*; #[inline(always)] pub(crate) fn vec_dot_q4_0_q8_0(n: usize, xs: &[BlockQ4_0], ys: &[BlockQ8_0]) -> Result<f32> { let qk = QK8_0; if n % QK8_0 != 0 { crate::bail!("vec_dot_q4_0_q8_0: {n} is not divisible by {qk}") } unsafe { let mut acc = f32x4_splat(0.0f32); for (x, y) in xs.iter().zip(ys.iter()) { let x1234 = v128_load(x.qs.as_ptr() as *const v128); let x12 = v128_and(x1234, u8x16_splat(0x0F)); let x12 = i8x16_sub(x12, i8x16_splat(8)); let x34 = u8x16_shr(x1234, 4); let x34 = i8x16_sub(x34, i8x16_splat(8)); let x1 = i16x8_extend_low_i8x16(x12); let y1 = i16x8_load_extend_i8x8(y.qs.as_ptr()); let sum_xy = i32x4_dot_i16x8(x1, y1); let x2 = i16x8_extend_high_i8x16(x12); let y2 = i16x8_load_extend_i8x8(y.qs.as_ptr().add(8)); let sum_xy = i32x4_add(sum_xy, i32x4_dot_i16x8(x2, y2)); let x3 = i16x8_extend_low_i8x16(x34); let y3 = i16x8_load_extend_i8x8(y.qs.as_ptr().add(16)); let sum_xy = i32x4_add(sum_xy, i32x4_dot_i16x8(x3, y3)); let x4 = i16x8_extend_high_i8x16(x34); let y4 = i16x8_load_extend_i8x8(y.qs.as_ptr().add(24)); let sum_xy = i32x4_add(sum_xy, i32x4_dot_i16x8(x4, y4)); let sum_xy = f32x4_convert_i32x4(sum_xy); // f32x4_relaxed_madd is nightly only. let d = f32x4_splat(f16::to_f32(x.d) * f16::to_f32(y.d)); let scaled = f32x4_mul(sum_xy, d); acc = f32x4_add(acc, scaled) } let res = f32x4_extract_lane::<0>(acc) + f32x4_extract_lane::<1>(acc) + f32x4_extract_lane::<2>(acc) + f32x4_extract_lane::<3>(acc); Ok(res) } } #[inline(always)] pub(crate) fn vec_dot_q8_0_q8_0(n: usize, xs: &[BlockQ8_0], ys: &[BlockQ8_0]) -> Result<f32> { let qk = QK8_0; if n % QK8_0 != 0 { crate::bail!("vec_dot_q8_0_q8_0: {n} is not divisible by {qk}") } unsafe { let mut acc = f32x4_splat(0.0f32); for (x, y) in xs.iter().zip(ys.iter()) { let x1 = i16x8_load_extend_i8x8(x.qs.as_ptr()); let y1 = i16x8_load_extend_i8x8(y.qs.as_ptr()); let sum_xy = i32x4_dot_i16x8(x1, y1); let x2 = i16x8_load_extend_i8x8(x.qs.as_ptr().add(8)); let y2 = i16x8_load_extend_i8x8(y.qs.as_ptr().add(8)); let sum_xy = i32x4_add(sum_xy, i32x4_dot_i16x8(x2, y2)); let x3 = i16x8_load_extend_i8x8(x.qs.as_ptr().add(16)); let y3 = i16x8_load_extend_i8x8(y.qs.as_ptr().add(16)); let sum_xy = i32x4_add(sum_xy, i32x4_dot_i16x8(x3, y3)); let x4 = i16x8_load_extend_i8x8(x.qs.as_ptr().add(24)); let y4 = i16x8_load_extend_i8x8(y.qs.as_ptr().add(24)); let sum_xy = i32x4_add(sum_xy, i32x4_dot_i16x8(x4, y4)); let sum_xy = f32x4_convert_i32x4(sum_xy); // f32x4_relaxed_madd is nightly only. 
let d = f32x4_splat(f16::to_f32(x.d) * f16::to_f32(y.d)); let scaled = f32x4_mul(sum_xy, d); acc = f32x4_add(acc, scaled) } let res = f32x4_extract_lane::<0>(acc) + f32x4_extract_lane::<1>(acc) + f32x4_extract_lane::<2>(acc) + f32x4_extract_lane::<3>(acc); Ok(res) } } #[inline(always)] pub(crate) fn vec_dot_q2k_q8k(n: usize, xs: &[BlockQ2K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q2k_q8k: {n} is not divisible by {QK_K}") } unsafe { let mut sumf = f32x4_splat(0f32); for (x, y) in xs.iter().zip(ys.iter()) { let mut q2: &[_] = &x.qs; let mut q8: &[_] = &y.qs; let sc = &x.scales; let mut summs = i32x4_splat(0); for i in (0..(QK_K / 16)).step_by(4) { let bsums = i32x4_load_extend_i16x4(y.bsums.as_ptr().add(i)); let scales = i32x4_shr( i32x4( sc[i] as i32, sc[i + 1] as i32, sc[i + 2] as i32, sc[i + 3] as i32, ), 4, ); summs = i32x4_add(summs, i32x4_mul(bsums, scales)) } let summs = f32x4_convert_i32x4(summs); let dall = y.d * x.d.to_f32(); let dmin = y.d * x.dmin.to_f32(); let mut isum = i32x4_splat(0); let mut is = 0; for _ in 0..(QK_K / 128) { let mut shift = 0; for _ in 0..4 { let d = (sc[is] & 0xF) as i32; is += 1; let mut isuml = i16x8_splat(0); for l in (0..16).step_by(8) { let q8 = i16x8_load_extend_i8x8(q8.as_ptr().add(l)); let q2 = i16x8_load_extend_u8x8(q2.as_ptr().add(l)); let q2 = v128_and(i16x8_shr(q2, shift), i16x8_splat(3)); isuml = i16x8_add(isuml, i16x8_mul(q2, q8)) } let dd = i32x4_splat(d); isum = i32x4_add(isum, i32x4_mul(i32x4_extend_low_i16x8(isuml), dd)); isum = i32x4_add(isum, i32x4_mul(i32x4_extend_high_i16x8(isuml), dd)); let d = (sc[is] & 0xF) as i32; is += 1; let mut isuml = i16x8_splat(0); for l in (16..32).step_by(8) { let q8 = i16x8_load_extend_i8x8(q8.as_ptr().add(l)); let q2 = i16x8_load_extend_u8x8(q2.as_ptr().add(l)); let q2 = v128_and(i16x8_shr(q2, shift), i16x8_splat(3)); isuml = i16x8_add(isuml, i16x8_mul(q2, q8)) } let dd = i32x4_splat(d); isum = i32x4_add(isum, i32x4_mul(i32x4_extend_low_i16x8(isuml), dd)); isum = i32x4_add(isum, i32x4_mul(i32x4_extend_high_i16x8(isuml), dd)); shift += 2; // adjust the indexing q8 = &q8[32..]; } // adjust the indexing q2 = &q2[32..]; } let isum = f32x4_convert_i32x4(isum); sumf = f32x4_add( sumf, f32x4_sub( f32x4_mul(isum, f32x4_splat(dall)), f32x4_mul(summs, f32x4_splat(dmin)), ), ); } let sumf = f32x4_extract_lane::<0>(sumf) + f32x4_extract_lane::<1>(sumf) + f32x4_extract_lane::<2>(sumf) + f32x4_extract_lane::<3>(sumf); Ok(sumf) } } #[inline(always)] pub(crate) fn vec_dot_q4k_q8k(n: usize, xs: &[BlockQ4K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q4k_q8k: {n} is not divisible by {QK_K}") } const KMASK1: u32 = 0x3f3f3f3f; const KMASK2: u32 = 0x0f0f0f0f; const KMASK3: u32 = 0x03030303; let mut utmp: [u32; 4] = [0; 4]; let mut scales: [u8; 8] = [0; 8]; let mut mins: [u8; 8] = [0; 8]; let mut aux8: [u8; QK_K] = [0; QK_K]; let mut sums = f32x4_splat(0f32); unsafe { for (y, x) in ys.iter().zip(xs.iter()) { let q4 = &x.qs; let q8 = &y.qs; for j in 0..QK_K / 64 { let q4_1 = v128_load(q4.as_ptr().add(32 * j) as *const v128); let q4_2 = v128_load(q4.as_ptr().add(32 * j + 16) as *const v128); v128_store( aux8.as_mut_ptr().add(64 * j) as *mut v128, v128_and(q4_1, u8x16_splat(0x0F)), ); v128_store( aux8.as_mut_ptr().add(64 * j + 16) as *mut v128, v128_and(q4_2, u8x16_splat(0x0F)), ); v128_store( aux8.as_mut_ptr().add(64 * j + 32) as *mut v128, u8x16_shr(q4_1, 4), ); v128_store( aux8.as_mut_ptr().add(64 * j + 48) as *mut v128, u8x16_shr(q4_2, 4), ); } 
LittleEndian::read_u32_into(&x.scales, &mut utmp[0..3]); utmp[3] = ((utmp[2] >> 4) & KMASK2) | (((utmp[1] >> 6) & KMASK3) << 4); let uaux = utmp[1] & KMASK1; utmp[1] = (utmp[2] & KMASK2) | (((utmp[0] >> 6) & KMASK3) << 4); utmp[2] = uaux; utmp[0] &= KMASK1; //extract scales and mins LittleEndian::write_u32_into(&utmp[0..2], &mut scales); LittleEndian::write_u32_into(&utmp[2..4], &mut mins); let mut sumi = i32x4_splat(0); for j in (0..QK_K / 16).step_by(4) { let bsums = i32x4_load_extend_i16x4(y.bsums.as_ptr().add(j)); let (m1, m2) = (mins[j / 2] as i32, mins[j / 2 + 1] as i32); let mins = i32x4(m1, m1, m2, m2); sumi = i32x4_add(sumi, i32x4_mul(bsums, mins)); } let mut aux32 = i32x4_splat(0i32); for (scale_i, scale) in scales.iter().enumerate() { let scale = i32x4_splat(*scale as i32); for j in 0..4 { let i = 32 * scale_i + 8 * j; let q8 = i16x8_load_extend_i8x8(q8.as_ptr().add(i)); let aux8 = i16x8_load_extend_u8x8(aux8.as_ptr().add(i)); let aux16 = i16x8_mul(q8, aux8); aux32 = i32x4_add(aux32, i32x4_mul(scale, i32x4_extend_low_i16x8(aux16))); aux32 = i32x4_add(aux32, i32x4_mul(scale, i32x4_extend_high_i16x8(aux16))); } } let aux32 = f32x4_convert_i32x4(aux32); let d = f32x4_splat(x.d.to_f32() * y.d); sums = f32x4_add(sums, f32x4_mul(aux32, d)); let dmin = x.dmin.to_f32() * y.d; let dmin = f32x4_splat(dmin); let sumi = f32x4_convert_i32x4(sumi); sums = f32x4_sub(sums, f32x4_mul(sumi, dmin)); } let sums = f32x4_extract_lane::<0>(sums) + f32x4_extract_lane::<1>(sums) + f32x4_extract_lane::<2>(sums) + f32x4_extract_lane::<3>(sums); Ok(sums) } } #[inline(always)] pub(crate) fn vec_dot_q6k_q8k(n: usize, xs: &[BlockQ6K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q6k_q8k: {n} is not divisible by {QK_K}") } let mut aux8 = [0i8; QK_K]; unsafe { let mut sums = f32x4_splat(0f32); for (x, y) in xs.iter().zip(ys.iter()) { let q4 = &x.ql; let qh = &x.qh; let q8 = &y.qs; let mut aux32 = f32x4_splat(0f32); for j in (0..QK_K).step_by(128) { let aux8 = aux8.as_mut_ptr().add(j); let q4 = &q4.as_ptr().add(j / 2); let qh = &qh.as_ptr().add(j / 4); for l in (0..32).step_by(16) { // aux8[l] = (((q4[l] & 0xF) | ((qh[l] & 3) << 4)) as i32 - 32) as i8; let a8 = v128_or( v128_and(v128_load(q4.add(l) as *const v128), u8x16_splat(0xF)), u8x16_shl( v128_and(v128_load(qh.add(l) as *const v128), u8x16_splat(3)), 4, ), ); let a8_low = i16x8_sub(i16x8_extend_low_u8x16(a8), i16x8_splat(32)); let a8_high = i16x8_sub(i16x8_extend_high_u8x16(a8), i16x8_splat(32)); v128_store( aux8.add(l) as *mut v128, i8x16_narrow_i16x8(a8_low, a8_high), ); // aux8[l + 32] = // (((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) as i32 - 32) as i8; let a8 = v128_or( v128_and(v128_load(q4.add(l + 32) as *const v128), u8x16_splat(0xF)), u8x16_shl( v128_and( u8x16_shr(v128_load(qh.add(l) as *const v128), 2), u8x16_splat(3), ), 4, ), ); let a8_low = i16x8_sub(i16x8_extend_low_u8x16(a8), i16x8_splat(32)); let a8_high = i16x8_sub(i16x8_extend_high_u8x16(a8), i16x8_splat(32)); v128_store( aux8.add(l + 32) as *mut v128, i8x16_narrow_i16x8(a8_low, a8_high), ); // aux8[l + 64] = (((q4[l] >> 4) | (((qh[l] >> 4) & 3) << 4)) as i32 - 32) as i8; let a8 = v128_or( u8x16_shr(v128_load(q4.add(l) as *const v128), 4), u8x16_shl( v128_and( u8x16_shr(v128_load(qh.add(l) as *const v128), 4), u8x16_splat(3), ), 4, ), ); let a8_low = i16x8_sub(i16x8_extend_low_u8x16(a8), i16x8_splat(32)); let a8_high = i16x8_sub(i16x8_extend_high_u8x16(a8), i16x8_splat(32)); v128_store( aux8.add(l + 64) as *mut v128, i8x16_narrow_i16x8(a8_low, a8_high), 
); // aux8[l + 96] = // (((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) as i32 - 32) as i8; let a8 = v128_or( u8x16_shr(v128_load(q4.add(l + 32) as *const v128), 4), u8x16_shl( v128_and( u8x16_shr(v128_load(qh.add(l) as *const v128), 6), u8x16_splat(3), ), 4, ), ); let a8_low = i16x8_sub(i16x8_extend_low_u8x16(a8), i16x8_splat(32)); let a8_high = i16x8_sub(i16x8_extend_high_u8x16(a8), i16x8_splat(32)); v128_store( aux8.add(l + 96) as *mut v128, i8x16_narrow_i16x8(a8_low, a8_high), ); } } for (j, &scale) in x.scales.iter().enumerate() { let scale = f32x4_splat(scale as f32); for offset in [0, 8] { let aux16 = i16x8_mul( i16x8_load_extend_i8x8(q8.as_ptr().add(16 * j + offset)), i16x8_load_extend_i8x8(aux8.as_ptr().add(16 * j + offset)), ); aux32 = f32x4_add( aux32, f32x4_mul(f32x4_convert_i32x4(i32x4_extend_low_i16x8(aux16)), scale), ); aux32 = f32x4_add( aux32, f32x4_mul(f32x4_convert_i32x4(i32x4_extend_high_i16x8(aux16)), scale), ); } } let d = f32x4_splat(x.d.to_f32() * y.d); sums = f32x4_add(sums, f32x4_mul(aux32, d)); } let sums = f32x4_extract_lane::<0>(sums) + f32x4_extract_lane::<1>(sums) + f32x4_extract_lane::<2>(sums) + f32x4_extract_lane::<3>(sums); Ok(sums) } } #[inline(always)] pub(crate) fn vec_dot_q8k_q8k(n: usize, xs: &[BlockQ8K], ys: &[BlockQ8K]) -> Result<f32> { let qk = QK_K; if n % QK_K != 0 { crate::bail!("vec_dot_q8k_q8k: {n} is not divisible by {qk}") } unsafe { let mut acc = f32x4_splat(0.0f32); for (xs, ys) in xs.iter().zip(ys.iter()) { let x_qs = xs.qs.as_ptr(); let y_qs = ys.qs.as_ptr(); let mut sumi = i32x4_splat(0); for j in (0..QK_K).step_by(8) { let xs = i16x8_load_extend_i8x8(x_qs.add(j)); let ys = i16x8_load_extend_i8x8(y_qs.add(j)); let sum_xy = i32x4_dot_i16x8(xs, ys); sumi = i32x4_add(sumi, sum_xy) } let d = f32x4_splat(xs.d * ys.d); acc = f32x4_add(acc, f32x4_mul(f32x4_convert_i32x4(sumi), d)) } let res = f32x4_extract_lane::<0>(acc) + f32x4_extract_lane::<1>(acc) + f32x4_extract_lane::<2>(acc) + f32x4_extract_lane::<3>(acc); Ok(res) } }
candle/candle-core/src/quantized/simd128.rs/0
{ "file_path": "candle/candle-core/src/quantized/simd128.rs", "repo_id": "candle", "token_count": 11617 }
18
#![allow(clippy::approx_constant)] use anyhow::{Context, Result}; use candle_core::{test_device, test_utils, Device, Shape, Tensor, Var}; fn simple_grad(device: &Device) -> Result<()> { let x = Var::new(&[3f32, 1., 4.], device)?; let x = x.as_tensor(); let y = (((x * x)? + x * 5f64)? + 4f64)?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!(x.to_vec1::<f32>()?, [3., 1., 4.]); // y = x^2 + 5.x + 4 assert_eq!(y.to_vec1::<f32>()?, [28., 10., 40.]); // dy/dx = 2.x + 5 assert_eq!(grad_x.to_vec1::<f32>()?, [11., 7., 13.]); Ok(()) } fn sum_grad(device: &Device) -> Result<()> { let x = Var::new(&[3f32, 1., 4.], device)?; let x = x.as_tensor(); let y = (x.sqr()?.sum_keepdim(0)? * 2.)?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!(y.to_vec1::<f32>()?, [52.]); // y = 2.x^2 so dy/dx = 4.x assert_eq!(grad_x.to_vec1::<f32>()?, &[12., 4., 16.]); // Same test as before but squeezing on the last dimension. let y = (x.sqr()?.sum_keepdim(0)? * 2.)?.squeeze(0)?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!(y.to_scalar::<f32>()?, 52.); // y = 2.x^2 so dy/dx = 4.x assert_eq!(grad_x.to_vec1::<f32>()?, &[12., 4., 16.]); Ok(()) } fn matmul_grad(device: &Device) -> Result<()> { let data: Vec<_> = (0..12).map(|i| i as f32).collect(); let x = Var::from_slice(&data, (2, 2, 3), device)?; let data: Vec<_> = (0..12).map(|i| i as f32).collect(); let y = Var::from_slice(&data, (2, 3, 2), device)?; let c = x.matmul(&y)?; let grads = c.backward()?; let grad_x = grads.get(&x).context("no grad for x")?; let grad_y = grads.get(&y).context("no grad for y")?; assert_eq!(grad_x.shape(), &Shape::from((2, 2, 3))); assert_eq!(grad_y.shape(), &Shape::from((2, 3, 2))); assert_eq!( &*grad_x.to_vec3::<f32>()?, &[ [[1., 5., 9.], [1., 5., 9.]], [[13., 17., 21.], [13., 17., 21.]] ] ); assert_eq!( &*grad_y.to_vec3::<f32>()?, &[ [[3., 3.], [5., 5.], [7., 7.]], [[15., 15.], [17., 17.], [19., 19.]] ] ); Ok(()) } // The simplest gradient descent, using scalar variable. fn grad_descent(device: &Device) -> Result<()> { let x = Var::new(0f32, device)?; let learning_rate = 0.1; for _step in 0..100 { let xt = x.as_tensor(); let c = ((xt - 4.2)? * (xt - 4.2)?)?; let grads = c.backward()?; let x_grad = grads.get(&x).context("no grad for x")?; x.set(&(xt - x_grad * learning_rate)?)? } assert_eq!(x.to_scalar::<f32>()?, 4.199999); Ok(()) } fn unary_grad(device: &Device) -> Result<()> { let x = Var::new(&[3f32, 1., 4., 0.15], device)?; let x = x.as_tensor(); let y = (x.log()? 
+ 1.)?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!( test_utils::to_vec1_round(&y, 4)?, [2.0986, 1.0, 2.3863, -0.8971] ); assert_eq!( test_utils::to_vec1_round(grad_x, 4)?, [0.3333, 1.0, 0.25, 6.6667] ); let y = x.exp()?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!( test_utils::to_vec1_round(&y, 4)?, [20.0855, 2.7183, 54.5982, 1.1618] ); assert_eq!( test_utils::to_vec1_round(grad_x, 4)?, [20.0855, 2.7183, 54.5982, 1.1618] ); let y = x.exp()?.sqr()?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!( test_utils::to_vec1_round(&y, 3)?, [403.429, 7.389, 2980.958, 1.35] ); // exp(x)^2 = exp(2*x) assert_eq!( test_utils::to_vec1_round(grad_x, 2)?, [806.86, 14.78, 5961.92, 2.7] ); let y = x.sin()?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!( test_utils::to_vec1_round(&y, 4)?, [0.1411, 0.8415, -0.7568, 0.1494], ); assert_eq!( test_utils::to_vec1_round(grad_x, 4)?, [-0.99, 0.5403, -0.6536, 0.9888], ); let y = x.cos()?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!( test_utils::to_vec1_round(&y, 4)?, [-0.99, 0.5403, -0.6536, 0.9888], ); assert_eq!( test_utils::to_vec1_round(grad_x, 4)?, [-0.1411, -0.8415, 0.7568, -0.1494], ); let y = x.sqr()?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!(y.to_vec1::<f32>()?, [9.0, 1.0, 16.0, 0.0225]); assert_eq!(grad_x.to_vec1::<f32>()?, [6.0, 2.0, 8.0, 0.3]); let y = x.sqr()?.sqrt()?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!(y.to_vec1::<f32>()?, [3.0, 1.0, 4.0, 0.15]); assert_eq!(test_utils::to_vec1_round(grad_x, 4)?, [1.0, 1.0, 1.0, 1.0]); let y = x.neg()?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!(y.to_vec1::<f32>()?, [-3.0, -1.0, -4.0, -0.15]); assert_eq!(grad_x.to_vec1::<f32>()?, [-1.0, -1.0, -1.0, -1.0]); let y = x.affine(0.2, 1.)?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!(y.to_vec1::<f32>()?, [1.6, 1.2, 1.8, 1.03]); assert_eq!(grad_x.to_vec1::<f32>()?, [0.2, 0.2, 0.2, 0.2]); let y = Tensor::new(1f32, device)?.broadcast_div(x)?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!( test_utils::to_vec1_round(&y, 4)?, [0.3333, 1.0, 0.25, 6.6667] ); assert_eq!( grad_x.to_vec1::<f32>()?, [-0.11111111, -1.0, -0.0625, -44.444443], ); let y = x.broadcast_div(&Tensor::new(0.5f32, device)?)?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!(y.to_vec1::<f32>()?, [6., 2., 8., 0.3]); assert_eq!(grad_x.to_vec1::<f32>()?, [2., 2., 2., 2.]); let x = Var::new(&[3f32, 1., 4., 0.15], device)?; let y = x.powf(2.5)?; let grads = y.backward()?; let grad_x = grads.get(&x).context("no grad for x")?; assert_eq!(test_utils::to_vec1_round(&y, 2)?, [15.59, 1.0, 32.0, 0.01]); assert_eq!( test_utils::to_vec1_round(grad_x, 2)?, [12.99, 2.5, 20.0, 0.15] ); let y = x.tanh()?; let grads = y.backward()?; let grad_x = grads.get(&x).context("no grad for x")?; assert_eq!(test_utils::to_vec1_round(&y, 2)?, [1.0, 0.76, 1.0, 0.15]); assert_eq!( test_utils::to_vec1_round(grad_x, 2)?, [0.01, 0.42, 0.0, 0.98], ); // testing compared to pytorch nn.GELU(approximate = 'tanh') let y = x.gelu()?; let grads = y.backward()?; let grad_x = grads.get(&x).context("no grad for x")?; assert_eq!( 
test_utils::to_vec1_round(&y, 4)?, [2.9964, 0.8412, 3.9999, 0.0839] ); assert_eq!( test_utils::to_vec1_round(grad_x, 4)?, [1.0116, 1.0830, 1.0003, 0.6188], ); // Testing compared to pytorch torch.erf // // import torch // x = torch.tensor([3.0, 1.0, 4.0, 0.15], requires_grad=True) // y = x.erf() // print(y) // loss = y.sum() // loss.backward() // print(x.grad) let y = x.erf()?; let grads = y.backward()?; let grad_x = grads.get(&x).context("no grad for x")?; assert_eq!(test_utils::to_vec1_round(&y, 4)?, [1.0, 0.8427, 1.0, 0.168]); assert_eq!( test_utils::to_vec1_round(grad_x, 4)?, [0.0001, 0.4151, 0.0, 1.1033], ); // Testing compared to pytorch nn.GELU(approximate = 'none') // // import torch // import torch.nn.functional as F // x = torch.tensor([3.0, 1.0, 4.0, 0.15], requires_grad=True) // y = F.gelu(x, approximate='none') // print(y) // loss = y.sum() // loss.backward() // print(x.grad) let y = x.gelu_erf()?; let grads = y.backward()?; let grad_x = grads.get(&x).context("no grad for x")?; assert_eq!( test_utils::to_vec1_round(&y, 4)?, [2.9960, 0.8413, 3.9999, 0.0839] ); assert_eq!( test_utils::to_vec1_round(grad_x, 4)?, [1.0119, 1.0833, 1.0005, 0.6188], ); // Testing compared to pytorch elu // // import torch // import torch.nn.functional as F // x = torch.tensor([-1.0, 0.0, -2.0, 3.0], requires_grad=True) // y = F.elu(x, alpha=2.0) // print(y) // loss = y.min // loss = y.sum() // loss.backward() // print(x.grad) let elu_x = Var::new(&[-1.0f32, 0., -2., 3.], device)?; let y = elu_x.elu(2.)?; let grads = y.backward()?; let grad_x = grads.get(&elu_x).context("no grad for x")?; assert_eq!( test_utils::to_vec1_round(&y, 4)?, [-1.2642, 0.0000, -1.7293, 3.0000] ); assert_eq!( test_utils::to_vec1_round(grad_x, 4)?, [0.7358, 2.0000, 0.2707, 1.0000] ); // testing compared to pytorch nn.Silu() let y = x.silu()?; let grads = y.backward()?; let grad_x = grads.get(&x).context("no grad for x")?; assert_eq!( test_utils::to_vec1_round(&y, 4)?, [2.8577, 0.7311, 3.9281, 0.0806] ); assert_eq!( test_utils::to_vec1_round(grad_x, 4)?, [1.0881, 0.9277, 1.0527, 0.5747], ); if device.is_cpu() { let x = Var::new(&[[[1f32, 2., 3.], [4., 5., 6.], [7., 8., 9.]]], device)?; let y = x.interpolate1d(12)?.reshape(36)?; let z = Tensor::new( &[ 1_f32, 02., 03., 04., 05., 06., 07., 08., 09., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., ], device, )?; let loss = y.unsqueeze(1)?.transpose(0, 1)?.matmul(&z.unsqueeze(1)?)?; let grads = loss.backward()?; let grad_x = grads.get(&x).context("no grad for x")?; assert_eq!( test_utils::to_vec3_round(grad_x, 4)?, [[[10_f32, 26., 42.], [58., 74., 90.], [106., 122., 138.]]] ); } // manually checked: see comments let x = Var::new(&[[[[1f32, 2., 3.], [4., 5., 6.], [7., 8., 9.]]]], device)?; let y = x.interpolate2d(6, 6)?.reshape(36)?; let z = Tensor::new( &[ 1_f32, 02., 03., 04., 05., 06., 07., 08., 09., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., ], device, )?; // gradient should be // row 1 // 1+2+7+8 = 18 // 3+4+9+10 = 26 // 5+6+11+12 = 34 // row 2 // 13+14+19+20 = 66 // 15+16+21+22 = 74 // 17+18+23+24 = 82 // row 3 // 25+26+31+32 = 114 // 27+28+33+34 = 122 // 29+30+35+36 = 130 let loss = y.unsqueeze(1)?.transpose(0, 1)?.matmul(&z.unsqueeze(1)?)?; let grads = loss.backward()?; let grad_x = grads.get(&x).context("no grad for x")?; assert_eq!( test_utils::to_vec2_round(&grad_x.flatten(0, 2)?, 4)?, [[18_f32, 
26., 34.], [66., 74., 82.], [114., 122., 130.]] ); // manually checked: see comments let x = Var::new(&[[[[1f32, 2.], [4., 5.]]]], device)?; let y = x.interpolate2d(6, 6)?.reshape(36)?; let z = Tensor::new( &[ 1_f32, 02., 03., 04., 05., 06., 07., 08., 09., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., ], device, )?; // gradient should be // row 1 // 1+2+3+7+8+9+13+14+15 = 72 // 4+5+6+10+11+12+16+17+18 = 99 // row 2 // 19+20+21+25+26+27+31+32+33 = 234 // 22+23+24+28+29+30+34+35+36 = 243 let loss = y.unsqueeze(1)?.transpose(0, 1)?.matmul(&z.unsqueeze(1)?)?; let grads = loss.backward()?; let grad_x = grads.get(&x).context("no grad for x")?; assert_eq!( test_utils::to_vec2_round(&grad_x.flatten(0, 2)?, 4)?, [[72_f32, 99.], [234., 261.]] ); // manually checked: see comments let x = Var::new(&[[[[1f32, 2.], [4., 5.]], [[6f32, 7.], [8., 9.]]]], device)?; let y = x.interpolate2d(4, 4)?.reshape(32)?; #[rustfmt::skip] let z = Tensor::new( &[ 1_f32, 02., 03., 04., 05., 06., 07., 08., 09., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32. ], device, )?; // gradient should be // m1r1 // 1+2+5+6=14 // 3+4+7+8=22 // m1r2 // 9+10+13+14=46 // 11+12+15+16=54 // m2r1 // 17+18+21+22=78 // 19+20+23+24=86 // m2r2 // 25+26+29+30=110 // 27+28+31+32=118 let loss = y.unsqueeze(1)?.transpose(0, 1)?.matmul(&z.unsqueeze(1)?)?; let grads = loss.backward()?; let grad_x = grads.get(&x).context("no grad for x")?; assert_eq!( test_utils::to_vec3_round(&grad_x.flatten(0, 1)?, 4)?, [[[14_f32, 22.], [46., 54.]], [[78., 86.], [110., 118.]]] ); // manually checked: see comments let x = Var::new( &[[[[1f32, 2.], [4., 5.]]], [[[6f32, 7.], [8., 9.]]]], device, )?; let y = x.interpolate2d(4, 4)?.reshape(32)?; #[rustfmt::skip] let z = Tensor::new( &[ 1_f32, 02., 03., 04., 05., 06., 07., 08., 09., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32. ], device, )?; // gradient should be // m1r1 // 1+2+5+6=14 // 3+4+7+8=22 // m1r2 // 9+10+13+14=46 // 11+12+15+16=54 // m2r1 // 17+18+21+22=78 // 19+20+23+24=86 // m2r2 // 25+26+29+30=110 // 27+28+31+32=118 let loss = y.unsqueeze(1)?.transpose(0, 1)?.matmul(&z.unsqueeze(1)?)?; let grads = loss.backward()?; let grad_x = grads.get(&x).context("no grad for x")?; assert_eq!( test_utils::to_vec3_round(&grad_x.flatten(0, 1)?, 4)?, [[[14_f32, 22.], [46., 54.]], [[78., 86.], [110., 118.]]] ); Ok(()) } fn binary_grad(device: &Device) -> Result<()> { let x = Var::new(&[3f32, 1., -4., -1.], device)?; let x = x.as_tensor(); // leaky relu let y = x.maximum(&(x * 0.1)?)?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!(x.to_vec1::<f32>()?, [3., 1., -4., -1.]); assert_eq!(y.to_vec1::<f32>()?, [3., 1., -0.4, -0.1]); assert_eq!(grad_x.to_vec1::<f32>()?, [1., 1., 0.1, 0.1]); let y = x.minimum(&(x * 0.1)?)?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!(y.to_vec1::<f32>()?, [0.3, 0.1, -4., -1.]); assert_eq!(grad_x.to_vec1::<f32>()?, [0.1, 0.1, 1., 1.]); // This one is easy to mess up, we want the gradient to be one as it is the identity function. 
let y = x.minimum(x)?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!(y.to_vec1::<f32>()?, [3., 1., -4., -1.]); assert_eq!(grad_x.to_vec1::<f32>()?, [1., 1., 1., 1.]); let x_var = Var::new(&[3f32, 1., -4., -1., 5., 9.], device)?; let x = x_var.as_tensor(); let y_var = Var::new(&[2f32, 7., 1.], device)?; let y = y_var.as_tensor(); let ss = x .reshape((2, 3))? .slice_scatter0(&y.reshape((1, 3))?, 1)? .sqr()?; let grads = ss.backward()?; let grad_x = grads.get(x).context("no grad for x")?; let grad_y = grads.get(y).context("no grad for y")?; assert_eq!(ss.to_vec2::<f32>()?, [[9., 1., 16.], [4., 49., 1.]]); assert_eq!(grad_x.to_vec1::<f32>()?, [6.0, 2.0, -8.0, 0.0, 0.0, 0.0]); assert_eq!(grad_y.to_vec1::<f32>()?, [4.0, 14.0, 2.0]); Ok(()) } test_device!( simple_grad, simple_grad_cpu, simple_grad_gpu, simple_grad_metal ); test_device!(sum_grad, sum_grad_cpu, sum_grad_gpu, sum_grad_metal); test_device!( matmul_grad, matmul_grad_cpu, matmul_grad_gpu, matmul_grad_metal ); test_device!( grad_descent, grad_descent_cpu, grad_descent_gpu, grad_descent_metal ); test_device!(unary_grad, unary_grad_cpu, unary_grad_gpu, unary_grad_metal); test_device!( binary_grad, binary_grad_cpu, binary_grad_gpu, binary_grad_metal );
candle/candle-core/tests/grad_tests.rs/0
{ "file_path": "candle/candle-core/tests/grad_tests.rs", "repo_id": "candle", "token_count": 9105 }
19
use candle::{Result, Tensor}; pub struct Batcher<I> { inner: I, batch_size: usize, return_last_incomplete_batch: bool, } impl<I> Batcher<I> { fn new(inner: I) -> Self { Self { inner, batch_size: 16, return_last_incomplete_batch: false, } } pub fn batch_size(mut self, batch_size: usize) -> Self { self.batch_size = batch_size; self } pub fn return_last_incomplete_batch(mut self, r: bool) -> Self { self.return_last_incomplete_batch = r; self } } pub struct Iter1<I: Iterator<Item = Tensor>> { inner: I, } pub struct Iter2<I: Iterator<Item = (Tensor, Tensor)>> { inner: I, } impl<I: Iterator<Item = Tensor>> Batcher<Iter1<I>> { pub fn new1(inner: I) -> Self { Self::new(Iter1 { inner }) } } impl<I: Iterator<Item = (Tensor, Tensor)>> Batcher<Iter2<I>> { pub fn new2(inner: I) -> Self { Self::new(Iter2 { inner }) } } pub struct IterResult1<I: Iterator<Item = Result<Tensor>>> { inner: I, } pub struct IterResult2<I: Iterator<Item = Result<(Tensor, Tensor)>>> { inner: I, } impl<I: Iterator<Item = Result<Tensor>>> Batcher<IterResult1<I>> { pub fn new_r1(inner: I) -> Self { Self::new(IterResult1 { inner }) } } impl<I: Iterator<Item = Result<(Tensor, Tensor)>>> Batcher<IterResult2<I>> { pub fn new_r2(inner: I) -> Self { Self::new(IterResult2 { inner }) } } impl<I: Iterator<Item = Tensor>> Iterator for Batcher<Iter1<I>> { type Item = Result<Tensor>; fn next(&mut self) -> Option<Self::Item> { let mut items = Vec::with_capacity(self.batch_size); for _i in 0..self.batch_size { // We have two levels of inner here so that we can have two implementations of the // Iterator trait that are different for Iter1 and Iter2. If rust gets better // specialization at some point we can get rid of this. match self.inner.inner.next() { Some(item) => items.push(item), None => { if self.return_last_incomplete_batch { break; } return None; } } } Some(Tensor::stack(&items, 0)) } } impl<I: Iterator<Item = (Tensor, Tensor)>> Iterator for Batcher<Iter2<I>> { type Item = Result<(Tensor, Tensor)>; fn next(&mut self) -> Option<Self::Item> { let mut xs = Vec::with_capacity(self.batch_size); let mut ys = Vec::with_capacity(self.batch_size); for _i in 0..self.batch_size { match self.inner.inner.next() { Some((x, y)) => { xs.push(x); ys.push(y) } None => { if self.return_last_incomplete_batch { break; } return None; } } } let xs = Tensor::stack(&xs, 0); let ys = Tensor::stack(&ys, 0); Some(xs.and_then(|xs| ys.map(|ys| (xs, ys)))) } } impl<I: Iterator<Item = Result<Tensor>>> Iterator for Batcher<IterResult1<I>> { type Item = Result<Tensor>; fn next(&mut self) -> Option<Self::Item> { let mut items = Vec::with_capacity(self.batch_size); for _i in 0..self.batch_size { // We have two levels of inner here so that we can have two implementations of the // Iterator trait that are different for Iter1 and Iter2. If rust gets better // specialization at some point we can get rid of this. 
match self.inner.inner.next() { Some(item) => items.push(item), None => { if self.return_last_incomplete_batch { break; } return None; } } } let items = items.into_iter().collect::<Result<Vec<Tensor>>>(); Some(items.and_then(|items| Tensor::stack(&items, 0))) } } impl<I: Iterator<Item = Result<(Tensor, Tensor)>>> Iterator for Batcher<IterResult2<I>> { type Item = Result<(Tensor, Tensor)>; fn next(&mut self) -> Option<Self::Item> { let mut xs = Vec::with_capacity(self.batch_size); let mut ys = Vec::with_capacity(self.batch_size); let mut errs = vec![]; for _i in 0..self.batch_size { match self.inner.inner.next() { Some(Ok((x, y))) => { xs.push(x); ys.push(y) } Some(Err(err)) => errs.push(err), None => { if self.return_last_incomplete_batch { break; } return None; } } } if !errs.is_empty() { return Some(Err(errs.swap_remove(0))); } let xs = Tensor::stack(&xs, 0); let ys = Tensor::stack(&ys, 0); Some(xs.and_then(|xs| ys.map(|ys| (xs, ys)))) } }
candle/candle-datasets/src/batcher.rs/0
{ "file_path": "candle/candle-datasets/src/batcher.rs", "repo_id": "candle", "token_count": 2660 }
20
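To make the intended use of this builder concrete, here is a hypothetical usage sketch: an iterator of `Result<(Tensor, Tensor)>` pairs goes through `Batcher::new_r2`, and every item yielded by the resulting iterator is a stacked `(xs, ys)` batch. The `candle_datasets::Batcher` re-export and the sample data are assumptions.

```rust
use candle::{Device, Result, Tensor};
use candle_datasets::Batcher; // assumed re-export of the Batcher defined above

fn main() -> Result<()> {
    let device = Device::Cpu;
    // Ten fallible (input, target) pairs, as a data-loading pipeline might produce.
    let samples = (0..10u32).map(|i| -> Result<(Tensor, Tensor)> {
        let x = Tensor::new(&[i as f32, i as f32 + 0.5], &device)?;
        let y = Tensor::new(&[i as f32 * 2.0], &device)?;
        Ok((x, y))
    });
    // Group four samples per batch and keep the final, smaller batch.
    let batcher = Batcher::new_r2(samples)
        .batch_size(4)
        .return_last_incomplete_batch(true);
    for batch in batcher {
        let (xs, ys) = batch?;
        // xs: (4, 2) and ys: (4, 1), except (2, 2) / (2, 1) for the last batch.
        println!("xs {:?} ys {:?}", xs.shape(), ys.shape());
    }
    Ok(())
}
```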
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::Error as E; use clap::Parser; use candle::{DType, Device, Result, Tensor}; use candle_examples::token_output_stream::TokenOutputStream; use candle_nn::VarBuilder; use candle_transformers::models::blip; use candle_transformers::models::quantized_blip; use tokenizers::Tokenizer; enum Model { M(blip::BlipForConditionalGeneration), Q(quantized_blip::BlipForConditionalGeneration), } impl Model { fn text_decoder_forward(&mut self, xs: &Tensor, img_xs: &Tensor) -> Result<Tensor> { match self { Self::M(m) => m.text_decoder().forward(xs, img_xs), Self::Q(m) => m.text_decoder().forward(xs, img_xs), } } } // TODO: Maybe add support for the conditional prompt. #[derive(Parser)] struct Args { #[arg(long)] model: Option<String>, #[arg(long)] tokenizer: Option<String>, #[arg(long)] image: String, /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Use the quantized version of the model. #[arg(long)] quantized: bool, } const SEP_TOKEN_ID: u32 = 102; /// Loads an image from disk using the image crate, this returns a tensor with shape /// (3, 384, 384). OpenAI normalization is applied. pub fn load_image<P: AsRef<std::path::Path>>(p: P) -> Result<Tensor> { let img = image::io::Reader::open(p)? .decode() .map_err(candle::Error::wrap)? .resize_to_fill(384, 384, image::imageops::FilterType::Triangle); let img = img.to_rgb8(); let data = img.into_raw(); let data = Tensor::from_vec(data, (384, 384, 3), &Device::Cpu)?.permute((2, 0, 1))?; let mean = Tensor::new(&[0.48145466f32, 0.4578275, 0.40821073], &Device::Cpu)?.reshape((3, 1, 1))?; let std = Tensor::new(&[0.26862954f32, 0.261_302_6, 0.275_777_1], &Device::Cpu)? .reshape((3, 1, 1))?; (data.to_dtype(candle::DType::F32)? / 255.)? .broadcast_sub(&mean)? .broadcast_div(&std) } pub fn main() -> anyhow::Result<()> { let args = Args::parse(); let model_file = match args.model { None => { let api = hf_hub::api::sync::Api::new()?; if args.quantized { let api = api.model("lmz/candle-blip".to_string()); api.get("blip-image-captioning-large-q4k.gguf")? } else { let api = api.repo(hf_hub::Repo::with_revision( "Salesforce/blip-image-captioning-large".to_string(), hf_hub::RepoType::Model, "refs/pr/18".to_string(), )); api.get("model.safetensors")? } } Some(model) => model.into(), }; let tokenizer = match args.tokenizer { None => { let api = hf_hub::api::sync::Api::new()?; let api = api.model("Salesforce/blip-image-captioning-large".to_string()); api.get("tokenizer.json")? } Some(file) => file.into(), }; let tokenizer = Tokenizer::from_file(tokenizer).map_err(E::msg)?; let mut tokenizer = TokenOutputStream::new(tokenizer); let mut logits_processor = candle_transformers::generation::LogitsProcessor::new(1337, None, None); let config = blip::Config::image_captioning_large(); let device = candle_examples::device(args.cpu)?; let (image_embeds, device, mut model) = if args.quantized { let device = Device::Cpu; let image = load_image(args.image)?.to_device(&device)?; println!("loaded image {image:?}"); let vb = quantized_blip::VarBuilder::from_gguf(model_file, &device)?; let model = quantized_blip::BlipForConditionalGeneration::new(&config, vb)?; let image_embeds = image.unsqueeze(0)?.apply(model.vision_model())?; (image_embeds, device, Model::Q(model)) } else { let image = load_image(args.image)?.to_device(&device)?; println!("loaded image {image:?}"); let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, &device)? 
}; let model = blip::BlipForConditionalGeneration::new(&config, vb)?; let image_embeds = image.unsqueeze(0)?.apply(model.vision_model())?; (image_embeds, device, Model::M(model)) }; let mut token_ids = vec![30522u32]; for index in 0..1000 { let context_size = if index > 0 { 1 } else { token_ids.len() }; let start_pos = token_ids.len().saturating_sub(context_size); let input_ids = Tensor::new(&token_ids[start_pos..], &device)?.unsqueeze(0)?; let logits = model.text_decoder_forward(&input_ids, &image_embeds)?; let logits = logits.squeeze(0)?; let logits = logits.get(logits.dim(0)? - 1)?; let token = logits_processor.sample(&logits)?; if token == SEP_TOKEN_ID { break; } token_ids.push(token); if let Some(t) = tokenizer.next_token(token)? { use std::io::Write; print!("{t}"); std::io::stdout().flush()?; } } if let Some(rest) = tokenizer.decode_rest().map_err(E::msg)? { print!("{rest}"); } println!(); Ok(()) }
candle/candle-examples/examples/blip/main.rs/0
{ "file_path": "candle/candle-examples/examples/blip/main.rs", "repo_id": "candle", "token_count": 2437 }
21
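The caption loop above drives generation through `LogitsProcessor::new(1337, None, None)` and `sample(&logits)`. A tiny sketch of just that sampling step on made-up logits is shown below; the greedy (argmax) behaviour when no temperature is given is an assumption about `candle_transformers`, and the numbers are invented.

```rust
use candle::{Device, Result, Tensor};
use candle_transformers::generation::LogitsProcessor;

fn main() -> Result<()> {
    let device = Device::Cpu;
    // Seeded processor with no temperature and no top-p, as in the BLIP example.
    let mut logits_processor = LogitsProcessor::new(1337, None, None);
    // Fake logits over a four-token vocabulary.
    let logits = Tensor::new(&[0.1f32, 2.5, -1.0, 0.3], &device)?;
    let token = logits_processor.sample(&logits)?;
    // With no temperature set, this is expected to pick the argmax, i.e. token id 1.
    println!("sampled token id: {token}");
    Ok(())
}
```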
# candle-encodec

[EnCodec](https://huggingface.co/facebook/encodec_24khz) is a high-quality audio compression model using an encoder/decoder architecture with residual vector quantization.

## Running one example

```bash
cargo run --example encodec --features symphonia --release -- code-to-audio \
    candle-examples/examples/encodec/jfk-codes.safetensors \
    jfk.wav
```

This decodes the EnCodec tokens stored in `jfk-codes.safetensors` and generates an output wav file containing the audio data.

Instead of `code-to-audio` one can use:
- `audio-to-audio in.mp3 out.wav`: encodes the input audio file then decodes it to a wav file.
- `audio-to-code in.mp3 out.safetensors`: generates a safetensors file containing EnCodec tokens for the input audio file.

If the audio output file name is set to `-`, the audio content is played directly on the default audio output device. If the audio input file is set to `-`, the audio is recorded from the default audio input device.
candle/candle-examples/examples/encodec/README.md/0
{ "file_path": "candle/candle-examples/examples/encodec/README.md", "repo_id": "candle", "token_count": 305 }
22
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::{Error as E, Result}; use clap::{Parser, ValueEnum}; mod model; use model::{Config, Model}; use candle::{DType, Device, Module, Tensor}; use candle_examples::token_output_stream::TokenOutputStream; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use hf_hub::{api::sync::Api, Repo, RepoType}; use tokenizers::Tokenizer; struct TextGeneration { model: Model, device: Device, tokenizer: TokenOutputStream, logits_processor: LogitsProcessor, repeat_penalty: f32, repeat_last_n: usize, } impl TextGeneration { #[allow(clippy::too_many_arguments)] fn new( model: Model, tokenizer: Tokenizer, seed: u64, temp: Option<f64>, top_p: Option<f64>, repeat_penalty: f32, repeat_last_n: usize, device: &Device, ) -> Self { let logits_processor = LogitsProcessor::new(seed, temp, top_p); Self { model, tokenizer: TokenOutputStream::new(tokenizer), logits_processor, repeat_penalty, repeat_last_n, device: device.clone(), } } fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> { use std::io::Write; self.tokenizer.clear(); let mut tokens = self .tokenizer .tokenizer() .encode(prompt, true) .map_err(E::msg)? .get_ids() .to_vec(); for &t in tokens.iter() { if let Some(t) = self.tokenizer.next_token(t)? { print!("{t}") } } std::io::stdout().flush()?; let mut generated_tokens = 0usize; let eos_token = match self.tokenizer.get_token("<|endoftext|>") { Some(token) => token, None => anyhow::bail!("cannot find the </s> token"), }; let start_gen = std::time::Instant::now(); for _ in 0..sample_len { let input = Tensor::new(tokens.as_slice(), &self.device)?.unsqueeze(0)?; let logits = self.model.forward(&input)?; let logits = logits.squeeze(0)?.squeeze(0)?.to_dtype(DType::F32)?; let logits = if self.repeat_penalty == 1. { logits } else { let start_at = tokens.len().saturating_sub(self.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, self.repeat_penalty, &tokens[start_at..], )? }; let next_token = self.logits_processor.sample(&logits)?; tokens.push(next_token); generated_tokens += 1; if next_token == eos_token { break; } if let Some(t) = self.tokenizer.next_token(next_token)? { print!("{t}"); std::io::stdout().flush()?; } } let dt = start_gen.elapsed(); if let Some(rest) = self.tokenizer.decode_rest().map_err(E::msg)? { print!("{rest}"); } std::io::stdout().flush()?; println!( "\n{generated_tokens} tokens generated ({:.2} token/s)", generated_tokens as f64 / dt.as_secs_f64(), ); Ok(()) } } #[derive(Parser, ValueEnum, Clone, Copy, PartialEq, Eq, Debug)] enum Which { Mamba130m, Mamba370m, Mamba790m, Mamba1_4b, Mamba2_8b, Mamba2_8bSlimPj, } impl std::fmt::Display for Which { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{:?}", self) } } impl Which { fn model_id(&self) -> &'static str { match self { Self::Mamba130m => "state-spaces/mamba-130m", Self::Mamba370m => "state-spaces/mamba-370m", Self::Mamba790m => "state-spaces/mamba-790m", Self::Mamba1_4b => "state-spaces/mamba-1.4b", Self::Mamba2_8b => "state-spaces/mamba-2.8b", Self::Mamba2_8bSlimPj => "state-spaces/mamba-2.8b-slimpj'", } } fn revision(&self) -> &'static str { match self { Self::Mamba130m | Self::Mamba370m | Self::Mamba790m | Self::Mamba1_4b | Self::Mamba2_8bSlimPj => "refs/pr/1", Self::Mamba2_8b => "refs/pr/4", } } } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. 
#[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, #[arg(long)] prompt: String, /// The temperature used to generate samples. #[arg(long)] temperature: Option<f64>, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, /// The seed to use when generating random samples. #[arg(long, default_value_t = 299792458)] seed: u64, /// The length of the sample to generate (in tokens). #[arg(long, short = 'n', default_value_t = 5000)] sample_len: usize, #[arg(long, default_value = "mamba130m")] which: Which, #[arg(long)] model_id: Option<String>, #[arg(long)] revision: Option<String>, #[arg(long)] tokenizer_file: Option<String>, #[arg(long)] weight_files: Option<String>, #[arg(long)] config_file: Option<String>, /// Penalty to be applied for repeating tokens, 1. means no penalty. #[arg(long, default_value_t = 1.1)] repeat_penalty: f32, /// The context size to consider for the repeat penalty. #[arg(long, default_value_t = 64)] repeat_last_n: usize, } fn main() -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; println!( "avx: {}, neon: {}, simd128: {}, f16c: {}", candle::utils::with_avx(), candle::utils::with_neon(), candle::utils::with_simd128(), candle::utils::with_f16c() ); println!( "temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}", args.temperature.unwrap_or(0.), args.repeat_penalty, args.repeat_last_n ); let start = std::time::Instant::now(); let api = Api::new()?; let repo = api.repo(Repo::with_revision( args.model_id .unwrap_or_else(|| args.which.model_id().to_string()), RepoType::Model, args.revision .unwrap_or_else(|| args.which.revision().to_string()), )); let tokenizer_filename = match args.tokenizer_file { Some(file) => std::path::PathBuf::from(file), None => api .model("EleutherAI/gpt-neox-20b".to_string()) .get("tokenizer.json")?, }; let config_filename = match args.config_file { Some(file) => std::path::PathBuf::from(file), None => repo.get("config.json")?, }; let filenames = match args.weight_files { Some(files) => files .split(',') .map(std::path::PathBuf::from) .collect::<Vec<_>>(), None => { vec![repo.get("model.safetensors")?] } }; println!("retrieved the files in {:?}", start.elapsed()); let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let start = std::time::Instant::now(); let config: Config = serde_json::from_slice(&std::fs::read(config_filename)?)?; let device = candle_examples::device(args.cpu)?; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, DType::F32, &device)? }; let model = Model::new(&config, vb.pp("backbone"))?; println!("loaded the model in {:?}", start.elapsed()); let mut pipeline = TextGeneration::new( model, tokenizer, args.seed, args.temperature, args.top_p, args.repeat_penalty, args.repeat_last_n, &device, ); pipeline.run(&args.prompt, args.sample_len)?; Ok(()) }
candle/candle-examples/examples/mamba-minimal/main.rs/0
{ "file_path": "candle/candle-examples/examples/mamba-minimal/main.rs", "repo_id": "candle", "token_count": 4087 }
23
#![allow(dead_code)] // https://huggingface.co/facebook/musicgen-small/tree/main // https://github.com/huggingface/transformers/blob/cd4584e3c809bb9e1392ccd3fe38b40daba5519a/src/transformers/models/musicgen/modeling_musicgen.py // TODO: Add an offline mode. // TODO: Add a KV cache. #[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; mod musicgen_model; use musicgen_model::{GenConfig, MusicgenForConditionalGeneration}; use anyhow::{Error as E, Result}; use candle::{DType, Tensor}; use candle_nn::VarBuilder; use clap::Parser; use hf_hub::{api::sync::Api, Repo, RepoType}; const DTYPE: DType = DType::F32; #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// The model weight file, in safetensor format. #[arg(long)] model: Option<String>, /// The tokenizer config. #[arg(long)] tokenizer: Option<String>, #[arg( long, default_value = "90s rock song with loud guitars and heavy drums" )] prompt: String, } fn main() -> Result<()> { use tokenizers::Tokenizer; let args = Args::parse(); let device = candle_examples::device(args.cpu)?; let tokenizer = match args.tokenizer { Some(tokenizer) => std::path::PathBuf::from(tokenizer), None => Api::new()? .model("facebook/musicgen-small".to_string()) .get("tokenizer.json")?, }; let mut tokenizer = Tokenizer::from_file(tokenizer).map_err(E::msg)?; let tokenizer = tokenizer .with_padding(None) .with_truncation(None) .map_err(E::msg)?; let model = match args.model { Some(model) => std::path::PathBuf::from(model), None => Api::new()? .repo(Repo::with_revision( "facebook/musicgen-small".to_string(), RepoType::Model, "refs/pr/13".to_string(), )) .get("model.safetensors")?, }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model], DTYPE, &device)? }; let config = GenConfig::small(); let mut model = MusicgenForConditionalGeneration::load(vb, config)?; let tokens = tokenizer .encode(args.prompt.as_str(), true) .map_err(E::msg)? .get_ids() .to_vec(); println!("tokens: {tokens:?}"); let tokens = Tensor::new(tokens.as_slice(), &device)?.unsqueeze(0)?; println!("{tokens:?}"); let embeds = model.text_encoder.forward(&tokens)?; println!("{embeds}"); Ok(()) }
candle/candle-examples/examples/musicgen/main.rs/0
{ "file_path": "candle/candle-examples/examples/musicgen/main.rs", "repo_id": "candle", "token_count": 1151 }
24
use std::collections::VecDeque; use rand::distributions::Uniform; use rand::{thread_rng, Rng}; use candle::{DType, Device, Module, Result, Tensor}; use candle_nn::loss::mse; use candle_nn::{linear, seq, Activation, AdamW, Optimizer, VarBuilder, VarMap}; use crate::gym_env::GymEnv; const DEVICE: Device = Device::Cpu; const EPISODES: usize = 200; const BATCH_SIZE: usize = 64; const GAMMA: f64 = 0.99; const LEARNING_RATE: f64 = 0.01; pub fn run() -> Result<()> { let env = GymEnv::new("CartPole-v1")?; // Build the model that predicts the estimated rewards given a specific state. let var_map = VarMap::new(); let vb = VarBuilder::from_varmap(&var_map, DType::F32, &DEVICE); let observation_space = *env.observation_space().first().unwrap(); let model = seq() .add(linear(observation_space, 64, vb.pp("linear_in"))?) .add(Activation::Relu) .add(linear(64, env.action_space(), vb.pp("linear_out"))?); let mut optimizer = AdamW::new_lr(var_map.all_vars(), LEARNING_RATE)?; // Initialize the model's memory. let mut memory = VecDeque::with_capacity(10000); // Start the training loop. let mut state = env.reset(0)?; let mut episode = 0; let mut accumulate_rewards = 0.0; while episode < EPISODES { // Given the current state, predict the estimated rewards, and take the // action that is expected to return the most rewards. let estimated_rewards = model.forward(&state.unsqueeze(0)?)?; let action: u32 = estimated_rewards.squeeze(0)?.argmax(0)?.to_scalar()?; // Take that action in the environment, and memorize the outcome: // - the state for which the action was taken // - the action taken // - the new state resulting of taking that action // - the actual rewards of taking that action // - whether the environment reached a terminal state or not (e.g. game over) let step = env.step(action)?; accumulate_rewards += step.reward; memory.push_back(( state, action, step.state.clone(), step.reward, step.terminated || step.truncated, )); state = step.state; // If there's enough entries in the memory, perform a learning step, where // BATCH_SIZE transitions will be sampled from the memory and will be // fed to the model so that it performs a backward pass. if memory.len() > BATCH_SIZE { // Sample randomly from the memory. let batch = thread_rng() .sample_iter(Uniform::from(0..memory.len())) .take(BATCH_SIZE) .map(|i| memory.get(i).unwrap().clone()) .collect::<Vec<_>>(); // Group all the samples together into tensors with the appropriate shape. let states: Vec<_> = batch.iter().map(|e| e.0.clone()).collect(); let states = Tensor::stack(&states, 0)?; let actions = batch.iter().map(|e| e.1); let actions = Tensor::from_iter(actions, &DEVICE)?.unsqueeze(1)?; let next_states: Vec<_> = batch.iter().map(|e| e.2.clone()).collect(); let next_states = Tensor::stack(&next_states, 0)?; let rewards = batch.iter().map(|e| e.3 as f32); let rewards = Tensor::from_iter(rewards, &DEVICE)?.unsqueeze(1)?; let non_final_mask = batch.iter().map(|e| !e.4 as u8 as f32); let non_final_mask = Tensor::from_iter(non_final_mask, &DEVICE)?.unsqueeze(1)?; // Get the estimated rewards for the actions that where taken at each step. let estimated_rewards = model.forward(&states)?; let x = estimated_rewards.gather(&actions, 1)?; // Get the maximum expected rewards for the next state, apply them a discount rate // GAMMA and add them to the rewards that were actually gathered on the current state. // If the next state is a terminal state, just omit maximum estimated // rewards for that state. 
let expected_rewards = model.forward(&next_states)?.detach(); let y = expected_rewards.max_keepdim(1)?; let y = (y * GAMMA * non_final_mask + rewards)?; // Compare the estimated rewards with the maximum expected rewards and // perform the backward step. let loss = mse(&x, &y)?; optimizer.backward_step(&loss)?; } // If we are on a terminal state, reset the environment and log how it went. if step.terminated || step.truncated { episode += 1; println!("Episode {episode} | Rewards {}", accumulate_rewards as i64); state = env.reset(0)?; accumulate_rewards = 0.0; } } Ok(()) }
candle/candle-examples/examples/reinforcement-learning/dqn.rs/0
{ "file_path": "candle/candle-examples/examples/reinforcement-learning/dqn.rs", "repo_id": "candle", "token_count": 2032 }
25
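The core of the learning step above is the masked Bellman target y = r + GAMMA * max_a' Q(s', a'), with the future-value term zeroed out for terminal transitions. Here is a tiny standalone sketch of just that computation on made-up numbers; the values and shapes are assumptions, not taken from the example.

```rust
use candle::{Device, Result, Tensor};

const GAMMA: f64 = 0.99;

fn main() -> Result<()> {
    let device = Device::Cpu;
    // Predicted Q-values for two next states, with two possible actions each.
    let next_q = Tensor::new(&[[1.0f32, 3.0], [0.5, 0.2]], &device)?;
    // Rewards observed for the two transitions.
    let rewards = Tensor::new(&[[1.0f32], [1.0]], &device)?;
    // The second transition ended the episode, so its future value is masked out.
    let non_final_mask = Tensor::new(&[[1.0f32], [0.0]], &device)?;
    // Max over the action dimension, keeping the (batch, 1) shape.
    let max_next = next_q.max_keepdim(1)?;
    // y = GAMMA * max_a' Q(s', a') * mask + r  ->  roughly [[3.97], [1.0]].
    let target = (max_next * GAMMA * non_final_mask + rewards)?;
    println!("{:?}", target.to_vec2::<f32>()?);
    Ok(())
}
```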
use candle::Device; use candle::Module; use candle_nn::VarBuilder; use candle_transformers::models::segformer::{ Config, ImageClassificationModel, SemanticSegmentationModel, }; use clap::{Args, Parser, Subcommand}; use image::Rgb; use imageproc::integral_image::ArrayData; use std::collections::HashMap; use std::path::PathBuf; #[derive(Parser)] #[clap(about, version, long_about = None)] struct CliArgs { #[arg(long, help = "use cpu")] cpu: bool, #[command(subcommand)] command: Commands, } #[derive(Args, Debug)] struct SegmentationArgs { #[arg( long, help = "name of the huggingface hub model", default_value = "nvidia/segformer-b0-finetuned-ade-512-512" )] model_name: String, #[arg( long, help = "path to the label file in json format", default_value = "candle-examples/examples/segformer/assets/labels.json" )] label_path: PathBuf, #[arg(long, help = "path to for the output mask image")] output_path: PathBuf, #[arg(help = "path to image as input")] image: PathBuf, } #[derive(Args, Debug)] struct ClassificationArgs { #[arg( long, help = "name of the huggingface hub model", default_value = "paolinox/segformer-finetuned-food101" )] model_name: String, #[arg(help = "path to image as input")] image: PathBuf, } #[derive(Subcommand, Debug)] enum Commands { Segment(SegmentationArgs), Classify(ClassificationArgs), } fn get_vb_and_config(model_name: String, device: &Device) -> anyhow::Result<(VarBuilder, Config)> { println!("loading model {} via huggingface hub", model_name); let api = hf_hub::api::sync::Api::new()?; let api = api.model(model_name.clone()); let model_file = api.get("model.safetensors")?; println!("model {} downloaded and loaded", model_name); let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], candle::DType::F32, device)? }; let config = std::fs::read_to_string(api.get("config.json")?)?; let config: Config = serde_json::from_str(&config)?; println!("{:?}", config); Ok((vb, config)) } #[derive(Debug, serde::Deserialize)] struct LabelItem { index: u32, color: String, } fn segmentation_task(args: SegmentationArgs, device: &Device) -> anyhow::Result<()> { let label_file = std::fs::read_to_string(&args.label_path)?; let label_items: Vec<LabelItem> = serde_json::from_str(&label_file)?; let label_colors: HashMap<u32, Rgb<u8>> = label_items .iter() .map(|x| { (x.index - 1, { let color = x.color.trim_start_matches('#'); let r = u8::from_str_radix(&color[0..2], 16).unwrap(); let g = u8::from_str_radix(&color[2..4], 16).unwrap(); let b = u8::from_str_radix(&color[4..6], 16).unwrap(); Rgb([r, g, b]) }) }) .collect(); let image = candle_examples::imagenet::load_image224(args.image)? .unsqueeze(0)? 
.to_device(device)?; let (vb, config) = get_vb_and_config(args.model_name, device)?; let num_labels = label_items.len(); let model = SemanticSegmentationModel::new(&config, num_labels, vb)?; let segmentations = model.forward(&image)?; // generate a mask image let mask = &segmentations.squeeze(0)?.argmax(0)?; let (h, w) = mask.dims2()?; let mask = mask.flatten_all()?.to_vec1::<u32>()?; let mask = mask .iter() .flat_map(|x| label_colors[x].data()) .collect::<Vec<u8>>(); let mask: image::ImageBuffer<image::Rgb<u8>, Vec<u8>> = image::ImageBuffer::from_raw(w as u32, h as u32, mask).unwrap(); // resize let mask = image::DynamicImage::from(mask); let mask = mask.resize_to_fill( w as u32 * 4, h as u32 * 4, image::imageops::FilterType::CatmullRom, ); mask.save(args.output_path.clone())?; println!("mask image saved to {:?}", args.output_path); Ok(()) } fn classification_task(args: ClassificationArgs, device: &Device) -> anyhow::Result<()> { let image = candle_examples::imagenet::load_image224(args.image)? .unsqueeze(0)? .to_device(device)?; let (vb, config) = get_vb_and_config(args.model_name, device)?; let num_labels = 7; let model = ImageClassificationModel::new(&config, num_labels, vb)?; let classification = model.forward(&image)?; let classification = candle_nn::ops::softmax_last_dim(&classification)?; let classification = classification.squeeze(0)?; println!( "classification logits {:?}", classification.to_vec1::<f32>()? ); let label_id = classification.argmax(0)?.to_scalar::<u32>()?; let label_id = format!("{}", label_id); println!("label: {}", config.id2label[&label_id]); Ok(()) } pub fn main() -> anyhow::Result<()> { let args = CliArgs::parse(); let device = candle_examples::device(args.cpu)?; if let Commands::Segment(args) = args.command { segmentation_task(args, &device)? } else if let Commands::Classify(args) = args.command { classification_task(args, &device)? } Ok(()) }
candle/candle-examples/examples/segformer/main.rs/0
{ "file_path": "candle/candle-examples/examples/segformer/main.rs", "repo_id": "candle", "token_count": 2226 }
26
use image::{DynamicImage, ImageBuffer}; use serde::Deserialize; use std::collections::HashMap; use candle::{DType, Device, Result, Tensor}; #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct ProcessorConfig { do_resize: bool, height: u32, width: u32, do_rescale: bool, do_normalize: bool, image_mean: Vec<f32>, image_std: Vec<f32>, } impl Default for ProcessorConfig { fn default() -> Self { Self { do_resize: true, height: 384, width: 384, do_rescale: true, do_normalize: true, image_mean: vec![0.5, 0.5, 0.5], image_std: vec![0.5, 0.5, 0.5], } } } pub struct ViTImageProcessor { do_resize: bool, height: u32, width: u32, do_normalize: bool, image_mean: Vec<f32>, image_std: Vec<f32>, } impl ViTImageProcessor { pub fn new(config: &ProcessorConfig) -> Self { Self { do_resize: config.do_resize, height: config.height, width: config.width, do_normalize: config.do_normalize, image_mean: config.image_mean.clone(), image_std: config.image_std.clone(), } } pub fn preprocess(&self, images: Vec<&str>) -> Result<Tensor> { let height = self.height as usize; let width = self.width as usize; let channels = 3; let images = self.load_images(images)?; let resized_images: Vec<DynamicImage> = if self.do_resize { images .iter() .map(|image| self.resize(image.clone(), None).unwrap()) .collect() } else { images }; let normalized_images: Vec<Tensor> = if self.do_normalize { resized_images .iter() .map(|image| self.normalize(image.clone(), None, None).unwrap()) .collect() } else { let resized_images: Vec<ImageBuffer<image::Rgb<u8>, Vec<u8>>> = resized_images.iter().map(|image| image.to_rgb8()).collect(); let data = resized_images .into_iter() .map(|image| image.into_raw()) .collect::<Vec<Vec<u8>>>(); data.iter() .map(|image| { Tensor::from_vec(image.clone(), (height, width, channels), &Device::Cpu) .unwrap() .permute((2, 0, 1)) .unwrap() }) .collect::<Vec<Tensor>>() }; Tensor::stack(&normalized_images, 0) } fn resize( &self, image: image::DynamicImage, size: Option<HashMap<String, u32>>, ) -> Result<image::DynamicImage> { let (height, width) = match &size { Some(size) => (size.get("height").unwrap(), size.get("width").unwrap()), None => (&self.height, &self.width), }; let resized_image = image.resize_exact(*width, *height, image::imageops::FilterType::Triangle); Ok(resized_image) } fn normalize( &self, image: image::DynamicImage, mean: Option<Vec<f32>>, std: Option<Vec<f32>>, ) -> Result<Tensor> { let mean = match mean { Some(mean) => mean, None => self.image_mean.clone(), }; let std = match std { Some(std) => std, None => self.image_std.clone(), }; let mean = Tensor::from_vec(mean, (3, 1, 1), &Device::Cpu)?; let std = Tensor::from_vec(std, (3, 1, 1), &Device::Cpu)?; let image = image.to_rgb8(); let data = image.into_raw(); let height = self.height as usize; let width = self.width as usize; let channels = 3; let data = Tensor::from_vec(data, &[height, width, channels], &Device::Cpu)?.permute((2, 0, 1))?; (data.to_dtype(DType::F32)? / 255.)? .broadcast_sub(&mean)? .broadcast_div(&std) } pub fn load_images(&self, image_path: Vec<&str>) -> Result<Vec<image::DynamicImage>> { let mut images: Vec<image::DynamicImage> = Vec::new(); for path in image_path { let img = image::io::Reader::open(path)?.decode().unwrap(); images.push(img); } Ok(images) } }
candle/candle-examples/examples/trocr/image_processor.rs/0
{ "file_path": "candle/candle-examples/examples/trocr/image_processor.rs", "repo_id": "candle", "token_count": 2274 }
27
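A short usage sketch of the processor above: with the default config it should yield a `(1, 3, 384, 384)` f32 tensor on the CPU. Reaching the types through a local `image_processor` module and the image path are assumptions.

```rust
mod image_processor; // the file shown above

use candle::Result;
use image_processor::{ProcessorConfig, ViTImageProcessor};

fn main() -> Result<()> {
    // Defaults: resize to 384x384 and normalize with mean/std of 0.5 per channel.
    let config = ProcessorConfig::default();
    let processor = ViTImageProcessor::new(&config);
    // Hypothetical input file; any image the `image` crate can decode works.
    let pixel_values = processor.preprocess(vec!["assets/sample.jpg"])?;
    println!("{:?}", pixel_values.shape()); // (1, 3, 384, 384)
    Ok(())
}
```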
# candle-wuerstchen: Efficient Pretraining of Text-to-Image Models

![anthropomorphic cat dressed as a fire fighter](./assets/cat.jpg)

The `wuerstchen` example is a port of the [diffusers implementation](https://github.com/huggingface/diffusers/tree/19edca82f1ff194c07317369a92b470dbae97f34/src/diffusers/pipelines/wuerstchen) for Würstchen v2. The candle implementation reproduces the same structure/files for models and pipelines. Useful resources:

- [Official implementation](https://github.com/dome272/Wuerstchen).
- [arXiv paper](https://arxiv.org/abs/2306.00637).
- Blog post: [Introducing Würstchen: Fast Diffusion for Image Generation](https://huggingface.co/blog/wuerstchen).

## Getting the weights

The weights are automatically downloaded for you from the [HuggingFace Hub](https://huggingface.co/) on the first run. There are various command line flags to use local files instead; run with `--help` to learn about them.

## Running an example

```bash
cargo run --example wuerstchen --release --features cuda,cudnn -- \
    --prompt "Anthropomorphic cat dressed as a fire fighter"
```

The final image is named `sd_final.png` by default.
candle/candle-examples/examples/wuerstchen/README.md/0
{ "file_path": "candle/candle-examples/examples/wuerstchen/README.md", "repo_id": "candle", "token_count": 358 }
28
// Copied from https://github.com/ruuda/bs1770/blob/master/src/lib.rs // BS1770 -- Loudness analysis library conforming to ITU-R BS.1770 // Copyright 2020 Ruud van Asseldonk // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // A copy of the License has been included in the root of the repository. //! Loudness analysis conforming to [ITU-R BS.1770-4][bs17704]. //! //! This library offers the building blocks to perform BS.1770 loudness //! measurements, but you need to put the pieces together yourself. //! //! [bs17704]: https://www.itu.int/rec/R-REC-BS.1770-4-201510-I/en //! //! # Stereo integrated loudness example //! //! ```ignore //! # fn load_stereo_audio() -> [Vec<i16>; 2] { //! # [vec![0; 48_000], vec![0; 48_000]] //! # } //! # //! let sample_rate_hz = 44_100; //! let bits_per_sample = 16; //! let channel_samples: [Vec<i16>; 2] = load_stereo_audio(); //! //! // When converting integer samples to float, note that the maximum amplitude //! // is `1 << (bits_per_sample - 1)`, one bit is the sign bit. //! let normalizer = 1.0 / (1_u64 << (bits_per_sample - 1)) as f32; //! //! let channel_power: Vec<_> = channel_samples.iter().map(|samples| { //! let mut meter = bs1770::ChannelLoudnessMeter::new(sample_rate_hz); //! meter.push(samples.iter().map(|&s| s as f32 * normalizer)); //! meter.into_100ms_windows() //! }).collect(); //! //! let stereo_power = bs1770::reduce_stereo( //! channel_power[0].as_ref(), //! channel_power[1].as_ref(), //! ); //! //! let gated_power = bs1770::gated_mean( //! stereo_power.as_ref() //! ).unwrap_or(bs1770::Power(0.0)); //! println!("Integrated loudness: {:.1} LUFS", gated_power.loudness_lkfs()); //! ``` use std::f32; /// Coefficients for a 2nd-degree infinite impulse response filter. /// /// Coefficient a0 is implicitly 1.0. #[derive(Clone)] struct Filter { a1: f32, a2: f32, b0: f32, b1: f32, b2: f32, // The past two input and output samples. x1: f32, x2: f32, y1: f32, y2: f32, } impl Filter { /// Stage 1 of th BS.1770-4 pre-filter. pub fn high_shelf(sample_rate_hz: f32) -> Filter { // Coefficients taken from https://github.com/csteinmetz1/pyloudnorm/blob/ // 6baa64d59b7794bc812e124438692e7fd2e65c0c/pyloudnorm/meter.py#L135-L136. let gain_db = 3.999_843_8; let q = 0.707_175_25; let center_hz = 1_681.974_5; // Formula taken from https://github.com/csteinmetz1/pyloudnorm/blob/ // 6baa64d59b7794bc812e124438692e7fd2e65c0c/pyloudnorm/iirfilter.py#L134-L143. let k = (f32::consts::PI * center_hz / sample_rate_hz).tan(); let vh = 10.0_f32.powf(gain_db / 20.0); let vb = vh.powf(0.499_666_78); let a0 = 1.0 + k / q + k * k; Filter { b0: (vh + vb * k / q + k * k) / a0, b1: 2.0 * (k * k - vh) / a0, b2: (vh - vb * k / q + k * k) / a0, a1: 2.0 * (k * k - 1.0) / a0, a2: (1.0 - k / q + k * k) / a0, x1: 0.0, x2: 0.0, y1: 0.0, y2: 0.0, } } /// Stage 2 of th BS.1770-4 pre-filter. pub fn high_pass(sample_rate_hz: f32) -> Filter { // Coefficients taken from https://github.com/csteinmetz1/pyloudnorm/blob/ // 6baa64d59b7794bc812e124438692e7fd2e65c0c/pyloudnorm/meter.py#L135-L136. 
let q = 0.500_327_05; let center_hz = 38.135_47; // Formula taken from https://github.com/csteinmetz1/pyloudnorm/blob/ // 6baa64d59b7794bc812e124438692e7fd2e65c0c/pyloudnorm/iirfilter.py#L145-L151 let k = (f32::consts::PI * center_hz / sample_rate_hz).tan(); Filter { a1: 2.0 * (k * k - 1.0) / (1.0 + k / q + k * k), a2: (1.0 - k / q + k * k) / (1.0 + k / q + k * k), b0: 1.0, b1: -2.0, b2: 1.0, x1: 0.0, x2: 0.0, y1: 0.0, y2: 0.0, } } /// Feed the next input sample, get the next output sample. #[inline(always)] pub fn apply(&mut self, x0: f32) -> f32 { let y0 = 0.0 + self.b0 * x0 + self.b1 * self.x1 + self.b2 * self.x2 - self.a1 * self.y1 - self.a2 * self.y2; self.x2 = self.x1; self.x1 = x0; self.y2 = self.y1; self.y1 = y0; y0 } } /// Compensated sum, for summing many values of different orders of magnitude /// accurately. #[derive(Copy, Clone, PartialEq)] struct Sum { sum: f32, residue: f32, } impl Sum { #[inline(always)] fn zero() -> Sum { Sum { sum: 0.0, residue: 0.0, } } #[inline(always)] fn add(&mut self, x: f32) { let sum = self.sum + (self.residue + x); self.residue = (self.residue + x) - (sum - self.sum); self.sum = sum; } } /// The mean of the squares of the K-weighted samples in a window of time. /// /// K-weighted power is equivalent to K-weighted loudness, the only difference /// is one of scale: power is quadratic in sample amplitudes, whereas loudness /// units are logarithmic. `loudness_lkfs` and `from_lkfs` convert between power, /// and K-weighted Loudness Units relative to nominal Full Scale (LKFS). /// /// The term “LKFS” (Loudness Units, K-Weighted, relative to nominal Full Scale) /// is used in BS.1770-4 to emphasize K-weighting, but the term is otherwise /// interchangeable with the more widespread term “LUFS” (Loudness Units, /// relative to Full Scale). Loudness units are related to decibels in the /// following sense: boosting a signal that has a loudness of /// -<var>L<sub>K</sub></var> LUFS by <var>L<sub>K</sub></var> dB (by /// multiplying the amplitude by 10<sup><var>L<sub>K</sub></var>/20</sup>) will /// bring the loudness to 0 LUFS. /// /// K-weighting refers to a high-shelf and high-pass filter that model the /// effect that humans perceive a certain amount of power in low frequencies to /// be less loud than the same amount of power in higher frequencies. In this /// library the `Power` type is used exclusively to refer to power after applying K-weighting. /// /// The nominal “full scale” is the range [-1.0, 1.0]. Because the power is the /// mean square of the samples, if no input samples exceeded the full scale, the /// power will be in the range [0.0, 1.0]. However, the power delivered by /// multiple channels, which is a weighted sum over individual channel powers, /// can exceed this range, because the weighted sum is not normalized. #[derive(Copy, Clone, PartialEq, PartialOrd)] pub struct Power(pub f32); impl Power { /// Convert Loudness Units relative to Full Scale into a squared sample amplitude. /// /// This is the inverse of `loudness_lkfs`. pub fn from_lkfs(lkfs: f32) -> Power { // The inverse of the formula below. Power(10.0_f32.powf((lkfs + 0.691) * 0.1)) } /// Return the loudness of this window in Loudness Units, K-weighted, relative to Full Scale. /// /// This is the inverse of `from_lkfs`. pub fn loudness_lkfs(&self) -> f32 { // Equation 2 (p.5) of BS.1770-4. -0.691 + 10.0 * self.0.log10() } } /// A `T` value for non-overlapping windows of audio, 100ms in length. 
/// /// The `ChannelLoudnessMeter` applies K-weighting and then produces the power /// for non-overlapping windows of 100ms duration. /// /// These non-overlapping 100ms windows can later be combined into overlapping /// windows of 400ms, spaced 100ms apart, to compute instantaneous loudness or /// to perform a gated measurement, or they can be combined into even larger /// windows for a momentary loudness measurement. #[derive(Copy, Clone, Debug)] pub struct Windows100ms<T> { pub inner: T, } impl<T> Windows100ms<T> { /// Wrap a new empty vector. pub fn new() -> Windows100ms<Vec<T>> { Windows100ms { inner: Vec::new() } } /// Apply `as_ref` to the inner value. pub fn as_ref(&self) -> Windows100ms<&[Power]> where T: AsRef<[Power]>, { Windows100ms { inner: self.inner.as_ref(), } } /// Apply `as_mut` to the inner value. pub fn as_mut(&mut self) -> Windows100ms<&mut [Power]> where T: AsMut<[Power]>, { Windows100ms { inner: self.inner.as_mut(), } } #[allow(clippy::len_without_is_empty)] /// Apply `len` to the inner value. pub fn len(&self) -> usize where T: AsRef<[Power]>, { self.inner.as_ref().len() } } /// Measures K-weighted power of non-overlapping 100ms windows of a single channel of audio. /// /// # Output /// /// The output of the meter is an intermediate result in the form of power for /// 100ms non-overlapping windows. The windows need to be processed further to /// get one of the instantaneous, momentary, and integrated loudness /// measurements defined in BS.1770. /// /// The windows can also be inspected directly; the data is meaningful /// on its own (the K-weighted power delivered in that window of time), but it /// is not something that BS.1770 defines a term for. /// /// # Multichannel audio /// /// To perform a loudness measurement of multichannel audio, construct a /// `ChannelLoudnessMeter` per channel, and later combine the measured power /// with e.g. `reduce_stereo`. /// /// # Instantaneous loudness /// /// The instantaneous loudness is the power over a 400ms window, so you can /// average four 100ms windows. No special functionality is implemented to help /// with that at this time. ([Pull requests would be accepted.][contribute]) /// /// # Momentary loudness /// /// The momentary loudness is the power over a 3-second window, so you can /// average thirty 100ms windows. No special functionality is implemented to /// help with that at this time. ([Pull requests would be accepted.][contribute]) /// /// # Integrated loudness /// /// Use `gated_mean` to perform an integrated loudness measurement: /// /// ```ignore /// # use std::iter; /// # use bs1770::{ChannelLoudnessMeter, gated_mean}; /// # let sample_rate_hz = 44_100; /// # let samples_per_100ms = sample_rate_hz / 10; /// # let mut meter = ChannelLoudnessMeter::new(sample_rate_hz); /// # meter.push((0..44_100).map(|i| (i as f32 * 0.01).sin())); /// let integrated_loudness_lkfs = gated_mean(meter.as_100ms_windows()) /// .unwrap_or(bs1770::Power(0.0)) /// .loudness_lkfs(); /// ``` /// /// [contribute]: https://github.com/ruuda/bs1770/blob/master/CONTRIBUTING.md #[derive(Clone)] pub struct ChannelLoudnessMeter { /// The number of samples that fit in 100ms of audio. samples_per_100ms: u32, /// Stage 1 filter (head effects, high shelf). filter_stage1: Filter, /// Stage 2 filter (high-pass). filter_stage2: Filter, /// Sum of the squares over non-overlapping windows of 100ms. windows: Windows100ms<Vec<Power>>, /// The number of samples in the current unfinished window. 
count: u32, /// The sum of the squares of the samples in the current unfinished window. square_sum: Sum, } impl ChannelLoudnessMeter { /// Construct a new loudness meter for the given sample rate. pub fn new(sample_rate_hz: u32) -> ChannelLoudnessMeter { ChannelLoudnessMeter { samples_per_100ms: sample_rate_hz / 10, filter_stage1: Filter::high_shelf(sample_rate_hz as f32), filter_stage2: Filter::high_pass(sample_rate_hz as f32), windows: Windows100ms::new(), count: 0, square_sum: Sum::zero(), } } /// Feed input samples for loudness analysis. /// /// # Full scale /// /// Full scale for the input samples is the interval [-1.0, 1.0]. If your /// input consists of signed integer samples, you can convert as follows: /// /// ```ignore /// # let mut meter = bs1770::ChannelLoudnessMeter::new(44_100); /// # let bits_per_sample = 16_usize; /// # let samples = &[0_i16]; /// // Note that the maximum amplitude is `1 << (bits_per_sample - 1)`, /// // one bit is the sign bit. /// let normalizer = 1.0 / (1_u64 << (bits_per_sample - 1)) as f32; /// meter.push(samples.iter().map(|&s| s as f32 * normalizer)); /// ``` /// /// # Repeated calls /// /// You can call `push` multiple times to feed multiple batches of samples. /// This is equivalent to feeding a single chained iterator. The leftover of /// samples that did not fill a full 100ms window is not discarded: /// /// ```ignore /// # use std::iter; /// # use bs1770::ChannelLoudnessMeter; /// let sample_rate_hz = 44_100; /// let samples_per_100ms = sample_rate_hz / 10; /// let mut meter = ChannelLoudnessMeter::new(sample_rate_hz); /// /// meter.push(iter::repeat(0.0).take(samples_per_100ms as usize - 1)); /// assert_eq!(meter.as_100ms_windows().len(), 0); /// /// meter.push(iter::once(0.0)); /// assert_eq!(meter.as_100ms_windows().len(), 1); /// ``` pub fn push<I: Iterator<Item = f32>>(&mut self, samples: I) { let normalizer = 1.0 / self.samples_per_100ms as f32; // LLVM, if you could go ahead and inline those apply calls, and then // unroll and vectorize the loop, that'd be terrific. for x in samples { let y = self.filter_stage1.apply(x); let z = self.filter_stage2.apply(y); self.square_sum.add(z * z); self.count += 1; // TODO: Should this branch be marked cold? if self.count == self.samples_per_100ms { let mean_squares = Power(self.square_sum.sum * normalizer); self.windows.inner.push(mean_squares); // We intentionally do not reset the residue. That way, leftover // energy from this window is not lost, so for the file overall, // the sum remains more accurate. self.square_sum.sum = 0.0; self.count = 0; } } } /// Return a reference to the 100ms windows analyzed so far. pub fn as_100ms_windows(&self) -> Windows100ms<&[Power]> { self.windows.as_ref() } /// Return all 100ms windows analyzed so far. pub fn into_100ms_windows(self) -> Windows100ms<Vec<Power>> { self.windows } } /// Combine power for multiple channels by taking a weighted sum. /// /// Note that BS.1770-4 defines power for a multi-channel signal as a weighted /// sum over channels which is not normalized. This means that a stereo signal /// is inherently louder than a mono signal. For a mono signal played back on /// stereo speakers, you should therefore still apply `reduce_stereo`, passing /// in the same signal for both channels. pub fn reduce_stereo( left: Windows100ms<&[Power]>, right: Windows100ms<&[Power]>, ) -> Windows100ms<Vec<Power>> { assert_eq!( left.len(), right.len(), "Channels must have the same length." 
    );
    let mut result = Vec::with_capacity(left.len());
    for (l, r) in left.inner.iter().zip(right.inner) {
        result.push(Power(l.0 + r.0));
    }
    Windows100ms { inner: result }
}

/// In-place version of `reduce_stereo` that stores the result in the former left channel.
pub fn reduce_stereo_in_place(left: Windows100ms<&mut [Power]>, right: Windows100ms<&[Power]>) {
    assert_eq!(
        left.len(),
        right.len(),
        "Channels must have the same length."
    );
    for (l, r) in left.inner.iter_mut().zip(right.inner) {
        l.0 += r.0;
    }
}

/// Perform gating and averaging for a BS.1770-4 integrated loudness measurement.
///
/// The integrated loudness measurement is not just the average power over the
/// entire signal. BS.1770-4 defines two stages of gating that exclude
/// parts of the signal, to ensure that silent parts do not contribute to the
/// loudness measurement. This function performs that gating, and returns the
/// average power over the windows that were not excluded.
///
/// The result of this function is the integrated loudness measurement.
///
/// When no signal remains after applying the gate, this function returns
/// `None`. In particular, this happens when all of the signal is softer than
/// -70 LKFS, including a signal that consists of pure silence.
pub fn gated_mean(windows_100ms: Windows100ms<&[Power]>) -> Option<Power> {
    let mut gating_blocks = Vec::with_capacity(windows_100ms.len());

    // Stage 1: an absolute threshold of -70 LKFS. (Equation 6, p.6.)
    let absolute_threshold = Power::from_lkfs(-70.0);

    // Iterate over all 400ms windows.
    for window in windows_100ms.inner.windows(4) {
        // Note that the sum over channels has already been performed at this point.
        let gating_block_power = Power(0.25 * window.iter().map(|mean| mean.0).sum::<f32>());

        if gating_block_power > absolute_threshold {
            gating_blocks.push(gating_block_power);
        }
    }

    if gating_blocks.is_empty() {
        return None;
    }

    // Compute the loudness after applying the absolute gate, in order to
    // determine the threshold for the relative gate.
    let mut sum_power = Sum::zero();
    for &gating_block_power in &gating_blocks {
        sum_power.add(gating_block_power.0);
    }
    let absolute_gated_power = Power(sum_power.sum / (gating_blocks.len() as f32));

    // Stage 2: Apply the relative gate.
    let relative_threshold = Power::from_lkfs(absolute_gated_power.loudness_lkfs() - 10.0);
    let mut sum_power = Sum::zero();
    let mut n_blocks = 0_usize;
    for &gating_block_power in &gating_blocks {
        if gating_block_power > relative_threshold {
            sum_power.add(gating_block_power.0);
            n_blocks += 1;
        }
    }
    if n_blocks == 0 {
        return None;
    }
    let relative_gated_power = Power(sum_power.sum / n_blocks as f32);
    Some(relative_gated_power)
}
candle/candle-examples/src/bs1770.rs/0
{ "file_path": "candle/candle-examples/src/bs1770.rs", "repo_id": "candle", "token_count": 7223 }
29
/****************************************************************************** * Copyright (c) 2023, Tri Dao. ******************************************************************************/ #pragma once #include "cute/algorithm/copy.hpp" #include "cutlass/cutlass.h" #include "cutlass/layout/layout.h" #include <cutlass/numeric_types.h> using namespace cute; template<int kHeadDim_, int kBlockM_, int kBlockN_, int kNWarps_, typename elem_type=cutlass::half_t> struct Flash_kernel_traits_sm90 { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 using Element = elem_type; static constexpr bool Has_cp_async = true; #else using Element = cutlass::half_t; static constexpr bool Has_cp_async = false; #endif using ElementAccum = float; using index_t = uint32_t; #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 using MMA_Atom_Arch = std::conditional_t< std::is_same_v<elem_type, cutlass::half_t>, MMA_Atom<SM80_16x8x16_F32F16F16F32_TN>, MMA_Atom<SM80_16x8x16_F32BF16BF16F32_TN> >; using ValLayoutMNK = Layout<Shape<_1, _2, _1>>; #else using MMA_Atom_Arch = MMA_Atom<SM75_16x8x8_F32F16F16F32_TN>; using ValLayoutMNK = Layout<Shape<_1, _2, _2>>; #endif #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 750 using SmemCopyAtom = Copy_Atom<SM75_U32x4_LDSM_N, elem_type>; using SmemCopyAtomTransposed = Copy_Atom<SM75_U16x8_LDSM_T, elem_type>; #else using SmemCopyAtom = Copy_Atom<DefaultCopy, elem_type>; using SmemCopyAtomTransposed = Copy_Atom<DefaultCopy, elem_type>; #endif }; template<int kHeadDim_, int kBlockM_, int kBlockN_, int kNWarps_, bool Is_Q_in_regs_=false, bool Share_Q_K_smem_=false, typename elem_type=cutlass::half_t, typename Base=Flash_kernel_traits_sm90<kHeadDim_, kBlockM_, kBlockN_, kNWarps_, elem_type> > struct Flash_fwd_kernel_traits : public Base { using Element = typename Base::Element; using ElementAccum = typename Base::ElementAccum; using index_t = typename Base::index_t; static constexpr bool Has_cp_async = Base::Has_cp_async; using SmemCopyAtom = typename Base::SmemCopyAtom; using SmemCopyAtomTransposed = typename Base::SmemCopyAtomTransposed; static constexpr bool Share_Q_K_smem = Share_Q_K_smem_; static constexpr bool Is_Q_in_regs = Is_Q_in_regs_ || Share_Q_K_smem; // The number of threads. static constexpr int kNWarps = kNWarps_; static constexpr int kNThreads = kNWarps * 32; static constexpr int kBlockM = kBlockM_; static constexpr int kBlockN = kBlockN_; static constexpr int kHeadDim = kHeadDim_; static_assert(kHeadDim % 32 == 0); static constexpr int kBlockKSmem = kHeadDim % 64 == 0 ? 64 : 32; static constexpr int kBlockKGmem = kHeadDim % 128 == 0 ? 128 : (kHeadDim % 64 == 0 ? 64 : 32); static constexpr int kSwizzle = kBlockKSmem == 32 ? 
2 : 3; using TiledMma = TiledMMA< typename Base::MMA_Atom_Arch, Layout<Shape<Int<kNWarps>,_1,_1>>, // 4x1x1 or 8x1x1 thread group typename Base::ValLayoutMNK>; // 1x2x1 or 1x2x2 value group for 16x16x16 MMA and LDSM using SmemLayoutAtomQ = decltype( composition(Swizzle<kSwizzle, 3, 3>{}, // This has to be kBlockKSmem, using kHeadDim gives wrong results for d=128 Layout<Shape<_8, Int<kBlockKSmem>>, Stride<Int<kBlockKSmem>, _1>>{})); using SmemLayoutQ = decltype(tile_to_shape( SmemLayoutAtomQ{}, Shape<Int<kBlockM>, Int<kHeadDim>>{})); using SmemLayoutKV = decltype(tile_to_shape( SmemLayoutAtomQ{}, Shape<Int<kBlockN>, Int<kHeadDim>>{})); using SmemLayoutAtomVtransposed = decltype( composition(Swizzle<kSwizzle, 3, 3>{}, // This has to be kBlockN and not 8, otherwise we get wrong results for d=128 Layout<Shape<Int<kBlockKSmem>, Int<kBlockN>>, Stride<_1, Int<kBlockKSmem>>>{})); using SmemLayoutVtransposed = decltype(tile_to_shape( SmemLayoutAtomVtransposed{}, Shape<Int<kHeadDim>, Int<kBlockN>>{})); // Maybe the VtransposeNoSwizzle just needs to have the right shape // And the strides don't matter? using SmemLayoutVtransposedNoSwizzle = decltype(SmemLayoutVtransposed{}.layout_fn()); using SmemLayoutAtomO = decltype( composition(Swizzle<kSwizzle, 3, 3>{}, Layout<Shape<Int<8>, Int<kBlockKSmem>>, Stride<Int<kBlockKSmem>, _1>>{})); using SmemLayoutO = decltype(tile_to_shape( SmemLayoutAtomO{}, Shape<Int<kBlockM>, Int<kHeadDim>>{})); using SmemCopyAtomO = Copy_Atom<DefaultCopy, elem_type>; static constexpr int kSmemQCount = size(SmemLayoutQ{}); static constexpr int kSmemKVCount = size(SmemLayoutKV{}) * 2; static constexpr int kSmemQSize = kSmemQCount * sizeof(Element); static constexpr int kSmemKVSize = kSmemKVCount * sizeof(Element); static constexpr int kSmemSize = Share_Q_K_smem ? std::max(kSmemQSize, kSmemKVSize) : kSmemQSize + kSmemKVSize; static constexpr int kGmemElemsPerLoad = sizeof(cute::uint128_t) / sizeof(Element); static_assert(kHeadDim % kGmemElemsPerLoad == 0, "kHeadDim must be a multiple of kGmemElemsPerLoad"); // Using kBlockKSmem here is 6-10% faster than kBlockKGmem for d=128 because of bank conflicts. // For example, for d=128, smem is split into 2 "pages", each page takes care of columns // 0-63 and 64-127. If we have 16 threads per row for gmem read, when we write to smem, // thread 0 - 7 will write to the first page and thread 8 - 15 will write to the second page, // to the same banks. static constexpr int kGmemThreadsPerRow = kBlockKSmem / kGmemElemsPerLoad; static_assert(kNThreads % kGmemThreadsPerRow == 0, "kNThreads must be a multiple of kGmemThreadsPerRow"); using GmemLayoutAtom = Layout<Shape <Int<kNThreads / kGmemThreadsPerRow>, Int<kGmemThreadsPerRow>>, Stride<Int<kGmemThreadsPerRow>, _1>>; // We use CACHEGLOBAL instead of CACHEALWAYS for both Q and K/V, since we won't be reading // from the same address by the same threadblock. This is slightly faster. 
using Gmem_copy_struct = std::conditional_t< Has_cp_async, SM80_CP_ASYNC_CACHEGLOBAL<cute::uint128_t>, DefaultCopy >; using GmemTiledCopyQKV = decltype( make_tiled_copy(Copy_Atom<Gmem_copy_struct, elem_type>{}, GmemLayoutAtom{}, Layout<Shape<_1, _8>>{})); // Val layout, 8 vals per read using GmemTiledCopyO = decltype( make_tiled_copy(Copy_Atom<DefaultCopy, elem_type>{}, GmemLayoutAtom{}, Layout<Shape<_1, _8>>{})); // Val layout, 8 vals per store static constexpr int kGmemThreadsPerRowP = kBlockN / kGmemElemsPerLoad; static_assert(kNThreads % kGmemThreadsPerRowP == 0, "kNThreads must be a multiple of kGmemThreadsPerRowP"); using GmemLayoutAtomP = Layout<Shape <Int<kNThreads / kGmemThreadsPerRowP>, Int<kGmemThreadsPerRowP>>, Stride<Int<kGmemThreadsPerRowP>, _1>>; using GmemTiledCopyP = decltype( make_tiled_copy(Copy_Atom<DefaultCopy, elem_type>{}, GmemLayoutAtomP{}, Layout<Shape<_1, _8>>{})); // Val layout, 8 vals per store }; ////////////////////////////////////////////////////////////////////////////////////////////////////
candle/candle-flash-attn/kernels/kernel_traits_sm90.h/0
{ "file_path": "candle/candle-flash-attn/kernels/kernel_traits_sm90.h", "repo_id": "candle", "token_count": 3269 }
30
#include "cuda_utils.cuh" #include<stdint.h> // Naive implementation of conv1d. template <typename T, typename A> __device__ void conv1d( const size_t src_numel, const size_t l_out, const size_t stride, const size_t padding, const size_t dilation, const size_t *info, const T *src, const T *kernel, T *dst ) { // src: (b_size, c_in, l_in) // k: (c_out, c_in, k_size) const size_t *src_dims = info; const size_t *src_s = info + 3; const size_t *k_dims = info + 6; const size_t *k_s = info + 9; const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x; const size_t k_size = k_dims[2]; const size_t c_out = k_dims[0]; const size_t c_in = src_dims[1]; const size_t l_in = src_dims[2]; if (dst_i >= src_dims[0] * c_out * l_out) { return; } // TODO const size_t b_idx = dst_i / (l_out * c_out); const size_t dst_c_idx = (dst_i / l_out) % c_out; const size_t dst_l = dst_i % l_out; const size_t src_idx0 = b_idx * src_s[0]; A d = 0; for (size_t offset = 0; offset < k_size; ++offset) { size_t src_l = (stride * dst_l + offset) * dilation; if (src_l < padding || src_l >= padding + l_in) { continue; } src_l -= padding; for (size_t src_c_idx = 0; src_c_idx < c_in; ++src_c_idx) { const size_t src_idx = src_idx0 + src_c_idx * src_s[1] + src_l * src_s[2]; const size_t k_idx = dst_c_idx * k_s[0] + src_c_idx * k_s[1] + offset * k_s[2]; d += static_cast<A>(src[src_idx]) * static_cast<A>(kernel[k_idx]); } } dst[dst_i] = static_cast<T>(d); } template <typename T> __device__ void im2col1d( const size_t dst_numel, const size_t l_out, const size_t l_k, const size_t stride, const size_t padding, const size_t dilation, const size_t *info, const T *src, T *dst ) { const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x; // dst: (b_size, l_out, c_in, l_k) // src: (b_size, c_in, l_in) if (dst_i >= dst_numel) { return; } const size_t *src_dims = info; const size_t *src_s = info + 3; const size_t c_in = src_dims[1]; const size_t l_in = src_dims[2]; const size_t dst_s2 = l_k; const size_t dst_s1 = c_in * dst_s2; const size_t dst_s0 = l_out * dst_s1; size_t tmp_dst_i = dst_i; const size_t b_idx = tmp_dst_i / dst_s0; tmp_dst_i -= b_idx * dst_s0; const size_t l_idx = tmp_dst_i / dst_s1; tmp_dst_i -= l_idx * dst_s1; const size_t c_idx = tmp_dst_i / dst_s2; tmp_dst_i -= c_idx * dst_s2; const size_t l_k_idx = tmp_dst_i; size_t src_l_idx = l_idx * stride + l_k_idx * dilation; if (src_l_idx < padding || src_l_idx >= l_in + padding) { dst[dst_i] = static_cast<T>(0); } else { src_l_idx -= padding; const size_t src_i = b_idx * src_s[0] + c_idx * src_s[1] + src_l_idx * src_s[2]; dst[dst_i] = src[src_i]; } } template <typename T> __device__ void im2col( const size_t dst_numel, const size_t h_out, const size_t w_out, const size_t h_k, const size_t w_k, const size_t stride, const size_t padding, const size_t dilation, const size_t *info, const T *src, T *dst ) { const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x; // dst: (b_size, h_out, w_out, c_in, h_k, w_k) // src: (b_size, c_in, h_in, w_in) if (dst_i >= dst_numel) { return; } const size_t *src_dims = info; const size_t *src_s = info + 4; const size_t c_in = src_dims[1]; const size_t h_in = src_dims[2]; const size_t w_in = src_dims[3]; const size_t dst_s4 = w_k; const size_t dst_s3 = h_k * dst_s4; const size_t dst_s2 = c_in * dst_s3; const size_t dst_s1 = w_out * dst_s2; const size_t dst_s0 = h_out * dst_s1; size_t tmp_dst_i = dst_i; const size_t b_idx = tmp_dst_i / dst_s0; tmp_dst_i -= b_idx * dst_s0; const size_t h_idx = tmp_dst_i / dst_s1; tmp_dst_i -= h_idx * dst_s1; const 
size_t w_idx = tmp_dst_i / dst_s2; tmp_dst_i -= w_idx * dst_s2; const size_t c_idx = tmp_dst_i / dst_s3; tmp_dst_i -= c_idx * dst_s3; const size_t h_k_idx = tmp_dst_i / dst_s4; tmp_dst_i -= h_k_idx * dst_s4; const size_t w_k_idx = tmp_dst_i; size_t src_h_idx = h_idx * stride + h_k_idx * dilation; size_t src_w_idx = w_idx * stride + w_k_idx * dilation; if (src_h_idx < padding || src_h_idx >= h_in + padding) { dst[dst_i] = static_cast<T>(0); } else if (src_w_idx < padding || src_w_idx >= w_in + padding) { dst[dst_i] = static_cast<T>(0); } else { src_h_idx -= padding; src_w_idx -= padding; const size_t src_i = b_idx * src_s[0] + c_idx * src_s[1] + src_h_idx * src_s[2] + src_w_idx * src_s[3]; dst[dst_i] = src[src_i]; } } // Naive implementation of conv2d. template <typename T, typename A> __device__ void conv2d( const size_t src_numel, const size_t w_out, const size_t h_out, const size_t stride, const size_t padding, const size_t dilation, const size_t *info, const T *src, const T *kernel, T *dst ) { const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x; // src: (b_size, c_in, h_in, w_in) // k: (c_out, c_in, h_k, w_k) const size_t *src_dims = info; const size_t *src_s = info + 4; const size_t *k_dims = info + 8; const size_t *k_s = info + 12; const size_t h_k = k_dims[2]; const size_t w_k = k_dims[3]; const size_t c_out = k_dims[0]; const size_t c_in = src_dims[1]; const size_t h_in = src_dims[2]; const size_t w_in = src_dims[3]; if (dst_i >= src_dims[0] * c_out * w_out * h_out) { return; } // TODO const size_t b_idx = dst_i / (w_out * h_out * c_out); const size_t dst_c_idx = (dst_i / (w_out * h_out)) % c_out; // NCHW layout. const size_t dst_h = (dst_i / w_out) % h_out; const size_t dst_w = dst_i % w_out; const size_t src_idx0 = b_idx * src_s[0]; A d = 0; for (size_t w_offset = 0; w_offset < w_k; ++w_offset) { size_t src_w = stride * dst_w + w_offset * dilation; if (src_w < padding || src_w >= w_in + padding) { continue; } src_w -= padding; for (size_t h_offset = 0; h_offset < h_k; ++h_offset) { size_t src_h = stride * dst_h + h_offset * dilation; if (src_h < padding || src_h >= h_in + padding) { continue; } src_h -= padding; for (size_t src_c_idx = 0; src_c_idx < c_in; ++src_c_idx) { const size_t src_idx = src_idx0 + src_c_idx * src_s[1] + src_h * src_s[2] + src_w * src_s[3]; const size_t k_idx = dst_c_idx * k_s[0] + src_c_idx * k_s[1] + h_offset * k_s[2] + w_offset * k_s[3]; d += static_cast<A>(src[src_idx]) * static_cast<A>(kernel[k_idx]); } } } dst[dst_i] = static_cast<T>(d); } // Naive implementation of conv_transpose1d. template <typename T, typename A> __device__ void conv_transpose1d( const size_t src_numel, const size_t l_out, const size_t stride, const size_t padding, const size_t out_padding, const size_t dilation, const size_t *info, const T *src, const T *kernel, T *dst ) { const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x; // src: (b_size, c_in, l_in) // k: (c_in, c_out, l_k) const size_t *src_dims = info; const size_t *src_s = info + 3; const size_t *k_dims = info + 6; const size_t *k_s = info + 9; const size_t l_k = k_dims[2]; const size_t c_out = k_dims[1]; const size_t c_in = src_dims[1]; const size_t l_in = src_dims[2]; if (dst_i >= src_dims[0] * c_out * l_out) { return; } // TODO const size_t b_idx = dst_i / (l_out * c_out); const size_t dst_c_idx = (dst_i / l_out) % c_out; // NCL layout. 
const size_t out_x = dst_i % l_out; const size_t src_idx0 = b_idx * src_s[0]; A d = 0; for (int k_x = 0; k_x < (int)l_k; ++k_x) { // let out_x = inp_x * p.stride + k_x * p.dilation - p.padding; int inp_x_stride = (int)(out_x + padding) - k_x * dilation; if (inp_x_stride < 0 || inp_x_stride % stride) { continue; } int inp_x = inp_x_stride / stride; if (inp_x >= l_in) continue; for (size_t src_c_idx = 0; src_c_idx < c_in; ++src_c_idx) { const size_t src_idx = src_idx0 + src_c_idx * src_s[1] + inp_x * src_s[2]; const size_t k_idx = src_c_idx * k_s[0] + dst_c_idx * k_s[1] + k_x * k_s[2]; d += static_cast<A>(src[src_idx]) * static_cast<A>(kernel[k_idx]); } } dst[dst_i] = static_cast<T>(d); } // Naive implementation of conv_transpose2d. template <typename T, typename A> __device__ void conv_transpose2d( const size_t src_numel, const size_t w_out, const size_t h_out, const size_t stride, const size_t padding, const size_t out_padding, const size_t dilation, const size_t *info, const T *src, const T *kernel, T *dst ) { const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x; // src: (b_size, c_in, h_in, w_in) // k: (c_in, c_out, h_k, w_k) const size_t *src_dims = info; const size_t *src_s = info + 4; const size_t *k_dims = info + 8; const size_t *k_s = info + 12; const size_t h_k = k_dims[2]; const size_t w_k = k_dims[3]; const size_t c_out = k_dims[1]; const size_t c_in = src_dims[1]; const size_t h_in = src_dims[2]; const size_t w_in = src_dims[3]; if (dst_i >= src_dims[0] * c_out * w_out * h_out) { return; } // TODO const size_t b_idx = dst_i / (w_out * h_out * c_out); const size_t dst_c_idx = (dst_i / (w_out * h_out)) % c_out; // NCHW layout. const size_t out_y = (dst_i / w_out) % h_out; const size_t out_x = dst_i % w_out; const size_t src_idx0 = b_idx * src_s[0]; A d = 0; for (int k_x = 0; k_x < (int)w_k; ++k_x) { // let out_x = inp_x * p.stride + k_x * p.dilation - p.padding; int inp_x_stride = (int)(out_x + padding) - k_x * dilation; if (inp_x_stride < 0 || inp_x_stride % stride) { continue; } int inp_x = inp_x_stride / stride; if (inp_x >= w_in) continue; for (int k_y = 0; k_y < (int)h_k; ++k_y) { int inp_y_stride = (int)(out_y + padding) - k_y * dilation; if (inp_y_stride < 0 || inp_y_stride % stride) { continue; } int inp_y = inp_y_stride / stride; if (inp_y >= h_in) continue; for (size_t src_c_idx = 0; src_c_idx < c_in; ++src_c_idx) { const size_t src_idx = src_idx0 + src_c_idx * src_s[1] + inp_y * src_s[2] + inp_x * src_s[3]; const size_t k_idx = src_c_idx * k_s[0] + dst_c_idx * k_s[1] + k_y * k_s[2] + k_x * k_s[3]; d += static_cast<A>(src[src_idx]) * static_cast<A>(kernel[k_idx]); } } } dst[dst_i] = static_cast<T>(d); } template <typename T, typename A> __device__ void avg_pool2d( const size_t src_numel, const size_t w_k, const size_t h_k, const size_t w_stride, const size_t h_stride, const size_t *info, const T *src, T *dst ) { const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x; // src: (b_size, c_in, w_in, h_in) const size_t *src_dims = info; const size_t *src_s = info + 4; const size_t c = src_dims[1]; const size_t w_in = src_dims[2]; const size_t h_in = src_dims[3]; const size_t w_out = (w_in - w_k) / w_stride + 1; const size_t h_out = (h_in - h_k) / h_stride + 1; if (dst_i >= src_dims[0] * c * w_out * h_out) { return; } // TODO: Improve this. 
const size_t b_idx = dst_i / (w_out * h_out * c); const size_t c_idx = (dst_i / (w_out * h_out)) % c; const size_t dst_w = (dst_i / h_out) % w_out; const size_t dst_h = dst_i % h_out; const size_t src_idx0 = b_idx * src_s[0]; const float scale = 1.0 / (w_k * h_k); A d = 0; for (size_t w_offset = 0; w_offset < w_k; ++w_offset) { size_t src_w = w_stride * dst_w + w_offset; if (src_w >= w_in) { continue; } for (size_t h_offset = 0; h_offset < h_k; ++h_offset) { size_t src_h = h_stride * dst_h + h_offset; if (src_h >= h_in) { continue; } const size_t src_idx = src_idx0 + c_idx * src_s[1] + src_w * src_s[2] + src_h * src_s[3]; d += static_cast<A>(src[src_idx]); } } dst[dst_i] = static_cast<T>(d * scale); } template <typename T> __device__ void max_pool2d( const size_t src_numel, const size_t w_k, const size_t h_k, const size_t w_stride, const size_t h_stride, const size_t *info, const T *src, T *dst ) { const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x; // src: (b_size, c_in, w_in, h_in) const size_t *src_dims = info; const size_t *src_s = info + 4; const size_t c = src_dims[1]; const size_t w_in = src_dims[2]; const size_t h_in = src_dims[3]; const size_t w_out = (w_in - w_k) / w_stride + 1; const size_t h_out = (h_in - h_k) / h_stride + 1; if (dst_i >= src_dims[0] * c * w_out * h_out) { return; } // TODO: Improve this. const size_t b_idx = dst_i / (w_out * h_out * c); const size_t c_idx = (dst_i / (w_out * h_out)) % c; const size_t dst_w = (dst_i / h_out) % w_out; const size_t dst_h = dst_i % h_out; const size_t src_idx0 = b_idx * src_s[0]; T d = 0; bool set = false; for (size_t w_offset = 0; w_offset < w_k; ++w_offset) { size_t src_w = w_stride * dst_w + w_offset; if (src_w >= w_in) { continue; } for (size_t h_offset = 0; h_offset < h_k; ++h_offset) { size_t src_h = h_stride * dst_h + h_offset; if (src_h >= h_in) { continue; } const size_t src_idx = src_idx0 + c_idx * src_s[1] + src_w * src_s[2] + src_h * src_s[3]; if (set) { d = maxg(d, src[src_idx]); } else { d = src[src_idx]; set = true; } } } dst[dst_i] = d; } template <typename T> __device__ void upsample_nearest2d( const size_t w_out, const size_t h_out, const double w_scale, const double h_scale, const size_t *info, const T *src, T *dst ) { const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x; // src: (b_size, c_in, w_in, h_in) const size_t *src_dims = info; const size_t *src_s = info + 4; const size_t c = src_dims[1]; const size_t w_in = src_dims[2]; const size_t h_in = src_dims[3]; if (dst_i >= src_dims[0] * c * w_out * h_out) { return; } // TODO: Improve this. 
const size_t b_idx = dst_i / (w_out * h_out * c); const size_t c_idx = (dst_i / (w_out * h_out)) % c; const size_t dst_w = (dst_i / h_out) % w_out; const size_t dst_h = dst_i % h_out; size_t src_w = static_cast<size_t>(dst_w * w_scale); size_t src_h = static_cast<size_t>(dst_h * h_scale); if (src_w >= w_in) { src_w = w_in - 1; } if (src_h >= h_in) { src_h = h_in - 1; } const size_t src_i = b_idx * src_s[0] + c_idx * src_s[1] + src_w * src_s[2] + src_h * src_s[3]; dst[dst_i] = src[src_i]; } #define CONV1D_OP(TYPENAME, TYPEACC, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const size_t src_numel, \ const size_t num_dims, \ const size_t stride, \ const size_t padding, \ const size_t dilation, \ const size_t *info, \ const TYPENAME *src, \ const TYPENAME *kernel, \ TYPENAME *dst \ ) { \ conv1d<TYPENAME, TYPEACC>(src_numel, num_dims, stride, padding, dilation, info, src, kernel, dst); \ } \ #define CONV2D_OP(TYPENAME, TYPEACC, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const size_t src_numel, \ const size_t w_out, \ const size_t h_out, \ const size_t stride, \ const size_t padding, \ const size_t dilation, \ const size_t *info, \ const TYPENAME *src, \ const TYPENAME *kernel, \ TYPENAME *dst \ ) { \ conv2d<TYPENAME, TYPEACC>(src_numel, w_out, h_out, stride, padding, dilation, info, src, kernel, dst); \ } \ #define IM2COL1D_OP(TYPENAME, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const size_t dst_numel, \ const size_t l_out, \ const size_t l_k, \ const size_t stride, \ const size_t padding, \ const size_t dilation, \ const size_t *info, \ const TYPENAME *src, \ TYPENAME *dst \ ) { \ im2col1d<TYPENAME>(dst_numel, l_out, l_k, stride, padding, dilation, info, src, dst); \ } \ #define IM2COL_OP(TYPENAME, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const size_t dst_numel, \ const size_t h_out, \ const size_t w_out, \ const size_t h_k, \ const size_t w_k, \ const size_t stride, \ const size_t padding, \ const size_t dilation, \ const size_t *info, \ const TYPENAME *src, \ TYPENAME *dst \ ) { \ im2col<TYPENAME>(dst_numel, h_out, w_out, h_k, w_k, stride, padding, dilation, info, src, dst); \ } \ #define CONVT1D_OP(TYPENAME, TYPEACC, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const size_t src_numel, \ const size_t l_out, \ const size_t stride, \ const size_t padding, \ const size_t out_padding, \ const size_t dilation, \ const size_t *info, \ const TYPENAME *src, \ const TYPENAME *kernel, \ TYPENAME *dst \ ) { \ conv_transpose1d<TYPENAME, TYPEACC>(src_numel, l_out, stride, padding, out_padding, dilation, info, src, kernel, dst); \ } \ #define CONVT2D_OP(TYPENAME, TYPEACC, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const size_t src_numel, \ const size_t w_out, \ const size_t h_out, \ const size_t stride, \ const size_t padding, \ const size_t out_padding, \ const size_t dilation, \ const size_t *info, \ const TYPENAME *src, \ const TYPENAME *kernel, \ TYPENAME *dst \ ) { \ conv_transpose2d<TYPENAME, TYPEACC>(src_numel, w_out, h_out, stride, padding, out_padding, dilation, info, src, kernel, dst); \ } \ #define AVG_POOL2D_OP(TYPENAME, TYPEACC, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const size_t src_numel, \ const size_t w_k, \ const size_t h_k, \ const size_t w_stride, \ const size_t h_stride, \ const size_t *info, \ const TYPENAME *src, \ TYPENAME *dst \ ) { \ avg_pool2d<TYPENAME, TYPEACC>(src_numel, w_k, h_k, w_stride, h_stride, info, src, dst); \ } \ #define MAX_POOL2D_OP(TYPENAME, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const size_t src_numel, \ const 
size_t w_k, \ const size_t h_k, \ const size_t w_stride, \ const size_t h_stride, \ const size_t *info, \ const TYPENAME *src, \ TYPENAME *dst \ ) { \ max_pool2d<TYPENAME>(src_numel, w_k, h_k, w_stride, h_stride, info, src, dst); \ } \ #define UPSAMPLE_NEAREST2D_OP(TYPENAME, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const size_t w_out, \ const size_t h_out, \ const double w_scale, \ const double h_scale, \ const size_t *info, \ const TYPENAME *src, \ TYPENAME *dst \ ) { \ upsample_nearest2d<TYPENAME>(w_out, h_out, w_scale, h_scale, info, src, dst); \ } \ #if __CUDA_ARCH__ >= 800 CONV1D_OP(__nv_bfloat16, float, conv1d_bf16) CONV2D_OP(__nv_bfloat16, float, conv2d_bf16) CONVT1D_OP(__nv_bfloat16, float, conv_transpose1d_bf16) CONVT2D_OP(__nv_bfloat16, float, conv_transpose2d_bf16) AVG_POOL2D_OP(__nv_bfloat16, float, avg_pool2d_bf16) MAX_POOL2D_OP(__nv_bfloat16, max_pool2d_bf16) UPSAMPLE_NEAREST2D_OP(__nv_bfloat16, upsample_nearest2d_bf16) IM2COL_OP(__nv_bfloat16, im2col_bf16) IM2COL1D_OP(__nv_bfloat16, im2col1d_bf16) #endif #if __CUDA_ARCH__ >= 530 CONV1D_OP(__half, float, conv1d_f16) CONV2D_OP(__half, float, conv2d_f16) CONVT1D_OP(__half, float, conv_transpose1d_f16) CONVT2D_OP(__half, float, conv_transpose2d_f16) AVG_POOL2D_OP(__half, float, avg_pool2d_f16) MAX_POOL2D_OP(__half, max_pool2d_f16) UPSAMPLE_NEAREST2D_OP(__half, upsample_nearest2d_f16) IM2COL_OP(__half, im2col_f16) IM2COL1D_OP(__half, im2col1d_f16) #endif CONV1D_OP(float, float, conv1d_f32) CONV1D_OP(double, double, conv1d_f64) CONV1D_OP(uint8_t, uint8_t, conv1d_u8) CONV1D_OP(uint32_t, uint32_t, conv1d_u32) CONV2D_OP(float, float, conv2d_f32) CONV2D_OP(double, double, conv2d_f64) CONV2D_OP(uint8_t, uint8_t, conv2d_u8) CONV2D_OP(uint32_t, uint32_t, conv2d_u32) CONVT1D_OP(float, float, conv_transpose1d_f32) CONVT1D_OP(double, double, conv_transpose1d_f64) CONVT1D_OP(uint8_t, uint8_t, conv_transpose1d_u8) CONVT1D_OP(uint32_t, uint32_t, conv_transpose1d_u32) CONVT2D_OP(float, float, conv_transpose2d_f32) CONVT2D_OP(double, double, conv_transpose2d_f64) CONVT2D_OP(uint8_t, uint8_t, conv_transpose2d_u8) CONVT2D_OP(uint32_t, uint32_t, conv_transpose2d_u32) AVG_POOL2D_OP(float, float, avg_pool2d_f32) AVG_POOL2D_OP(double, double, avg_pool2d_f64) AVG_POOL2D_OP(uint8_t, uint8_t, avg_pool2d_u8) AVG_POOL2D_OP(uint32_t, uint32_t, avg_pool2d_u32) MAX_POOL2D_OP(float, max_pool2d_f32) MAX_POOL2D_OP(double, max_pool2d_f64) MAX_POOL2D_OP(uint8_t, max_pool2d_u8) MAX_POOL2D_OP(uint32_t, max_pool2d_u32) UPSAMPLE_NEAREST2D_OP(float, upsample_nearest2d_f32) UPSAMPLE_NEAREST2D_OP(double, upsample_nearest2d_f64) UPSAMPLE_NEAREST2D_OP(uint8_t, upsample_nearest2d_u8) UPSAMPLE_NEAREST2D_OP(uint32_t, upsample_nearest2d_u32) IM2COL_OP(float, im2col_f32) IM2COL_OP(double, im2col_f64) IM2COL_OP(uint8_t, im2col_u8) IM2COL_OP(uint32_t, im2col_u32) IM2COL1D_OP(float, im2col1d_f32) IM2COL1D_OP(double, im2col1d_f64) IM2COL1D_OP(uint8_t, im2col1d_u8) IM2COL1D_OP(uint32_t, im2col1d_u32)
candle/candle-kernels/src/conv.cu/0
{ "file_path": "candle/candle-kernels/src/conv.cu", "repo_id": "candle", "token_count": 10786 }
31
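Note on the kernel interface above: every conv/pool kernel receives its geometry through a single flat `info` buffer rather than separate shape arguments. For `conv1d` the layout read by the kernel is `[src_dims(3), src_strides(3), k_dims(3), k_strides(3)]`, and `l_out` is computed on the host before launch. The sketch below is an illustrative host-side helper, not candle's actual launch code: the names `pack_conv1d_info` and `conv1d_l_out` are made up for this example, and the output-length formula mirrors the one used by `call_im2col1d_strided` in the Metal host code further below.

/// Hypothetical helper: pack the flat `info` buffer that the naive conv1d
/// kernel expects: [src_dims(3), src_strides(3), k_dims(3), k_strides(3)].
fn pack_conv1d_info(
    src_dims: [usize; 3],    // (b_size, c_in, l_in)
    src_strides: [usize; 3],
    k_dims: [usize; 3],      // (c_out, c_in, k_size)
    k_strides: [usize; 3],
) -> Vec<usize> {
    let mut info = Vec::with_capacity(12);
    info.extend_from_slice(&src_dims);
    info.extend_from_slice(&src_strides);
    info.extend_from_slice(&k_dims);
    info.extend_from_slice(&k_strides);
    info
}

/// Output length of a 1d convolution; same formula as the `call_im2col1d_strided`
/// host helper later in this section.
fn conv1d_l_out(l_in: usize, k_size: usize, stride: usize, padding: usize, dilation: usize) -> usize {
    (l_in + 2 * padding - dilation * (k_size - 1) - 1) / stride + 1
}

fn main() {
    // Contiguous NCL layout: source strides are (c_in * l_in, l_in, 1).
    let (b, c_in, l_in) = (1, 3, 16);
    let (c_out, k_size) = (4, 3);
    let info = pack_conv1d_info(
        [b, c_in, l_in],
        [c_in * l_in, l_in, 1],
        [c_out, c_in, k_size],
        [c_in * k_size, k_size, 1],
    );
    let l_out = conv1d_l_out(l_in, k_size, 1, 1, 1);
    // The kernel is launched with one thread per destination element,
    // i.e. b * c_out * l_out threads in total.
    assert_eq!(info.len(), 12);
    assert_eq!(l_out, 16);
    println!("info = {info:?}, l_out = {l_out}");
}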
use metal::{ Buffer, CommandBufferRef, CompileOptions, ComputeCommandEncoderRef, ComputePipelineState, Device, Function, FunctionConstantValues, Library, MTLDataType, MTLSize, NSUInteger, }; use std::collections::HashMap; use std::ffi::c_void; use std::sync::RwLock; const AFFINE: &str = include_str!("affine.metal"); const INDEXING: &str = include_str!("indexing.metal"); const UNARY: &str = include_str!("unary.metal"); const BINARY: &str = include_str!("binary.metal"); const TERNARY: &str = include_str!("ternary.metal"); const CAST: &str = include_str!("cast.metal"); const CONV: &str = include_str!("conv.metal"); const REDUCE: &str = include_str!("reduce.metal"); const RANDOM: &str = include_str!("random.metal"); const MFA: &[u8] = include_bytes!("libMetalFlashAttention.metallib"); const QUANTIZED: &str = include_str!("quantized.metal"); /// Most kernels apply similarly across the tensors /// This creates a strategy that uses the maximum amount of threads per threadgroup (capped at the /// actual total buffer length). /// Then kernels can just do their op on their single point in the buffer. fn linear_split(pipeline: &ComputePipelineState, length: usize) -> (MTLSize, MTLSize) { let size = length as u64; let width = std::cmp::min(pipeline.max_total_threads_per_threadgroup(), size); let count = (size + width - 1) / width; let thread_group_count = MTLSize { width: count, height: 1, depth: 1, }; let thread_group_size = MTLSize { width, height: 1, depth: 1, }; (thread_group_count, thread_group_size) } fn set_param<P: EncoderParam>(encoder: &ComputeCommandEncoderRef, position: u64, data: P) { <P as EncoderParam>::set_param(encoder, position, data) } /// Helper functions to create the various objects on the compute command encoder /// on a single line. /// Prevents getting wrong some arguments number and mixing length and size in bytes. trait EncoderParam { fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self); } macro_rules! primitive { ($type:ty) => { impl EncoderParam for $type { fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self) { encoder.set_bytes( position, core::mem::size_of::<$type>() as u64, &data as *const $type as *const c_void, ); } } }; } primitive!(bool); primitive!(usize); primitive!(i32); primitive!(i64); primitive!(u32); primitive!(u64); primitive!(f32); impl<T> EncoderParam for &[T] { fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self) { encoder.set_bytes( position, core::mem::size_of_val(data) as u64, data.as_ptr() as *const c_void, ); } } impl EncoderParam for &Buffer { fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self) { encoder.set_buffer(position, Some(data), 0); } } impl EncoderParam for (&Buffer, usize) { fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self) { encoder.set_buffer(position, Some(data.0), data.1 as u64); } } impl EncoderParam for &mut Buffer { fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self) { encoder.set_buffer(position, Some(data), 0); } } impl EncoderParam for (&mut Buffer, usize) { fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self) { encoder.set_buffer(position, Some(data.0), data.1 as u64); } } macro_rules! 
set_params { ($encoder:ident, ($($param:expr),+)) => ( let mut _index = 0; $( set_param($encoder, _index, $param); _index += 1; )* ); } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum Source { Affine, Indexing, Unary, Binary, Ternary, Cast, Reduce, Mfa, Conv, Random, Quantized, } pub mod copy2d { pub struct Kernel(pub &'static str); pub const FLOAT: Kernel = Kernel("copy2d_f32"); pub const HALF: Kernel = Kernel("copy2d_f16"); pub const BFLOAT: Kernel = Kernel("copy2d_bf16"); pub const I64: Kernel = Kernel("copy2d_i64"); pub const U32: Kernel = Kernel("copy2d_u32"); pub const U8: Kernel = Kernel("copy2d_u8"); } macro_rules! ops{ ($($name:ident),+) => { pub mod contiguous { pub struct Kernel(pub &'static str); $( pub mod $name { use super::Kernel; pub const FLOAT: Kernel = Kernel(concat!(stringify!($name), "_f32")); pub const HALF: Kernel = Kernel(concat!(stringify!($name), "_f16")); pub const BFLOAT: Kernel = Kernel(concat!(stringify!($name), "_bf16")); pub const I64: Kernel = Kernel(concat!(stringify!($name), "_i64")); pub const U32: Kernel = Kernel(concat!(stringify!($name), "_u32")); pub const U8: Kernel = Kernel(concat!(stringify!($name), "_u8")); } )+ pub mod copy { use super::Kernel; pub const FLOAT: Kernel = Kernel("copy_f32"); pub const HALF: Kernel = Kernel("copy_f16"); pub const BFLOAT: Kernel = Kernel("copy_bf16"); pub const I64: Kernel = Kernel("copy_i64"); pub const U32: Kernel = Kernel("copy_u32"); pub const U8: Kernel = Kernel("copy_u8"); } } pub mod strided { pub struct Kernel(pub &'static str); $( pub mod $name { use super::Kernel; pub const FLOAT: Kernel = Kernel(concat!(stringify!($name), "_f32_strided")); pub const HALF: Kernel = Kernel(concat!(stringify!($name), "_f16_strided")); pub const BFLOAT: Kernel = Kernel(concat!(stringify!($name), "_bf16_strided")); pub const I64: Kernel = Kernel(concat!(stringify!($name), "_i64_strided")); pub const U32: Kernel = Kernel(concat!(stringify!($name), "_u32_strided")); pub const U8: Kernel = Kernel(concat!(stringify!($name), "_u8_strided")); } )+ pub mod copy { use super::Kernel; pub const FLOAT: Kernel = Kernel("copy_f32_strided"); pub const HALF: Kernel = Kernel("copy_f16_strided"); pub const BFLOAT: Kernel = Kernel("copy_bf16_strided"); pub const I64: Kernel = Kernel("copy_i64_strided"); pub const U32: Kernel = Kernel("copy_u32_strided"); pub const U8: Kernel = Kernel("copy_u8_strided"); } } }; } pub mod unary { ops!( cos, sin, exp, sqr, sqrt, neg, log, gelu, abs, ceil, floor, relu, round, erf, gelu_erf, tanh, recip, silu ); } pub mod binary { ops!(add, sub, mul, div, min, max, eq, ne, le, lt, ge, gt); } #[derive(thiserror::Error, Debug)] pub enum MetalKernelError { #[error("Could not lock kernel map: {0}")] LockError(String), #[error("Error while loading library: {0}")] LoadLibraryError(String), #[error("Error while loading function: {0:?}")] LoadFunctionError(String), #[error("Failed to create compute function")] FailedToCreateComputeFunction, #[error("Failed to create pipeline")] FailedToCreatePipeline(String), #[error("Invalid matmul arguments {lhs_stride:?} {rhs_stride:?} {mnk:?}")] MatMulNonContiguous { lhs_stride: Vec<usize>, rhs_stride: Vec<usize>, mnk: (usize, usize, usize), }, } impl<T> From<std::sync::PoisonError<T>> for MetalKernelError { fn from(e: std::sync::PoisonError<T>) -> Self { Self::LockError(e.to_string()) } } type Libraries = HashMap<Source, Library>; type Pipelines = HashMap<(&'static str, Option<ConstantValues>), ComputePipelineState>; #[derive(Debug)] pub struct Kernels { libraries: 
RwLock<Libraries>, pipelines: RwLock<Pipelines>, } impl Kernels { pub fn new() -> Self { let libraries = RwLock::new(Libraries::new()); let pipelines = RwLock::new(Pipelines::new()); Self { libraries, pipelines, } } fn get_library_source(&self, source: Source) -> &'static str { match source { Source::Affine => AFFINE, Source::Unary => UNARY, Source::Binary => BINARY, Source::Ternary => TERNARY, Source::Indexing => INDEXING, Source::Cast => CAST, Source::Reduce => REDUCE, Source::Conv => CONV, Source::Random => RANDOM, Source::Quantized => QUANTIZED, Source::Mfa => panic!("Invalid lib"), } } /// Load the give library from its [`source`]. /// If this has been previously loaded it will just fetch it from cache. pub fn load_library( &self, device: &Device, source: Source, ) -> Result<Library, MetalKernelError> { let mut libraries = self.libraries.write()?; if let Some(lib) = libraries.get(&source) { Ok(lib.clone()) } else { let lib = match source { Source::Mfa => { let source_data = MFA; device.new_library_with_data(source_data).map_err(|e| { MetalKernelError::LoadLibraryError(format!( "Candle metal requires macosx > 13.0 or higher, cannot load mfa: {e}" )) })? } source => { let source_content = self.get_library_source(source); device .new_library_with_source(source_content, &CompileOptions::new()) .map_err(|e| MetalKernelError::LoadLibraryError(e.to_string()))? } }; libraries.insert(source, lib.clone()); Ok(lib) } } fn load_function( &self, device: &Device, source: Source, name: &'static str, constants: Option<FunctionConstantValues>, ) -> Result<Function, MetalKernelError> { let func = self .load_library(device, source)? .get_function(name, constants) .map_err(|e| MetalKernelError::LoadFunctionError(e.to_string()))?; Ok(func) } /// Load the give pipeline /// loads the library from source, then gets the function [`name`] from /// that source fn load_pipeline_with_constants( &self, device: &Device, source: Source, name: &'static str, constants: Option<ConstantValues>, ) -> Result<ComputePipelineState, MetalKernelError> { let mut pipelines = self.pipelines.write()?; let key = (name, constants); if let Some(pipeline) = pipelines.get(&key) { Ok(pipeline.clone()) } else { let (name, constants) = key; let func = self.load_function( device, source, name, constants.as_ref().map(|c| c.function_constant_values()), )?; let pipeline = device .new_compute_pipeline_state_with_function(&func) .map_err(|e| MetalKernelError::FailedToCreatePipeline(e.to_string()))?; pipelines.insert((name, constants), pipeline.clone()); Ok(pipeline) } } /// Load the give pipeline /// loads the library from source, then gets the function [`name`] from /// that source (without constants) pub fn load_pipeline( &self, device: &Device, source: Source, name: &'static str, ) -> Result<ComputePipelineState, MetalKernelError> { self.load_pipeline_with_constants(device, source, name, None) } } #[allow(clippy::too_many_arguments)] pub fn call_unary_contiguous( device: &Device, command_buffer: &CommandBufferRef, kernels: &Kernels, kernel_name: unary::contiguous::Kernel, length: usize, input: &Buffer, output: &Buffer, ) -> Result<(), MetalKernelError> { let pipeline = kernels.load_pipeline(device, Source::Unary, kernel_name.0)?; let encoder = command_buffer.new_compute_command_encoder(); encoder.set_compute_pipeline_state(&pipeline); set_params!(encoder, (length, input, output)); let (thread_group_count, thread_group_size) = linear_split(&pipeline, length); encoder.use_resource(input, metal::MTLResourceUsage::Read); 
encoder.use_resource(output, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); encoder.end_encoding(); Ok(()) } #[allow(clippy::too_many_arguments)] pub fn call_copy2d( device: &Device, command_buffer: &CommandBufferRef, kernels: &Kernels, name: copy2d::Kernel, input: &Buffer, output: &Buffer, d1: usize, d2: usize, src_s: usize, dst_s: usize, src_o_in_bytes: usize, dst_o_in_bytes: usize, ) -> Result<(), MetalKernelError> { let pipeline = kernels.load_pipeline(device, Source::Unary, name.0)?; let encoder = command_buffer.new_compute_command_encoder(); encoder.set_compute_pipeline_state(&pipeline); set_params!( encoder, ( d1, d2, src_s, dst_s, (input, src_o_in_bytes), (output, dst_o_in_bytes) ) ); let width: usize = d1 * d2; let (thread_group_count, thread_group_size) = linear_split(&pipeline, width); encoder.use_resource(input, metal::MTLResourceUsage::Read); encoder.use_resource(output, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); encoder.end_encoding(); Ok(()) } #[allow(clippy::too_many_arguments)] pub fn call_unary_strided( device: &Device, command_buffer: &CommandBufferRef, kernels: &Kernels, name: unary::strided::Kernel, shape: &[usize], input: &Buffer, strides: &[usize], offset: usize, output: &Buffer, output_offset: usize, ) -> Result<(), MetalKernelError> { let pipeline = kernels.load_pipeline(device, Source::Unary, name.0)?; let num_dims: usize = shape.len(); let encoder = command_buffer.new_compute_command_encoder(); encoder.set_compute_pipeline_state(&pipeline); let length: usize = shape.iter().product(); set_params!( encoder, ( length, num_dims, shape, strides, (input, offset), (output, output_offset) ) ); let width: usize = shape.iter().product(); let (thread_group_count, thread_group_size) = linear_split(&pipeline, width); encoder.use_resource(input, metal::MTLResourceUsage::Read); encoder.use_resource(output, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); encoder.end_encoding(); Ok(()) } #[allow(clippy::too_many_arguments)] pub fn call_binary_contiguous( device: &Device, command_buffer: &CommandBufferRef, kernels: &Kernels, kernel_name: binary::contiguous::Kernel, length: usize, left: &Buffer, right: &Buffer, output: &Buffer, ) -> Result<(), MetalKernelError> { let pipeline = kernels.load_pipeline(device, Source::Binary, kernel_name.0)?; let encoder = command_buffer.new_compute_command_encoder(); encoder.set_compute_pipeline_state(&pipeline); set_params!(encoder, (length, left, right, output)); let (thread_group_count, thread_group_size) = linear_split(&pipeline, length); encoder.use_resource(left, metal::MTLResourceUsage::Read); encoder.use_resource(right, metal::MTLResourceUsage::Read); encoder.use_resource(output, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); encoder.end_encoding(); Ok(()) } #[allow(clippy::too_many_arguments)] pub fn call_binary_strided( device: &Device, command_buffer: &CommandBufferRef, kernels: &Kernels, name: binary::strided::Kernel, shape: &[usize], left_input: &Buffer, left_strides: &[usize], left_offset: usize, right_input: &Buffer, right_strides: &[usize], right_offset: usize, output: &Buffer, ) -> Result<(), MetalKernelError> { let pipeline = kernels.load_pipeline(device, Source::Binary, name.0)?; let num_dims: usize = shape.len(); let encoder = command_buffer.new_compute_command_encoder(); let width: usize = 
shape.iter().product(); encoder.set_compute_pipeline_state(&pipeline); let length: usize = shape.iter().product(); set_params!( encoder, ( length, num_dims, shape, left_strides, right_strides, (left_input, left_offset), (right_input, right_offset), output ) ); let (thread_group_count, thread_group_size) = linear_split(&pipeline, width); encoder.use_resource(left_input, metal::MTLResourceUsage::Read); encoder.use_resource(right_input, metal::MTLResourceUsage::Read); encoder.use_resource(output, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); encoder.end_encoding(); Ok(()) } #[allow(clippy::too_many_arguments)] pub fn call_cast_contiguous( device: &Device, command_buffer: &CommandBufferRef, kernels: &Kernels, kernel_name: &'static str, length: usize, input: &Buffer, input_offset: usize, output: &Buffer, ) -> Result<(), MetalKernelError> { let pipeline = kernels.load_pipeline(device, Source::Cast, kernel_name)?; let encoder = command_buffer.new_compute_command_encoder(); encoder.set_compute_pipeline_state(&pipeline); set_params!(encoder, (length, (input, input_offset), output)); let (thread_group_count, thread_group_size) = linear_split(&pipeline, length); encoder.use_resource(input, metal::MTLResourceUsage::Read); encoder.use_resource(output, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); encoder.end_encoding(); Ok(()) } #[allow(clippy::too_many_arguments)] pub fn call_cast_strided( device: &Device, command_buffer: &CommandBufferRef, kernels: &Kernels, kernel_name: &'static str, shape: &[usize], input: &Buffer, input_strides: &[usize], input_offset: usize, output: &Buffer, ) -> Result<(), MetalKernelError> { let pipeline = kernels.load_pipeline(device, Source::Cast, kernel_name)?; let encoder = command_buffer.new_compute_command_encoder(); encoder.set_compute_pipeline_state(&pipeline); let length: usize = shape.iter().product(); set_params!( encoder, ( length, shape.len(), shape, input_strides, (input, input_offset), output ) ); let (thread_group_count, thread_group_size) = linear_split(&pipeline, length); encoder.use_resource(input, metal::MTLResourceUsage::Read); encoder.use_resource(output, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); encoder.end_encoding(); Ok(()) } pub fn call_reduce_contiguous( device: &Device, command_buffer: &CommandBufferRef, kernels: &Kernels, kernel_name: &'static str, length: usize, out_length: usize, input: &Buffer, input_offset: usize, output: &Buffer, ) -> Result<(), MetalKernelError> { let pipeline = kernels.load_pipeline(device, Source::Reduce, kernel_name)?; let elements_to_sum = length / out_length; let encoder = command_buffer.new_compute_command_encoder(); encoder.set_compute_pipeline_state(&pipeline); set_params!( encoder, (length, elements_to_sum, (input, input_offset), output) ); let thread_group_count = MTLSize { width: out_length as u64, height: 1, depth: 1, }; let width = std::cmp::min( pipeline.max_total_threads_per_threadgroup(), (elements_to_sum as u64 + 2 - 1) / 2, ) .next_power_of_two(); let thread_group_size = MTLSize { width, height: 1, depth: 1, }; encoder.use_resource(input, metal::MTLResourceUsage::Read); encoder.use_resource(output, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); encoder.end_encoding(); Ok(()) } pub fn call_reduce_strided( device: &Device, command_buffer: &CommandBufferRef, kernels: &Kernels, 
kernel_name: &'static str, shape: &[usize], strides: &[usize], out_length: usize, input: &Buffer, input_offset: usize, output: &Buffer, ) -> Result<(), MetalKernelError> { let length: usize = shape.iter().product(); let pipeline = kernels.load_pipeline(device, Source::Reduce, kernel_name)?; let elements_to_sum = length / out_length; let encoder = command_buffer.new_compute_command_encoder(); encoder.set_compute_pipeline_state(&pipeline); set_params!( encoder, ( shape.len(), shape, strides, elements_to_sum, (input, input_offset), output ) ); let thread_group_count = MTLSize { width: out_length as u64, height: 1, depth: 1, }; let width = std::cmp::min( pipeline.max_total_threads_per_threadgroup(), elements_to_sum as u64, ) .next_power_of_two(); let thread_group_size = MTLSize { width, height: 1, depth: 1, }; encoder.use_resource(input, metal::MTLResourceUsage::Read); encoder.use_resource(output, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); encoder.end_encoding(); Ok(()) } #[allow(clippy::too_many_arguments)] pub fn call_last_softmax( device: &Device, command_buffer: &CommandBufferRef, kernels: &Kernels, kernel_name: &'static str, length: usize, elements_to_sum: usize, input: &Buffer, input_offset: usize, output: &Buffer, ) -> Result<(), MetalKernelError> { let pipeline = kernels.load_pipeline(device, Source::Reduce, kernel_name)?; let encoder = command_buffer.new_compute_command_encoder(); encoder.set_compute_pipeline_state(&pipeline); set_params!( encoder, (length, elements_to_sum, (input, input_offset), output) ); let out_length = length / elements_to_sum; let thread_group_count = MTLSize { width: out_length as u64, height: 1, depth: 1, }; let width = std::cmp::min( pipeline.max_total_threads_per_threadgroup(), elements_to_sum as u64, ) .next_power_of_two(); let thread_group_size = MTLSize { width, height: 1, depth: 1, }; encoder.use_resource(input, metal::MTLResourceUsage::Read); encoder.use_resource(output, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); encoder.end_encoding(); Ok(()) } #[allow(clippy::too_many_arguments)] pub fn call_affine( device: &Device, command_buffer: &CommandBufferRef, kernels: &Kernels, name: &'static str, size: usize, input: &Buffer, output: &Buffer, mul: f32, add: f32, ) -> Result<(), MetalKernelError> { let pipeline = kernels.load_pipeline(device, Source::Affine, name)?; let encoder = command_buffer.new_compute_command_encoder(); encoder.set_compute_pipeline_state(&pipeline); set_params!(encoder, (size, mul, add, input, output)); let (thread_group_count, thread_group_size) = linear_split(&pipeline, size); encoder.use_resource(input, metal::MTLResourceUsage::Read); encoder.use_resource(output, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); encoder.end_encoding(); Ok(()) } #[allow(clippy::too_many_arguments)] pub fn call_affine_strided( device: &Device, command_buffer: &CommandBufferRef, kernels: &Kernels, name: &'static str, shape: &[usize], input: &Buffer, input_stride: &[usize], input_offset: usize, output: &Buffer, mul: f32, add: f32, ) -> Result<(), MetalKernelError> { let pipeline = kernels.load_pipeline(device, Source::Affine, name)?; let size: usize = shape.iter().product(); let encoder = command_buffer.new_compute_command_encoder(); encoder.set_compute_pipeline_state(&pipeline); set_params!( encoder, ( size, shape.len(), shape, input_stride, mul, add, (input, input_offset), output ) 
); let (thread_group_count, thread_group_size) = linear_split(&pipeline, size); encoder.use_resource(input, metal::MTLResourceUsage::Read); encoder.use_resource(output, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); encoder.end_encoding(); Ok(()) } #[allow(clippy::too_many_arguments)] pub fn call_powf( device: &Device, command_buffer: &CommandBufferRef, kernels: &Kernels, name: &'static str, size: usize, input: &Buffer, output: &Buffer, mul: f32, ) -> Result<(), MetalKernelError> { let pipeline = kernels.load_pipeline(device, Source::Affine, name)?; let encoder = command_buffer.new_compute_command_encoder(); encoder.set_compute_pipeline_state(&pipeline); set_params!(encoder, (size, mul, input, output)); let (thread_group_count, thread_group_size) = linear_split(&pipeline, size); encoder.use_resource(input, metal::MTLResourceUsage::Read); encoder.use_resource(output, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); encoder.end_encoding(); Ok(()) } #[allow(clippy::too_many_arguments)] pub fn call_powf_strided( device: &Device, command_buffer: &CommandBufferRef, kernels: &Kernels, name: &'static str, shape: &[usize], input: &Buffer, input_stride: &[usize], input_offset: usize, output: &Buffer, mul: f32, ) -> Result<(), MetalKernelError> { let pipeline = kernels.load_pipeline(device, Source::Affine, name)?; let size: usize = shape.iter().product(); let encoder = command_buffer.new_compute_command_encoder(); encoder.set_compute_pipeline_state(&pipeline); set_params!( encoder, ( size, shape.len(), shape, input_stride, mul, (input, input_offset), output ) ); let (thread_group_count, thread_group_size) = linear_split(&pipeline, size); encoder.use_resource(input, metal::MTLResourceUsage::Read); encoder.use_resource(output, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); encoder.end_encoding(); Ok(()) } #[allow(clippy::too_many_arguments)] pub fn call_elu( device: &Device, command_buffer: &CommandBufferRef, kernels: &Kernels, name: &'static str, size: usize, input: &Buffer, output: &Buffer, mul: f32, ) -> Result<(), MetalKernelError> { let pipeline = kernels.load_pipeline(device, Source::Affine, name)?; let encoder = command_buffer.new_compute_command_encoder(); encoder.set_compute_pipeline_state(&pipeline); set_params!(encoder, (size, mul, input, output)); let (thread_group_count, thread_group_size) = linear_split(&pipeline, size); encoder.use_resource(input, metal::MTLResourceUsage::Read); encoder.use_resource(output, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); encoder.end_encoding(); Ok(()) } #[allow(clippy::too_many_arguments)] pub fn call_elu_strided( device: &Device, command_buffer: &CommandBufferRef, kernels: &Kernels, name: &'static str, shape: &[usize], input: &Buffer, input_stride: &[usize], input_offset: usize, output: &Buffer, mul: f32, ) -> Result<(), MetalKernelError> { let pipeline = kernels.load_pipeline(device, Source::Affine, name)?; let size: usize = shape.iter().product(); let encoder = command_buffer.new_compute_command_encoder(); encoder.set_compute_pipeline_state(&pipeline); set_params!( encoder, ( size, shape.len(), shape, input_stride, mul, (input, input_offset), output ) ); let (thread_group_count, thread_group_size) = linear_split(&pipeline, size); encoder.use_resource(input, metal::MTLResourceUsage::Read); encoder.use_resource(output, 
metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); encoder.end_encoding(); Ok(()) } pub fn call_where_cond_strided( device: &Device, command_buffer: &CommandBufferRef, kernels: &Kernels, name: &'static str, shape: &[usize], cond: &Buffer, (cond_stride, cond_offset): (&[usize], usize), left: &Buffer, (left_stride, left_offset): (&[usize], usize), right: &Buffer, (right_stride, right_offset): (&[usize], usize), output: &Buffer, ) -> Result<(), MetalKernelError> { let pipeline = kernels.load_pipeline(device, Source::Ternary, name)?; let encoder = command_buffer.new_compute_command_encoder(); encoder.set_compute_pipeline_state(&pipeline); let size: usize = shape.iter().product(); let rank = shape.len(); set_params!( encoder, ( size, rank, shape, cond_stride, left_stride, right_stride, (cond, cond_offset), (left, left_offset), (right, right_offset), output ) ); let (thread_group_count, thread_group_size) = linear_split(&pipeline, size); encoder.use_resource(cond, metal::MTLResourceUsage::Read); encoder.use_resource(left, metal::MTLResourceUsage::Read); encoder.use_resource(right, metal::MTLResourceUsage::Read); encoder.use_resource(output, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); encoder.end_encoding(); Ok(()) } #[allow(clippy::too_many_arguments)] pub fn call_index_select( device: &Device, command_buffer: &CommandBufferRef, kernels: &Kernels, name: &'static str, shape: &[usize], ids_size: usize, dim: usize, input: &Buffer, ids: &Buffer, output: &Buffer, ) -> Result<(), MetalKernelError> { let left_size: usize = shape[..dim].iter().product(); let right_size: usize = shape[dim + 1..].iter().product(); let src_dim_size = shape[dim]; let dst_el = ids_size * left_size * right_size; let pipeline = kernels.load_pipeline(device, Source::Indexing, name)?; let encoder = command_buffer.new_compute_command_encoder(); encoder.set_compute_pipeline_state(&pipeline); set_params!( encoder, ( dst_el, left_size, src_dim_size, right_size, ids_size, input, ids, output ) ); let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el); encoder.use_resource(input, metal::MTLResourceUsage::Read); encoder.use_resource(ids, metal::MTLResourceUsage::Read); encoder.use_resource(output, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); encoder.end_encoding(); Ok(()) } #[allow(clippy::too_many_arguments)] pub fn call_gather( device: &Device, command_buffer: &CommandBufferRef, kernels: &Kernels, name: &'static str, shape: &[usize], ids_size: usize, dim: usize, input: &Buffer, input_offset: usize, ids: &Buffer, ids_offset: usize, output: &Buffer, ) -> Result<(), MetalKernelError> { let left_size: usize = shape[..dim].iter().product(); let right_size: usize = shape[dim + 1..].iter().product(); let src_dim_size = shape[dim]; let dst_el = ids_size * left_size * right_size; let pipeline = kernels.load_pipeline(device, Source::Indexing, name)?; let encoder = command_buffer.new_compute_command_encoder(); encoder.set_compute_pipeline_state(&pipeline); set_params!( encoder, ( dst_el, left_size, src_dim_size, right_size, ids_size, (input, input_offset), (ids, ids_offset), output ) ); let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el); encoder.use_resource(input, metal::MTLResourceUsage::Read); encoder.use_resource(ids, metal::MTLResourceUsage::Read); encoder.use_resource(output, metal::MTLResourceUsage::Write); 
encoder.dispatch_thread_groups(thread_group_count, thread_group_size); encoder.end_encoding(); Ok(()) } pub fn call_scatter_add( device: &Device, command_buffer: &CommandBufferRef, kernels: &Kernels, name: &'static str, src_shape: &[usize], dst_shape: &[usize], dim: usize, input: &Buffer, input_offset: usize, ids: &Buffer, ids_offset: usize, output: &Buffer, ) -> Result<(), MetalKernelError> { let left_size: usize = src_shape[..dim].iter().product(); let right_size: usize = src_shape[dim + 1..].iter().product(); let src_dim_size = src_shape[dim]; let dst_el = left_size * right_size; let dst_dim_size = dst_shape[dim]; let pipeline = kernels.load_pipeline(device, Source::Indexing, name)?; let encoder = command_buffer.new_compute_command_encoder(); encoder.set_compute_pipeline_state(&pipeline); set_params!( encoder, ( dst_el, left_size, src_dim_size, right_size, dst_dim_size, (input, input_offset), (ids, ids_offset), output ) ); let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el); encoder.use_resource(input, metal::MTLResourceUsage::Read); encoder.use_resource(ids, metal::MTLResourceUsage::Read); encoder.use_resource(output, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); encoder.end_encoding(); Ok(()) } pub fn call_index_add( device: &Device, command_buffer: &CommandBufferRef, kernels: &Kernels, name: &'static str, src_shape: &[usize], dst_shape: &[usize], ids_shape: &[usize], dim: usize, input: &Buffer, input_offset: usize, ids: &Buffer, ids_offset: usize, output: &Buffer, ) -> Result<(), MetalKernelError> { let left_size: usize = src_shape[..dim].iter().product(); let right_size: usize = src_shape[dim + 1..].iter().product(); let src_dim_size = src_shape[dim]; let dst_el = left_size * right_size; let dst_dim_size = dst_shape[dim]; let ids_dim_size = ids_shape[0]; let pipeline = kernels.load_pipeline(device, Source::Indexing, name)?; let encoder = command_buffer.new_compute_command_encoder(); encoder.set_compute_pipeline_state(&pipeline); set_params!( encoder, ( dst_el, left_size, src_dim_size, right_size, dst_dim_size, ids_dim_size, (input, input_offset), (ids, ids_offset), output ) ); let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el); encoder.use_resource(input, metal::MTLResourceUsage::Read); encoder.use_resource(ids, metal::MTLResourceUsage::Read); encoder.use_resource(output, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); encoder.end_encoding(); Ok(()) } #[derive(Debug, PartialEq)] pub enum Value { USize(usize), Bool(bool), F32(f32), U16(u16), } impl std::hash::Hash for Value { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { match self { Value::F32(v) => v.to_bits().hash(state), Value::USize(v) => v.hash(state), Value::U16(v) => v.hash(state), Value::Bool(v) => v.hash(state), } } } impl Value { fn data_type(&self) -> MTLDataType { match self { Value::USize(_) => MTLDataType::UInt, Value::F32(_) => MTLDataType::Float, Value::U16(_) => MTLDataType::UShort, Value::Bool(_) => MTLDataType::Bool, } } } /// Not true, good enough for our purposes. 
impl Eq for Value {} #[derive(Debug, Eq, PartialEq, Hash)] struct ConstantValues(Vec<(usize, Value)>); impl ConstantValues { pub fn new(values: Vec<(usize, Value)>) -> Self { Self(values) } fn function_constant_values(&self) -> FunctionConstantValues { let f = FunctionConstantValues::new(); for (index, value) in &self.0 { let ty = value.data_type(); match value { Value::USize(v) => { f.set_constant_value_at_index( v as *const usize as *const c_void, ty, *index as u64, ); } Value::F32(v) => { f.set_constant_value_at_index( v as *const f32 as *const c_void, ty, *index as u64, ); } Value::U16(v) => { f.set_constant_value_at_index( v as *const u16 as *const c_void, ty, *index as u64, ); } Value::Bool(v) => { f.set_constant_value_at_index( v as *const bool as *const c_void, ty, *index as u64, ); } } } f } } #[allow(clippy::too_many_arguments)] pub fn call_gemm( device: &Device, command_buffer: &CommandBufferRef, kernels: &Kernels, name: &'static str, (b, m, n, k): (usize, usize, usize, usize), lhs_stride: &[usize], lhs_offset: usize, lhs_buffer: &Buffer, rhs_stride: &[usize], rhs_offset: usize, rhs_buffer: &Buffer, output: &Buffer, ) -> Result<(), MetalKernelError> { assert!(rhs_stride.len() >= 2); assert!(lhs_stride.len() >= 2); let rhs_m1 = rhs_stride[rhs_stride.len() - 1]; let rhs_m2 = rhs_stride[rhs_stride.len() - 2]; let lhs_m1 = lhs_stride[lhs_stride.len() - 1]; let lhs_m2 = lhs_stride[lhs_stride.len() - 2]; let a_trans = if lhs_m1 == 1 && lhs_m2 == k { false } else if lhs_m1 == m && lhs_m2 == 1 { true } else { return Err(MetalKernelError::MatMulNonContiguous { lhs_stride: lhs_stride.to_vec(), rhs_stride: rhs_stride.to_vec(), mnk: (m, n, k), })?; }; let b_trans = if rhs_m1 == 1 && rhs_m2 == n { false } else if rhs_m1 == k && rhs_m2 == 1 { true } else { return Err(MetalKernelError::MatMulNonContiguous { lhs_stride: lhs_stride.to_vec(), rhs_stride: rhs_stride.to_vec(), mnk: (m, n, k), })?; }; let d_trans = false; let alpha = 1.0f32; let beta = 0.0f32; let batched = b > 1; let fused_activation = false; let fused_bias = false; let (m_simd, n_simd, k_simd, m_splits, n_splits) = if m == 1 { let m_simd = 8; let n_simd = 8; let k_simd = 64; let m_splits = 1; let n_splits = 1; (m_simd, n_simd, k_simd, m_splits, n_splits) } else { let m_simd = 40; let n_simd = 40; let k_simd = 32; let m_splits = 1; let n_splits = 1; (m_simd, n_simd, k_simd, m_splits, n_splits) }; let constants = Some(ConstantValues::new(vec![ (0, Value::USize(m)), (1, Value::USize(n)), (2, Value::USize(k)), (10, Value::Bool(a_trans)), (11, Value::Bool(b_trans)), (13, Value::Bool(d_trans)), (20, Value::F32(alpha)), (21, Value::F32(beta)), (100, Value::Bool(batched)), (101, Value::Bool(fused_activation)), // Garbage (102, Value::Bool(false)), (103, Value::Bool(false)), (113, Value::Bool(false)), (50_000, Value::Bool(false)), // End garbage (200, Value::U16(m_simd)), (201, Value::U16(n_simd)), (202, Value::U16(k_simd)), (210, Value::U16(m_splits)), (211, Value::U16(n_splits)), (50_001, Value::Bool(fused_bias)), ])); let pipeline = kernels.load_pipeline_with_constants(device, Source::Mfa, name, constants)?; let m_group = m_simd * m_splits; let n_group = n_simd * n_splits; let a_block_length = m_group * k_simd; let b_block_length = k_simd * n_group; let mut block_elements = a_block_length + b_block_length; if (m % 8 != 0) && (n % 8 != 0) { let c_block_length = m_group * n_group; block_elements = std::cmp::max(c_block_length, block_elements) } if fused_bias { if d_trans { block_elements = std::cmp::max(block_elements, m_group); } else { 
block_elements = std::cmp::max(block_elements, n_group); } } let bytes = match name { "sgemm" => 4, "hgemm" => 2, other => { return Err(MetalKernelError::LoadLibraryError(format!( "{other} is not a valid kernel for gemm" ))); } }; let block_bytes = block_elements * bytes; let encoder = command_buffer.new_compute_command_encoder(); encoder.set_compute_pipeline_state(&pipeline); encoder.set_threadgroup_memory_length(0, block_bytes.into()); encoder.set_buffer(0, Some(lhs_buffer), lhs_offset as NSUInteger); encoder.set_buffer(1, Some(rhs_buffer), rhs_offset as NSUInteger); encoder.set_buffer(2, Some(output), 0); // TODO Tensor D let grid_z = b; if batched { let byte_stride_a: usize = lhs_stride[lhs_stride.len() - 3] * bytes as usize; let byte_stride_b: usize = rhs_stride[rhs_stride.len() - 3] * bytes as usize; let byte_stride_c = m * n * bytes as usize; // TODO byte_stride_d let byte_stride_d = 0; let buffer: Vec<u64> = vec![ byte_stride_a as _, byte_stride_b as _, byte_stride_c as _, byte_stride_d as _, ]; encoder.set_bytes( 10, (buffer.len() * core::mem::size_of::<u64>()) as NSUInteger, buffer.as_ptr() as *const NSUInteger as *const c_void, ); } let grid_size = MTLSize { width: divide(n, n_group.into()), height: divide(m, m_group.into()), depth: grid_z as NSUInteger, }; let group_size = MTLSize { width: 32 * (m_splits as u64) * (n_splits as u64), height: 1, depth: 1, }; encoder.use_resource(lhs_buffer, metal::MTLResourceUsage::Read); encoder.use_resource(rhs_buffer, metal::MTLResourceUsage::Read); encoder.use_resource(output, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(grid_size, group_size); encoder.end_encoding(); Ok(()) } #[allow(clippy::too_many_arguments)] pub fn call_im2col1d_strided( device: &Device, command_buffer: &CommandBufferRef, kernels: &Kernels, name: &'static str, shape: &[usize], strides: &[usize], (k_size, stride, padding, dilation): (usize, usize, usize, usize), input: &Buffer, input_offset: usize, output: &Buffer, ) -> Result<(), MetalKernelError> { let pipeline = kernels.load_pipeline(device, Source::Conv, name)?; let l_out = (shape[2] + 2 * padding - dilation * (k_size - 1) - 1) / stride + 1; let dst_el = shape[0] * l_out * shape[1] * k_size; let encoder = command_buffer.new_compute_command_encoder(); let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el); encoder.set_compute_pipeline_state(&pipeline); set_params!( encoder, ( dst_el, l_out, k_size, stride, padding, dilation, shape, strides, (input, input_offset), output ) ); encoder.use_resource(input, metal::MTLResourceUsage::Read); encoder.use_resource(output, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); encoder.end_encoding(); Ok(()) } #[allow(clippy::too_many_arguments)] pub fn call_im2col_strided( device: &Device, command_buffer: &CommandBufferRef, kernels: &Kernels, name: &'static str, shape: &[usize], strides: &[usize], (h_k, w_k, stride, padding, dilation): (usize, usize, usize, usize, usize), input: &Buffer, input_offset: usize, output: &Buffer, ) -> Result<(), MetalKernelError> { let pipeline = kernels.load_pipeline(device, Source::Conv, name)?; let h = shape[2]; let w = shape[3]; let h_out = (h + 2 * padding - dilation * (h_k - 1) - 1) / stride + 1; let w_out = (w + 2 * padding - dilation * (w_k - 1) - 1) / stride + 1; let dst_el = shape[0] * h_out * w_out * shape[1] * h_k * w_k; let encoder = command_buffer.new_compute_command_encoder(); let (thread_group_count, thread_group_size) = linear_split(&pipeline, 
dst_el); encoder.set_compute_pipeline_state(&pipeline); set_params!( encoder, ( dst_el, h_out, w_out, h_k, w_k, stride, padding, dilation, shape, strides, (input, input_offset), output ) ); encoder.use_resource(input, metal::MTLResourceUsage::Read); encoder.use_resource(output, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); encoder.end_encoding(); Ok(()) } #[allow(clippy::too_many_arguments)] pub fn call_upsample_nearest_2d( device: &Device, command_buffer: &CommandBufferRef, kernels: &Kernels, name: &'static str, shape: &[usize], strides: &[usize], out_w: usize, out_h: usize, input: &Buffer, input_offset: usize, output: &Buffer, ) -> Result<(), MetalKernelError> { let pipeline = kernels.load_pipeline(device, Source::Conv, name)?; let dst_el = out_w * out_h * shape[0] * shape[1]; let scale_w = shape[2] as f32 / out_w as f32; let scale_h = shape[3] as f32 / out_h as f32; let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el); let encoder = command_buffer.new_compute_command_encoder(); encoder.set_compute_pipeline_state(&pipeline); set_params!( encoder, ( out_w, out_h, scale_w, scale_h, shape, strides, (input, input_offset), output ) ); encoder.use_resource(input, metal::MTLResourceUsage::Read); encoder.use_resource(output, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); encoder.end_encoding(); Ok(()) } #[allow(clippy::too_many_arguments)] pub fn call_random_uniform( device: &Device, command_buffer: &CommandBufferRef, kernels: &Kernels, name: &'static str, min: f32, max: f32, length: usize, seed: &Buffer, buffer: &Buffer, ) -> Result<(), MetalKernelError> { if min >= max { return Err(MetalKernelError::LoadLibraryError( "min must be less than max".to_string(), )); } let pipeline = kernels.load_pipeline(device, Source::Random, name)?; let encoder = command_buffer.new_compute_command_encoder(); let odd = (length % 2 != 0) as usize; let (thread_group_count, thread_group_size) = linear_split(&pipeline, length / 2 + odd); encoder.set_compute_pipeline_state(&pipeline); set_params!(encoder, (length, min, max, seed, buffer)); encoder.use_resource( seed, metal::MTLResourceUsage::Read | metal::MTLResourceUsage::Write, ); encoder.use_resource(buffer, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); encoder.end_encoding(); Ok(()) } #[allow(clippy::too_many_arguments)] pub fn call_random_normal( device: &Device, command_buffer: &CommandBufferRef, kernels: &Kernels, name: &'static str, mean: f32, stddev: f32, length: usize, seed: &Buffer, buffer: &Buffer, ) -> Result<(), MetalKernelError> { let pipeline = kernels.load_pipeline(device, Source::Random, name)?; let encoder = command_buffer.new_compute_command_encoder(); let odd = (length % 2 != 0) as usize; let (thread_group_count, thread_group_size) = linear_split(&pipeline, length / 2 + odd); encoder.set_compute_pipeline_state(&pipeline); set_params!(encoder, (length, mean, stddev, seed, buffer)); encoder.use_resource( seed, metal::MTLResourceUsage::Read | metal::MTLResourceUsage::Write, ); encoder.use_resource(buffer, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); encoder.end_encoding(); Ok(()) } #[derive(Debug, Clone, Copy)] pub enum GgmlDType { Q4_0, Q4_1, Q5_0, Q5_1, Q8_0, Q8_1, Q2K, Q3K, Q4K, Q5K, Q6K, Q8K, F16, F32, } pub fn call_quantized_matmul_t( device: &Device, command_buffer: &CommandBufferRef, 
kernels: &Kernels, dtype: GgmlDType, (b, m, n, k): (usize, usize, usize, usize), lhs: &Buffer, lhs_offset: usize, rhs: &Buffer, output: &Buffer, ) -> Result<(), MetalKernelError> { // Everything is in reverse let ne00 = k as i64; let ne01 = n as i64; let ne02 = b as i64; let ne03 = 1 as i64; let nb00 = 0i64; let nb01 = 0 as i64; let nb02 = 0 as i64; let ne10 = k as i64; let ne11 = m as i64; let ne12 = b as i64; let ne13 = 1 as i64; let nb10 = 0i64; let nb11 = 0i64; let nb12 = 0i64; let ne0 = n as i64; let ne1 = m as i64; let r2: u32 = (ne12 / ne02) as u32; let r3: u32 = (ne13 / ne03) as u32; let (nth0, nth1, align) = match dtype { GgmlDType::Q4_0 | GgmlDType::Q4_1 | GgmlDType::Q5_0 | GgmlDType::Q5_1 | GgmlDType::Q8_0 | GgmlDType::Q8_1 => { let nth0 = 8; let nth1 = 8; let align = 8; (nth0, nth1, align) } GgmlDType::Q2K => { // Fixing a bug in Metal for GGML let nth0 = 4; let nth1 = 8; let align = 4; (nth0, nth1, align) } GgmlDType::Q4K => { let nth0 = 4; let nth1 = 8; let align = 4; (nth0, nth1, align) } GgmlDType::Q3K | GgmlDType::Q5K => { let nth0 = 2; let nth1 = 32; let align = 4; (nth0, nth1, align) } GgmlDType::Q6K => { let nth0 = 2; let nth1 = 32; let align = 2; (nth0, nth1, align) } GgmlDType::F16 | GgmlDType::Q8K => { // Original implem uses rows let nth0 = 32; let nth1 = 1; let align = 8; (nth0, nth1, align) } GgmlDType::F32 => { let nth0 = 32; let nth1 = 1; let align = 8; (nth0, nth1, align) } }; let thread_groups_count = MTLSize { width: divide(ne01 as usize, align), height: ne11 as u64, depth: (ne12 * ne13) as u64, }; let threads_per_threadgroup = MTLSize { width: nth0, height: nth1, depth: 1, }; let name = match dtype { GgmlDType::Q4_0 => "kernel_mul_mv_q4_0_f32", GgmlDType::Q4_1 => "kernel_mul_mv_q4_1_f32", GgmlDType::Q5_0 => "kernel_mul_mv_q5_0_f32", GgmlDType::Q5_1 => "kernel_mul_mv_q5_1_f32", GgmlDType::Q8_0 => "kernel_mul_mv_q8_0_f32", GgmlDType::Q8_1 => "kernel_mul_mv_q8_1_f32", GgmlDType::Q2K => "kernel_mul_mv_q2_K_f32", GgmlDType::Q3K => "kernel_mul_mv_q3_K_f32", GgmlDType::Q4K => "kernel_mul_mv_q4_K_f32", GgmlDType::Q5K => "kernel_mul_mv_q5_K_f32", GgmlDType::Q6K => "kernel_mul_mv_q6_K_f32", GgmlDType::Q8K => "kernel_mul_mv_q8_K_f32", GgmlDType::F16 => "kernel_mul_mv_f16_f32", GgmlDType::F32 => "kernel_mul_mv_f32_f32", }; let pipeline = kernels.load_pipeline(device, Source::Quantized, name)?; let encoder = command_buffer.new_compute_command_encoder(); encoder.set_compute_pipeline_state(&pipeline); set_params!( encoder, ( rhs, (lhs, lhs_offset), output, ne00, ne01, ne02, nb00, nb01, nb02, ne10, ne11, ne12, nb10, nb11, nb12, ne0, ne1, r2, r3 ) ); encoder.set_threadgroup_memory_length(0, 8192); encoder.use_resource(lhs, metal::MTLResourceUsage::Read); encoder.use_resource(rhs, metal::MTLResourceUsage::Read); encoder.use_resource(output, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_groups_count, threads_per_threadgroup); encoder.end_encoding(); Ok(()) } fn divide(m: usize, b: usize) -> NSUInteger { ((m + b - 1) / b) as NSUInteger } #[allow(clippy::too_many_arguments)] pub fn call_pool2d( device: &Device, command_buffer: &CommandBufferRef, kernels: &Kernels, name: &'static str, shape: &[usize], strides: &[usize], out_w: usize, out_h: usize, w_k: usize, h_k: usize, w_stride: usize, h_stride: usize, input: &Buffer, output: &Buffer, ) -> Result<(), MetalKernelError> { let dst_el = out_w * out_h * shape[0] * shape[1]; let pipeline: ComputePipelineState = kernels.load_pipeline(device, Source::Conv, name)?; let (thread_group_count, thread_group_size) = 
linear_split(&pipeline, dst_el); let encoder = command_buffer.new_compute_command_encoder(); encoder.set_compute_pipeline_state(&pipeline); set_params!( encoder, (w_k, h_k, w_stride, h_stride, shape, strides, input, output) ); encoder.use_resource(input, metal::MTLResourceUsage::Read); encoder.use_resource(output, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); encoder.end_encoding(); Ok(()) } #[allow(clippy::too_many_arguments)] pub fn call_conv_transpose1d( device: &Device, command_buffer: &CommandBufferRef, kernels: &Kernels, name: &'static str, dilation: usize, stride: usize, padding: usize, out_padding: usize, c_out: usize, l_out: usize, b_size: usize, src_shape: &[usize], src_strides: &[usize], kernel_shape: &[usize], kernel_strides: &[usize], input: &Buffer, input_offset: usize, kernel: &Buffer, kernel_offset: usize, output: &Buffer, ) -> Result<(), MetalKernelError> { let dst_el = c_out * l_out * b_size; let pipeline: ComputePipelineState = kernels.load_pipeline(device, Source::Conv, name)?; let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el); let encoder = command_buffer.new_compute_command_encoder(); encoder.set_compute_pipeline_state(&pipeline); set_params!( encoder, ( l_out, stride, padding, out_padding, dilation, src_shape, src_strides, kernel_shape, kernel_strides, (input, input_offset), (kernel, kernel_offset), output ) ); encoder.use_resource(input, metal::MTLResourceUsage::Read); encoder.use_resource(kernel, metal::MTLResourceUsage::Read); encoder.use_resource(output, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); encoder.end_encoding(); Ok(()) } #[cfg(test)] mod tests;
candle/candle-metal-kernels/src/lib.rs/0
{ "file_path": "candle/candle-metal-kernels/src/lib.rs", "repo_id": "candle", "token_count": 26995 }
32
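The kernel wrappers above all follow the same shape: load a pipeline from a `Source`, split the work linearly over threadgroups, bind arguments with `set_params!`, declare resource usage, and dispatch. Below is a minimal sketch of how a caller might wrap `call_pool2d` for a 2D max-pool; the kernel name, the crate-root imports, and the output-size convention (no padding, floor division) are assumptions for illustration, not taken from this file.

// Hypothetical helper around `call_pool2d` (sketch only, not the candle backend code).
use candle_metal_kernels::{call_pool2d, Kernels, MetalKernelError};
use metal::{Buffer, CommandBufferRef, Device};

// Assumed kernel name; the real names live in the Metal shader sources.
const KERNEL_NAME: &str = "max_pool2d_f32";

#[allow(clippy::too_many_arguments)]
fn max_pool2d_f32(
    device: &Device,
    command_buffer: &CommandBufferRef,
    kernels: &Kernels,
    shape: &[usize],   // NCHW: (b, c, h, w)
    strides: &[usize], // row-major strides for `shape`
    (w_k, h_k): (usize, usize),
    (w_stride, h_stride): (usize, usize),
    input: &Buffer,
    output: &Buffer,
) -> Result<(), MetalKernelError> {
    // Assumed output-size convention: no padding, floor division.
    let out_w = (shape[3] - w_k) / w_stride + 1;
    let out_h = (shape[2] - h_k) / h_stride + 1;
    call_pool2d(
        device,
        command_buffer,
        kernels,
        KERNEL_NAME,
        shape,
        strides,
        out_w,
        out_h,
        w_k,
        h_k,
        w_stride,
        h_stride,
        input,
        output,
    )
}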
use candle::{Result, Tensor};
use serde::Deserialize;

#[derive(Debug, Clone, Copy, PartialEq, Deserialize, Default)]
#[serde(rename_all = "lowercase")]
pub enum Activation {
    #[default]
    #[serde(alias = "gelu")]
    Gelu,
    #[serde(alias = "gelu_new")]
    NewGelu,
    Relu,
    Relu2,
    Relu6,
    Silu,
    Sigmoid,
    HardSigmoid,
    Swiglu,
    Swish,
    HardSwish,
    Elu(f64),
    LeakyRelu(f64),
    #[serde(alias = "gelu_pytorch_tanh")]
    GeluPytorchTanh,
}

impl super::Module for Activation {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        match self {
            Self::Gelu => xs.gelu_erf(),
            // https://github.com/huggingface/transformers/blob/12f043eaeaabfef6f6efea411d98e6f6d3c094b7/src/transformers/activations.py#L49-L78
            Self::NewGelu => xs.gelu(),
            Self::Relu => xs.relu(),
            Self::Relu2 => xs.relu()?.sqr(),
            Self::Relu6 => xs.clamp(0f32, 6f32),
            Self::Silu => xs.silu(),
            Self::Sigmoid => crate::ops::sigmoid(xs),
            Self::HardSigmoid => crate::ops::hard_sigmoid(xs),
            Self::Swiglu => crate::ops::swiglu(xs),
            Self::Swish => xs * crate::ops::sigmoid(xs)?,
            Self::HardSwish => xs * crate::ops::hard_sigmoid(xs)?,
            &Self::Elu(alpha) => xs.elu(alpha),
            &Self::LeakyRelu(negative_slope) => crate::ops::leaky_relu(xs, negative_slope),
            Self::GeluPytorchTanh => xs.gelu(),
        }
    }
}

#[derive(Clone, Debug)]
pub struct PReLU {
    weight: Tensor,
    is_scalar: bool,
}

impl PReLU {
    pub fn new(weight: Tensor, is_scalar: bool) -> Self {
        Self { weight, is_scalar }
    }

    pub fn weight(&self) -> &Tensor {
        &self.weight
    }

    pub fn is_scalar(&self) -> bool {
        self.is_scalar
    }
}

impl candle::Module for PReLU {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let weight = if self.is_scalar {
            self.weight.reshape(())?
        } else if xs.rank() >= 2 {
            let num_channels = xs.dim(1)?;
            let num_weights = self.weight.elem_count();
            if num_weights != num_channels {
                candle::bail!("error in prelu: unexpected number of channels for the input, got {num_channels}, weight dim is {num_weights}")
            }
            let mut s = vec![1; xs.rank()];
            s[1] = self.weight.elem_count();
            self.weight.reshape(s)?
        } else {
            self.weight.clone()
        };
        let zeros = xs.zeros_like()?;
        xs.maximum(&zeros)? + xs.minimum(&zeros)?.broadcast_mul(&weight)?
    }
}

/// Create or initialize a new PReLU layer.
///
/// This uses some default name for weights, namely `"weight"`.
/// # Arguments
///
/// * `num_channels` - The number of channels. Use `None` to have a single trainable value and
///   `Some` for a 1D vector with the appropriate number of channels. When applying the `forward`
///   function, the input tensor shape `s` should either be one dimension with this number of
///   channels or if `s.len() >= 2` it should have `s[1]` equal to this number.
pub fn prelu(num_channels: Option<usize>, vs: crate::VarBuilder) -> Result<PReLU> {
    let init_ws = crate::init::Init::Const(0.25);
    // When using a scalar weight, the PyTorch encoding is to use a 1d vector of length 1.
    let ws = vs.get_with_hints((num_channels.unwrap_or(1),), "weight", init_ws)?;
    Ok(PReLU::new(ws, num_channels.is_none()))
}
candle/candle-nn/src/activation.rs/0
{ "file_path": "candle/candle-nn/src/activation.rs", "repo_id": "candle", "token_count": 1650 }
33
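The `Activation` enum above is what model configs deserialize into, and `prelu` builds a `PReLU` whose weights come from a `VarBuilder`. A short usage sketch, assuming the usual candle-nn crate-root re-exports (`Activation`, `Module`, `prelu`, `VarBuilder`, `VarMap`):

// Sketch: applying an Activation and a per-channel PReLU to a (1, 3) tensor.
use candle::{DType, Device, Result, Tensor};
use candle_nn::{Activation, Module, VarBuilder, VarMap};

fn main() -> Result<()> {
    let dev = Device::Cpu;
    let xs = Tensor::new(&[[-1f32, 0.0, 2.0]], &dev)?;

    // Normally picked up from a config string such as "silu" via serde;
    // constructed directly here.
    let act = Activation::Silu;
    println!("{}", act.forward(&xs)?);

    // One weight per channel (3 here), initialized to 0.25 by `prelu`.
    let varmap = VarMap::new();
    let vb = VarBuilder::from_varmap(&varmap, DType::F32, &dev);
    let layer = candle_nn::prelu(Some(3), vb.pp("prelu"))?;
    println!("{}", layer.forward(&xs)?);
    Ok(())
}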
//! A `VarBuilder` is used to retrieve variables used by a model. These variables can either come //! from a pre-trained checkpoint, e.g. using `VarBuilder::from_mmaped_safetensors`, or initialized //! for training, e.g. using `VarBuilder::from_varmap`. use crate::VarMap; use candle::{safetensors::Load, DType, Device, Error, Result, Shape, Tensor}; use safetensors::{slice::IndexOp, tensor::SafeTensors}; use std::collections::HashMap; use std::sync::Arc; /// A structure used to retrieve variables, these variables can either come from storage or be /// generated via some form of initialization. /// /// The way to retrieve variables is defined in the backend embedded in the `VarBuilder`. pub struct VarBuilderArgs<'a, B: Backend> { data: Arc<TensorData<B>>, path: Vec<String>, _phantom: std::marker::PhantomData<&'a B>, } impl<'a, B: Backend> Clone for VarBuilderArgs<'a, B> { fn clone(&self) -> Self { Self { data: self.data.clone(), path: self.path.clone(), _phantom: self._phantom, } } } /// A simple `VarBuilder`, this is less generic than `VarBuilderArgs` but should cover most common /// use cases. pub type VarBuilder<'a> = VarBuilderArgs<'a, Box<dyn SimpleBackend + 'a>>; struct TensorData<B: Backend> { backend: B, pub dtype: DType, pub device: Device, } /// A trait that defines how tensor data is retrieved. /// /// Typically this would use disk storage in some specific format, or random initialization. /// Note that there is a specialized version of this trait (`SimpleBackend`) that can be used most /// of the time. The main restriction is that it doesn't allow for specific args (besides /// initialization hints). pub trait Backend: Send + Sync { type Hints: Default; /// Retrieve a tensor with some target shape. fn get( &self, s: Shape, name: &str, h: Self::Hints, dtype: DType, dev: &Device, ) -> Result<Tensor>; fn contains_tensor(&self, name: &str) -> bool; } pub trait SimpleBackend: Send + Sync { /// Retrieve a tensor based on a target name and shape. fn get( &self, s: Shape, name: &str, h: crate::Init, dtype: DType, dev: &Device, ) -> Result<Tensor>; fn contains_tensor(&self, name: &str) -> bool; } impl<'a> Backend for Box<dyn SimpleBackend + 'a> { type Hints = crate::Init; fn get( &self, s: Shape, name: &str, h: Self::Hints, dtype: DType, dev: &Device, ) -> Result<Tensor> { self.as_ref().get(s, name, h, dtype, dev) } fn contains_tensor(&self, name: &str) -> bool { self.as_ref().contains_tensor(name) } } impl<'a, B: Backend> VarBuilderArgs<'a, B> { pub fn new_with_args(backend: B, dtype: DType, dev: &Device) -> Self { let data = TensorData { backend, dtype, device: dev.clone(), }; Self { data: Arc::new(data), path: vec![], _phantom: std::marker::PhantomData, } } /// Returns the prefix of the `VarBuilder`. pub fn prefix(&self) -> String { self.path.join(".") } /// Returns a new `VarBuilder` using the root path. pub fn root(&self) -> Self { Self { data: self.data.clone(), path: vec![], _phantom: std::marker::PhantomData, } } /// Returns a new `VarBuilder` with the prefix set to `prefix`. pub fn set_prefix(&self, prefix: impl ToString) -> Self { Self { data: self.data.clone(), path: vec![prefix.to_string()], _phantom: std::marker::PhantomData, } } /// Return a new `VarBuilder` adding `s` to the current prefix. This can be think of as `cd` /// into a directory. pub fn push_prefix<S: ToString>(&self, s: S) -> Self { let mut path = self.path.clone(); path.push(s.to_string()); Self { data: self.data.clone(), path, _phantom: std::marker::PhantomData, } } /// Short alias for `push_prefix`. 
pub fn pp<S: ToString>(&self, s: S) -> Self { self.push_prefix(s) } /// The device used by default. pub fn device(&self) -> &Device { &self.data.device } /// The dtype used by default. pub fn dtype(&self) -> DType { self.data.dtype } fn path(&self, tensor_name: &str) -> String { if self.path.is_empty() { tensor_name.to_string() } else { [&self.path.join("."), tensor_name].join(".") } } /// This returns true only if a tensor with the passed in name is available. E.g. when passed /// `a`, true is returned if `prefix.a` exists but false is returned if only `prefix.a.b` /// exists. pub fn contains_tensor(&self, tensor_name: &str) -> bool { let path = self.path(tensor_name); self.data.backend.contains_tensor(&path) } /// Retrieve the tensor associated with the given name at the current path. pub fn get_with_hints<S: Into<Shape>>( &self, s: S, name: &str, hints: B::Hints, ) -> Result<Tensor> { let path = self.path(name); self.data .backend .get(s.into(), &path, hints, self.data.dtype, &self.data.device) } /// Retrieve the tensor associated with the given name at the current path. pub fn get<S: Into<Shape>>(&self, s: S, name: &str) -> Result<Tensor> { self.get_with_hints(s, name, Default::default()) } } struct Zeros; impl SimpleBackend for Zeros { fn get(&self, s: Shape, _: &str, _: crate::Init, dtype: DType, dev: &Device) -> Result<Tensor> { Tensor::zeros(s, dtype, dev) } fn contains_tensor(&self, _name: &str) -> bool { true } } impl SimpleBackend for HashMap<String, Tensor> { fn get( &self, s: Shape, name: &str, _: crate::Init, dtype: DType, dev: &Device, ) -> Result<Tensor> { let tensor = self .get(name) .ok_or_else(|| { Error::CannotFindTensor { path: name.to_string(), } .bt() })? .clone(); if tensor.shape() != &s { Err(candle::Error::UnexpectedShape { msg: format!("shape mismatch for {name}"), expected: s, got: tensor.shape().clone(), } .bt())? } tensor.to_device(dev)?.to_dtype(dtype) } fn contains_tensor(&self, name: &str) -> bool { self.contains_key(name) } } impl SimpleBackend for VarMap { fn get( &self, s: Shape, name: &str, h: crate::Init, dtype: DType, dev: &Device, ) -> Result<Tensor> { VarMap::get(self, s, name, h, dtype, dev) } fn contains_tensor(&self, name: &str) -> bool { self.data().lock().unwrap().contains_key(name) } } struct SafeTensorWithRouting<'a> { routing: HashMap<String, usize>, safetensors: Vec<SafeTensors<'a>>, } impl<'a> SimpleBackend for SafeTensorWithRouting<'a> { fn get( &self, s: Shape, path: &str, _: crate::Init, dtype: DType, dev: &Device, ) -> Result<Tensor> { let index = self.routing.get(path).ok_or_else(|| { Error::CannotFindTensor { path: path.to_string(), } .bt() })?; let tensor = self.safetensors[*index] .tensor(path)? .load(dev)? .to_dtype(dtype)?; if tensor.shape() != &s { Err(candle::Error::UnexpectedShape { msg: format!("shape mismatch for {path}"), expected: s, got: tensor.shape().clone(), } .bt())? } Ok(tensor) } fn contains_tensor(&self, name: &str) -> bool { self.routing.contains_key(name) } } impl SimpleBackend for candle::npy::NpzTensors { fn get( &self, s: Shape, path: &str, _: crate::Init, dtype: DType, dev: &Device, ) -> Result<Tensor> { let tensor = match self.get(path)? { None => Err(Error::CannotFindTensor { path: path.to_string(), } .bt())?, Some(tensor) => tensor, }; let tensor = tensor.to_device(dev)?.to_dtype(dtype)?; if tensor.shape() != &s { Err(candle::Error::UnexpectedShape { msg: format!("shape mismatch for {path}"), expected: s, got: tensor.shape().clone(), } .bt())? 
} Ok(tensor) } fn contains_tensor(&self, name: &str) -> bool { self.get(name).map_or(false, |v| v.is_some()) } } impl SimpleBackend for candle::pickle::PthTensors { fn get( &self, s: Shape, path: &str, _: crate::Init, dtype: DType, dev: &Device, ) -> Result<Tensor> { let tensor = match self.get(path)? { None => Err(Error::CannotFindTensor { path: path.to_string(), } .bt())?, Some(tensor) => tensor, }; let tensor = tensor.to_device(dev)?.to_dtype(dtype)?; if tensor.shape() != &s { Err(candle::Error::UnexpectedShape { msg: format!("shape mismatch for {path}"), expected: s, got: tensor.shape().clone(), } .bt())? } Ok(tensor) } fn contains_tensor(&self, name: &str) -> bool { self.get(name).map_or(false, |v| v.is_some()) } } impl SimpleBackend for candle::safetensors::MmapedSafetensors { fn get( &self, s: Shape, name: &str, _: crate::Init, dtype: DType, dev: &Device, ) -> Result<Tensor> { let tensor = self.load(name, dev)?.to_dtype(dtype)?; if tensor.shape() != &s { Err(candle::Error::UnexpectedShape { msg: format!("shape mismatch for {name}"), expected: s, got: tensor.shape().clone(), } .bt())? } Ok(tensor) } fn contains_tensor(&self, name: &str) -> bool { self.get(name).is_ok() } } impl SimpleBackend for candle::safetensors::BufferedSafetensors { fn get( &self, s: Shape, name: &str, _: crate::Init, dtype: DType, dev: &Device, ) -> Result<Tensor> { let tensor = self.load(name, dev)?.to_dtype(dtype)?; if tensor.shape() != &s { Err(candle::Error::UnexpectedShape { msg: format!("shape mismatch for {name}"), expected: s, got: tensor.shape().clone(), } .bt())? } Ok(tensor) } fn contains_tensor(&self, name: &str) -> bool { self.get(name).is_ok() } } impl<'a> VarBuilder<'a> { /// Initializes a `VarBuilder` using a custom backend. /// /// It is preferred to use one of the more specific constructors. This /// constructor is provided to allow downstream users to define their own /// backends. pub fn from_backend( backend: Box<dyn SimpleBackend + 'a>, dtype: DType, device: Device, ) -> Self { let data = TensorData { backend, dtype, device, }; Self { data: Arc::new(data), path: vec![], _phantom: std::marker::PhantomData, } } /// Initializes a `VarBuilder` that uses zeros for any tensor. pub fn zeros(dtype: DType, dev: &Device) -> Self { Self::from_backend(Box::new(Zeros), dtype, dev.clone()) } /// Initializes a `VarBuilder` that retrieves tensors stored in a hashtable. An error is /// returned if no tensor is available under the requested path or on shape mismatches. pub fn from_tensors(ts: HashMap<String, Tensor>, dtype: DType, dev: &Device) -> Self { Self::from_backend(Box::new(ts), dtype, dev.clone()) } /// Initializes a `VarBuilder` using a `VarMap`. The requested tensors are created and /// initialized on new paths, the same tensor is used if the same path is requested multiple /// times. This is commonly used when initializing a model before training. /// /// Note that it is possible to load the tensor values after model creation using the `load` /// method on `varmap`, this can be used to start model training from an existing checkpoint. pub fn from_varmap(varmap: &VarMap, dtype: DType, dev: &Device) -> Self { Self::from_backend(Box::new(varmap.clone()), dtype, dev.clone()) } /// Initializes a `VarBuilder` that retrieves tensors stored in a collection of safetensors /// files. /// /// # Safety /// /// The unsafe is inherited from [`memmap2::MmapOptions`]. 
pub unsafe fn from_mmaped_safetensors<P: AsRef<std::path::Path>>( paths: &[P], dtype: DType, dev: &Device, ) -> Result<Self> { let tensors = candle::safetensors::MmapedSafetensors::multi(paths)?; Ok(Self::from_backend(Box::new(tensors), dtype, dev.clone())) } /// Initializes a `VarBuilder` from a binary builder in the safetensor format. pub fn from_buffered_safetensors(data: Vec<u8>, dtype: DType, dev: &Device) -> Result<Self> { let tensors = candle::safetensors::BufferedSafetensors::new(data)?; Ok(Self::from_backend(Box::new(tensors), dtype, dev.clone())) } /// Initializes a `VarBuilder` that retrieves tensors stored in a numpy npz file. pub fn from_npz<P: AsRef<std::path::Path>>(p: P, dtype: DType, dev: &Device) -> Result<Self> { let npz = candle::npy::NpzTensors::new(p)?; Ok(Self::from_backend(Box::new(npz), dtype, dev.clone())) } /// Initializes a `VarBuilder` that retrieves tensors stored in a pytorch pth file. pub fn from_pth<P: AsRef<std::path::Path>>(p: P, dtype: DType, dev: &Device) -> Result<Self> { let pth = candle::pickle::PthTensors::new(p, None)?; Ok(Self::from_backend(Box::new(pth), dtype, dev.clone())) } } pub struct ShardedSafeTensors(candle::safetensors::MmapedSafetensors); pub type ShardedVarBuilder<'a> = VarBuilderArgs<'a, ShardedSafeTensors>; impl ShardedSafeTensors { /// Initializes a `VarBuilder` that retrieves tensors stored in a collection of safetensors /// files and make them usable in a sharded way. /// /// # Safety /// /// The unsafe is inherited from [`memmap2::MmapOptions`]. pub unsafe fn var_builder<P: AsRef<std::path::Path>>( paths: &[P], dtype: DType, dev: &Device, ) -> Result<ShardedVarBuilder<'static>> { let tensors = candle::safetensors::MmapedSafetensors::multi(paths)?; let backend = ShardedSafeTensors(tensors); Ok(VarBuilderArgs::new_with_args(backend, dtype, dev)) } } #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub struct Shard { pub dim: usize, pub rank: usize, pub world_size: usize, } impl Default for Shard { fn default() -> Self { Self { dim: 0, rank: 0, world_size: 1, } } } /// Get part of a tensor, typically used to do Tensor Parallelism sharding. /// /// If the tensor is of size (1024, 1024). /// /// `dim` corresponds to the dimension to slice into /// `rank` is the rank of the current process /// `world_size` is the total number of ranks in the process group /// /// `get_sharded("tensor", 0, 0, 2)` means `tensor.i((..512))` /// `get_sharded("tensor", 0, 1, 2)` means `tensor.i((512..))` /// `get_sharded("tensor", 1, 0, 2)` means `tensor.i((.., ..512))` impl Backend for ShardedSafeTensors { type Hints = Shard; fn get( &self, target_shape: Shape, // The size is only checked when the world size is 1. path: &str, h: Self::Hints, dtype: DType, dev: &Device, ) -> Result<Tensor> { if h.world_size == 1 { // There is no sharding to be applied here so we use the default backend to speed // things up. return SimpleBackend::get(&self.0, target_shape, path, Default::default(), dtype, dev); } let Shard { dim, rank, world_size, } = h; let view = self.0.get(path)?; let view_dtype = view.dtype(); let mut shape = view.shape().to_vec(); let size = shape[dim]; if size % world_size != 0 { return Err(Error::ShapeMismatchSplit { shape: shape.into(), dim, n_parts: world_size, }); } let block_size = size / world_size; let start = rank * block_size; let stop = (rank + 1) * block_size; // Everything is expressed in tensor dimension // bytes offsets is handled automatically for safetensors. 
let iterator = if dim == 0 { view.slice(start..stop).map_err(|_| { Error::Msg(format!( "Cannot slice tensor {path} ({shape:?} along dim {dim} with {start}..{stop}" )) })? } else if dim == 1 { view.slice((.., start..stop)).map_err(|_| { Error::Msg(format!( "Cannot slice tensor {path} ({shape:?} along dim {dim} with {start}..{stop}" )) })? } else { candle::bail!("Get sharded on dimensions != 0 or 1") }; shape[dim] = block_size; let view_dtype: DType = view_dtype.try_into()?; let raw: Vec<u8> = iterator.into_iter().flatten().cloned().collect(); Tensor::from_raw_buffer(&raw, view_dtype, &shape, dev)?.to_dtype(dtype) } fn contains_tensor(&self, name: &str) -> bool { self.0.get(name).is_ok() } }
candle/candle-nn/src/var_builder.rs/0
{ "file_path": "candle/candle-nn/src/var_builder.rs", "repo_id": "candle", "token_count": 8632 }
34
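The `VarBuilder` above is deliberately backend-agnostic: the same `pp`/`get` calls work whether tensors come from a `VarMap` (training), safetensors, npz, or pth files. A minimal sketch of that flow, assuming the crate-root re-exports of `VarBuilder` and `VarMap`:

// Sketch: prefix navigation with `pp`, then tensor retrieval with `get`.
use candle::{DType, Device, Result, Tensor};
use candle_nn::{VarBuilder, VarMap};

fn load_bias(vb: &VarBuilder, dim: usize) -> Result<Tensor> {
    // Resolves to the tensor named "decoder.bias" in whatever backend `vb` wraps.
    vb.pp("decoder").get(dim, "bias")
}

fn main() -> Result<()> {
    // Training-style builder: tensors are created on first access and tracked by the VarMap.
    let varmap = VarMap::new();
    let vb = VarBuilder::from_varmap(&varmap, DType::F32, &Device::Cpu);
    let bias = load_bias(&vb, 16)?;
    assert_eq!(bias.dims(), &[16]);

    // For inference one would instead build the VarBuilder from a checkpoint, e.g.
    // unsafe { VarBuilder::from_mmaped_safetensors(&["model.safetensors"], DType::F32, &Device::Cpu)? }
    // and the same `load_bias` call would read the stored tensor.
    Ok(())
}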
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use candle::{Device, Result, Tensor}; use candle_onnx::onnx::{AttributeProto, GraphProto, ModelProto, NodeProto, ValueInfoProto}; use std::collections::HashMap; const INPUT_X: &str = "x"; const INPUT_Y: &str = "y"; const OUTPUT_Z: &str = "z"; fn create_model_proto_with_graph(graph: Option<GraphProto>) -> ModelProto { ModelProto { metadata_props: vec![], training_info: vec![], functions: vec![], ir_version: 0, opset_import: vec![], producer_name: "".to_string(), producer_version: "".to_string(), domain: "".to_string(), model_version: 0, doc_string: "".to_string(), graph, } } #[test] fn test_evaluation_fails_without_defined_graph() -> Result<()> { let manual_graph = create_model_proto_with_graph(None); let inputs: HashMap<String, Tensor> = HashMap::new(); match candle_onnx::simple_eval(&manual_graph, inputs) { Err(err) => assert_eq!(err.to_string(), "no graph defined in proto"), Ok(_) => panic!("Expected an error due to undefined graph"), } Ok(()) } // "Add" #[test] fn test_add_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "Add".to_string(), domain: "".to_string(), attribute: vec![], input: vec![INPUT_X.to_string(), INPUT_Y.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert(INPUT_X.to_string(), Tensor::new(&[2.], &Device::Cpu)?); inputs.insert(INPUT_Y.to_string(), Tensor::new(&[2.], &Device::Cpu)?); let eval = candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let first = z .to_vec1::<f64>()? .to_vec() .get(0) .expect("Failed to get first element") .clone(); assert_eq!(first, 4.0f64); Ok(()) } // "Sub" #[test] fn test_sub_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "Sub".to_string(), domain: "".to_string(), attribute: vec![], input: vec![INPUT_X.to_string(), INPUT_Y.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert(INPUT_X.to_string(), Tensor::new(&[2.], &Device::Cpu)?); inputs.insert(INPUT_Y.to_string(), Tensor::new(&[2.], &Device::Cpu)?); let eval = candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let first = z .to_vec1::<f64>()? 
.to_vec() .get(0) .expect("Failed to get first element") .clone(); assert_eq!(first, 0.0f64); Ok(()) } // "Mul" #[test] fn test_mul_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "Mul".to_string(), domain: "".to_string(), attribute: vec![], input: vec![INPUT_X.to_string(), INPUT_Y.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert(INPUT_X.to_string(), Tensor::new(&[2.], &Device::Cpu)?); inputs.insert(INPUT_Y.to_string(), Tensor::new(&[2.], &Device::Cpu)?); let eval = candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let first = z .to_vec1::<f64>()? .to_vec() .get(0) .expect("Failed to get first element") .clone(); assert_eq!(first, 4.0f64); Ok(()) } // "Div" #[test] fn test_div_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "Div".to_string(), domain: "".to_string(), attribute: vec![], input: vec![INPUT_X.to_string(), INPUT_Y.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert(INPUT_X.to_string(), Tensor::new(&[2.], &Device::Cpu)?); inputs.insert(INPUT_Y.to_string(), Tensor::new(&[2.], &Device::Cpu)?); let eval = candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let first = z .to_vec1::<f64>()? 
.to_vec() .get(0) .expect("Failed to get first element") .clone(); assert_eq!(first, 1.0f64); Ok(()) } // "Equal" #[test] fn test_equal_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "Equal".to_string(), domain: "".to_string(), attribute: vec![], input: vec![INPUT_X.to_string(), INPUT_Y.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert(INPUT_X.to_string(), Tensor::new(&[2.], &Device::Cpu)?); inputs.insert(INPUT_Y.to_string(), Tensor::new(&[2.], &Device::Cpu)?); let eval = candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let first = z.to_dtype(candle::DType::U8)?.to_vec1::<u8>()?.to_vec()[0]; assert_eq!(first, 1); Ok(()) } // "Not" #[test] fn test_not_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "Not".to_string(), domain: "".to_string(), attribute: vec![], input: vec![INPUT_X.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert(INPUT_X.to_string(), Tensor::new(&[0.], &Device::Cpu)?); let eval = candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let first = z.to_dtype(candle::DType::U8)?.to_vec1::<u8>()?.to_vec()[0]; assert_eq!(first, 1); Ok(()) } // "MatMul" #[test] fn test_matmul_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "MatMul".to_string(), domain: "".to_string(), attribute: vec![], input: vec![INPUT_X.to_string(), INPUT_Y.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert( INPUT_X.to_string(), Tensor::from_vec( // vec![1.0f32, 2.0f32, 3.0f32, 4.0f32], &[2, 2], &Device::Cpu, )?, ); inputs.insert( INPUT_Y.to_string(), Tensor::from_vec( // vec![5.0f32, 6.0f32, 7.0f32, 8.0f32], &[2, 2], &Device::Cpu, )?, ); let eval = candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let results = z.to_vec2::<f32>()?; assert_eq!(results, vec![vec![19.0, 22.0], vec![43.0, 50.0]]); Ok(()) } // "Reshape" #[test] fn test_reshape_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: 
vec![NodeProto { op_type: "Reshape".to_string(), domain: "".to_string(), attribute: vec![], input: vec![INPUT_X.to_string(), INPUT_Y.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![ ValueInfoProto { name: INPUT_X.to_string(), doc_string: "".to_string(), r#type: None, }, ValueInfoProto { name: INPUT_Y.to_string(), doc_string: "".to_string(), r#type: None, }, ], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let x = Tensor::from_vec( // vec![1.0f32, 2.0f32, 3.0f32, 4.0f32], &[2, 2], &Device::Cpu, )?; let y = Tensor::from_vec( // vec![4i64], &[1], &Device::Cpu, )?; let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert(INPUT_X.to_string(), x); inputs.insert(INPUT_Y.to_string(), y); let eval = candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let results = z.to_vec1::<f32>()?; assert_eq!(results, vec![1.0, 2.0, 3.0, 4.0]); Ok(()) } // "LogSoftmax" #[test] fn test_logsoftmax_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "LogSoftmax".to_string(), domain: "".to_string(), attribute: vec![], input: vec![INPUT_X.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![ ValueInfoProto { name: INPUT_X.to_string(), doc_string: "".to_string(), r#type: None, }, ValueInfoProto { name: INPUT_Y.to_string(), doc_string: "".to_string(), r#type: None, }, ], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let x = Tensor::from_vec( // vec![1.0f32, 2.0f32, 3.0f32, 4.0f32], &[2, 2], &Device::Cpu, )?; let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert(INPUT_X.to_string(), x); let eval = candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let results = z.to_vec2::<f32>()?; assert_eq!( results, vec![vec![0.26894143, 0.7310586], vec![0.26894143, 0.7310586]] ); Ok(()) } // "Softmax" #[test] fn test_softmax_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "Softmax".to_string(), domain: "".to_string(), attribute: vec![], input: vec![INPUT_X.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![ ValueInfoProto { name: INPUT_X.to_string(), doc_string: "".to_string(), r#type: None, }, ValueInfoProto { name: INPUT_Y.to_string(), doc_string: "".to_string(), r#type: None, }, ], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let x = Tensor::from_vec( // vec![1.0f32, 2.0f32, 3.0f32, 4.0f32], &[2, 2], &Device::Cpu, )?; let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert(INPUT_X.to_string(), x); let eval = 
candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let results = z.to_vec2::<f32>()?; assert_eq!( results, vec![vec![0.26894143, 0.7310586], vec![0.26894143, 0.7310586]] ); Ok(()) } // "Transpose" #[test] fn test_transpose_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "Transpose".to_string(), domain: "".to_string(), attribute: vec![], input: vec![INPUT_X.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![ ValueInfoProto { name: INPUT_X.to_string(), doc_string: "".to_string(), r#type: None, }, ValueInfoProto { name: INPUT_Y.to_string(), doc_string: "".to_string(), r#type: None, }, ], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let x = Tensor::from_vec( // vec![1.0f32, 2.0f32, 3.0f32, 4.0f32], &[2, 2], &Device::Cpu, )?; let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert(INPUT_X.to_string(), x); let eval = candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let results = z.to_vec2::<f32>()?; assert_eq!(results, vec![vec![1.0, 3.0], vec![2.0, 4.0]]); Ok(()) } // "Dropout" #[test] fn test_dropout_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "Dropout".to_string(), domain: "".to_string(), attribute: vec![], input: vec![INPUT_X.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![ ValueInfoProto { name: INPUT_X.to_string(), doc_string: "".to_string(), r#type: None, }, ValueInfoProto { name: INPUT_Y.to_string(), doc_string: "".to_string(), r#type: None, }, ], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let x = Tensor::from_vec( // vec![1.0f32, 2.0f32, 3.0f32, 4.0f32], &[2, 2], &Device::Cpu, )?; let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert(INPUT_X.to_string(), x); let eval = candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let results = z.to_vec2::<f32>()?; assert_eq!(results, vec![vec![1.0, 2.0], vec![3.0, 4.0]]); Ok(()) } // "Flatten" #[test] fn test_flatten_operation() -> Result<()> { let mut att_axis = AttributeProto { name: "axis".to_string(), ref_attr_name: "axis".to_string(), i: 0, doc_string: "axis".to_string(), r#type: 2, f: 0.0, s: vec![], t: None, g: None, sparse_tensor: None, tp: None, floats: vec![], ints: vec![], strings: vec![], tensors: vec![], graphs: vec![], sparse_tensors: vec![], type_protos: vec![], }; let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "Flatten".to_string(), domain: "".to_string(), attribute: vec![att_axis.clone()], input: vec![INPUT_X.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![ 
ValueInfoProto { name: INPUT_X.to_string(), doc_string: "".to_string(), r#type: None, }, ValueInfoProto { name: INPUT_Y.to_string(), doc_string: "".to_string(), r#type: None, }, ], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let x = Tensor::from_vec( vec![ 1.0f32, 2.0f32, 3.0f32, 4.0f32, 5.0f32, 6.0f32, 7.0f32, 8.0f32, ], &[2, 2, 2], &Device::Cpu, )?; let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert(INPUT_X.to_string(), x); let eval = candle_onnx::simple_eval(&manual_graph, inputs.clone())?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let results = z.to_vec2::<f32>()?; assert_eq!(results, vec![vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]]); att_axis.i = 1; let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "Flatten".to_string(), domain: "".to_string(), attribute: vec![att_axis.clone()], input: vec![INPUT_X.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![ ValueInfoProto { name: INPUT_X.to_string(), doc_string: "".to_string(), r#type: None, }, ValueInfoProto { name: INPUT_Y.to_string(), doc_string: "".to_string(), r#type: None, }, ], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let eval = candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let results = z.to_vec2::<f32>()?; assert_eq!( results, vec![vec![1.0, 2.0, 3.0, 4.0], vec![5.0, 6.0, 7.0, 8.0]] ); Ok(()) } // Below are ops that are implemented but not tested yet // "MaxPool" // #[test] // "AveragePool" // #[test] // "BatchNormalization" // #[test] // "Squeeze" // #[test] // "ConstantOfShape" // #[test] // "Unsqueeze" // #[test] // "Clip" // #[test] // "Gather" // #[test] // "Shape" #[test] fn test_shape_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "Shape".to_string(), domain: "".to_string(), attribute: vec![], input: vec![INPUT_X.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![ValueInfoProto { name: INPUT_X.to_string(), doc_string: "".to_string(), r#type: None, }], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let x = Tensor::from_vec(vec![1.0f32, 2.0f32, 3.0f32, 4.0f32], &[2, 2], &Device::Cpu)?; let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert(INPUT_X.to_string(), x); let eval = candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let results = z.to_vec1::<i64>()?; assert_eq!(results, vec![2, 2]); Ok(()) } // "Conv" // #[test] // "Concat" // #[test] // "Abs" #[test] fn test_abs_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "Abs".to_string(), domain: 
"".to_string(), attribute: vec![], input: vec![INPUT_X.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![ ValueInfoProto { name: INPUT_X.to_string(), doc_string: "".to_string(), r#type: None, }, ValueInfoProto { name: INPUT_Y.to_string(), doc_string: "".to_string(), r#type: None, }, ], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let x = Tensor::from_vec( vec![-1.0f32, 2.0f32, -3.0f32, 4.0f32], &[2, 2], &Device::Cpu, )?; let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert(INPUT_X.to_string(), x); let eval = candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let results = z.to_vec2::<f32>()?; assert_eq!(results, vec![vec![1.0, 2.0], vec![3.0, 4.0]]); Ok(()) } // "Cos" #[test] fn test_cos_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "Cos".to_string(), domain: "".to_string(), attribute: vec![], input: vec![INPUT_X.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![ ValueInfoProto { name: INPUT_X.to_string(), doc_string: "".to_string(), r#type: None, }, ValueInfoProto { name: INPUT_Y.to_string(), doc_string: "".to_string(), r#type: None, }, ], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let x = Tensor::from_vec(vec![0.0f32, 1.0f32, 2.0f32, 3.0f32], &[2, 2], &Device::Cpu)?; let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert(INPUT_X.to_string(), x); let eval = candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let results = z.to_vec2::<f32>()?; assert_eq!( results, vec![vec![1.0, 0.54030234], vec![-0.41614684, -0.9899925]] ); Ok(()) } // "Sin" #[test] fn test_sin_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "Sin".to_string(), domain: "".to_string(), attribute: vec![], input: vec![INPUT_X.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![ ValueInfoProto { name: INPUT_X.to_string(), doc_string: "".to_string(), r#type: None, }, ValueInfoProto { name: INPUT_Y.to_string(), doc_string: "".to_string(), r#type: None, }, ], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let x = Tensor::from_vec(vec![0.0f32, 1.0f32, 2.0f32, 3.0f32], &[2, 2], &Device::Cpu)?; let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert(INPUT_X.to_string(), x); let eval = candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let results = z.to_vec2::<f32>()?; assert_eq!(results, vec![vec![0.0, 0.841471], vec![0.9092974, 0.14112]]); Ok(()) 
} // "Neg" #[test] fn test_neg_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "Neg".to_string(), domain: "".to_string(), attribute: vec![], input: vec![INPUT_X.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![ ValueInfoProto { name: INPUT_X.to_string(), doc_string: "".to_string(), r#type: None, }, ValueInfoProto { name: INPUT_Y.to_string(), doc_string: "".to_string(), r#type: None, }, ], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let x = Tensor::from_vec(vec![1.0f32, 2.0f32, 3.0f32, 4.0f32], &[2, 2], &Device::Cpu)?; let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert(INPUT_X.to_string(), x); let eval = candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let results = z.to_vec2::<f32>()?; assert_eq!(results, vec![vec![-1.0, -2.0], vec![-3.0, -4.0]]); Ok(()) } // "Erf" // #[test] // "Tanh" #[test] fn test_tanh_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "Tanh".to_string(), domain: "".to_string(), attribute: vec![], input: vec![INPUT_X.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![ ValueInfoProto { name: INPUT_X.to_string(), doc_string: "".to_string(), r#type: None, }, ValueInfoProto { name: INPUT_Y.to_string(), doc_string: "".to_string(), r#type: None, }, ], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let x = Tensor::from_vec(vec![0.0f32, 1.0f32, 2.0f32, 3.0f32], &[2, 2], &Device::Cpu)?; let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert(INPUT_X.to_string(), x); let eval = candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let results = z.to_vec2::<f32>()?; assert_eq!( results, vec![vec![0.0, 0.7615942], vec![0.9640276, 0.9950548]] ); Ok(()) } // "Sigmoid" #[test] fn test_sigmoid_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "Sigmoid".to_string(), domain: "".to_string(), attribute: vec![], input: vec![INPUT_X.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![ ValueInfoProto { name: INPUT_X.to_string(), doc_string: "".to_string(), r#type: None, }, ValueInfoProto { name: INPUT_Y.to_string(), doc_string: "".to_string(), r#type: None, }, ], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let x = Tensor::from_vec(vec![0.0f32, 1.0f32, 2.0f32, 3.0f32], &[2, 2], &Device::Cpu)?; let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert(INPUT_X.to_string(), x); let eval = 
candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let results = z.to_vec2::<f32>()?; assert_eq!( results, vec![vec![0.5, 0.7310586], vec![0.880797, 0.95257413]] ); Ok(()) } // "Gelu" #[test] fn test_gelu_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "Gelu".to_string(), domain: "".to_string(), attribute: vec![], input: vec![INPUT_X.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![ ValueInfoProto { name: INPUT_X.to_string(), doc_string: "".to_string(), r#type: None, }, ValueInfoProto { name: INPUT_Y.to_string(), doc_string: "".to_string(), r#type: None, }, ], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let x = Tensor::from_vec(vec![0.0f32, 1.0f32, 2.0f32, 3.0f32], &[2, 2], &Device::Cpu)?; let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert(INPUT_X.to_string(), x); let eval = candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let results = z.to_vec2::<f32>()?; assert_eq!( results, vec![vec![0.0, 0.8413448], vec![1.9544997, 2.9959502]] ); Ok(()) } // "Relu" #[test] fn test_relu_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "Relu".to_string(), domain: "".to_string(), attribute: vec![], input: vec![INPUT_X.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![ValueInfoProto { name: INPUT_X.to_string(), doc_string: "".to_string(), r#type: None, }], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let x = Tensor::from_vec( vec![-1.0f32, 1.0f32, -2.0f32, 3.0f32], &[2, 2], &Device::Cpu, )?; let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert(INPUT_X.to_string(), x); let eval = candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let results = z.to_vec2::<f32>()?; assert_eq!(results, vec![vec![0.0, 1.0], vec![0.0, 3.0]]); Ok(()) } // "Constant" // #[test] // "Cast" // #[test]
candle/candle-onnx/tests/ops.rs/0
{ "file_path": "candle/candle-onnx/tests/ops.rs", "repo_id": "candle", "token_count": 20694 }
35
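The tests above spell out every proto field explicitly. Since the `candle_onnx::onnx` types are prost-generated messages, they should also implement `Default`, which allows a much terser way to build and evaluate a one-node graph; the sketch below assumes that `Default` impl and reuses the `Relu` op exercised in the tests:

// Sketch: evaluating a single-node ONNX graph with candle_onnx::simple_eval.
use candle::{Device, Result, Tensor};
use candle_onnx::onnx::{GraphProto, ModelProto, NodeProto, ValueInfoProto};
use std::collections::HashMap;

fn relu_model() -> ModelProto {
    ModelProto {
        graph: Some(GraphProto {
            node: vec![NodeProto {
                op_type: "Relu".to_string(),
                input: vec!["x".to_string()],
                output: vec!["z".to_string()],
                ..NodeProto::default()
            }],
            output: vec![ValueInfoProto {
                name: "z".to_string(),
                ..ValueInfoProto::default()
            }],
            ..GraphProto::default()
        }),
        ..ModelProto::default()
    }
}

fn main() -> Result<()> {
    let mut inputs: HashMap<String, Tensor> = HashMap::new();
    inputs.insert("x".to_string(), Tensor::new(&[-1f32, 2.0], &Device::Cpu)?);
    let outputs = candle_onnx::simple_eval(&relu_model(), inputs)?;
    println!("{}", outputs.get("z").expect("missing output z"));
    Ok(())
}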
import math from typing import Any import candle from candle import Tensor from .module import Module # See https://github.com/pytorch/pytorch/blob/main/torch/nn/modules/linear.py class Identity(Module): r"""A placeholder identity operator that is argument-insensitive. Args: args: any argument (unused) kwargs: any keyword argument (unused) Shape: - Input: :math:`(*)`, where :math:`*` means any number of dimensions. - Output: :math:`(*)`, same shape as the input. Examples:: >>> m = nn.Identity(54, unused_argument1=0.1, unused_argument2=False) >>> input = candle.randn(128, 20) >>> output = m(input) >>> print(output.shape) """ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__() def forward(self, input: Tensor) -> Tensor: return input class Linear(Module): r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b` Args: in_features: size of each input sample out_features: size of each output sample bias: If set to ``False``, the layer will not learn an additive bias. Default: ``True`` Shape: - Input: :math:`(*, H_{in})` where :math:`*` means any number of dimensions including none and :math:`H_{in} = \text{in\_features}`. - Output: :math:`(*, H_{out})` where all but the last dimension are the same shape as the input and :math:`H_{out} = \text{out\_features}`. Attributes: weight: the learnable weights of the module of shape :math:`(\text{out\_features}, \text{in\_features})`. The values are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where :math:`k = \frac{1}{\text{in\_features}}` bias: the learnable bias of the module of shape :math:`(\text{out\_features})`. If :attr:`bias` is ``True``, the values are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where :math:`k = \frac{1}{\text{in\_features}}` """ __constants__ = ["in_features", "out_features"] in_features: int out_features: int weight: Tensor def __init__( self, in_features: int, out_features: int, bias: bool = True, device=None, dtype=None, ) -> None: factory_kwargs = {"device": device, "dtype": dtype} super().__init__() # Allow 'weight' to be quantized self._quantizable_buffers.add("weight") self.in_features = in_features self.out_features = out_features # TODO: Do actual initialization here: e.g. kaiming_uniform or xavier_uniform self.weight = candle.ones((out_features, in_features), **factory_kwargs) if bias: self.bias = candle.zeros((out_features,), **factory_kwargs) else: self.bias = None def forward(self, x: Tensor) -> Tensor: dims = x.shape last_dim = dims[-1] if isinstance(self.weight, candle.QTensor): if len(dims) < 3: matmul_result = self.weight.matmul_t(x).broadcast_add(self.bias) elif len(dims) == 3: b, n, m = dims output_shape = (b, n, self.out_features) re = x.reshape((b * n, m)) matmul_result = self.weight.matmul_t(re).reshape((output_shape)) else: raise NotImplementedError("'QTensor.matmul_t' is not implemented for more than 3 dimensions") if self.bias: return matmul_result.broadcast_add(self.bias) else: if self.weight.shape[-1] == last_dim and len(dims) < 3: w = self.weight.t() else: batch_size = dims[0] w = self.weight.broadcast_left((batch_size,)).t() x = x.matmul(w) if self.bias is not None: x = x.broadcast_add(self.bias) return x def extra_repr(self) -> str: return f"in_features={self.in_features}, out_features={self.out_features}, bias={self.bias is not None}"
candle/candle-pyo3/py_src/candle/nn/linear.py/0
{ "file_path": "candle/candle-pyo3/py_src/candle/nn/linear.py", "repo_id": "candle", "token_count": 1947 }
36
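The Python `Linear` above mirrors candle-nn's Rust `Linear` (same `(out_features, in_features)` weight layout and `y = x W^T + b` convention). For comparison, a small sketch of the Rust side, assuming the usual candle-nn re-exports:

// Sketch: the Rust counterpart to the Python Linear layer above.
use candle::{DType, Device, Result, Tensor};
use candle_nn::{Linear, Module, VarBuilder, VarMap};

fn main() -> Result<()> {
    let dev = Device::Cpu;
    let varmap = VarMap::new();
    let vb = VarBuilder::from_varmap(&varmap, DType::F32, &dev);
    // weight: (30, 20), bias: (30,) under the "fc" prefix.
    let layer: Linear = candle_nn::linear(20, 30, vb.pp("fc"))?;
    let xs = Tensor::randn(0f32, 1.0, (128, 20), &dev)?;
    let ys = layer.forward(&xs)?; // y = x @ W^T + b
    assert_eq!(ys.dims(), &[128, 30]);
    Ok(())
}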
# See: https://raw.githubusercontent.com/huggingface/tokenizers/main/bindings/python/stub.py import argparse import inspect import os from typing import Optional import black from pathlib import Path import re INDENT = " " * 4 GENERATED_COMMENT = "# Generated content DO NOT EDIT\n" TYPING = """from typing import Any, Callable, Dict, List, Optional, Tuple, Union, Sequence from os import PathLike """ CANDLE_SPECIFIC_TYPING = "from candle.typing import _ArrayLike, Device, Scalar, Index, Shape\n" CANDLE_TENSOR_IMPORTS = "from candle import Tensor,DType,QTensor\n" RETURN_TYPE_MARKER = "&RETURNS&: " ADDITIONAL_TYPEHINTS = {} FORWARD_REF_PATTERN = re.compile(r"ForwardRef\('([^']+)'\)") def do_indent(text: Optional[str], indent: str): if text is None: return "" return text.replace("\n", f"\n{indent}") def function(obj, indent: str, text_signature: str = None): if text_signature is None: text_signature = obj.__text_signature__ text_signature = text_signature.replace("$self", "self").lstrip().rstrip() doc_string = obj.__doc__ if doc_string is None: doc_string = "" # Check if we have a return type annotation in the docstring return_type = None doc_lines = doc_string.split("\n") if doc_lines[-1].lstrip().startswith(RETURN_TYPE_MARKER): # Extract the return type and remove it from the docstring return_type = doc_lines[-1].lstrip()[len(RETURN_TYPE_MARKER) :].strip() doc_string = "\n".join(doc_lines[:-1]) string = "" if return_type: string += f"{indent}def {obj.__name__}{text_signature} -> {return_type}:\n" else: string += f"{indent}def {obj.__name__}{text_signature}:\n" indent += INDENT string += f'{indent}"""\n' string += f"{indent}{do_indent(doc_string, indent)}\n" string += f'{indent}"""\n' string += f"{indent}pass\n" string += "\n" string += "\n" return string def member_sort(member): if inspect.isclass(member): value = 10 + len(inspect.getmro(member)) else: value = 1 return value def fn_predicate(obj): value = inspect.ismethoddescriptor(obj) or inspect.isbuiltin(obj) if value: return obj.__text_signature__ and not obj.__name__.startswith("_") if inspect.isgetsetdescriptor(obj): return not obj.__name__.startswith("_") return False def get_module_members(module): members = [ member for name, member in inspect.getmembers(module) if not name.startswith("_") and not inspect.ismodule(member) ] members.sort(key=member_sort) return members def pyi_file(obj, indent=""): string = "" if inspect.ismodule(obj): string += GENERATED_COMMENT string += TYPING string += CANDLE_SPECIFIC_TYPING if obj.__name__ != "candle.candle": string += CANDLE_TENSOR_IMPORTS members = get_module_members(obj) for member in members: string += pyi_file(member, indent) elif inspect.isclass(obj): indent += INDENT mro = inspect.getmro(obj) if len(mro) > 2: inherit = f"({mro[1].__name__})" else: inherit = "" string += f"class {obj.__name__}{inherit}:\n" body = "" if obj.__doc__: body += f'{indent}"""\n{indent}{do_indent(obj.__doc__, indent)}\n{indent}"""\n' fns = inspect.getmembers(obj, fn_predicate) # Init if obj.__text_signature__: body += f"{indent}def __init__{obj.__text_signature__}:\n" body += f"{indent+INDENT}pass\n" body += "\n" if obj.__name__ in ADDITIONAL_TYPEHINTS: additional_members = inspect.getmembers(ADDITIONAL_TYPEHINTS[obj.__name__]) additional_functions = [] for name, member in additional_members: if inspect.isfunction(member): additional_functions.append((name, member)) def process_additional_function(fn): signature = inspect.signature(fn) cleaned_signature = re.sub(FORWARD_REF_PATTERN, r"\1", str(signature)) string = 
f"{indent}def {fn.__name__}{cleaned_signature}:\n" string += ( f'{indent+INDENT}"""{indent+INDENT}{do_indent(fn.__doc__, indent+INDENT)}{indent+INDENT}"""\n' ) string += f"{indent+INDENT}pass\n" string += "\n" return string for name, fn in additional_functions: body += process_additional_function(fn) for name, fn in fns: body += pyi_file(fn, indent=indent) if not body: body += f"{indent}pass\n" string += body string += "\n\n" elif inspect.isbuiltin(obj): string += f"{indent}@staticmethod\n" string += function(obj, indent) elif inspect.ismethoddescriptor(obj): string += function(obj, indent) elif inspect.isgetsetdescriptor(obj): # TODO it would be interesting to add the setter maybe ? string += f"{indent}@property\n" string += function(obj, indent, text_signature="(self)") elif obj.__class__.__name__ == "DType": string += f"class {str(obj).lower()}(DType):\n" string += f"{indent+INDENT}pass\n" else: raise Exception(f"Object {obj} is not supported") return string def py_file(module, origin): members = get_module_members(module) string = GENERATED_COMMENT string += f"from .. import {origin}\n" string += "\n" for member in members: if hasattr(member, "__name__"): name = member.__name__ else: name = str(member) string += f"{name} = {origin}.{name}\n" return string def do_black(content, is_pyi): mode = black.Mode( target_versions={black.TargetVersion.PY35}, line_length=119, is_pyi=is_pyi, string_normalization=True, ) try: return black.format_file_contents(content, fast=True, mode=mode) except black.NothingChanged: return content def write(module, directory, origin, check=False): submodules = [(name, member) for name, member in inspect.getmembers(module) if inspect.ismodule(member)] filename = os.path.join(directory, "__init__.pyi") pyi_content = pyi_file(module) pyi_content = do_black(pyi_content, is_pyi=True) os.makedirs(directory, exist_ok=True) if check: with open(filename, "r") as f: data = f.read() assert data == pyi_content, f"The content of {filename} seems outdated, please run `python stub.py`" else: with open(filename, "w") as f: f.write(pyi_content) filename = os.path.join(directory, "__init__.py") py_content = py_file(module, origin) py_content = do_black(py_content, is_pyi=False) os.makedirs(directory, exist_ok=True) is_auto = False if not os.path.exists(filename): is_auto = True else: with open(filename, "r") as f: line = f.readline() if line == GENERATED_COMMENT: is_auto = True if is_auto: if check: with open(filename, "r") as f: data = f.read() assert data == py_content, f"The content of {filename} seems outdated, please run `python stub.py`" else: with open(filename, "w") as f: f.write(py_content) for name, submodule in submodules: write(submodule, os.path.join(directory, name), f"{name}", check=check) def extract_additional_types(module): additional_types = {} for name, member in inspect.getmembers(module): if inspect.isclass(member): if hasattr(member, "__name__"): name = member.__name__ else: name = str(member) if name not in additional_types: additional_types[name] = member return additional_types if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--check", action="store_true") args = parser.parse_args() # Enable execution from the candle and candle-pyo3 directories cwd = Path.cwd() directory = "py_src/candle/" if cwd.name != "candle-pyo3": directory = f"candle-pyo3/{directory}" import candle import _additional_typing ADDITIONAL_TYPEHINTS = extract_additional_types(_additional_typing) write(candle.candle, directory, "candle", check=args.check)
candle/candle-pyo3/stub.py/0
{ "file_path": "candle/candle-pyo3/stub.py", "repo_id": "candle", "token_count": 3852 }
37
#![allow(unused)] /// A fast implementation of mamba for inference only. /// This is based on: https://github.com/LaurentMazare/mamba.rs use crate::models::with_tracing::{linear, linear_no_bias, Linear}; use candle::{DType, Device, IndexOp, Module, Result, Tensor, D}; use candle_nn::{RmsNorm, VarBuilder}; const D_CONV: usize = 4; const D_STATE: usize = 16; #[derive(Debug, Clone, serde::Deserialize)] pub struct Config { d_model: usize, n_layer: usize, vocab_size: usize, pad_vocab_size_multiple: usize, } impl Config { fn vocab_size(&self) -> usize { let pad = self.pad_vocab_size_multiple; (self.vocab_size + pad - 1) / pad * pad } fn dt_rank(&self) -> usize { (self.d_model + 15) / 16 } fn d_inner(&self) -> usize { self.d_model * 2 } } pub struct State { pub hs: Vec<Tensor>, pub prev_xs: Vec<[Tensor; D_CONV]>, pub pos: usize, } impl State { pub fn new(batch_size: usize, cfg: &Config, device: &Device) -> Result<Self> { let mut hs = Vec::with_capacity(cfg.n_layer); let mut prev_xs = Vec::with_capacity(cfg.n_layer); for _i in 0..cfg.n_layer { let h = Tensor::zeros((batch_size, cfg.d_inner(), D_STATE), DType::F32, device)?; let x = Tensor::zeros((batch_size, cfg.d_inner()), DType::F32, device)?; hs.push(h); prev_xs.push([x.clone(), x.clone(), x.clone(), x.clone()]); } Ok(Self { hs, prev_xs, pos: 0, }) } } #[derive(Clone, Debug)] pub struct MambaBlock { in_proj: Linear, conv1d_bias: Tensor, conv1d_weights: [Tensor; D_CONV], x_proj: Linear, dt_proj: Linear, a_log: Tensor, d: Tensor, out_proj: Linear, dt_rank: usize, layer_index: usize, d_inner: usize, } impl MambaBlock { pub fn new(layer_index: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> { let d_inner = cfg.d_inner(); let dt_rank = cfg.dt_rank(); let in_proj = linear_no_bias(cfg.d_model, d_inner * 2, vb.pp("in_proj"))?; let x_proj = linear_no_bias(d_inner, dt_rank + D_STATE * 2, vb.pp("x_proj"))?; let dt_proj = linear(dt_rank, d_inner, vb.pp("dt_proj"))?; let a_log = vb.get((d_inner, D_STATE), "A_log")?; let d = vb.get(d_inner, "D")?; let out_proj = linear_no_bias(d_inner, cfg.d_model, vb.pp("out_proj"))?; let conv1d_bias = vb.get(d_inner, "conv1d.bias")?; let conv1d_weight = vb.get((d_inner, 1, D_CONV), "conv1d.weight")?; let conv1d_weights = [ conv1d_weight.i((.., 0, 0))?, conv1d_weight.i((.., 0, 1))?, conv1d_weight.i((.., 0, 2))?, conv1d_weight.i((.., 0, 3))?, ]; Ok(Self { in_proj, conv1d_bias, conv1d_weights, x_proj, dt_proj, a_log, d, out_proj, dt_rank, layer_index, d_inner, }) } pub fn forward(&self, xs: &Tensor, state: &mut State) -> Result<Tensor> { let (b_sz, _dim) = xs.dims2()?; let li = self.layer_index; let mut xs = xs.apply(&self.in_proj)?.chunk(2, D::Minus1)?; let proj_for_silu = xs.remove(1); state.prev_xs[li][state.pos % D_CONV] = xs.remove(0); let mut proj_for_conv = self.conv1d_bias.broadcast_as((b_sz, self.d_inner))?; for d_c in 0..D_CONV { proj_for_conv = (proj_for_conv + self.conv1d_weights[d_c] .broadcast_mul(&state.prev_xs[li][(d_c + 1 + state.pos) % D_CONV])?)?; } let proj_for_conv = candle_nn::ops::silu(&proj_for_conv)?; // SSM + Selection, we're doing inference here so only need the last step of // the sequence. // Algorithm 3.2 on page 6, https://arxiv.org/pdf/2312.00752.pdf let x_proj = self.x_proj.forward(&proj_for_conv)?; let delta = x_proj.narrow(D::Minus1, 0, self.dt_rank)?.contiguous()?; let b = x_proj.narrow(D::Minus1, self.dt_rank, D_STATE)?; let c = x_proj.narrow(D::Minus1, self.dt_rank + D_STATE, D_STATE)?; let delta = delta.apply(&self.dt_proj)?; // softplus let delta = (delta.exp()? 
+ 1.)?.log()?; let a = self.a_log.to_dtype(candle::DType::F32)?.exp()?.neg()?; let d = self.d.to_dtype(candle::DType::F32)?; // Selective scan part // Eqn (2a), page 3, h_t = Ab h_{t-1} + Bb x_t let delta = delta .unsqueeze(D::Minus1)? .broadcast_as((b_sz, self.d_inner, D_STATE))?; let a = a.broadcast_as((b_sz, self.d_inner, D_STATE))?; let b = b.broadcast_as((b_sz, self.d_inner, D_STATE))?; let proj_for_conv_b = proj_for_conv .unsqueeze(D::Minus1)? .broadcast_as((b_sz, self.d_inner, D_STATE))?; state.hs[li] = ((&state.hs[li] * (&delta * &a)?.exp()?)? + &delta * &b * &proj_for_conv_b)?; let ss = (state.hs[li] .matmul(&c.unsqueeze(D::Minus1)?)? .squeeze(D::Minus1)? + proj_for_conv.broadcast_mul(&d)?)?; let ys = (ss * candle_nn::ops::silu(&proj_for_silu))?; ys.apply(&self.out_proj) } } #[derive(Clone, Debug)] pub struct ResidualBlock { mixer: MambaBlock, norm: RmsNorm, } impl ResidualBlock { pub fn new(layer_index: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> { let norm = candle_nn::rms_norm(cfg.d_model, 1e-5, vb.pp("norm"))?; let mixer = MambaBlock::new(layer_index, cfg, vb.pp("mixer"))?; Ok(Self { mixer, norm }) } fn forward(&self, xs: &Tensor, state: &mut State) -> Result<Tensor> { self.mixer.forward(&xs.apply(&self.norm)?, state)? + xs } } // https://github.com/johnma2006/mamba-minimal/blob/61f01953ca153f8c4a850d7111beecbf4be9cee1/model.py#L56 #[derive(Clone, Debug)] pub struct Model { embedding: candle_nn::Embedding, layers: Vec<ResidualBlock>, norm_f: RmsNorm, lm_head: Linear, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let embedding = candle_nn::embedding(cfg.vocab_size(), cfg.d_model, vb.pp("embedding"))?; let mut layers = Vec::with_capacity(cfg.n_layer); let vb_l = vb.pp("layers"); for layer_idx in 0..cfg.n_layer { let layer = ResidualBlock::new(layer_idx, cfg, vb_l.pp(layer_idx))?; layers.push(layer) } let norm_f = candle_nn::rms_norm(cfg.d_model, 1e-5, vb.pp("norm_f"))?; let lm_head = Linear::from_weights(embedding.embeddings().clone(), None); Ok(Self { embedding, layers, norm_f, lm_head, }) } pub fn forward(&self, input_ids: &Tensor, state: &mut State) -> Result<Tensor> { let _b_size = input_ids.dims1()?; let mut xs = self.embedding.forward(input_ids)?; for layer in self.layers.iter() { xs = layer.forward(&xs, state)? } state.pos += 1; xs.apply(&self.norm_f)?.apply(&self.lm_head) } }
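The comments in the file describe the selective-scan recurrence, but the file only defines the model types. The sketch below is a hypothetical driver, not part of the repository, showing how `Config`, `State` and `Model` are meant to fit together for step-by-step decoding; weight and config loading are omitted, and greedy sampling stands in for a proper `LogitsProcessor`.

```rust
// Hypothetical usage sketch (not from the repository): drives the types defined above.
use candle::{Device, IndexOp, Result, Tensor};
use candle_transformers::models::mamba::{Config, Model, State};

/// Greedily decode `steps` extra tokens, feeding one token id per step.
/// `model` and `cfg` are assumed to have been loaded elsewhere
/// (e.g. from a safetensors checkpoint and its config.json).
fn generate(model: &Model, cfg: &Config, first: u32, steps: usize, device: &Device) -> Result<Vec<u32>> {
    // The recurrent state keeps one hidden state and D_CONV previous inputs per layer.
    let mut state = State::new(1, cfg, device)?;
    let mut tokens = vec![first];
    for _ in 0..steps {
        // `forward` expects a 1-D tensor of shape (batch,) holding the current token ids.
        let input = Tensor::new(&[*tokens.last().unwrap()], device)?;
        let logits = model.forward(&input, &mut state)?; // (batch, vocab_size)
        // Greedy sampling keeps the sketch short; real code would sample from the logits.
        let next = logits.i(0)?.argmax(0)?.to_scalar::<u32>()?;
        tokens.push(next);
    }
    Ok(tokens)
}
```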
candle/candle-transformers/src/models/mamba.rs/0
{ "file_path": "candle/candle-transformers/src/models/mamba.rs", "repo_id": "candle", "token_count": 3792 }
38
use crate::quantized_nn::{linear_no_bias, Embedding, Linear, RmsNorm}; pub use crate::quantized_var_builder::VarBuilder; use candle::{DType, Device, Module, Result, Tensor, D}; use candle_nn::Activation; use std::sync::Arc; pub use crate::models::mistral::Config; #[derive(Debug, Clone)] struct RotaryEmbedding { sin: Tensor, cos: Tensor, } fn rotate_half(xs: &Tensor) -> Result<Tensor> { let last_dim = xs.dim(D::Minus1)?; let xs1 = xs.narrow(D::Minus1, 0, last_dim / 2)?; let xs2 = xs.narrow(D::Minus1, last_dim / 2, last_dim - last_dim / 2)?; Tensor::cat(&[&xs2.neg()?, &xs1], D::Minus1) } impl RotaryEmbedding { fn new(cfg: &Config, dev: &Device) -> Result<Self> { let dim = cfg.hidden_size / cfg.num_attention_heads; let max_seq_len = cfg.max_position_embeddings; let inv_freq: Vec<_> = (0..dim) .step_by(2) .map(|i| 1f32 / 10000f32.powf(i as f32 / dim as f32)) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?; let t = Tensor::arange(0u32, max_seq_len as u32, dev)? .to_dtype(DType::F32)? .reshape((max_seq_len, 1))?; let freqs = t.matmul(&inv_freq)?; let freqs = Tensor::cat(&[&freqs, &freqs], D::Minus1)?; Ok(Self { sin: freqs.sin()?, cos: freqs.cos()?, }) } fn apply_rotary_emb_qkv( &self, q: &Tensor, k: &Tensor, seqlen_offset: usize, ) -> Result<(Tensor, Tensor)> { let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?; let cos = self.cos.narrow(0, seqlen_offset, seq_len)?; let sin = self.sin.narrow(0, seqlen_offset, seq_len)?; let cos = cos.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim) let sin = sin.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim) let q_embed = (q.broadcast_mul(&cos)? + rotate_half(q)?.broadcast_mul(&sin))?; let k_embed = (k.broadcast_mul(&cos)? + rotate_half(k)?.broadcast_mul(&sin))?; Ok((q_embed, k_embed)) } } #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MLP { gate_proj: Linear, up_proj: Linear, down_proj: Linear, act_fn: Activation, } impl MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let intermediate_sz = cfg.intermediate_size; let gate_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("gate_proj"))?; let up_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("up_proj"))?; let down_proj = linear_no_bias(intermediate_sz, hidden_sz, vb.pp("down_proj"))?; Ok(Self { gate_proj, up_proj, down_proj, act_fn: cfg.hidden_act, }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?; let rhs = xs.apply(&self.up_proj)?; (lhs * rhs)?.apply(&self.down_proj) } } #[derive(Debug, Clone)] struct Attention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, num_heads: usize, num_kv_heads: usize, num_kv_groups: usize, head_dim: usize, hidden_size: usize, rotary_emb: Arc<RotaryEmbedding>, kv_cache: Option<(Tensor, Tensor)>, } impl Attention { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let num_heads = cfg.num_attention_heads; let num_kv_heads = cfg.num_key_value_heads; let num_kv_groups = num_heads / num_kv_heads; let head_dim = hidden_sz / num_heads; let q_proj = linear_no_bias(hidden_sz, num_heads * head_dim, vb.pp("q_proj"))?; let k_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("k_proj"))?; let v_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("v_proj"))?; let o_proj = linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?; Ok(Self { 
q_proj, k_proj, v_proj, o_proj, num_heads, num_kv_heads, num_kv_groups, head_dim, hidden_size: hidden_sz, rotary_emb, kv_cache: None, }) } fn repeat_kv(&self, xs: Tensor) -> Result<Tensor> { let n_rep = self.num_kv_groups; if n_rep == 1 { Ok(xs) } else { let (b_sz, num_kv_heads, seq_len, head_dim) = xs.dims4()?; xs.unsqueeze(2)? .expand((b_sz, num_kv_heads, n_rep, seq_len, head_dim))? .reshape((b_sz, num_kv_heads * n_rep, seq_len, head_dim)) } } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let (b_sz, q_len, _) = xs.dims3()?; let query_states = self.q_proj.forward(xs)?; let key_states = self.k_proj.forward(xs)?; let value_states = self.v_proj.forward(xs)?; let query_states = query_states .reshape((b_sz, q_len, self.num_heads, self.head_dim))? .transpose(1, 2)?; let key_states = key_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let value_states = value_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let (query_states, key_states) = self.rotary_emb .apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?; let (key_states, value_states) = match &self.kv_cache { None => (key_states, value_states), Some((prev_k, prev_v)) => { let key_states = Tensor::cat(&[prev_k, &key_states], 2)?; let value_states = Tensor::cat(&[prev_v, &value_states], 2)?; (key_states, value_states) } }; self.kv_cache = Some((key_states.clone(), value_states.clone())); let key_states = self.repeat_kv(key_states)?; let value_states = self.repeat_kv(value_states)?; let attn_output = { let scale = 1f64 / f64::sqrt(self.head_dim as f64); let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?; let attn_weights = match attention_mask { None => attn_weights, Some(mask) => attn_weights.broadcast_add(mask)?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; attn_weights.matmul(&value_states)? }; attn_output .transpose(1, 2)? .reshape((b_sz, q_len, self.hidden_size))? 
.apply(&self.o_proj) } fn clear_kv_cache(&mut self) { self.kv_cache = None } } #[derive(Debug, Clone)] struct DecoderLayer { self_attn: Attention, mlp: MLP, input_layernorm: RmsNorm, post_attention_layernorm: RmsNorm, } impl DecoderLayer { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; let input_layernorm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?; let post_attention_layernorm = RmsNorm::new( cfg.hidden_size, cfg.rms_norm_eps, vb.pp("post_attention_layernorm"), )?; Ok(Self { self_attn, mlp, input_layernorm, post_attention_layernorm, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let residual = xs; let xs = self.input_layernorm.forward(xs)?; let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?; residual + xs } fn clear_kv_cache(&mut self) { self.self_attn.clear_kv_cache() } } #[derive(Debug, Clone)] pub struct Model { embed_tokens: Embedding, layers: Vec<DecoderLayer>, norm: RmsNorm, lm_head: Linear, sliding_window: usize, device: Device, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_m = vb.pp("model"); let embed_tokens = Embedding::new(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?; let rotary_emb = Arc::new(RotaryEmbedding::new(cfg, vb_m.device())?); let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb_l = vb_m.pp("layers"); for layer_idx in 0..cfg.num_hidden_layers { let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?; layers.push(layer) } let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?; let lm_head = linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?; Ok(Self { embed_tokens, layers, norm, lm_head, sliding_window: cfg.sliding_window, device: vb.device().clone(), }) } fn prepare_decoder_attention_mask( &self, b_size: usize, tgt_len: usize, seqlen_offset: usize, ) -> Result<Tensor> { // Sliding window mask? let mask: Vec<_> = (0..tgt_len) .flat_map(|i| { (0..tgt_len).map(move |j| { if i < j || j + self.sliding_window < i { f32::NEG_INFINITY } else { 0. } }) }) .collect(); let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?; let mask = if seqlen_offset > 0 { let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?; Tensor::cat(&[&mask0, &mask], D::Minus1)? } else { mask }; mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))? .to_dtype(DType::F32) } pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> { let (b_size, seq_len) = input_ids.dims2()?; let attention_mask = if seq_len <= 1 { None } else { let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?; Some(mask) }; let mut xs = self.embed_tokens.forward(input_ids)?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)? } xs.narrow(1, seq_len - 1, 1)? .apply(&self.norm)? .apply(&self.lm_head) } pub fn clear_kv_cache(&mut self) { for layer in self.layers.iter_mut() { layer.clear_kv_cache() } } }
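Because the attention layers above cache their keys and values internally, a caller is expected to pass the full prompt once and then single tokens with an increasing `seqlen_offset`. The loop below is a hypothetical sketch, not from the repository, built only on the API shown here; tokenization, GGUF weight loading and proper sampling are omitted.

```rust
// Hypothetical generation loop (not from the repository) built only on the API above.
use candle::{Device, IndexOp, Result, Tensor};
use candle_transformers::models::quantized_mistral::Model;

fn generate(model: &mut Model, prompt: &[u32], steps: usize, device: &Device) -> Result<Vec<u32>> {
    model.clear_kv_cache();
    let mut tokens = prompt.to_vec();
    for step in 0..steps {
        // First pass consumes the whole prompt; later passes feed only the newest token,
        // with `seqlen_offset` pointing at its position so the cached keys/values line up.
        let ctx = if step == 0 { tokens.as_slice() } else { &tokens[tokens.len() - 1..] };
        let offset = tokens.len() - ctx.len();
        let input = Tensor::new(ctx, device)?.unsqueeze(0)?; // (1, seq_len)
        // `forward` returns logits for the last position only: (1, 1, vocab_size).
        let logits = model.forward(&input, offset)?;
        let next = logits.i((0, 0))?.argmax(0)?.to_scalar::<u32>()?; // greedy for brevity
        tokens.push(next);
    }
    Ok(tokens)
}
```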
candle/candle-transformers/src/models/quantized_mistral.rs/0
{ "file_path": "candle/candle-transformers/src/models/quantized_mistral.rs", "repo_id": "candle", "token_count": 6082 }
39
use candle::{DType, IndexOp, Result, Tensor, D}; use candle_nn::VarBuilder; #[derive(Debug)] struct PositionEmbeddingRandom { positional_encoding_gaussian_matrix: Tensor, } impl PositionEmbeddingRandom { fn new(num_pos_feats: usize, vb: VarBuilder) -> Result<Self> { let positional_encoding_gaussian_matrix = vb.get((2, num_pos_feats), "positional_encoding_gaussian_matrix")?; Ok(Self { positional_encoding_gaussian_matrix, }) } fn pe_encoding(&self, coords: &Tensor) -> Result<Tensor> { let coords = coords.affine(2., -1.)?; let coords = coords.broadcast_matmul(&self.positional_encoding_gaussian_matrix)?; let coords = (coords * (2. * std::f64::consts::PI))?; Tensor::cat(&[coords.sin()?, coords.cos()?], D::Minus1) } fn forward(&self, h: usize, w: usize) -> Result<Tensor> { let device = self.positional_encoding_gaussian_matrix.device(); let x_embed = (Tensor::arange(0u32, w as u32, device)?.to_dtype(DType::F32)? + 0.5)?; let y_embed = (Tensor::arange(0u32, h as u32, device)?.to_dtype(DType::F32)? + 0.5)?; let x_embed = (x_embed / w as f64)? .reshape((1, ()))? .broadcast_as((h, w))?; let y_embed = (y_embed / h as f64)? .reshape(((), 1))? .broadcast_as((h, w))?; let coords = Tensor::stack(&[&x_embed, &y_embed], D::Minus1)?; self.pe_encoding(&coords)?.permute((2, 0, 1)) } fn forward_with_coords( &self, coords_input: &Tensor, image_size: (usize, usize), ) -> Result<Tensor> { let coords0 = (coords_input.narrow(D::Minus1, 0, 1)? / image_size.1 as f64)?; let coords1 = (coords_input.narrow(D::Minus1, 1, 1)? / image_size.0 as f64)?; let c = coords_input.dim(D::Minus1)?; let coords_rest = coords_input.narrow(D::Minus1, 2, c - 2)?; let coords = Tensor::cat(&[&coords0, &coords1, &coords_rest], D::Minus1)?; self.pe_encoding(&coords) } } #[derive(Debug)] pub struct PromptEncoder { pe_layer: PositionEmbeddingRandom, point_embeddings: Vec<candle_nn::Embedding>, not_a_point_embed: candle_nn::Embedding, mask_downscaling_conv1: candle_nn::Conv2d, mask_downscaling_ln1: super::LayerNorm2d, mask_downscaling_conv2: candle_nn::Conv2d, mask_downscaling_ln2: super::LayerNorm2d, mask_downscaling_conv3: candle_nn::Conv2d, no_mask_embed: candle_nn::Embedding, image_embedding_size: (usize, usize), input_image_size: (usize, usize), embed_dim: usize, span: tracing::Span, } impl PromptEncoder { pub fn new( embed_dim: usize, image_embedding_size: (usize, usize), input_image_size: (usize, usize), mask_in_chans: usize, vb: VarBuilder, ) -> Result<Self> { let num_points_embeddings = 4; let pe_layer = PositionEmbeddingRandom::new(embed_dim / 2, vb.pp("pe_layer"))?; let not_a_point_embed = candle_nn::embedding(1, embed_dim, vb.pp("not_a_point_embed"))?; let no_mask_embed = candle_nn::embedding(1, embed_dim, vb.pp("no_mask_embed"))?; let cfg = candle_nn::Conv2dConfig { stride: 2, ..Default::default() }; let mask_downscaling_conv1 = candle_nn::conv2d(1, mask_in_chans / 4, 2, cfg, vb.pp("mask_downscaling.0"))?; let mask_downscaling_conv2 = candle_nn::conv2d( mask_in_chans / 4, mask_in_chans, 2, cfg, vb.pp("mask_downscaling.3"), )?; let mask_downscaling_conv3 = candle_nn::conv2d( mask_in_chans, embed_dim, 1, Default::default(), vb.pp("mask_downscaling.6"), )?; let mask_downscaling_ln1 = super::LayerNorm2d::new(mask_in_chans / 4, 1e-6, vb.pp("mask_downscaling.1"))?; let mask_downscaling_ln2 = super::LayerNorm2d::new(mask_in_chans, 1e-6, vb.pp("mask_downscaling.4"))?; let mut point_embeddings = Vec::with_capacity(num_points_embeddings); let vb_e = vb.pp("point_embeddings"); for i in 0..num_points_embeddings { let emb = 
candle_nn::embedding(1, embed_dim, vb_e.pp(i))?; point_embeddings.push(emb) } let span = tracing::span!(tracing::Level::TRACE, "prompt-encoder"); Ok(Self { pe_layer, point_embeddings, not_a_point_embed, mask_downscaling_conv1, mask_downscaling_ln1, mask_downscaling_conv2, mask_downscaling_ln2, mask_downscaling_conv3, no_mask_embed, image_embedding_size, input_image_size, embed_dim, span, }) } pub fn get_dense_pe(&self) -> Result<Tensor> { self.pe_layer .forward(self.image_embedding_size.0, self.image_embedding_size.1)? .unsqueeze(0) } fn embed_masks(&self, masks: &Tensor) -> Result<Tensor> { masks .apply(&self.mask_downscaling_conv1)? .apply(&self.mask_downscaling_ln1)? .gelu()? .apply(&self.mask_downscaling_conv2)? .apply(&self.mask_downscaling_ln2)? .gelu()? .apply(&self.mask_downscaling_conv3) } fn embed_points(&self, points: &Tensor, labels: &Tensor, pad: bool) -> Result<Tensor> { let points = (points + 0.5)?; let dev = points.device(); let (points, labels) = if pad { let padding_point = Tensor::zeros((points.dim(0)?, 1, 2), DType::F32, dev)?; let padding_label = (Tensor::ones((labels.dim(0)?, 1), DType::F32, dev)? * (-1f64))?; let points = Tensor::cat(&[&points, &padding_point], 1)?; let labels = Tensor::cat(&[labels, &padding_label], 1)?; (points, labels) } else { (points, labels.clone()) }; let point_embedding = self .pe_layer .forward_with_coords(&points, self.input_image_size)?; let labels = labels.unsqueeze(2)?.broadcast_as(point_embedding.shape())?; let zeros = point_embedding.zeros_like()?; let point_embedding = labels.lt(0f32)?.where_cond( &self .not_a_point_embed .embeddings() .broadcast_as(zeros.shape())?, &point_embedding, )?; let labels0 = labels.eq(0f32)?.where_cond( &self.point_embeddings[0] .embeddings() .broadcast_as(zeros.shape())?, &zeros, )?; let point_embedding = (point_embedding + labels0)?; let labels1 = labels.eq(1f32)?.where_cond( &self.point_embeddings[1] .embeddings() .broadcast_as(zeros.shape())?, &zeros, )?; let point_embedding = (point_embedding + labels1)?; Ok(point_embedding) } fn embed_boxes(&self, boxes: &Tensor) -> Result<Tensor> { let boxes = (boxes + 0.5)?; let coords = boxes.reshape(((), 2, 2))?; let corner_embedding = self .pe_layer .forward_with_coords(&coords, self.input_image_size)?; let ce1 = corner_embedding.i((.., 0))?; let ce2 = corner_embedding.i((.., 1))?; let ce1 = (ce1 + self.point_embeddings[2].embeddings())?; let ce2 = (ce2 + self.point_embeddings[3].embeddings())?; Tensor::cat(&[&ce1, &ce2], 1) } pub fn forward( &self, points: Option<(&Tensor, &Tensor)>, boxes: Option<&Tensor>, masks: Option<&Tensor>, ) -> Result<(Tensor, Tensor)> { let _enter = self.span.enter(); let se_points = match points { Some((coords, labels)) => Some(self.embed_points(coords, labels, boxes.is_none())?), None => None, }; let se_boxes = match boxes { Some(boxes) => Some(self.embed_boxes(boxes)?), None => None, }; let sparse_embeddings = match (se_points, se_boxes) { (Some(se_points), Some(se_boxes)) => Tensor::cat(&[se_points, se_boxes], 1)?, (Some(se_points), None) => se_points, (None, Some(se_boxes)) => se_boxes, (None, None) => { Tensor::zeros((1, 0, self.embed_dim), DType::F32, &candle::Device::Cpu)? } }; let dense_embeddings = match masks { None => { let emb = self.no_mask_embed.embeddings(); emb.reshape((1, (), 1, 1))?.expand(( 1, emb.elem_count(), self.image_embedding_size.0, self.image_embedding_size.1, ))? } Some(masks) => self.embed_masks(masks)?, }; Ok((sparse_embeddings, dense_embeddings)) } }
candle/candle-transformers/src/models/segment_anything/prompt_encoder.rs/0
{ "file_path": "candle/candle-transformers/src/models/segment_anything/prompt_encoder.rs", "repo_id": "candle", "token_count": 4719 }
40
#![allow(dead_code)] //! # Variational Auto-Encoder (VAE) Models. //! //! Auto-encoder models compress their input to a usually smaller latent space //! before expanding it back to its original shape. This results in the latent values //! compressing the original information. use super::unet_2d_blocks::{ DownEncoderBlock2D, DownEncoderBlock2DConfig, UNetMidBlock2D, UNetMidBlock2DConfig, UpDecoderBlock2D, UpDecoderBlock2DConfig, }; use candle::{Result, Tensor}; use candle_nn as nn; use candle_nn::Module; #[derive(Debug, Clone)] struct EncoderConfig { // down_block_types: DownEncoderBlock2D block_out_channels: Vec<usize>, layers_per_block: usize, norm_num_groups: usize, double_z: bool, } impl Default for EncoderConfig { fn default() -> Self { Self { block_out_channels: vec![64], layers_per_block: 2, norm_num_groups: 32, double_z: true, } } } #[derive(Debug)] struct Encoder { conv_in: nn::Conv2d, down_blocks: Vec<DownEncoderBlock2D>, mid_block: UNetMidBlock2D, conv_norm_out: nn::GroupNorm, conv_out: nn::Conv2d, #[allow(dead_code)] config: EncoderConfig, } impl Encoder { fn new( vs: nn::VarBuilder, in_channels: usize, out_channels: usize, config: EncoderConfig, ) -> Result<Self> { let conv_cfg = nn::Conv2dConfig { padding: 1, ..Default::default() }; let conv_in = nn::conv2d( in_channels, config.block_out_channels[0], 3, conv_cfg, vs.pp("conv_in"), )?; let mut down_blocks = vec![]; let vs_down_blocks = vs.pp("down_blocks"); for index in 0..config.block_out_channels.len() { let out_channels = config.block_out_channels[index]; let in_channels = if index > 0 { config.block_out_channels[index - 1] } else { config.block_out_channels[0] }; let is_final = index + 1 == config.block_out_channels.len(); let cfg = DownEncoderBlock2DConfig { num_layers: config.layers_per_block, resnet_eps: 1e-6, resnet_groups: config.norm_num_groups, add_downsample: !is_final, downsample_padding: 0, ..Default::default() }; let down_block = DownEncoderBlock2D::new( vs_down_blocks.pp(&index.to_string()), in_channels, out_channels, cfg, )?; down_blocks.push(down_block) } let last_block_out_channels = *config.block_out_channels.last().unwrap(); let mid_cfg = UNetMidBlock2DConfig { resnet_eps: 1e-6, output_scale_factor: 1., attn_num_head_channels: None, resnet_groups: Some(config.norm_num_groups), ..Default::default() }; let mid_block = UNetMidBlock2D::new(vs.pp("mid_block"), last_block_out_channels, None, mid_cfg)?; let conv_norm_out = nn::group_norm( config.norm_num_groups, last_block_out_channels, 1e-6, vs.pp("conv_norm_out"), )?; let conv_out_channels = if config.double_z { 2 * out_channels } else { out_channels }; let conv_cfg = nn::Conv2dConfig { padding: 1, ..Default::default() }; let conv_out = nn::conv2d( last_block_out_channels, conv_out_channels, 3, conv_cfg, vs.pp("conv_out"), )?; Ok(Self { conv_in, down_blocks, mid_block, conv_norm_out, conv_out, config, }) } } impl Encoder { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let mut xs = xs.apply(&self.conv_in)?; for down_block in self.down_blocks.iter() { xs = xs.apply(down_block)? } let xs = self .mid_block .forward(&xs, None)? 
.apply(&self.conv_norm_out)?; nn::ops::silu(&xs)?.apply(&self.conv_out) } } #[derive(Debug, Clone)] struct DecoderConfig { // up_block_types: UpDecoderBlock2D block_out_channels: Vec<usize>, layers_per_block: usize, norm_num_groups: usize, } impl Default for DecoderConfig { fn default() -> Self { Self { block_out_channels: vec![64], layers_per_block: 2, norm_num_groups: 32, } } } #[derive(Debug)] struct Decoder { conv_in: nn::Conv2d, up_blocks: Vec<UpDecoderBlock2D>, mid_block: UNetMidBlock2D, conv_norm_out: nn::GroupNorm, conv_out: nn::Conv2d, #[allow(dead_code)] config: DecoderConfig, } impl Decoder { fn new( vs: nn::VarBuilder, in_channels: usize, out_channels: usize, config: DecoderConfig, ) -> Result<Self> { let n_block_out_channels = config.block_out_channels.len(); let last_block_out_channels = *config.block_out_channels.last().unwrap(); let conv_cfg = nn::Conv2dConfig { padding: 1, ..Default::default() }; let conv_in = nn::conv2d( in_channels, last_block_out_channels, 3, conv_cfg, vs.pp("conv_in"), )?; let mid_cfg = UNetMidBlock2DConfig { resnet_eps: 1e-6, output_scale_factor: 1., attn_num_head_channels: None, resnet_groups: Some(config.norm_num_groups), ..Default::default() }; let mid_block = UNetMidBlock2D::new(vs.pp("mid_block"), last_block_out_channels, None, mid_cfg)?; let mut up_blocks = vec![]; let vs_up_blocks = vs.pp("up_blocks"); let reversed_block_out_channels: Vec<_> = config.block_out_channels.iter().copied().rev().collect(); for index in 0..n_block_out_channels { let out_channels = reversed_block_out_channels[index]; let in_channels = if index > 0 { reversed_block_out_channels[index - 1] } else { reversed_block_out_channels[0] }; let is_final = index + 1 == n_block_out_channels; let cfg = UpDecoderBlock2DConfig { num_layers: config.layers_per_block + 1, resnet_eps: 1e-6, resnet_groups: config.norm_num_groups, add_upsample: !is_final, ..Default::default() }; let up_block = UpDecoderBlock2D::new( vs_up_blocks.pp(&index.to_string()), in_channels, out_channels, cfg, )?; up_blocks.push(up_block) } let conv_norm_out = nn::group_norm( config.norm_num_groups, config.block_out_channels[0], 1e-6, vs.pp("conv_norm_out"), )?; let conv_cfg = nn::Conv2dConfig { padding: 1, ..Default::default() }; let conv_out = nn::conv2d( config.block_out_channels[0], out_channels, 3, conv_cfg, vs.pp("conv_out"), )?; Ok(Self { conv_in, up_blocks, mid_block, conv_norm_out, conv_out, config, }) } } impl Decoder { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let mut xs = self.mid_block.forward(&self.conv_in.forward(xs)?, None)?; for up_block in self.up_blocks.iter() { xs = up_block.forward(&xs)? 
} let xs = self.conv_norm_out.forward(&xs)?; let xs = nn::ops::silu(&xs)?; self.conv_out.forward(&xs) } } #[derive(Debug, Clone)] pub struct AutoEncoderKLConfig { pub block_out_channels: Vec<usize>, pub layers_per_block: usize, pub latent_channels: usize, pub norm_num_groups: usize, } impl Default for AutoEncoderKLConfig { fn default() -> Self { Self { block_out_channels: vec![64], layers_per_block: 1, latent_channels: 4, norm_num_groups: 32, } } } pub struct DiagonalGaussianDistribution { mean: Tensor, std: Tensor, } impl DiagonalGaussianDistribution { pub fn new(parameters: &Tensor) -> Result<Self> { let mut parameters = parameters.chunk(2, 1)?.into_iter(); let mean = parameters.next().unwrap(); let logvar = parameters.next().unwrap(); let std = (logvar * 0.5)?.exp()?; Ok(DiagonalGaussianDistribution { mean, std }) } pub fn sample(&self) -> Result<Tensor> { let sample = self.mean.randn_like(0., 1.); &self.mean + &self.std * sample } } // https://github.com/huggingface/diffusers/blob/970e30606c2944e3286f56e8eb6d3dc6d1eb85f7/src/diffusers/models/vae.py#L485 // This implementation is specific to the config used in stable-diffusion-v1-5 // https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/vae/config.json #[derive(Debug)] pub struct AutoEncoderKL { encoder: Encoder, decoder: Decoder, quant_conv: nn::Conv2d, post_quant_conv: nn::Conv2d, pub config: AutoEncoderKLConfig, } impl AutoEncoderKL { pub fn new( vs: nn::VarBuilder, in_channels: usize, out_channels: usize, config: AutoEncoderKLConfig, ) -> Result<Self> { let latent_channels = config.latent_channels; let encoder_cfg = EncoderConfig { block_out_channels: config.block_out_channels.clone(), layers_per_block: config.layers_per_block, norm_num_groups: config.norm_num_groups, double_z: true, }; let encoder = Encoder::new(vs.pp("encoder"), in_channels, latent_channels, encoder_cfg)?; let decoder_cfg = DecoderConfig { block_out_channels: config.block_out_channels.clone(), layers_per_block: config.layers_per_block, norm_num_groups: config.norm_num_groups, }; let decoder = Decoder::new(vs.pp("decoder"), latent_channels, out_channels, decoder_cfg)?; let conv_cfg = Default::default(); let quant_conv = nn::conv2d( 2 * latent_channels, 2 * latent_channels, 1, conv_cfg, vs.pp("quant_conv"), )?; let post_quant_conv = nn::conv2d( latent_channels, latent_channels, 1, conv_cfg, vs.pp("post_quant_conv"), )?; Ok(Self { encoder, decoder, quant_conv, post_quant_conv, config, }) } /// Returns the distribution in the latent space. pub fn encode(&self, xs: &Tensor) -> Result<DiagonalGaussianDistribution> { let xs = self.encoder.forward(xs)?; let parameters = self.quant_conv.forward(&xs)?; DiagonalGaussianDistribution::new(&parameters) } /// Takes as input some sampled values. pub fn decode(&self, xs: &Tensor) -> Result<Tensor> { let xs = self.post_quant_conv.forward(xs)?; self.decoder.forward(&xs) } }
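The module doc above explains that the VAE maps images into a latent distribution and back. The sketch below is a hypothetical round-trip, not from the repository, using only the public API defined here; weight loading is omitted, and in a full Stable Diffusion pipeline the sampled latents would additionally be scaled by a fixed factor (commonly 0.18215) before being handed to the diffusion model.

```rust
// Hypothetical encode/decode round-trip (not from the repository).
use candle::{Result, Tensor};
use candle_transformers::models::stable_diffusion::vae::AutoEncoderKL;

/// `images` is assumed to be a (batch, channels, height, width) tensor already
/// normalized the way the checkpoint expects; `vae` is assumed to be loaded elsewhere.
fn roundtrip(vae: &AutoEncoderKL, images: &Tensor) -> Result<Tensor> {
    // `encode` returns a diagonal Gaussian over the latent space.
    let dist = vae.encode(images)?;
    // Draw one sample from that posterior (the struct only exposes `sample` publicly).
    let latents = dist.sample()?;
    // Map the sampled latents back to image space.
    vae.decode(&latents)
}
```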
candle/candle-transformers/src/models/stable_diffusion/vae.rs/0
{ "file_path": "candle/candle-transformers/src/models/stable_diffusion/vae.rs", "repo_id": "candle", "token_count": 6006 }
41
pub mod attention_processor;
pub mod common;
pub mod ddpm;
pub mod diffnext;
pub mod paella_vq;
pub mod prior;
candle/candle-transformers/src/models/wuerstchen/mod.rs/0
{ "file_path": "candle/candle-transformers/src/models/wuerstchen/mod.rs", "repo_id": "candle", "token_count": 38 }
42
use candle::{DType, Device, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::models::bert::{BertModel, Config};
use candle_wasm_example_bert::console_log;
use tokenizers::{PaddingParams, Tokenizer};
use wasm_bindgen::prelude::*;

#[wasm_bindgen]
pub struct Model {
    bert: BertModel,
    tokenizer: Tokenizer,
}

#[wasm_bindgen]
impl Model {
    #[wasm_bindgen(constructor)]
    pub fn load(weights: Vec<u8>, tokenizer: Vec<u8>, config: Vec<u8>) -> Result<Model, JsError> {
        console_error_panic_hook::set_once();
        console_log!("loading model");
        let device = &Device::Cpu;
        let vb = VarBuilder::from_buffered_safetensors(weights, DType::F64, device)?;
        let config: Config = serde_json::from_slice(&config)?;
        let tokenizer =
            Tokenizer::from_bytes(&tokenizer).map_err(|m| JsError::new(&m.to_string()))?;
        let bert = BertModel::load(vb, &config)?;

        Ok(Self { bert, tokenizer })
    }

    pub fn get_embeddings(&mut self, input: JsValue) -> Result<JsValue, JsError> {
        let input: Params =
            serde_wasm_bindgen::from_value(input).map_err(|m| JsError::new(&m.to_string()))?;
        let sentences = input.sentences;
        let normalize_embeddings = input.normalize_embeddings;
        let device = &Device::Cpu;
        if let Some(pp) = self.tokenizer.get_padding_mut() {
            pp.strategy = tokenizers::PaddingStrategy::BatchLongest
        } else {
            let pp = PaddingParams {
                strategy: tokenizers::PaddingStrategy::BatchLongest,
                ..Default::default()
            };
            self.tokenizer.with_padding(Some(pp));
        }
        let tokens = self
            .tokenizer
            .encode_batch(sentences.to_vec(), true)
            .map_err(|m| JsError::new(&m.to_string()))?;
        let token_ids: Vec<Tensor> = tokens
            .iter()
            .map(|tokens| {
                let tokens = tokens.get_ids().to_vec();
                Tensor::new(tokens.as_slice(), device)
            })
            .collect::<Result<Vec<_>, _>>()?;

        let token_ids = Tensor::stack(&token_ids, 0)?;
        let token_type_ids = token_ids.zeros_like()?;
        console_log!("running inference on batch {:?}", token_ids.shape());
        let embeddings = self.bert.forward(&token_ids, &token_type_ids)?;
        console_log!("generated embeddings {:?}", embeddings.shape());
        // Apply some avg-pooling by taking the mean embedding value for all tokens (including padding)
        let (_n_sentence, n_tokens, _hidden_size) = embeddings.dims3()?;
        let embeddings = (embeddings.sum(1)? / (n_tokens as f64))?;
        let embeddings = if normalize_embeddings {
            embeddings.broadcast_div(&embeddings.sqr()?.sum_keepdim(1)?.sqrt()?)?
        } else {
            embeddings
        };
        let embeddings_data = embeddings.to_vec2()?;
        Ok(serde_wasm_bindgen::to_value(&Embeddings {
            data: embeddings_data,
        })?)
    }
}

#[derive(serde::Serialize, serde::Deserialize)]
struct Embeddings {
    data: Vec<Vec<f64>>,
}

#[derive(serde::Serialize, serde::Deserialize)]
pub struct Params {
    sentences: Vec<String>,
    normalize_embeddings: bool,
}

fn main() {
    console_error_panic_hook::set_once();
}
candle/candle-wasm-examples/bert/src/bin/m.rs/0
{ "file_path": "candle/candle-wasm-examples/bert/src/bin/m.rs", "repo_id": "candle", "token_count": 1534 }
43
import init, { Model } from "./build/m.js";

async function fetchArrayBuffer(url) {
  const cacheName = "llama2c-candle-cache";
  const cache = await caches.open(cacheName);
  const cachedResponse = await cache.match(url);
  if (cachedResponse) {
    const data = await cachedResponse.arrayBuffer();
    return new Uint8Array(data);
  }
  const res = await fetch(url, { cache: "force-cache" });
  cache.put(url, res.clone());
  return new Uint8Array(await res.arrayBuffer());
}

class Llama2C {
  static instance = {};

  static async getInstance(weightsURL, modelID, tokenizerURL) {
    // load individual modelID only once
    if (!this.instance[modelID]) {
      await init();

      self.postMessage({ status: "loading", message: "Loading Model" });

      const [weightsArrayU8, tokenizerArrayU8] = await Promise.all([
        fetchArrayBuffer(weightsURL),
        fetchArrayBuffer(tokenizerURL),
      ]);

      this.instance[modelID] = new Model(weightsArrayU8, tokenizerArrayU8);
    }
    return this.instance[modelID];
  }
}

let controller = null;
self.addEventListener("message", (event) => {
  if (event.data.command === "start") {
    controller = new AbortController();
    generate(event.data);
  } else if (event.data.command === "abort") {
    controller.abort();
  }
});

async function generate(data) {
  const {
    weightsURL,
    modelID,
    tokenizerURL,
    prompt,
    temp,
    top_p,
    repeatPenalty,
    seed,
    maxSeqLen,
  } = data;
  try {
    self.postMessage({ status: "loading", message: "Starting llama2.c" });
    const model = await Llama2C.getInstance(weightsURL, modelID, tokenizerURL);

    self.postMessage({ status: "loading", message: "Initializing model" });
    const firstToken = model.init_with_prompt(
      prompt,
      temp,
      top_p,
      repeatPenalty,
      seed
    );
    const seq_len = model.get_seq_len();

    let sentence = firstToken;
    let maxTokens = maxSeqLen ? maxSeqLen : seq_len - prompt.length - 1;
    let startTime = performance.now();
    let tokensCount = 0;

    while (tokensCount < maxTokens) {
      await new Promise(async (resolve) => {
        if (controller && controller.signal.aborted) {
          self.postMessage({
            status: "aborted",
            message: "Aborted",
            output: prompt + sentence,
          });
          return;
        }
        const token = await model.next_token();
        const tokensSec =
          ((tokensCount + 1) / (performance.now() - startTime)) * 1000;

        sentence += token;
        self.postMessage({
          status: "generating",
          message: "Generating token",
          token: token,
          sentence: sentence,
          totalTime: performance.now() - startTime,
          tokensSec,
          prompt: prompt,
        });
        setTimeout(resolve, 0);
      });
      tokensCount++;
    }
    self.postMessage({
      status: "complete",
      message: "complete",
      output: prompt + sentence,
    });
  } catch (e) {
    self.postMessage({ error: e });
  }
}
candle/candle-wasm-examples/llama2-c/llama2cWorker.js/0
{ "file_path": "candle/candle-wasm-examples/llama2-c/llama2cWorker.js", "repo_id": "candle", "token_count": 1223 }
44
## Running Segment Anything Example

Here, we provide an example showing how to run the Segment Anything model in the browser.

### Vanilla JS and WebWorkers

To build and test the UI made in Vanilla JS and WebWorkers, first we need to build the WASM library:

```bash
sh build-lib.sh
```

This will bundle the library under `./build` and we can import it inside our WebWorker like a normal JS module:

```js
import init, { Model } from "./build/m.js";
```

The full example can be found under `./lib-example.html`. All needed assets are fetched from the web, so no need to download anything. Finally, you can preview the example by running a local HTTP server. For example:

```bash
python -m http.server
```

Then open `http://localhost:8000/lib-example.html` in your browser.
candle/candle-wasm-examples/segment-anything/README.md/0
{ "file_path": "candle/candle-wasm-examples/segment-anything/README.md", "repo_id": "candle", "token_count": 220 }
45