Dataset columns (name, type, observed min to max; string ranges are lengths in characters):

  code                     string    82 to 54.1k
  code_codestyle           int64     0 to 699
  style_context            string    111 to 35.6k
  style_context_codestyle  int64     0 to 699
  label                    int64     0 to 1
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input UpperCAmelCase : Union[str, Any] = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine" def _SCREAMING_SNAKE_CASE () -> Optional[int]: '''simple docstring''' lowercase_ = _ask_options( """In which compute environment are you running?""" , ["""This machine""", """AWS (Amazon SageMaker)"""] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: lowercase_ = get_sagemaker_input() else: lowercase_ = get_cluster_input() return config def _SCREAMING_SNAKE_CASE (__lowerCAmelCase=None ) -> int: '''simple docstring''' if subparsers is not None: lowercase_ = subparsers.add_parser("""config""" , description=UpperCamelCase__ ) else: lowercase_ = argparse.ArgumentParser("""Accelerate config command""" , description=UpperCamelCase__ ) parser.add_argument( """--config_file""" , default=UpperCamelCase__ , help=( """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """ """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """ """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """ """with 'huggingface'.""" ) , ) if subparsers is not None: parser.set_defaults(func=UpperCamelCase__ ) return parser def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Tuple: '''simple docstring''' lowercase_ = get_user_input() if args.config_file is not None: lowercase_ = args.config_file else: if not os.path.isdir(UpperCamelCase__ ): os.makedirs(UpperCamelCase__ ) lowercase_ = default_yaml_config_file if config_file.endswith(""".json""" ): config.to_json_file(UpperCamelCase__ ) else: config.to_yaml_file(UpperCamelCase__ ) print(F'''accelerate configuration saved at {config_file}''' ) def _SCREAMING_SNAKE_CASE () -> Union[str, Any]: '''simple docstring''' lowercase_ = config_command_parser() lowercase_ = parser.parse_args() config_command(UpperCamelCase__ ) if __name__ == "__main__": main()
567
from typing import Dict, List, Optional

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}


class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocabulary file to save.
        return ()
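Because CANINE tokenizes directly at the Unicode codepoint level, a quick round-trip makes the id scheme concrete. A minimal sketch using the class above (the exact ids follow from the special-codepoint constants, e.g. CLS = 0xE000 = 57344; whether direct instantiation suffices can depend on the surrounding transformers version):

tokenizer = CanineTokenizer()

encoded = tokenizer("hello")["input_ids"]
# [57344, 104, 101, 108, 108, 111, 57345]  ->  [CLS] h e l l o [SEP]
print(encoded)
print(tokenizer.decode(encoded, skip_special_tokens=True))  # "hello"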
6
0
def is_balanced(s):
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
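A few doctest-style checks (hypothetical inputs, not part of the original sample) illustrate the contract:

assert is_balanced("([]{})") is True   # properly nested
assert is_balanced("([)]") is False    # interleaved closers fail the stack match
assert is_balanced("((") is False      # unclosed openers leave the stack non-empty
assert is_balanced("abc(d)e") is True  # non-bracket characters are ignored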
367
import inspect
import os

import torch

from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed

import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
    AccelerateTestCase,
    TempDirTestCase,
    execute_subprocess_async,
    require_cuda,
    require_fsdp,
    require_multi_gpu,
    slow,
)
from accelerate.utils.constants import (
    FSDP_AUTO_WRAP_POLICY,
    FSDP_BACKWARD_PREFETCH,
    FSDP_SHARDING_STRATEGY,
    FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment


set_seed(42)

BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]


@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()

        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )

    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))

    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))

    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)

    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)

    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)

    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))


@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])

    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                    break

            if "fp32" in config:
                cmd_config.append("--mixed_precision=no")
            else:
                cmd_config.append("--mixed_precision=fp16")

            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True")

            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                    break

            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--performance_lower_bound={self.performance_lower_bound}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())

    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                        break

                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
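The plugin these tests build from mocked environment variables can also be passed to `Accelerator` in code. A minimal sketch, assuming accelerate's public `fsdp_plugin` keyword and that the script runs under a multi-GPU `accelerate launch`; the model and optimizer are placeholders:

import torch
from accelerate import Accelerator, FullyShardedDataParallelPlugin

fsdp_plugin = FullyShardedDataParallelPlugin()  # fields fall back to the FSDP_* env vars exercised above
accelerator = Accelerator(fsdp_plugin=fsdp_plugin)

model = torch.nn.Linear(8, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)  # wraps the model in FSDP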
6
0
import argparse
import datetime


def zeller(date_input: str) -> str:
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
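The block between `# Start math` and `# End math` is a variant of Zeller's congruence. For reference (stated from general knowledge, not taken from the sample), the standard Gregorian form is

h = \left( q + \left\lfloor \frac{13(m+1)}{5} \right\rfloor + K + \left\lfloor \frac{K}{4} \right\rfloor + \left\lfloor \frac{J}{4} \right\rfloor - 2J \right) \bmod 7

where q is the day of the month, m is the month with January and February counted as months 13 and 14 of the previous year (the `m <= 2` shift above), K = year mod 100 (`k`), and J is the century (`c`). The code's `int(2.6 * m - 5.39)` equals \lfloor 13(m+1)/5 \rfloor minus a constant 8, which is congruent to 1 mod 7 and so only rotates the day-number mapping handled by `convert_datetime_days`.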
165
import collections.abc
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]


def drop_path(input, drop_prob: float = 0.0, training: bool = False):
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output


class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample, applied in the main path of residual blocks."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)


class PoolFormerEmbeddings(nn.Module):
    """Construct patch embeddings."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings


class PoolFormerGroupNorm(nn.GroupNorm):
    """Group normalization with a single group, over [B, C, H, W] tensors."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)


class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        return self.pool(hidden_states) - hidden_states


class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states


class PoolFormerLayer(nn.Module):
    """This corresponds to the "PoolFormerBlock" class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs


class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))

        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)


class PoolFormerPreTrainedModel(PreTrainedModel):
    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value


POOLFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

POOLFORMER_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""


@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )


class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output


@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])
        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
6
0
import os
from shutil import copyfile
from typing import List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}


class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        # The SentencePiece processor is not picklable; reload it in __setstate__.
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
152
import os
import tempfile
import unittest

from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        FlaubertForMultipleChoice,
        FlaubertForQuestionAnswering,
        FlaubertForQuestionAnsweringSimple,
        FlaubertForSequenceClassification,
        FlaubertForTokenClassification,
        FlaubertModel,
        FlaubertWithLMHeadModel,
    )
    from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST


class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )

    def create_and_check_flaubert_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
6
0
"""simple docstring""" from __future__ import annotations from typing import Any class SCREAMING_SNAKE_CASE__ : def __init__(self , _lowercase , _lowercase , _lowercase = 0 ): '''simple docstring''' __a , __a : List[str] = row, column __a : List[Any] = [[default_value for c in range(__A )] for r in range(__A )] def __str__(self ): '''simple docstring''' __a : Union[str, Any] = F'''Matrix consist of {self.row} rows and {self.column} columns\n''' # Make string identifier __a : Optional[Any] = 0 for row_vector in self.array: for obj in row_vector: __a : Any = max(__A , len(str(__A ) ) ) __a : List[str] = F'''%{max_element_length}s''' # Make string and return def single_line(_lowercase ) -> str: nonlocal string_format_identifier __a : Union[str, Any] = """[""" line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(__A ) for row_vector in self.array ) return s def __repr__(self ): '''simple docstring''' return str(self ) def lowerCAmelCase__(self , _lowercase ): '''simple docstring''' if not (isinstance(__A , (list, tuple) ) and len(__A ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__(self , _lowercase ): '''simple docstring''' assert self.validate_indicies(__A ) return self.array[loc[0]][loc[1]] def __setitem__(self , _lowercase , _lowercase ): '''simple docstring''' assert self.validate_indicies(__A ) __a : List[Any] = value def __add__(self , _lowercase ): '''simple docstring''' assert isinstance(__A , __A ) assert self.row == another.row and self.column == another.column # Add __a : List[str] = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __a : str = self[r, c] + another[r, c] return result def __neg__(self ): '''simple docstring''' __a : int = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __a : int = -self[r, c] return result def __sub__(self , _lowercase ): '''simple docstring''' return self + (-another) def __mul__(self , _lowercase ): '''simple docstring''' if isinstance(__A , (int, float) ): # Scalar multiplication __a : List[str] = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __a : str = self[r, c] * another return result elif isinstance(__A , __A ): # Matrix multiplication assert self.column == another.row __a : List[str] = Matrix(self.row , another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: __a : Any = F'''Unsupported type given for another ({type(__A )})''' raise TypeError(__A ) def lowerCAmelCase__(self ): '''simple docstring''' __a : Union[str, Any] = Matrix(self.column , self.row ) for r in range(self.row ): for c in range(self.column ): __a : List[Any] = self[r, c] return result def lowerCAmelCase__(self , _lowercase , _lowercase ): '''simple docstring''' assert isinstance(__A , __A ) and isinstance(__A , __A ) assert self.row == self.column == u.row == v.row # u, v should be column vector assert u.column == v.column == 1 # u, v should be column vector # Calculate __a : List[Any] = v.transpose() __a : Dict = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertable return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def __magic_name__ ( ): # a^(-1) __a : List[str] = Matrix(3 , 3 , 0 ) for i in 
range(3 ): __a : Optional[int] = 1 print(F'''a^(-1) is {ainv}''' ) # u, v __a : List[Any] = Matrix(3 , 1 , 0 ) __a , __a , __a : Optional[Any] = 1, 2, -3 __a : Dict = Matrix(3 , 1 , 0 ) __a , __a , __a : Tuple = 4, -2, 5 print(F'''u is {u}''' ) print(F'''v is {v}''' ) print(F'''uv^T is {u * v.transpose()}''' ) # Sherman Morrison print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(UpperCamelCase__ , UpperCamelCase__ )}''' ) def __magic_name__ ( ): import doctest doctest.testmod() testa()
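For reference, the identity that `sherman_morrison` implements. Note that in this implementation `self` already holds A^(-1), so the method maps the inverse of A to the inverse of the rank-one update A + uv^T:

$$(A + uv^{\top})^{-1} = A^{-1} - \frac{A^{-1} u\, v^{\top} A^{-1}}{1 + v^{\top} A^{-1} u}$$

The `numerator_factor == 0` guard corresponds to the denominator vanishing, i.e. the updated matrix being singular.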
581
from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] , UpperCamelCase__: str , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Union[str, Any] ): for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})''' def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Any , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Any , UpperCamelCase__: List[str] , UpperCamelCase__: Tuple=True ): model.train() SCREAMING_SNAKE_CASE__ = model(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = F.mse_loss(UpperCamelCase__ , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple , UpperCamelCase__: List[Any]=False ): set_seed(42 ) SCREAMING_SNAKE_CASE__ = RegressionModel() SCREAMING_SNAKE_CASE__ = deepcopy(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = RegressionDataset(length=80 ) SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 ) model.to(accelerator.device ) if sched: SCREAMING_SNAKE_CASE__ = AdamW(params=model.parameters() , lr=1e-3 ) SCREAMING_SNAKE_CASE__ = AdamW(params=ddp_model.parameters() , lr=1e-3 ) SCREAMING_SNAKE_CASE__ = LambdaLR(UpperCamelCase__ , lr_lambda=lambda UpperCamelCase__ : epoch**0.6_5 ) SCREAMING_SNAKE_CASE__ = LambdaLR(UpperCamelCase__ , lr_lambda=lambda UpperCamelCase__ : epoch**0.6_5 ) # Make a copy of `model` if sched: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple ): # Test when on a single CPU or GPU that the context manager does nothing SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ ) # Use a single batch SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = next(iter(UpperCamelCase__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCamelCase__ , 
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: # Sync grads step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )] def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] ): # Test on distributed setup that context manager behaves properly SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ ) # Use a single batch SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = next(iter(UpperCamelCase__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: # Sync grads step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )] def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int=False , UpperCamelCase__: Union[str, Any]=False ): SCREAMING_SNAKE_CASE__ = Accelerator( split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ ) for iteration, batch in enumerate(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = batch.values() # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) ) 
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Do "gradient accumulation" (noop) with accelerator.accumulate(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(UpperCamelCase__ ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )] GradientState._reset_state() def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple=False , UpperCamelCase__: List[str]=False ): SCREAMING_SNAKE_CASE__ = Accelerator( split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ , UpperCamelCase__ ) for iteration, batch in enumerate(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = batch.values() # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(UpperCamelCase__ )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n''' SCREAMING_SNAKE_CASE__ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(UpperCamelCase__ )) if accelerator.num_processes > 1: check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) GradientState._reset_state() def SCREAMING_SNAKE_CASE__ ( ): SCREAMING_SNAKE_CASE__ = Accelerator() SCREAMING_SNAKE_CASE__ = 
RegressionDataset(length=80 ) SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 ) SCREAMING_SNAKE_CASE__ = RegressionDataset(length=96 ) SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(UpperCamelCase__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ ) if iteration < len(UpperCamelCase__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(UpperCamelCase__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ ) if batch_num < len(UpperCamelCase__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def SCREAMING_SNAKE_CASE__ ( ): SCREAMING_SNAKE_CASE__ = Accelerator() SCREAMING_SNAKE_CASE__ = accelerator.state if state.local_process_index == 0: print("""**Test `accumulate` gradient accumulation with dataloader break**""" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("""**Test NOOP `no_sync` context manager**""" ) test_noop_sync(UpperCamelCase__ ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("""**Test Distributed `no_sync` context manager**""" ) test_distributed_sync(UpperCamelCase__ ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation, """ , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation(UpperCamelCase__ , UpperCamelCase__ ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation_with_opt_and_scheduler(UpperCamelCase__ , UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
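The test file above exercises Accelerate's gradient-accumulation machinery. A minimal, runnable sketch of the `accelerator.accumulate` pattern it checks; the toy model and data here are illustrative placeholders, not objects from the tests:

import torch
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
dataloader = torch.utils.data.DataLoader(torch.randn(32, 4), batch_size=8)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for batch in dataloader:
    with accelerator.accumulate(model):  # gradients sync only every 2nd step
        loss = model(batch).pow(2).mean()
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()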
6
0
import logging import math from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union import torch from .tensor_utils import tensor_tree_map, tree_map def lowercase_ ( SCREAMING_SNAKE_CASE : Union[dict, list, tuple, torch.Tensor] ): """simple docstring""" snake_case__ : Tuple =[] if isinstance(UpperCamelCase__ , UpperCamelCase__ ): for v in tree.values(): shapes.extend(_fetch_dims(UpperCamelCase__ ) ) elif isinstance(UpperCamelCase__ , (list, tuple) ): for t in tree: shapes.extend(_fetch_dims(UpperCamelCase__ ) ) elif isinstance(UpperCamelCase__ , torch.Tensor ): shapes.append(tree.shape ) else: raise ValueError('''Not supported''' ) return shapes @torch.jit.ignore def lowercase_ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple[int, ...] ): """simple docstring""" snake_case__ : Optional[int] =[] for d in reversed(UpperCamelCase__ ): idx.append(flat_idx % d ) snake_case__ : Any =flat_idx // d return tuple(reversed(UpperCamelCase__ ) ) @torch.jit.ignore def lowercase_ ( SCREAMING_SNAKE_CASE : Sequence[int] , SCREAMING_SNAKE_CASE : Sequence[int] , SCREAMING_SNAKE_CASE : Sequence[int] , SCREAMING_SNAKE_CASE : Optional[Sequence[bool]] = None , SCREAMING_SNAKE_CASE : Optional[Sequence[bool]] = None , ): """simple docstring""" # start_edges and end_edges both indicate whether, starting from any given # dimension, the start/end index is at the top/bottom edge of the # corresponding tensor, modeled as a tree def reduce_edge_list(SCREAMING_SNAKE_CASE : List[bool] ) -> None: snake_case__ : List[str] =True for i in range(len(UpperCamelCase__ ) ): snake_case__ : Union[str, Any] =-1 * (i + 1) l[reversed_idx] &= tally snake_case__ : Any =l[reversed_idx] if start_edges is None: snake_case__ : List[str] =[s == 0 for s in start] reduce_edge_list(UpperCamelCase__ ) if end_edges is None: snake_case__ : Optional[Any] =[e == (d - 1) for e, d in zip(UpperCamelCase__ , UpperCamelCase__ )] reduce_edge_list(UpperCamelCase__ ) # Base cases. 
Either start/end are empty and we're done, or the final, # one-dimensional tensor can be simply sliced if len(UpperCamelCase__ ) == 0: return [()] elif len(UpperCamelCase__ ) == 1: return [(slice(start[0] , end[0] + 1 ),)] snake_case__ : List[Any] =[] snake_case__ : Union[str, Any] =[] # Dimensions common to start and end can be selected directly for s, e in zip(UpperCamelCase__ , UpperCamelCase__ ): if s == e: path_list.append(slice(UpperCamelCase__ , s + 1 ) ) else: break snake_case__ : Optional[Any] =tuple(UpperCamelCase__ ) snake_case__ : Any =len(UpperCamelCase__ ) # start == end, and we're done if divergence_idx == len(UpperCamelCase__ ): return [path] def upper() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None snake_case__ : Dict =start[divergence_idx] return tuple( path + (slice(UpperCamelCase__ , sdi + 1 ),) + s for s in _get_minimal_slice_set( start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) ) def lower() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None snake_case__ : Any =end[divergence_idx] return tuple( path + (slice(UpperCamelCase__ , edi + 1 ),) + s for s in _get_minimal_slice_set( [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) ) # If both start and end are at the edges of the subtree rooted at # divergence_idx, we can just select the whole subtree at once if start_edges[divergence_idx] and end_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) ) # If just start is at the edge, we can grab almost all of the subtree, # treating only the ragged bottom edge as an edge case elif start_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) ) slices.extend(lower() ) # Analogous to the previous case, but the top is ragged this time elif end_edges[divergence_idx]: slices.extend(upper() ) slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) ) # If both sides of the range are ragged, we need to handle both sides # separately. 
If there's contiguous meat in between them, we can index it # in one big chunk else: slices.extend(upper() ) snake_case__ : Optional[int] =end[divergence_idx] - start[divergence_idx] if middle_ground > 1: slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) ) slices.extend(lower() ) return slices @torch.jit.ignore def lowercase_ ( SCREAMING_SNAKE_CASE : torch.Tensor , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ): """simple docstring""" snake_case__ : Union[str, Any] =t.shape[:no_batch_dims] snake_case__ : Optional[int] =list(_flat_idx_to_idx(UpperCamelCase__ , UpperCamelCase__ ) ) # _get_minimal_slice_set is inclusive snake_case__ : str =list(_flat_idx_to_idx(flat_end - 1 , UpperCamelCase__ ) ) # Get an ordered list of slices to perform snake_case__ : int =_get_minimal_slice_set( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) snake_case__ : Optional[int] =[t[s] for s in slices] return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] ) def lowercase_ ( SCREAMING_SNAKE_CASE : Callable , SCREAMING_SNAKE_CASE : Dict[str, Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : Any = None , SCREAMING_SNAKE_CASE : bool = False , ): """simple docstring""" if not (len(UpperCamelCase__ ) > 0): raise ValueError('''Must provide at least one input''' ) snake_case__ : List[str] =[shape[:no_batch_dims] for shape in _fetch_dims(UpperCamelCase__ )] snake_case__ : Optional[Any] =tuple([max(UpperCamelCase__ ) for s in zip(*UpperCamelCase__ )] ) def _prep_inputs(SCREAMING_SNAKE_CASE : torch.Tensor ) -> torch.Tensor: if not low_mem: if not sum(t.shape[:no_batch_dims] ) == no_batch_dims: snake_case__ : List[Any] =t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) snake_case__ : Any =t.reshape(-1 , *t.shape[no_batch_dims:] ) else: snake_case__ : Optional[Any] =t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) return t snake_case__ : List[str] =tensor_tree_map(_prep_inputs , UpperCamelCase__ ) snake_case__ : Optional[Any] =None if _out is not None: snake_case__ : Union[str, Any] =tensor_tree_map(lambda SCREAMING_SNAKE_CASE : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out ) snake_case__ : List[str] =1 for d in orig_batch_dims: flat_batch_dim *= d snake_case__ : Tuple =flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0) def _select_chunk(SCREAMING_SNAKE_CASE : torch.Tensor ) -> torch.Tensor: return t[i : i + chunk_size] if t.shape[0] != 1 else t snake_case__ : Optional[Any] =0 snake_case__ : Dict =prepped_outputs for _ in range(UpperCamelCase__ ): # Chunk the input if not low_mem: snake_case__ : Dict =_select_chunk else: snake_case__ : Any =partial( _chunk_slice , flat_start=UpperCamelCase__ , flat_end=min(UpperCamelCase__ , i + chunk_size ) , no_batch_dims=len(UpperCamelCase__ ) , ) snake_case__ : List[Any] =tensor_tree_map(UpperCamelCase__ , UpperCamelCase__ ) # Run the layer on the chunk snake_case__ : Optional[int] =layer(**UpperCamelCase__ ) # Allocate space for the output if out is None: snake_case__ : Any =tensor_tree_map(lambda SCREAMING_SNAKE_CASE : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , UpperCamelCase__ ) # Put the chunk in its pre-allocated space if isinstance(UpperCamelCase__ , UpperCamelCase__ ): def assign(SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : dict ) -> None: for k, v in da.items(): if isinstance(UpperCamelCase__ , UpperCamelCase__ ): assign(UpperCamelCase__ , da[k] ) else: if 
_add_into_out: v[i : i + chunk_size] += da[k] else: snake_case__ : Any =da[k] assign(UpperCamelCase__ , UpperCamelCase__ ) elif isinstance(UpperCamelCase__ , UpperCamelCase__ ): for xa, xa in zip(UpperCamelCase__ , UpperCamelCase__ ): if _add_into_out: xa[i : i + chunk_size] += xa else: snake_case__ : List[Any] =xa elif isinstance(UpperCamelCase__ , torch.Tensor ): if _add_into_out: out[i : i + chunk_size] += output_chunk else: snake_case__ : int =output_chunk else: raise ValueError('''Not supported''' ) i += chunk_size snake_case__ : str =tensor_tree_map(lambda SCREAMING_SNAKE_CASE : t.view(orig_batch_dims + t.shape[1:] ) , UpperCamelCase__ ) return out class _lowerCAmelCase : """simple docstring""" def __init__( self , __SCREAMING_SNAKE_CASE = 512 , ) -> Tuple: """simple docstring""" snake_case__ : int =max_chunk_size snake_case__ : str =None snake_case__ : List[str] =None def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" logging.info('''Tuning chunk size...''' ) if min_chunk_size >= self.max_chunk_size: return min_chunk_size snake_case__ : List[Any] =[2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )] snake_case__ : Any =[c for c in candidates if c > min_chunk_size] snake_case__ : Tuple =[min_chunk_size] + candidates candidates[-1] += 4 def test_chunk_size(__SCREAMING_SNAKE_CASE ) -> bool: try: with torch.no_grad(): fn(*__A , chunk_size=__A ) return True except RuntimeError: return False snake_case__ : Any =0 snake_case__ : Tuple =len(__A ) - 1 while i > min_viable_chunk_size_index: snake_case__ : Optional[int] =test_chunk_size(candidates[i] ) if not viable: snake_case__ : int =(min_viable_chunk_size_index + i) // 2 else: snake_case__ : int =i snake_case__ : Dict =(i + len(__A ) - 1) // 2 return candidates[min_viable_chunk_size_index] def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> bool: """simple docstring""" snake_case__ : Dict =True for aa, aa in zip(__A , __A ): assert type(__A ) == type(__A ) if isinstance(__A , (list, tuple) ): consistent &= self._compare_arg_caches(__A , __A ) elif isinstance(__A , __A ): snake_case__ : List[Any] =[v for _, v in sorted(aa.items() , key=lambda __SCREAMING_SNAKE_CASE : x[0] )] snake_case__ : Union[str, Any] =[v for _, v in sorted(aa.items() , key=lambda __SCREAMING_SNAKE_CASE : x[0] )] consistent &= self._compare_arg_caches(__A , __A ) else: consistent &= aa == aa return consistent def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" snake_case__ : str =True snake_case__ : Any =tree_map(lambda __SCREAMING_SNAKE_CASE : a.shape if isinstance(__A , torch.Tensor ) else a , __A , __A ) if self.cached_arg_data is not None: # If args have changed shape/value, we need to re-tune assert len(self.cached_arg_data ) == len(__A ) snake_case__ : Optional[Any] =self._compare_arg_caches(self.cached_arg_data , __A ) else: # Otherwise, we can reuse the precomputed value snake_case__ : Union[str, Any] =False if not consistent: snake_case__ : Dict =self._determine_favorable_chunk_size( __A , __A , __A , ) snake_case__ : str =arg_data assert self.cached_chunk_size is not None return self.cached_chunk_size
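The utility above runs a layer over flattened batch dimensions in fixed-size chunks to bound peak memory, writing each chunk into a pre-allocated output and tuning the largest chunk size that fits. The helper below is a minimal, self-contained sketch of the same idea; it is a toy reduction written for illustration, not the API above (whose public names are obfuscated in this dump):

import torch


def chunked_apply(fn, x: torch.Tensor, chunk_size: int) -> torch.Tensor:
    # Apply `fn` to slices of `x` along dim 0 and stitch the results together.
    outs = [fn(x[i : i + chunk_size]) for i in range(0, x.shape[0], chunk_size)]
    return torch.cat(outs, dim=0)


layer = torch.nn.Linear(8, 4)
x = torch.randn(100, 8)
assert torch.allclose(layer(x), chunked_apply(layer, x, chunk_size=16), atol=1e-6)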
381
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = ["image_processor", "tokenizer"] lowerCamelCase_ = "AutoImageProcessor" lowerCamelCase_ = "AutoTokenizer" def __init__( self :Optional[int] , __A :Optional[Any] , __A :Dict ) -> Dict: """simple docstring""" super().__init__(__A , __A ) SCREAMING_SNAKE_CASE__ = self.image_processor def __call__( self :int , __A :str=None , __A :int=None , __A :Union[str, Any]=None , **__A :str ) -> Optional[Any]: """simple docstring""" if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: SCREAMING_SNAKE_CASE__ = self.tokenizer(__A , return_tensors=__A , **__A ) if images is not None: SCREAMING_SNAKE_CASE__ = self.image_processor(__A , return_tensors=__A , **__A ) if text is not None and images is not None: SCREAMING_SNAKE_CASE__ = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__A ) , tensor_type=__A ) def _snake_case ( self :str , *__A :List[str] , **__A :List[str] ) -> List[Any]: """simple docstring""" return self.tokenizer.batch_decode(*__A , **__A ) def _snake_case ( self :List[str] , *__A :Any , **__A :Any ) -> Tuple: """simple docstring""" return self.tokenizer.decode(*__A , **__A ) @property def _snake_case ( self :Dict ) -> List[Any]: """simple docstring""" return ["input_ids", "attention_mask", "pixel_values"]
6
0
"""simple docstring""" # We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation import warnings from .state import AcceleratorState, GradientState warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler") class _snake_case : '''simple docstring''' def __init__( self : str , snake_case : str , snake_case : List[Any] , snake_case : bool = True , snake_case : bool = False ): UpperCAmelCase_ :List[str] = scheduler UpperCAmelCase_ :Tuple = optimizers if isinstance(__A , (list, tuple) ) else [optimizers] UpperCAmelCase_ :Union[str, Any] = split_batches UpperCAmelCase_ :int = step_with_optimizer UpperCAmelCase_ :int = GradientState() def snake_case_ ( self : List[Any] , *snake_case : Dict , **snake_case : Any ): if not self.step_with_optimizer: # No link between scheduler and optimizer -> just step self.scheduler.step(*__A , **__A ) return # Otherwise, first make sure the optimizer was stepped. if not self.gradient_state.sync_gradients: if self.gradient_state.adjust_scheduler: self.scheduler._step_count += 1 return for opt in self.optimizers: if opt.step_was_skipped: return if self.split_batches: # Split batches -> the training dataloader batch size is not changed so one step per training step self.scheduler.step(*__A , **__A ) else: # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do # num_processes steps per training step UpperCAmelCase_ :Tuple = AcceleratorState().num_processes for _ in range(__A ): # Special case when using OneCycle and `drop_last` was not used if hasattr(self.scheduler , '''total_steps''' ): if self.scheduler._step_count <= self.scheduler.total_steps: self.scheduler.step(*__A , **__A ) else: self.scheduler.step(*__A , **__A ) def snake_case_ ( self : Tuple ): return self.scheduler.get_last_lr() def snake_case_ ( self : Union[str, Any] ): return self.scheduler.state_dict() def snake_case_ ( self : Tuple , snake_case : Any ): self.scheduler.load_state_dict(__A ) def snake_case_ ( self : Optional[int] ): return self.scheduler.get_lr() def snake_case_ ( self : Any , *snake_case : Optional[int] , **snake_case : Optional[int] ): return self.scheduler.print_lr(*__A , **__A )
608
def find_min(arr: list[int]) -> int:
    """Return the minimum difference between the sums of the two subsets in
    the best two-way partition of `arr` (classic subset-sum DP)."""
    n = len(arr)
    s = sum(arr)
    # dp[i][j] is True iff some subset of the first i elements sums to j
    dp = [[False for _ in range(s + 1)] for _ in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = True  # the empty subset always sums to 0
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # skip element i (the original's dp[i][j - 1] was a bug)
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]  # take element i
    diff = s
    for j in range(s // 2, -1, -1):
        if dp[n][j]:
            diff = s - 2 * j
            break
    return diff
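A quick check against the cleaned-up `find_min` above; the expected values follow directly from the definition:

# [1, 6, 11, 5] splits into {1, 5, 6} = 12 and {11} = 11, so the gap is 1.
assert find_min([1, 6, 11, 5]) == 1
# A single element ends up alone against the empty subset.
assert find_min([2]) == 2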
6
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = { 'microsoft/table-transformer-detection': ( 'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json' ), } class SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ ): """simple docstring""" __snake_case : Optional[Any] = """table-transformer""" __snake_case : Optional[Any] = ["""past_key_values"""] __snake_case : Union[str, Any] = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self :Any , __lowercase :int=True , __lowercase :List[str]=None , __lowercase :Tuple=3 , __lowercase :Tuple=100 , __lowercase :Union[str, Any]=6 , __lowercase :Tuple=2048 , __lowercase :List[str]=8 , __lowercase :str=6 , __lowercase :List[Any]=2048 , __lowercase :Dict=8 , __lowercase :str=0.0 , __lowercase :List[str]=0.0 , __lowercase :Tuple=True , __lowercase :Tuple="relu" , __lowercase :Dict=256 , __lowercase :str=0.1 , __lowercase :str=0.0 , __lowercase :int=0.0 , __lowercase :Union[str, Any]=0.02 , __lowercase :List[str]=1.0 , __lowercase :List[Any]=False , __lowercase :List[Any]="sine" , __lowercase :Dict="resnet50" , __lowercase :str=True , __lowercase :str=False , __lowercase :Tuple=1 , __lowercase :str=5 , __lowercase :Optional[int]=2 , __lowercase :Union[str, Any]=1 , __lowercase :Union[str, Any]=1 , __lowercase :Union[str, Any]=5 , __lowercase :List[Any]=2 , __lowercase :Tuple=0.1 , **__lowercase :str , ): if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.''' ) __lowerCamelCase : Any =CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(__A , __A ): __lowerCamelCase : List[Any] =backbone_config.get('''model_type''' ) __lowerCamelCase : Optional[Any] =CONFIG_MAPPING[backbone_model_type] __lowerCamelCase : List[str] =config_class.from_dict(__A ) # set timm attributes to None __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str =None, None, None __lowerCamelCase : Dict =use_timm_backbone __lowerCamelCase : Union[str, Any] =backbone_config __lowerCamelCase : Optional[int] =num_channels __lowerCamelCase : int =num_queries __lowerCamelCase : Tuple =d_model __lowerCamelCase : int =encoder_ffn_dim __lowerCamelCase : List[Any] =encoder_layers __lowerCamelCase : List[Any] =encoder_attention_heads __lowerCamelCase : str =decoder_ffn_dim __lowerCamelCase : str =decoder_layers __lowerCamelCase : Tuple =decoder_attention_heads __lowerCamelCase : Any =dropout __lowerCamelCase : Optional[int] =attention_dropout __lowerCamelCase : Union[str, Any] =activation_dropout __lowerCamelCase : Any =activation_function __lowerCamelCase : Union[str, Any] =init_std __lowerCamelCase : Optional[int] =init_xavier_std __lowerCamelCase : List[str] =encoder_layerdrop __lowerCamelCase : int =decoder_layerdrop __lowerCamelCase : int =encoder_layers __lowerCamelCase : Tuple =auxiliary_loss __lowerCamelCase : List[str] =position_embedding_type __lowerCamelCase : List[str] =backbone __lowerCamelCase : List[str] =use_pretrained_backbone __lowerCamelCase : Dict =dilation # Hungarian matcher __lowerCamelCase : Dict =class_cost __lowerCamelCase : Dict =bbox_cost __lowerCamelCase : Union[str, Any] =giou_cost # Loss coefficients __lowerCamelCase : str =mask_loss_coefficient __lowerCamelCase : Optional[int] =dice_loss_coefficient __lowerCamelCase : Any =bbox_loss_coefficient __lowerCamelCase : List[str] =giou_loss_coefficient __lowerCamelCase : Dict =eos_coefficient super().__init__(is_encoder_decoder=__A , **__A ) @property def __lowercase ( self :Tuple ): return self.encoder_attention_heads @property def __lowercase ( self :Optional[Any] ): return self.d_model class SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ ): """simple docstring""" __snake_case : List[Any] = version.parse("""1.11""" ) @property def __lowercase ( self :Optional[int] ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''pixel_mask''', {0: '''batch'''}), ] ) @property def __lowercase ( self :int ): return 1e-5 @property def __lowercase ( self :Optional[int] ): return 12
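A hedged instantiation sketch; the `transformers` class names used here are the upstream ones this config file corresponds to, and are an assumption about this dump:

from transformers import TableTransformerConfig, TableTransformerModel

config = TableTransformerConfig(d_model=256, encoder_layers=6, decoder_layers=6)
model = TableTransformerModel(config)  # randomly initialized, not pretrained
# attribute_map routes the common name onto encoder_attention_heads:
print(config.num_attention_heads)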
179
def kinetic_energy(mass: float, velocity: float) -> float:
    """Return the kinetic energy of a body: KE = (1/2) * m * v^2.

    >>> kinetic_energy(10, 10)
    500.0
    """
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
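The formula the function encodes, for reference:

$$E_k = \tfrac{1}{2} m v^{2}$$

Since v^2 is non-negative anyway, the `abs(velocity)` calls do not change the result; they only make explicit that the sign of the velocity is irrelevant.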
6
0
from __future__ import annotations


def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Return the knight moves from `position` that stay on an n x n board."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for candidate in positions:
        y_test, x_test = candidate
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(candidate)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Return True when every square has been visited."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(
    board: list[list[int]], position: tuple[int, int], curr: int
) -> bool:
    """Try to extend a partial tour from `position`; backtrack on failure."""
    if is_complete(board):
        return True
    for next_position in get_valid_pos(position, len(board)):
        y, x = next_position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, next_position, curr + 1):
                return True
            board[y][x] = 0  # undo the move
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an n x n board, trying every start square."""
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open knight tour cannot be performed on a board of size {n}"
    raise ValueError(msg)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
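A quick sanity check, assuming the cleaned-up functions above; a 5x5 board is known to admit an open knight's tour:

# A 1x1 board is a trivial one-square tour.
assert open_knight_tour(1) == [[1]]
board = open_knight_tour(5)
# Every square is visited exactly once, numbered 1..25.
assert sorted(v for row in board for v in row) == list(range(1, 26))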
428
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase = logging.get_logger(__name__) class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = "encoder-decoder" lowerCamelCase_ = True def __init__( self :Optional[int] , **__A :str ) -> int: """simple docstring""" super().__init__(**__A ) assert ( "encoder" in kwargs and "decoder" in kwargs ), "Config has to be initialized with encoder and decoder config" SCREAMING_SNAKE_CASE__ = kwargs.pop("""encoder""" ) SCREAMING_SNAKE_CASE__ = encoder_config.pop("""model_type""" ) SCREAMING_SNAKE_CASE__ = kwargs.pop("""decoder""" ) SCREAMING_SNAKE_CASE__ = decoder_config.pop("""model_type""" ) from ..auto.configuration_auto import AutoConfig SCREAMING_SNAKE_CASE__ = AutoConfig.for_model(__A , **__A ) SCREAMING_SNAKE_CASE__ = AutoConfig.for_model(__A , **__A ) SCREAMING_SNAKE_CASE__ = True @classmethod def _snake_case ( cls :str , __A :PretrainedConfig , __A :PretrainedConfig , **__A :List[str] ) -> PretrainedConfig: """simple docstring""" logger.info("""Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" ) SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__A ) def _snake_case ( self :str ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE__ = self.encoder.to_dict() SCREAMING_SNAKE_CASE__ = self.decoder.to_dict() SCREAMING_SNAKE_CASE__ = self.__class__.model_type return output
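A hedged sketch of composing this config. In upstream `transformers` the classmethod shown above is named `from_encoder_decoder_configs`; the names below follow that assumption:

from transformers import BertConfig, EncoderDecoderConfig

encoder = BertConfig()
decoder = BertConfig()
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder, decoder)
# The classmethod flips these flags on the decoder side, as logged above.
assert config.decoder.is_decoder and config.decoder.add_cross_attention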
6
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = { """shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""", # See all Nat models at https://huggingface.co/models?filter=nat } class UpperCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" __UpperCAmelCase : str = '''nat''' __UpperCAmelCase : List[str] = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self : List[Any] ,_a : Union[str, Any]=4 ,_a : Dict=3 ,_a : str=64 ,_a : Optional[int]=[3, 4, 6, 5] ,_a : Tuple=[2, 4, 8, 16] ,_a : List[str]=7 ,_a : Optional[Any]=3.0 ,_a : Tuple=True ,_a : Tuple=0.0 ,_a : Dict=0.0 ,_a : Tuple=0.1 ,_a : str="gelu" ,_a : Tuple=0.02 ,_a : str=1E-5 ,_a : Tuple=0.0 ,_a : List[str]=None ,_a : Optional[Any]=None ,**_a : Optional[Any] ,): '''simple docstring''' super().__init__(**__A ) _a : Tuple = patch_size _a : Tuple = num_channels _a : Tuple = embed_dim _a : Union[str, Any] = depths _a : Optional[int] = len(__A ) _a : List[str] = num_heads _a : List[str] = kernel_size _a : Tuple = mlp_ratio _a : str = qkv_bias _a : int = hidden_dropout_prob _a : Optional[Any] = attention_probs_dropout_prob _a : List[str] = drop_path_rate _a : Union[str, Any] = hidden_act _a : List[Any] = layer_norm_eps _a : Tuple = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _a : Tuple = int(embed_dim * 2 ** (len(__A ) - 1) ) _a : Optional[Any] = layer_scale_init_value _a : Optional[Any] = ['stem'] + [F"""stage{idx}""" for idx in range(1 ,len(__A ) + 1 )] _a, _a : List[Any] = get_aligned_output_features_output_indices( out_features=__A ,out_indices=__A ,stage_names=self.stage_names )
229
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Value from .base import TaskTemplate @dataclass(frozen=UpperCamelCase__ ) class UpperCamelCase_ ( UpperCamelCase__ ): # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization lowerCamelCase_ = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} ) lowerCamelCase_ = Features({"text": Value("string" )} ) lowerCamelCase_ = Features({"labels": ClassLabel} ) lowerCamelCase_ = "text" lowerCamelCase_ = "labels" def _snake_case ( self :Any , __A :Dict ) -> Optional[Any]: """simple docstring""" if self.label_column not in features: raise ValueError(f'''Column {self.label_column} is not present in features.''' ) if not isinstance(features[self.label_column] , __A ): raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' ) SCREAMING_SNAKE_CASE__ = copy.deepcopy(self ) SCREAMING_SNAKE_CASE__ = self.label_schema.copy() SCREAMING_SNAKE_CASE__ = features[self.label_column] SCREAMING_SNAKE_CASE__ = label_schema return task_template @property def _snake_case ( self :str ) -> Dict[str, str]: """simple docstring""" return { self.text_column: "text", self.label_column: "labels", }
6
0
# Logistic Regression from scratch

# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        # predicting the value of probability from the logistic regression algorithm
        return sigmoid_function(np.dot(x, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (xa_min, xa_max) = (x[:, 0].min(), x[:, 0].max())
    (xb_min, xb_max) = (x[:, 1].min(), x[:, 1].max())
    (xxa, xxb) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xb_min, xb_max))
    grid = np.c_[xxa.ravel(), xxb.ravel()]
    probs = predict_prob(grid).reshape(xxa.shape)
    plt.contour(xxa, xxb, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
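For reference, the quantities the training loop above computes, written out (h is the vector of predicted probabilities, m = y.size):

$$\sigma(z) = \frac{1}{1 + e^{-z}}, \qquad J(\theta) = -\frac{1}{m}\sum_{i=1}^{m}\Big[y_i \log h_i + (1 - y_i)\log(1 - h_i)\Big], \qquad \theta \leftarrow \theta - \frac{\alpha}{m}\, X^{\top}(h - y)$$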
697
import argparse import torch from datasets import load_dataset from donut import DonutModel from transformers import ( DonutImageProcessor, DonutProcessor, DonutSwinConfig, DonutSwinModel, MBartConfig, MBartForCausalLM, VisionEncoderDecoderModel, XLMRobertaTokenizerFast, ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ): SCREAMING_SNAKE_CASE__ = model.config SCREAMING_SNAKE_CASE__ = DonutSwinConfig( image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , ) SCREAMING_SNAKE_CASE__ = MBartConfig( is_decoder=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , add_cross_attention=UpperCamelCase__ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len( model.decoder.tokenizer ) , scale_embedding=UpperCamelCase__ , add_final_layer_norm=UpperCamelCase__ , ) return encoder_config, decoder_config def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ): if "encoder.model" in name: SCREAMING_SNAKE_CASE__ = name.replace("""encoder.model""" , """encoder""" ) if "decoder.model" in name: SCREAMING_SNAKE_CASE__ = name.replace("""decoder.model""" , """decoder""" ) if "patch_embed.proj" in name: SCREAMING_SNAKE_CASE__ = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: SCREAMING_SNAKE_CASE__ = name.replace("""patch_embed.norm""" , """embeddings.norm""" ) if name.startswith("""encoder""" ): if "layers" in name: SCREAMING_SNAKE_CASE__ = """encoder.""" + name if "attn.proj" in name: SCREAMING_SNAKE_CASE__ = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name and "mask" not in name: SCREAMING_SNAKE_CASE__ = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: SCREAMING_SNAKE_CASE__ = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: SCREAMING_SNAKE_CASE__ = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: SCREAMING_SNAKE_CASE__ = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: SCREAMING_SNAKE_CASE__ = name.replace("""mlp.fc2""" , """output.dense""" ) if name == "encoder.norm.weight": SCREAMING_SNAKE_CASE__ = """encoder.layernorm.weight""" if name == "encoder.norm.bias": SCREAMING_SNAKE_CASE__ = """encoder.layernorm.bias""" return name def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: Optional[int] ): for key in orig_state_dict.copy().keys(): SCREAMING_SNAKE_CASE__ = orig_state_dict.pop(UpperCamelCase__ ) if "qkv" in key: SCREAMING_SNAKE_CASE__ = key.split(""".""" ) SCREAMING_SNAKE_CASE__ = int(key_split[3] ) SCREAMING_SNAKE_CASE__ = int(key_split[5] ) SCREAMING_SNAKE_CASE__ = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: SCREAMING_SNAKE_CASE__ = val[:dim, :] SCREAMING_SNAKE_CASE__ = val[dim : dim * 2, :] SCREAMING_SNAKE_CASE__ = val[-dim:, :] else: SCREAMING_SNAKE_CASE__ = val[:dim] SCREAMING_SNAKE_CASE__ = val[dim : dim * 2] SCREAMING_SNAKE_CASE__ = val[-dim:] elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]: # HuggingFace implementation doesn't use attn_mask buffer # and model doesn't use final LayerNorms for the encoder pass else: SCREAMING_SNAKE_CASE__ = val return orig_state_dict def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] , UpperCamelCase__: int=None , UpperCamelCase__: 
str=False ): # load original model SCREAMING_SNAKE_CASE__ = DonutModel.from_pretrained(UpperCamelCase__ ).eval() # load HuggingFace model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_configs(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = DonutSwinModel(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = MBartForCausalLM(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = VisionEncoderDecoderModel(encoder=UpperCamelCase__ , decoder=UpperCamelCase__ ) model.eval() SCREAMING_SNAKE_CASE__ = original_model.state_dict() SCREAMING_SNAKE_CASE__ = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ ) model.load_state_dict(UpperCamelCase__ ) # verify results on scanned document SCREAMING_SNAKE_CASE__ = load_dataset("""hf-internal-testing/example-documents""" ) SCREAMING_SNAKE_CASE__ = dataset["""test"""][0]["""image"""].convert("""RGB""" ) SCREAMING_SNAKE_CASE__ = XLMRobertaTokenizerFast.from_pretrained(UpperCamelCase__ , from_slow=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = DonutImageProcessor( do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] ) SCREAMING_SNAKE_CASE__ = DonutProcessor(UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = processor(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values if model_name == "naver-clova-ix/donut-base-finetuned-docvqa": SCREAMING_SNAKE_CASE__ = """<s_docvqa><s_question>{user_input}</s_question><s_answer>""" SCREAMING_SNAKE_CASE__ = """When is the coffee break?""" SCREAMING_SNAKE_CASE__ = task_prompt.replace("""{user_input}""" , UpperCamelCase__ ) elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip": SCREAMING_SNAKE_CASE__ = """<s_rvlcdip>""" elif model_name in [ "naver-clova-ix/donut-base-finetuned-cord-v1", "naver-clova-ix/donut-base-finetuned-cord-v1-2560", ]: SCREAMING_SNAKE_CASE__ = """<s_cord>""" elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2": SCREAMING_SNAKE_CASE__ = """s_cord-v2>""" elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket": SCREAMING_SNAKE_CASE__ = """<s_zhtrainticket>""" elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]: # use a random prompt SCREAMING_SNAKE_CASE__ = """hello world""" else: raise ValueError("""Model name not supported""" ) SCREAMING_SNAKE_CASE__ = original_model.decoder.tokenizer(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors="""pt""" )[ """input_ids""" ] SCREAMING_SNAKE_CASE__ = original_model.encoder.model.patch_embed(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model.encoder.embeddings(UpperCamelCase__ ) assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) # verify encoder hidden states SCREAMING_SNAKE_CASE__ = original_model.encoder(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = model.encoder(UpperCamelCase__ ).last_hidden_state assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-2 ) # verify decoder hidden states SCREAMING_SNAKE_CASE__ = original_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).logits SCREAMING_SNAKE_CASE__ = model(UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ ).logits assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'''Saving model and processor to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCamelCase__ ) processor.save_pretrained(UpperCamelCase__ ) if push_to_hub: model.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update 
model""" ) processor.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" ) if __name__ == "__main__": _lowerCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='naver-clova-ix/donut-base-finetuned-docvqa', required=False, type=str, help='Name of the original model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, required=False, type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model and processor to the 🤗 hub.', ) _lowerCamelCase = parser.parse_args() convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
6
0
"""simple docstring""" from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time UpperCAmelCase : List[Any] = Lock() def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> str: '''simple docstring''' global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 10 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(UpperCamelCase__ ) process_lock.release() # receive your right neighbor's value process_lock.acquire() lowercase_ = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left lowercase_ = min(UpperCamelCase__ , UpperCamelCase__ ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(UpperCamelCase__ ) process_lock.release() # receive your left neighbor's value process_lock.acquire() lowercase_ = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right lowercase_ = max(UpperCamelCase__ , UpperCamelCase__ ) # after all swaps are performed, send the values back to main result_pipe[1].send(UpperCamelCase__ ) def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> int: '''simple docstring''' lowercase_ = [] lowercase_ = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop lowercase_ = Pipe() lowercase_ = Pipe() process_array_.append( Process( target=UpperCamelCase__ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) lowercase_ = temp_rs lowercase_ = temp_rr for i in range(1 , len(UpperCamelCase__ ) - 1 ): lowercase_ = Pipe() lowercase_ = Pipe() process_array_.append( Process( target=UpperCamelCase__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) lowercase_ = temp_rs lowercase_ = temp_rr process_array_.append( Process( target=UpperCamelCase__ , args=( len(UpperCamelCase__ ) - 1, arr[len(UpperCamelCase__ ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(UpperCamelCase__ ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(UpperCamelCase__ ) ): lowercase_ = result_pipe[p][0].recv() process_array_[p].join() return arr def _SCREAMING_SNAKE_CASE () -> Union[str, Any]: '''simple docstring''' lowercase_ = list(range(10 , 0 , -1 ) ) print("""Initial List""" ) print(*UpperCamelCase__ ) lowercase_ = odd_even_transposition(UpperCamelCase__ ) print("""Sorted List\n""" ) print(*UpperCamelCase__ ) if __name__ == "__main__": main()
567
import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class UpperCamelCase_ ( unittest.TestCase ): def _snake_case ( self :Union[str, Any] ) -> List[str]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self :Any ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = StableDiffusionKDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" ) SCREAMING_SNAKE_CASE__ = sd_pipe.to(__A ) sd_pipe.set_progress_bar_config(disable=__A ) sd_pipe.set_scheduler("""sample_euler""" ) SCREAMING_SNAKE_CASE__ = """A painting of a squirrel eating a burger""" SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ = sd_pipe([prompt] , generator=__A , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" ) SCREAMING_SNAKE_CASE__ = output.images SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE__ = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _snake_case ( self :str ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) SCREAMING_SNAKE_CASE__ = sd_pipe.to(__A ) sd_pipe.set_progress_bar_config(disable=__A ) sd_pipe.set_scheduler("""sample_euler""" ) SCREAMING_SNAKE_CASE__ = """A painting of a squirrel eating a burger""" SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ = sd_pipe([prompt] , generator=__A , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" ) SCREAMING_SNAKE_CASE__ = output.images SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE__ = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1 def _snake_case ( self :Tuple ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) SCREAMING_SNAKE_CASE__ = sd_pipe.to(__A ) sd_pipe.set_progress_bar_config(disable=__A ) sd_pipe.set_scheduler("""sample_dpmpp_2m""" ) SCREAMING_SNAKE_CASE__ = """A painting of a squirrel eating a burger""" SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ = sd_pipe( [prompt] , generator=__A , guidance_scale=7.5 , num_inference_steps=15 , output_type="""np""" , use_karras_sigmas=__A , ) SCREAMING_SNAKE_CASE__ = output.images SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE__ = np.array( [0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
6
0
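For reference, the masked code cell in the record above parallelizes odd-even transposition sort with one process per list element. A minimal single-process sketch of the same algorithm (the function name and the final check are illustrative, not part of the dataset row):

def odd_even_transposition_serial(arr: list) -> list:
    # n phases suffice: even phases compare pairs starting at index 0,
    # odd phases compare pairs starting at index 1
    n = len(arr)
    for phase in range(n):
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

assert odd_even_transposition_serial(list(range(10, 0, -1))) == list(range(1, 11))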
import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class _UpperCamelCase (UpperCamelCase__ ): snake_case_ = ["""image_processor""", """tokenizer"""] snake_case_ = """BlipImageProcessor""" snake_case_ = """AutoTokenizer""" def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Any: super().__init__(__A , __A ) # add QFormer tokenizer __lowerCAmelCase = qformer_tokenizer def __call__( self , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = 0 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = True , __UpperCamelCase = None , **__UpperCamelCase , )-> BatchFeature: if images is None and text is None: raise ValueError("You have to specify at least images or text." ) __lowerCAmelCase = BatchFeature() if text is not None: __lowerCAmelCase = self.tokenizer( text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , ) encoding.update(__A ) __lowerCAmelCase = self.qformer_tokenizer( text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , ) __lowerCAmelCase = qformer_text_encoding.pop("input_ids" ) __lowerCAmelCase = qformer_text_encoding.pop("attention_mask" ) if images is not None: __lowerCAmelCase = self.image_processor(__A , return_tensors=__A ) encoding.update(__A ) return encoding def __UpperCAmelCase ( self , *__UpperCamelCase , **__UpperCamelCase )-> List[str]: return self.tokenizer.batch_decode(*__A , **__A ) def __UpperCAmelCase ( self , *__UpperCamelCase , **__UpperCamelCase )-> int: return self.tokenizer.decode(*__A , **__A ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def __UpperCAmelCase ( self )-> Optional[Any]: __lowerCAmelCase = self.tokenizer.model_input_names __lowerCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def __UpperCAmelCase ( self , __UpperCamelCase , **__UpperCamelCase )-> str: if os.path.isfile(__A ): raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" ) os.makedirs(__A , exist_ok=__A ) __lowerCAmelCase = os.path.join(__A , "qformer_tokenizer" ) self.qformer_tokenizer.save_pretrained(__A ) return super().save_pretrained(__A , **__A ) @classmethod def __UpperCAmelCase ( cls , __UpperCamelCase , **__UpperCamelCase )-> Optional[int]: __lowerCAmelCase = AutoTokenizer.from_pretrained(__A , subfolder="qformer_tokenizer" ) __lowerCAmelCase = 
cls._get_arguments_from_pretrained(__A , **__A ) args.append(__A ) return cls(*__A )
367
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int = 600_851_475_143 ):
    try:
        SCREAMING_SNAKE_CASE__ = int(UpperCamelCase__ )
    except (TypeError, ValueError):
        raise TypeError("""Parameter n must be int or castable to int.""" )
    if n <= 0:
        raise ValueError("""Parameter n must be greater than or equal to one.""" )
    SCREAMING_SNAKE_CASE__ = 1
    SCREAMING_SNAKE_CASE__ = 2
    while i * i <= n:
        while n % i == 0:
            SCREAMING_SNAKE_CASE__ = i
            n //= i
        i += 1
    if n > 1:
        SCREAMING_SNAKE_CASE__ = n
    return int(UpperCamelCase__ )


if __name__ == "__main__":
    print(F'''{solution() = }''')
6
0
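The style-context cell above solves Project Euler #3 (largest prime factor) by trial division. A de-obfuscated sketch of the same routine with quick sanity checks; the name largest_prime_factor stands in for the masked function and is not from the row:

def largest_prime_factor(n: int) -> int:
    # divide out each factor i completely; whatever remains above 1 is prime
    i, last = 2, 1
    while i * i <= n:
        while n % i == 0:
            last, n = i, n // i
        i += 1
    return n if n > 1 else last

assert largest_prime_factor(13195) == 29
assert largest_prime_factor(600_851_475_143) == 6857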
from __future__ import annotations


class lowercase :
    """simple docstring"""

    def __init__( self : List[str] , a_ : str , a_ : str ):
        """simple docstring"""
        lowerCamelCase__ , lowerCamelCase__ = text, pattern
        lowerCamelCase__ , lowerCamelCase__ = len(__A ), len(__A )

    def _UpperCamelCase ( self : Optional[int] , a_ : str ):
        """simple docstring"""
        for i in range(self.patLen - 1 , -1 , -1 ):
            if char == self.pattern[i]:
                return i
        return -1

    def _UpperCamelCase ( self : Optional[Any] , a_ : int ):
        """simple docstring"""
        for i in range(self.patLen - 1 , -1 , -1 ):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def _UpperCamelCase ( self : List[str] ):
        """simple docstring"""
        lowerCamelCase__ = []
        for i in range(self.textLen - self.patLen + 1 ):
            lowerCamelCase__ = self.mismatch_in_text(__A )
            if mismatch_index == -1:
                positions.append(__A )
            else:
                lowerCamelCase__ = self.match_in_pattern(self.text[mismatch_index] )
                lowerCamelCase__ = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


a__ : List[str] = """ABAABA"""
a__ : Tuple = """AB"""
a__ : str = BoyerMooreSearch(text, pattern)
a__ : str = bms.bad_character_heuristic()

if len(positions) == 0:
    print("""No match found""")
else:
    print("""Pattern found in following positions: """)
    print(positions)
165
import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class UpperCamelCase_ ( unittest.TestCase ): def _snake_case ( self :Tuple ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """safety_checker/pytorch_model.bin""", """safety_checker/model.safetensors""", """vae/diffusion_pytorch_model.bin""", """vae/diffusion_pytorch_model.safetensors""", """text_encoder/pytorch_model.bin""", """text_encoder/model.safetensors""", """unet/diffusion_pytorch_model.bin""", """unet/diffusion_pytorch_model.safetensors""", ] self.assertTrue(is_safetensors_compatible(__A ) ) def _snake_case ( self :List[str] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """unet/diffusion_pytorch_model.bin""", """unet/diffusion_pytorch_model.safetensors""", ] self.assertTrue(is_safetensors_compatible(__A ) ) def _snake_case ( self :Tuple ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """safety_checker/pytorch_model.bin""", """safety_checker/model.safetensors""", """vae/diffusion_pytorch_model.bin""", """vae/diffusion_pytorch_model.safetensors""", """text_encoder/pytorch_model.bin""", """text_encoder/model.safetensors""", """unet/diffusion_pytorch_model.bin""", # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(__A ) ) def _snake_case ( self :Union[str, Any] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """text_encoder/pytorch_model.bin""", """text_encoder/model.safetensors""", ] self.assertTrue(is_safetensors_compatible(__A ) ) def _snake_case ( self :Optional[Any] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """safety_checker/pytorch_model.bin""", """safety_checker/model.safetensors""", """vae/diffusion_pytorch_model.bin""", """vae/diffusion_pytorch_model.safetensors""", """text_encoder/pytorch_model.bin""", # Removed: 'text_encoder/model.safetensors', """unet/diffusion_pytorch_model.bin""", """unet/diffusion_pytorch_model.safetensors""", ] self.assertFalse(is_safetensors_compatible(__A ) ) def _snake_case ( self :Tuple ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """safety_checker/pytorch_model.fp16.bin""", """safety_checker/model.fp16.safetensors""", """vae/diffusion_pytorch_model.fp16.bin""", """vae/diffusion_pytorch_model.fp16.safetensors""", """text_encoder/pytorch_model.fp16.bin""", """text_encoder/model.fp16.safetensors""", """unet/diffusion_pytorch_model.fp16.bin""", """unet/diffusion_pytorch_model.fp16.safetensors""", ] SCREAMING_SNAKE_CASE__ = """fp16""" self.assertTrue(is_safetensors_compatible(__A , variant=__A ) ) def _snake_case ( self :Any ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """unet/diffusion_pytorch_model.fp16.bin""", """unet/diffusion_pytorch_model.fp16.safetensors""", ] SCREAMING_SNAKE_CASE__ = """fp16""" self.assertTrue(is_safetensors_compatible(__A , variant=__A ) ) def _snake_case ( self :str ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """unet/diffusion_pytorch_model.bin""", """unet/diffusion_pytorch_model.safetensors""", ] SCREAMING_SNAKE_CASE__ = """fp16""" self.assertTrue(is_safetensors_compatible(__A , variant=__A ) ) def _snake_case ( self :List[Any] ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """safety_checker/pytorch_model.fp16.bin""", """safety_checker/model.fp16.safetensors""", """vae/diffusion_pytorch_model.fp16.bin""", """vae/diffusion_pytorch_model.fp16.safetensors""", """text_encoder/pytorch_model.fp16.bin""", 
"""text_encoder/model.fp16.safetensors""", """unet/diffusion_pytorch_model.fp16.bin""", # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] SCREAMING_SNAKE_CASE__ = """fp16""" self.assertFalse(is_safetensors_compatible(__A , variant=__A ) ) def _snake_case ( self :str ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """text_encoder/pytorch_model.fp16.bin""", """text_encoder/model.fp16.safetensors""", ] SCREAMING_SNAKE_CASE__ = """fp16""" self.assertTrue(is_safetensors_compatible(__A , variant=__A ) ) def _snake_case ( self :Optional[int] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """text_encoder/pytorch_model.bin""", """text_encoder/model.safetensors""", ] SCREAMING_SNAKE_CASE__ = """fp16""" self.assertTrue(is_safetensors_compatible(__A , variant=__A ) ) def _snake_case ( self :Union[str, Any] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """safety_checker/pytorch_model.fp16.bin""", """safety_checker/model.fp16.safetensors""", """vae/diffusion_pytorch_model.fp16.bin""", """vae/diffusion_pytorch_model.fp16.safetensors""", """text_encoder/pytorch_model.fp16.bin""", # 'text_encoder/model.fp16.safetensors', """unet/diffusion_pytorch_model.fp16.bin""", """unet/diffusion_pytorch_model.fp16.safetensors""", ] SCREAMING_SNAKE_CASE__ = """fp16""" self.assertFalse(is_safetensors_compatible(__A , variant=__A ) )
6
0
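The code cell above implements Boyer-Moore matching with the bad-character heuristic. A compact sketch of that rule under illustrative names (the masked class shifts by mismatch_index - match_index; this version expresses the same idea via pattern.rfind):

def bad_character_search(text: str, pattern: str) -> list:
    positions = []
    n, m = len(text), len(pattern)
    i = 0
    while i <= n - m:
        j = m - 1
        while j >= 0 and pattern[j] == text[i + j]:
            j -= 1                                   # compare right to left
        if j < 0:
            positions.append(i)                      # full match at i
            i += 1
        else:
            last = pattern.rfind(text[i + j], 0, j)  # rightmost occurrence left of j
            i += max(1, j - last)                    # rfind gives -1 if absent, so shift j + 1
    return positions

assert bad_character_search("ABAABA", "AB") == [0, 3]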
'''simple docstring''' import numpy as np import torch from torch.utils.data import Dataset from utils import logger class UpperCAmelCase ( UpperCamelCase__ ): '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase ) -> str: lowercase__ : List[str] = params lowercase__ : List[Any] = np.array(__A ) lowercase__ : Optional[Any] = np.array([len(__A ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self , __lowerCAmelCase ) -> Optional[Any]: return (self.token_ids[index], self.lengths[index]) def __len__( self ) -> List[Any]: return len(self.lengths ) def _lowerCAmelCase( self ) -> str: assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def _lowerCAmelCase( self ) -> Tuple: lowercase__ : List[Any] = self.params.max_model_input_size lowercase__ : Tuple = self.lengths > max_len logger.info(F"""Splitting {sum(__A )} too long sequences.""" ) def divide_chunks(__lowerCAmelCase , __lowerCAmelCase ): return [l[i : i + n] for i in range(0 , len(__A ) , __A )] lowercase__ : List[str] = [] lowercase__ : Union[str, Any] = [] if self.params.mlm: lowercase__ , lowercase__ : Union[str, Any] = self.params.special_tok_ids['''cls_token'''], self.params.special_tok_ids['''sep_token'''] else: lowercase__ , lowercase__ : int = self.params.special_tok_ids['''bos_token'''], self.params.special_tok_ids['''eos_token'''] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: lowercase__ : Tuple = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: lowercase__ : Dict = np.insert(__A , 0 , __A ) if sub_s[-1] != sep_id: lowercase__ : List[str] = np.insert(__A , len(__A ) , __A ) assert len(__A ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(__A ) new_tok_ids.extend(__A ) new_lengths.extend([len(__A ) for l in sub_seqs] ) lowercase__ : Tuple = np.array(__A ) lowercase__ : Any = np.array(__A ) def _lowerCAmelCase( self ) -> Optional[Any]: lowercase__ : List[Any] = len(self ) lowercase__ : Optional[int] = self.lengths > 11 lowercase__ : List[Any] = self.token_ids[indices] lowercase__ : Optional[int] = self.lengths[indices] lowercase__ : Optional[int] = len(self ) logger.info(F"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" ) def _lowerCAmelCase( self ) -> Any: if "unk_token" not in self.params.special_tok_ids: return else: lowercase__ : Union[str, Any] = self.params.special_tok_ids['''unk_token'''] lowercase__ : Tuple = len(self ) lowercase__ : Tuple = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) lowercase__ : List[str] = (unk_occs / self.lengths) < 0.5 lowercase__ : List[Any] = self.token_ids[indices] lowercase__ : Any = self.lengths[indices] lowercase__ : int = len(self ) logger.info(F"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" ) def _lowerCAmelCase( self ) -> List[str]: if not self.params.is_master: return logger.info(F"""{len(self )} sequences""" ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = 
sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def _lowerCAmelCase( self , __lowerCAmelCase ) -> Union[str, Any]: lowercase__ : str = [t[0] for t in batch] lowercase__ : Union[str, Any] = [t[1] for t in batch] assert len(__A ) == len(__A ) # Max for paddings lowercase__ : Dict = max(__A ) # Pad token ids if self.params.mlm: lowercase__ : List[Any] = self.params.special_tok_ids['''pad_token'''] else: lowercase__ : Optional[int] = self.params.special_tok_ids['''unk_token'''] lowercase__ : List[Any] = [list(t.astype(__A ) ) + [pad_idx] * (max_seq_len_ - len(__A )) for t in token_ids] assert len(tk_ ) == len(__A ) assert all(len(__A ) == max_seq_len_ for t in tk_ ) lowercase__ : Tuple = torch.tensor(tk_ ) # (bs, max_seq_len_) lowercase__ : Optional[Any] = torch.tensor(__A ) # (bs) return tk_t, lg_t
152
import argparse
import datetime


def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ):
    SCREAMING_SNAKE_CASE__ = {
        """0""": """Sunday""",
        """1""": """Monday""",
        """2""": """Tuesday""",
        """3""": """Wednesday""",
        """4""": """Thursday""",
        """5""": """Friday""",
        """6""": """Saturday""",
    }
    SCREAMING_SNAKE_CASE__ = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(UpperCamelCase__ ) < 11:
        raise ValueError("""Must be 10 characters long""" )

    # Get month
    SCREAMING_SNAKE_CASE__ = int(date_input[0] + date_input[1] )
    # Validate
    if not 0 < m < 13:
        raise ValueError("""Month must be between 1 - 12""" )

    SCREAMING_SNAKE_CASE__ = date_input[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("""Date separator must be '-' or '/'""" )

    # Get day
    SCREAMING_SNAKE_CASE__ = int(date_input[3] + date_input[4] )
    # Validate
    if not 0 < d < 32:
        raise ValueError("""Date must be between 1 - 31""" )

    # Get second separator
    SCREAMING_SNAKE_CASE__ = date_input[5]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("""Date separator must be '-' or '/'""" )

    # Get year
    SCREAMING_SNAKE_CASE__ = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
    # Arbitrary year range
    if not 45 < y < 8_500:
        raise ValueError(
            """Year out of range. There has to be some sort of limit...right?""" )

    # Get datetime obj for validation
    SCREAMING_SNAKE_CASE__ = datetime.date(int(UpperCamelCase__ ) , int(UpperCamelCase__ ) , int(UpperCamelCase__ ) )

    # Start math
    if m <= 2:
        SCREAMING_SNAKE_CASE__ = y - 1
        SCREAMING_SNAKE_CASE__ = m + 12

    # maths var
    SCREAMING_SNAKE_CASE__ = int(str(UpperCamelCase__ )[:2] )
    SCREAMING_SNAKE_CASE__ = int(str(UpperCamelCase__ )[2:] )
    SCREAMING_SNAKE_CASE__ = int(2.6 * m - 5.3_9 )
    SCREAMING_SNAKE_CASE__ = int(c / 4 )
    SCREAMING_SNAKE_CASE__ = int(k / 4 )
    SCREAMING_SNAKE_CASE__ = int(d + k )
    SCREAMING_SNAKE_CASE__ = int(t + u + v + x )
    SCREAMING_SNAKE_CASE__ = int(z - (2 * c) )
    SCREAMING_SNAKE_CASE__ = round(w % 7 )
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("""The date was evaluated incorrectly. Contact developer.""" )

    # Response
    SCREAMING_SNAKE_CASE__ = f'''Your date {date_input}, is a {days[str(UpperCamelCase__ )]}!'''
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    _lowerCamelCase = argparse.ArgumentParser(
        description=(
            'Find out what day of the week nearly any date is or was. Enter '
            'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
        )
    )
    parser.add_argument(
        'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
    )
    _lowerCamelCase = parser.parse_args()
    zeller(args.date_input)
6
0
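The style-context cell above validates a date string and applies a Zeller-style congruence. The core arithmetic, extracted as a sketch for four-digit years (the helper name is illustrative; 0 = Sunday through 6 = Saturday, matching the script's days table):

def day_of_week_index(d: int, m: int, y: int) -> int:
    if m <= 2:                 # treat Jan/Feb as months 13/14 of the prior year
        y, m = y - 1, m + 12
    c, k = divmod(y, 100)      # century part and two-digit year
    t = int(2.6 * m - 5.39)
    return (d + k + t + c // 4 + k // 4 - 2 * c) % 7

assert day_of_week_index(1, 1, 2000) == 6  # 2000-01-01 was a Saturday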
"""simple docstring""" import argparse import json import math import os import time import traceback import zipfile from collections import Counter import requests def __magic_name__ ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any]=None ): __a : Union[str, Any] = None if token is not None: __a : List[str] = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''} __a : Optional[int] = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100''' __a : int = requests.get(UpperCamelCase__ , headers=UpperCamelCase__ ).json() __a : str = {} try: job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} ) __a : Dict = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 ) for i in range(UpperCamelCase__ ): __a : Dict = requests.get(url + F'''&page={i + 2}''' , headers=UpperCamelCase__ ).json() job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} ) return job_links except Exception: print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' ) return {} def __magic_name__ ( _lowerCamelCase : Dict , _lowerCamelCase : str=None ): __a : Optional[int] = None if token is not None: __a : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''} __a : List[Any] = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100''' __a : Dict = requests.get(UpperCamelCase__ , headers=UpperCamelCase__ ).json() __a : Optional[int] = {} try: artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} ) __a : Any = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 ) for i in range(UpperCamelCase__ ): __a : Optional[Any] = requests.get(url + F'''&page={i + 2}''' , headers=UpperCamelCase__ ).json() artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} ) return artifacts except Exception: print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' ) return {} def __magic_name__ ( _lowerCamelCase : Any , _lowerCamelCase : List[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] ): __a : str = None if token is not None: __a : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''} __a : Any = requests.get(UpperCamelCase__ , headers=UpperCamelCase__ , allow_redirects=UpperCamelCase__ ) __a : Union[str, Any] = result.headers["""Location"""] __a : Dict = requests.get(UpperCamelCase__ , allow_redirects=UpperCamelCase__ ) __a : int = os.path.join(UpperCamelCase__ , F'''{artifact_name}.zip''' ) with open(UpperCamelCase__ , """wb""" ) as fp: fp.write(response.content ) def __magic_name__ ( _lowerCamelCase : int , _lowerCamelCase : Any=None ): __a : Union[str, Any] = [] __a : List[Any] = [] __a : Union[str, Any] = None with zipfile.ZipFile(UpperCamelCase__ ) as z: for filename in z.namelist(): if not os.path.isdir(UpperCamelCase__ ): # read the file if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]: with z.open(UpperCamelCase__ ) as f: for line in f: __a : str = line.decode("""UTF-8""" ).strip() if filename == "failures_line.txt": try: # `error_line` is the place where `error` occurs __a : Any = line[: line.index(""": """ )] __a : Optional[Any] = line[line.index(""": """ ) + len(""": """ ) :] errors.append([error_line, error] ) except 
Exception: # skip un-related lines pass elif filename == "summary_short.txt" and line.startswith("""FAILED """ ): # `test` is the test method that failed __a : Optional[Any] = line[len("""FAILED """ ) :] failed_tests.append(UpperCamelCase__ ) elif filename == "job_name.txt": __a : List[str] = line if len(UpperCamelCase__ ) != len(UpperCamelCase__ ): raise ValueError( F'''`errors` and `failed_tests` should have the same number of elements. Got {len(UpperCamelCase__ )} for `errors` ''' F'''and {len(UpperCamelCase__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some''' """ problem.""" ) __a : Union[str, Any] = None if job_name and job_links: __a : Tuple = job_links.get(UpperCamelCase__ , UpperCamelCase__ ) # A list with elements of the form (line of error, error, failed test) __a : str = [x + [y] + [job_link] for x, y in zip(UpperCamelCase__ , UpperCamelCase__ )] return result def __magic_name__ ( _lowerCamelCase : Tuple , _lowerCamelCase : Any=None ): __a : Any = [] __a : Union[str, Any] = [os.path.join(UpperCamelCase__ , UpperCamelCase__ ) for p in os.listdir(UpperCamelCase__ ) if p.endswith(""".zip""" )] for p in paths: errors.extend(get_errors_from_single_artifact(UpperCamelCase__ , job_links=UpperCamelCase__ ) ) return errors def __magic_name__ ( _lowerCamelCase : int , _lowerCamelCase : str=None ): __a : List[Any] = Counter() counter.update([x[1] for x in logs] ) __a : Tuple = counter.most_common() __a : Tuple = {} for error, count in counts: if error_filter is None or error not in error_filter: __a : List[str] = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]} __a : List[str] = dict(sorted(r.items() , key=lambda _lowerCamelCase : item[1]["count"] , reverse=UpperCamelCase__ ) ) return r def __magic_name__ ( _lowerCamelCase : Tuple ): __a : List[Any] = test.split("""::""" )[0] if test.startswith("""tests/models/""" ): __a : List[str] = test.split("""/""" )[2] else: __a : Optional[int] = None return test def __magic_name__ ( _lowerCamelCase : Tuple , _lowerCamelCase : Any=None ): __a : Tuple = [(x[0], x[1], get_model(x[2] )) for x in logs] __a : int = [x for x in logs if x[2] is not None] __a : List[Any] = {x[2] for x in logs} __a : Union[str, Any] = {} for test in tests: __a : Optional[Any] = Counter() # count by errors in `test` counter.update([x[1] for x in logs if x[2] == test] ) __a : Dict = counter.most_common() __a : str = {error: count for error, count in counts if (error_filter is None or error not in error_filter)} __a : Tuple = sum(error_counts.values() ) if n_errors > 0: __a : str = {"""count""": n_errors, """errors""": error_counts} __a : Any = dict(sorted(r.items() , key=lambda _lowerCamelCase : item[1]["count"] , reverse=UpperCamelCase__ ) ) return r def __magic_name__ ( _lowerCamelCase : Tuple ): __a : Union[str, Any] = """| no. | error | status |""" __a : Optional[int] = """|-:|:-|:-|""" __a : Any = [header, sep] for error in reduced_by_error: __a : Optional[Any] = reduced_by_error[error]["""count"""] __a : Tuple = F'''| {count} | {error[:1_0_0]} | |''' lines.append(UpperCamelCase__ ) return "\n".join(UpperCamelCase__ ) def __magic_name__ ( _lowerCamelCase : Tuple ): __a : List[str] = """| model | no. 
of errors | major error | count |""" __a : Tuple = """|-:|-:|-:|-:|""" __a : Tuple = [header, sep] for model in reduced_by_model: __a : str = reduced_by_model[model]["""count"""] __a , __a : Any = list(reduced_by_model[model]["""errors"""].items() )[0] __a : Any = F'''| {model} | {count} | {error[:6_0]} | {_count} |''' lines.append(UpperCamelCase__ ) return "\n".join(UpperCamelCase__ ) if __name__ == "__main__": lowercase__ = argparse.ArgumentParser() # Required parameters parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.") parser.add_argument( "--output_dir", type=str, required=True, help="Where to store the downloaded artifacts and other result files.", ) parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.") lowercase__ = parser.parse_args() os.makedirs(args.output_dir, exist_ok=True) lowercase__ = get_job_links(args.workflow_run_id, token=args.token) lowercase__ = {} # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee. # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`. if _job_links: for k, v in _job_links.items(): # This is how GitHub actions combine job names. if " / " in k: lowercase__ = k.find(" / ") lowercase__ = k[index + len(" / ") :] lowercase__ = v with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp: json.dump(job_links, fp, ensure_ascii=False, indent=4) lowercase__ = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) for idx, (name, url) in enumerate(artifacts.items()): download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) lowercase__ = get_all_errors(args.output_dir, job_links=job_links) # `e[1]` is the error lowercase__ = Counter() counter.update([e[1] for e in errors]) # print the top 30 most common test errors lowercase__ = counter.most_common(30) for item in most_common: print(item) with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp: json.dump(errors, fp, ensure_ascii=False, indent=4) lowercase__ = reduce_by_error(errors) lowercase__ = reduce_by_model(errors) lowercase__ = make_github_table(reduced_by_error) lowercase__ = make_github_table_per_model(reduced_by_model) with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp: fp.write(sa) with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp: fp.write(sa)
581
import argparse
import logging
import pickle
from collections import Counter


logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
_lowerCamelCase = logging.getLogger(__name__)

if __name__ == "__main__":
    _lowerCamelCase = argparse.ArgumentParser(
        description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
    )
    parser.add_argument(
        '--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
    )
    parser.add_argument(
        '--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
    )
    parser.add_argument('--vocab_size', default=30522, type=int)
    _lowerCamelCase = parser.parse_args()

    logger.info(F'''Loading data from {args.data_file}''')
    with open(args.data_file, 'rb') as fp:
        _lowerCamelCase = pickle.load(fp)

    logger.info('Counting occurrences for MLM.')
    _lowerCamelCase = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    _lowerCamelCase = [0] * args.vocab_size
    for k, v in counter.items():
        _lowerCamelCase = v

    logger.info(F'''Dump to {args.token_counts_dump}''')
    with open(args.token_counts_dump, 'wb') as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
6
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


lowerCamelCase__ = {
    '''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
    '''tokenization_canine''': ['''CanineTokenizer'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase__ = [
        '''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''CanineForMultipleChoice''',
        '''CanineForQuestionAnswering''',
        '''CanineForSequenceClassification''',
        '''CanineForTokenClassification''',
        '''CanineLayer''',
        '''CanineModel''',
        '''CaninePreTrainedModel''',
        '''load_tf_weights_in_canine''',
    ]


if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )

else:
    import sys

    lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
381
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_lowerCamelCase = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase = ['SpeechEncoderDecoderModel']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase = ['FlaxSpeechEncoderDecoderModel']

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    _lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
6
0
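Both __init__ cells in the record above use transformers' lazy-import pattern: a mapping from submodules to exported names, resolved only on first attribute access. A minimal sketch of the idea, assuming the same contract as _LazyModule (this is not the library's actual implementation):

import importlib
from types import ModuleType

class LazyModule(ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {a: mod for mod, attrs in import_structure.items() for a in attrs}

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value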
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import ( DiffusionPipeline, UnCLIPImageVariationPipeline, UnCLIPScheduler, UNetaDConditionModel, UNetaDModel, ) from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel from diffusers.utils import floats_tensor, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _snake_case ( UpperCamelCase__ , unittest.TestCase ): '''simple docstring''' UpperCamelCase__ =UnCLIPImageVariationPipeline UpperCamelCase__ =IMAGE_VARIATION_PARAMS - {"""height""", """width""", """guidance_scale"""} UpperCamelCase__ =IMAGE_VARIATION_BATCH_PARAMS UpperCamelCase__ =[ """generator""", """return_dict""", """decoder_num_inference_steps""", """super_res_num_inference_steps""", ] UpperCamelCase__ =False @property def snake_case_ ( self : List[str] ): return 32 @property def snake_case_ ( self : List[Any] ): return 32 @property def snake_case_ ( self : Optional[int] ): return self.time_input_dim @property def snake_case_ ( self : List[str] ): return self.time_input_dim * 4 @property def snake_case_ ( self : Tuple ): return 100 @property def snake_case_ ( self : int ): UpperCAmelCase_ :Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) return tokenizer @property def snake_case_ ( self : int ): torch.manual_seed(0 ) UpperCAmelCase_ :Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModelWithProjection(__A ) @property def snake_case_ ( self : Union[str, Any] ): torch.manual_seed(0 ) UpperCAmelCase_ :Any = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) return CLIPVisionModelWithProjection(__A ) @property def snake_case_ ( self : List[str] ): torch.manual_seed(0 ) UpperCAmelCase_ :List[str] = { '''clip_embeddings_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''cross_attention_dim''': self.cross_attention_dim, } UpperCAmelCase_ :Union[str, Any] = UnCLIPTextProjModel(**__A ) return model @property def snake_case_ ( self : Union[str, Any] ): torch.manual_seed(0 ) UpperCAmelCase_ :Union[str, Any] = { '''sample_size''': 32, # RGB in channels '''in_channels''': 3, # Out channels is double in channels because predicts mean and variance '''out_channels''': 6, '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, 
'''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': '''identity''', } UpperCAmelCase_ :List[Any] = UNetaDConditionModel(**__A ) return model @property def snake_case_ ( self : List[str] ): return { "sample_size": 64, "layers_per_block": 1, "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"), "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"), "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "in_channels": 6, "out_channels": 3, } @property def snake_case_ ( self : Tuple ): torch.manual_seed(0 ) UpperCAmelCase_ :Tuple = UNetaDModel(**self.dummy_super_res_kwargs ) return model @property def snake_case_ ( self : Optional[int] ): torch.manual_seed(1 ) UpperCAmelCase_ :Optional[Any] = UNetaDModel(**self.dummy_super_res_kwargs ) return model def snake_case_ ( self : str ): UpperCAmelCase_ :Optional[Any] = self.dummy_decoder UpperCAmelCase_ :int = self.dummy_text_proj UpperCAmelCase_ :List[str] = self.dummy_text_encoder UpperCAmelCase_ :Tuple = self.dummy_tokenizer UpperCAmelCase_ :str = self.dummy_super_res_first UpperCAmelCase_ :Dict = self.dummy_super_res_last UpperCAmelCase_ :List[str] = UnCLIPScheduler( variance_type='''learned_range''' , prediction_type='''epsilon''' , num_train_timesteps=1_000 , ) UpperCAmelCase_ :Tuple = UnCLIPScheduler( variance_type='''fixed_small_log''' , prediction_type='''epsilon''' , num_train_timesteps=1_000 , ) UpperCAmelCase_ :List[Any] = CLIPImageProcessor(crop_size=32 , size=32 ) UpperCAmelCase_ :Union[str, Any] = self.dummy_image_encoder return { "decoder": decoder, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_proj": text_proj, "feature_extractor": feature_extractor, "image_encoder": image_encoder, "super_res_first": super_res_first, "super_res_last": super_res_last, "decoder_scheduler": decoder_scheduler, "super_res_scheduler": super_res_scheduler, } def snake_case_ ( self : Union[str, Any] , snake_case : Optional[int] , snake_case : List[str]=0 , snake_case : str=True ): UpperCAmelCase_ :Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(__A ) ).to(__A ) if str(__A ).startswith('''mps''' ): UpperCAmelCase_ :int = torch.manual_seed(__A ) else: UpperCAmelCase_ :Optional[int] = torch.Generator(device=__A ).manual_seed(__A ) if pil_image: UpperCAmelCase_ :str = input_image * 0.5 + 0.5 UpperCAmelCase_ :Optional[Any] = input_image.clamp(0 , 1 ) UpperCAmelCase_ :str = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() UpperCAmelCase_ :str = DiffusionPipeline.numpy_to_pil(__A )[0] return { "image": input_image, "generator": generator, "decoder_num_inference_steps": 2, "super_res_num_inference_steps": 2, "output_type": "np", } def snake_case_ ( self : str ): UpperCAmelCase_ :List[Any] = '''cpu''' UpperCAmelCase_ :Union[str, Any] = self.get_dummy_components() UpperCAmelCase_ :int = self.pipeline_class(**__A ) UpperCAmelCase_ :Dict = pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) UpperCAmelCase_ :List[Any] = self.get_dummy_inputs(__A , pil_image=__A ) UpperCAmelCase_ :Tuple = pipe(**__A ) UpperCAmelCase_ :Tuple = output.images UpperCAmelCase_ :Union[str, Any] = self.get_dummy_inputs(__A , pil_image=__A ) UpperCAmelCase_ :Dict = pipe( **__A , return_dict=__A , )[0] UpperCAmelCase_ :int = image[0, -3:, -3:, -1] UpperCAmelCase_ :int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase_ :int = np.array( [ 0.9_997, 0.0_002, 0.9_997, 0.9_997, 0.9_969, 0.0_023, 0.9_997, 0.9_969, 0.9_970, ] ) assert 
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def snake_case_ ( self : List[Any] ): UpperCAmelCase_ :Any = '''cpu''' UpperCAmelCase_ :int = self.get_dummy_components() UpperCAmelCase_ :List[Any] = self.pipeline_class(**__A ) UpperCAmelCase_ :Tuple = pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) UpperCAmelCase_ :Dict = self.get_dummy_inputs(__A , pil_image=__A ) UpperCAmelCase_ :Optional[int] = pipe(**__A ) UpperCAmelCase_ :List[str] = output.images UpperCAmelCase_ :int = self.get_dummy_inputs(__A , pil_image=__A ) UpperCAmelCase_ :Optional[Any] = pipe( **__A , return_dict=__A , )[0] UpperCAmelCase_ :Union[str, Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ :Tuple = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase_ :int = np.array([0.9_997, 0.0_003, 0.9_997, 0.9_997, 0.9_970, 0.0_024, 0.9_997, 0.9_971, 0.9_971] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def snake_case_ ( self : List[Any] ): UpperCAmelCase_ :Optional[Any] = '''cpu''' UpperCAmelCase_ :Dict = self.get_dummy_components() UpperCAmelCase_ :Dict = self.pipeline_class(**__A ) UpperCAmelCase_ :List[Any] = pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) UpperCAmelCase_ :Optional[int] = self.get_dummy_inputs(__A , pil_image=__A ) UpperCAmelCase_ :Any = [ pipeline_inputs['''image'''], pipeline_inputs['''image'''], ] UpperCAmelCase_ :List[str] = pipe(**__A ) UpperCAmelCase_ :Any = output.images UpperCAmelCase_ :List[str] = self.get_dummy_inputs(__A , pil_image=__A ) UpperCAmelCase_ :List[Any] = [ tuple_pipeline_inputs['''image'''], tuple_pipeline_inputs['''image'''], ] UpperCAmelCase_ :List[Any] = pipe( **__A , return_dict=__A , )[0] UpperCAmelCase_ :int = image[0, -3:, -3:, -1] UpperCAmelCase_ :Optional[int] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (2, 64, 64, 3) UpperCAmelCase_ :Optional[int] = np.array( [ 0.9_997, 0.9_989, 0.0_008, 0.0_021, 0.9_960, 0.0_018, 0.0_014, 0.0_002, 0.9_933, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def snake_case_ ( self : str ): UpperCAmelCase_ :Optional[Any] = torch.device('''cpu''' ) class _snake_case : '''simple docstring''' UpperCamelCase__ =1 UpperCAmelCase_ :Optional[int] = self.get_dummy_components() UpperCAmelCase_ :str = self.pipeline_class(**__A ) UpperCAmelCase_ :Tuple = pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) UpperCAmelCase_ :Any = torch.Generator(device=__A ).manual_seed(0 ) UpperCAmelCase_ :Tuple = pipe.decoder.dtype UpperCAmelCase_ :str = 1 UpperCAmelCase_ :int = ( batch_size, pipe.decoder.config.in_channels, pipe.decoder.config.sample_size, pipe.decoder.config.sample_size, ) UpperCAmelCase_ :Optional[Any] = pipe.prepare_latents( __A , dtype=__A , device=__A , generator=__A , latents=__A , scheduler=DummyScheduler() ) UpperCAmelCase_ :List[Any] = ( batch_size, pipe.super_res_first.config.in_channels // 2, pipe.super_res_first.config.sample_size, pipe.super_res_first.config.sample_size, ) UpperCAmelCase_ :List[Any] = pipe.prepare_latents( __A , dtype=__A , device=__A , generator=__A , latents=__A , scheduler=DummyScheduler() ) UpperCAmelCase_ :Optional[Any] = self.get_dummy_inputs(__A , pil_image=__A ) UpperCAmelCase_ :Dict = pipe( **__A , decoder_latents=__A , super_res_latents=__A ).images 
UpperCAmelCase_ :Union[str, Any] = self.get_dummy_inputs(__A , pil_image=__A ) # Don't pass image, instead pass embedding UpperCAmelCase_ :Union[str, Any] = pipeline_inputs.pop('''image''' ) UpperCAmelCase_ :Optional[Any] = pipe.image_encoder(__A ).image_embeds UpperCAmelCase_ :List[str] = pipe( **__A , decoder_latents=__A , super_res_latents=__A , image_embeddings=__A , ).images # make sure passing text embeddings manually is identical assert np.abs(img_out_a - img_out_a ).max() < 1e-4 @skip_mps def snake_case_ ( self : Any ): UpperCAmelCase_ :Optional[Any] = torch_device == '''cpu''' # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor UpperCAmelCase_ :List[str] = 1e-2 self._test_attention_slicing_forward_pass( test_max_difference=__A , expected_max_diff=__A ) @skip_mps def snake_case_ ( self : Optional[int] ): UpperCAmelCase_ :Union[str, Any] = torch_device == '''cpu''' UpperCAmelCase_ :Tuple = True UpperCAmelCase_ :Any = [ '''decoder_num_inference_steps''', '''super_res_num_inference_steps''', ] self._test_inference_batch_single_identical( test_max_difference=__A , relax_max_difference=__A , additional_params_copy_to_batched_inputs=__A , ) def snake_case_ ( self : Tuple ): UpperCAmelCase_ :Any = [ '''decoder_num_inference_steps''', '''super_res_num_inference_steps''', ] if torch_device == "mps": # TODO: MPS errors with larger batch sizes UpperCAmelCase_ :Optional[Any] = [2, 3] self._test_inference_batch_consistent( batch_sizes=__A , additional_params_copy_to_batched_inputs=__A , ) else: self._test_inference_batch_consistent( additional_params_copy_to_batched_inputs=__A ) @skip_mps def snake_case_ ( self : Optional[int] ): return super().test_dict_tuple_outputs_equivalent() @skip_mps def snake_case_ ( self : str ): return super().test_save_load_local() @skip_mps def snake_case_ ( self : str ): return super().test_save_load_optional_components() @slow @require_torch_gpu class _snake_case ( unittest.TestCase ): '''simple docstring''' def snake_case_ ( self : List[str] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case_ ( self : int ): UpperCAmelCase_ :Optional[int] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png''' ) UpperCAmelCase_ :Optional[Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/unclip/karlo_v1_alpha_cat_variation_fp16.npy''' ) UpperCAmelCase_ :List[Any] = UnCLIPImageVariationPipeline.from_pretrained( '''kakaobrain/karlo-v1-alpha-image-variations''' , torch_dtype=torch.floataa ) UpperCAmelCase_ :Dict = pipeline.to(__A ) pipeline.set_progress_bar_config(disable=__A ) UpperCAmelCase_ :List[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 ) UpperCAmelCase_ :Optional[Any] = pipeline( __A , generator=__A , output_type='''np''' , ) UpperCAmelCase_ :Tuple = output.images[0] assert image.shape == (256, 256, 3) assert_mean_pixel_difference(__A , __A , 15 )
608
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = ["image_processor", "tokenizer"] lowerCamelCase_ = "OwlViTImageProcessor" lowerCamelCase_ = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self :Optional[Any] , __A :int=None , __A :Optional[int]=None , **__A :str ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , __A , ) SCREAMING_SNAKE_CASE__ = kwargs.pop("""feature_extractor""" ) SCREAMING_SNAKE_CASE__ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(__A , __A ) def __call__( self :str , __A :Dict=None , __A :List[str]=None , __A :str=None , __A :Optional[int]="max_length" , __A :Tuple="np" , **__A :int ) -> Tuple: """simple docstring""" if text is None and query_images is None and images is None: raise ValueError( """You have to specify at least one text or query image or image. All three cannot be none.""" ) if text is not None: if isinstance(__A , __A ) or (isinstance(__A , __A ) and not isinstance(text[0] , __A )): SCREAMING_SNAKE_CASE__ = [self.tokenizer(__A , padding=__A , return_tensors=__A , **__A )] elif isinstance(__A , __A ) and isinstance(text[0] , __A ): SCREAMING_SNAKE_CASE__ = [] # Maximum number of queries across batch SCREAMING_SNAKE_CASE__ = max([len(__A ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(__A ) != max_num_queries: SCREAMING_SNAKE_CASE__ = t + [""" """] * (max_num_queries - len(__A )) SCREAMING_SNAKE_CASE__ = self.tokenizer(__A , padding=__A , return_tensors=__A , **__A ) encodings.append(__A ) else: raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" ) if return_tensors == "np": SCREAMING_SNAKE_CASE__ = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) SCREAMING_SNAKE_CASE__ = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp SCREAMING_SNAKE_CASE__ = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) SCREAMING_SNAKE_CASE__ = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch SCREAMING_SNAKE_CASE__ = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 ) SCREAMING_SNAKE_CASE__ = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf SCREAMING_SNAKE_CASE__ = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) SCREAMING_SNAKE_CASE__ = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) else: raise ValueError("""Target return tensor type could not be returned""" ) SCREAMING_SNAKE_CASE__ = BatchEncoding() SCREAMING_SNAKE_CASE__ = input_ids SCREAMING_SNAKE_CASE__ = attention_mask if 
query_images is not None: SCREAMING_SNAKE_CASE__ = BatchEncoding() SCREAMING_SNAKE_CASE__ = self.image_processor( __A , return_tensors=__A , **__A ).pixel_values SCREAMING_SNAKE_CASE__ = query_pixel_values if images is not None: SCREAMING_SNAKE_CASE__ = self.image_processor(__A , return_tensors=__A , **__A ) if text is not None and images is not None: SCREAMING_SNAKE_CASE__ = image_features.pixel_values return encoding elif query_images is not None and images is not None: SCREAMING_SNAKE_CASE__ = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__A ) , tensor_type=__A ) def _snake_case ( self :List[Any] , *__A :Dict , **__A :Dict ) -> Optional[int]: """simple docstring""" return self.image_processor.post_process(*__A , **__A ) def _snake_case ( self :Optional[int] , *__A :Dict , **__A :List[str] ) -> Optional[Any]: """simple docstring""" return self.image_processor.post_process_object_detection(*__A , **__A ) def _snake_case ( self :str , *__A :List[str] , **__A :Union[str, Any] ) -> Any: """simple docstring""" return self.image_processor.post_process_image_guided_detection(*__A , **__A ) def _snake_case ( self :Dict , *__A :List[str] , **__A :List[str] ) -> int: """simple docstring""" return self.tokenizer.batch_decode(*__A , **__A ) def _snake_case ( self :Dict , *__A :Dict , **__A :List[str] ) -> str: """simple docstring""" return self.tokenizer.decode(*__A , **__A ) @property def _snake_case ( self :List[Any] ) -> Optional[int]: """simple docstring""" warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __A , ) return self.image_processor_class @property def _snake_case ( self :Any ) -> Optional[Any]: """simple docstring""" warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __A , ) return self.image_processor
6
0
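The processor cell above mirrors transformers' OwlViTProcessor, which tokenizes one query list per image, pads every list to the longest in the batch, and stacks the results. A hedged usage sketch (fetching the real checkpoint needs network access and a local torch install; the blank test image is illustrative):

from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
image = Image.new("RGB", (768, 768))
inputs = processor(
    text=[["a photo of a cat", "a photo of a dog"]],  # one query list per image
    images=image,
    return_tensors="pt",
)
print(inputs["input_ids"].shape, inputs["pixel_values"].shape)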
"""simple docstring""" import argparse import logging import pickle from collections import Counter logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO ) _UpperCamelCase = logging.getLogger(__name__) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser( description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)' ) parser.add_argument( '--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.' ) parser.add_argument( '--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.' ) parser.add_argument('--vocab_size', default=3_0522, type=int) _UpperCamelCase = parser.parse_args() logger.info(f'''Loading data from {args.data_file}''') with open(args.data_file, 'rb') as fp: _UpperCamelCase = pickle.load(fp) logger.info('Counting occurrences for MLM.') _UpperCamelCase = Counter() for tk_ids in data: counter.update(tk_ids) _UpperCamelCase = [0] * args.vocab_size for k, v in counter.items(): _UpperCamelCase = v logger.info(f'''Dump to {args.token_counts_dump}''') with open(args.token_counts_dump, 'wb') as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
179
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class UpperCamelCase_ ( UpperCamelCase__ ):
    lowerCamelCase_ = 42


class UpperCamelCase_ ( UpperCamelCase__ , UpperCamelCase__ ):
    @register_to_config
    def __init__( self :Union[str, Any] , __A :int = 3 , __A :int = 3 , __A :Tuple[str] = ("DownEncoderBlock2D",) , __A :Tuple[str] = ("UpDecoderBlock2D",) , __A :Tuple[int] = (64,) , __A :int = 1 , __A :str = "silu" , __A :int = 3 , __A :int = 32 , __A :int = 256 , __A :int = 32 , __A :Optional[int] = None , __A :float = 0.1_8_2_1_5 , __A :str = "group" , ) -> Any:
        """simple docstring"""
        super().__init__()
        # pass init params to Encoder
        SCREAMING_SNAKE_CASE__ = Encoder(
            in_channels=__A ,
            out_channels=__A ,
            down_block_types=__A ,
            block_out_channels=__A ,
            layers_per_block=__A ,
            act_fn=__A ,
            norm_num_groups=__A ,
            double_z=__A ,
        )
        SCREAMING_SNAKE_CASE__ = vq_embed_dim if vq_embed_dim is not None else latent_channels
        SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , 1 )
        SCREAMING_SNAKE_CASE__ = VectorQuantizer(__A , __A , beta=0.2_5 , remap=__A , sane_index_shape=__A )
        SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , 1 )
        # pass init params to Decoder
        SCREAMING_SNAKE_CASE__ = Decoder(
            in_channels=__A ,
            out_channels=__A ,
            up_block_types=__A ,
            block_out_channels=__A ,
            layers_per_block=__A ,
            act_fn=__A ,
            norm_num_groups=__A ,
            norm_type=__A ,
        )

    @apply_forward_hook
    def _snake_case ( self :Union[str, Any] , __A :torch.FloatTensor , __A :bool = True ) -> VQEncoderOutput:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = self.encoder(__A )
        SCREAMING_SNAKE_CASE__ = self.quant_conv(__A )
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=__A )

    @apply_forward_hook
    def _snake_case ( self :Tuple , __A :torch.FloatTensor , __A :bool = False , __A :bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        """simple docstring"""
        if not force_not_quantize:
            SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.quantize(__A )
        else:
            SCREAMING_SNAKE_CASE__ = h
        SCREAMING_SNAKE_CASE__ = self.post_quant_conv(__A )
        SCREAMING_SNAKE_CASE__ = self.decoder(__A , quant if self.config.norm_type == """spatial""" else None )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=__A )

    def _snake_case ( self :int , __A :torch.FloatTensor , __A :bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = sample
        SCREAMING_SNAKE_CASE__ = self.encode(__A ).latents
        SCREAMING_SNAKE_CASE__ = self.decode(__A ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=__A )
6
0
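The model cell above mirrors diffusers' VQModel (encode, vector-quantize, decode). A hedged roundtrip sketch under the default config; the shapes are illustrative and the call names assume a recent diffusers release:

import torch
from diffusers import VQModel

model = VQModel()                         # default config: 3-channel latents, no downsampling
x = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    latents = model.encode(x).latents     # continuous, pre-quantization latents
    recon = model.decode(latents).sample  # quantize, then decode to image space
assert recon.shape == x.shape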
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class A_ :
    _A :int = 42
    _A :Optional[int] = 42


class A_ :
    def __init__( self : Optional[Any] , snake_case__ : int ):
        lowercase = [[] for _ in range(__A )]
        lowercase = size

    def __getitem__( self : Dict , snake_case__ : int ):
        return iter(self._graph[vertex] )

    @property
    def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
        return self._size

    def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case__ : int , snake_case__ : int , snake_case__ : int ):
        if weight not in (0, 1):
            raise ValueError("""Edge weight must be either 0 or 1.""" )
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("""Vertex indexes must be in [0; size).""" )
        self._graph[from_vertex].append(Edge(__A , __A ) )

    def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case__ : int , snake_case__ : int ):
        lowercase = deque([start_vertex] )
        lowercase = [None] * self.size
        lowercase = 0

        while queue:
            lowercase = queue.popleft()
            lowercase = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                lowercase = current_distance + edge.weight
                lowercase = distances[edge.destination_vertex]
                if (
                    isinstance(__A , __A )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                lowercase = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )

        if distances[finish_vertex] is None:
            raise ValueError("""No path from start_vertex to finish_vertex.""" )

        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
428
import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = 42 lowerCamelCase_ = jnp.floataa lowerCamelCase_ = True def _snake_case ( self :Tuple ) -> Optional[Any]: """simple docstring""" super().setup() SCREAMING_SNAKE_CASE__ = nn.Dense(5 , dtype=self.dtype ) def __call__( self :List[Any] , *__A :int , **__A :Optional[Any] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = super().__call__(*__A , **__A ) SCREAMING_SNAKE_CASE__ = self.cls(outputs[2] ) return outputs[:2] + (cls_out,) class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = FlaxBigBirdForNaturalQuestionsModule def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Tuple , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple ): def cross_entropy(UpperCamelCase__: List[str] , UpperCamelCase__: List[str] , UpperCamelCase__: List[str]=None ): SCREAMING_SNAKE_CASE__ = logits.shape[-1] SCREAMING_SNAKE_CASE__ = (labels[..., None] == jnp.arange(UpperCamelCase__ )[None]).astype("""f4""" ) SCREAMING_SNAKE_CASE__ = jax.nn.log_softmax(UpperCamelCase__ , axis=-1 ) SCREAMING_SNAKE_CASE__ = -jnp.sum(labels * logits , axis=-1 ) if reduction is not None: SCREAMING_SNAKE_CASE__ = reduction(UpperCamelCase__ ) return loss SCREAMING_SNAKE_CASE__ = partial(UpperCamelCase__ , reduction=jnp.mean ) SCREAMING_SNAKE_CASE__ = cross_entropy(UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = cross_entropy(UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = cross_entropy(UpperCamelCase__ , UpperCamelCase__ ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class UpperCamelCase_ : lowerCamelCase_ = "google/bigbird-roberta-base" lowerCamelCase_ = 30_00 lowerCamelCase_ = 1_05_00 lowerCamelCase_ = 1_28 lowerCamelCase_ = 3 lowerCamelCase_ = 1 lowerCamelCase_ = 5 # tx_args lowerCamelCase_ = 3e-5 lowerCamelCase_ = 0.0 lowerCamelCase_ = 2_00_00 lowerCamelCase_ = 0.0095 lowerCamelCase_ = "bigbird-roberta-natural-questions" lowerCamelCase_ = "training-expt" lowerCamelCase_ = "data/nq-training.jsonl" lowerCamelCase_ = "data/nq-validation.jsonl" def _snake_case ( self :str ) -> Optional[int]: """simple docstring""" os.makedirs(self.base_dir , exist_ok=__A ) SCREAMING_SNAKE_CASE__ = os.path.join(self.base_dir , self.save_dir ) SCREAMING_SNAKE_CASE__ = self.batch_size_per_device * jax.device_count() @dataclass class UpperCamelCase_ : lowerCamelCase_ = 42 lowerCamelCase_ = 40_96 # no dynamic padding on TPUs def __call__( self :Optional[Any] , __A :Optional[int] ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.collate_fn(__A ) SCREAMING_SNAKE_CASE__ = jax.tree_util.tree_map(__A , __A ) return batch def _snake_case ( self :List[Any] , __A :Union[str, Any] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.fetch_inputs(features["""input_ids"""] ) SCREAMING_SNAKE_CASE__ = { 
"""input_ids""": jnp.array(__A , dtype=jnp.intaa ), """attention_mask""": jnp.array(__A , dtype=jnp.intaa ), """start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.intaa ), """end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.intaa ), """pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.intaa ), } return batch def _snake_case ( self :Tuple , __A :list ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ = [self._fetch_inputs(__A ) for ids in input_ids] return zip(*__A ) def _snake_case ( self :List[str] , __A :list ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [1 for _ in range(len(__A ) )] while len(__A ) < self.max_length: input_ids.append(self.pad_id ) attention_mask.append(0 ) return input_ids, attention_mask def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any]=None ): if seed is not None: SCREAMING_SNAKE_CASE__ = dataset.shuffle(seed=UpperCamelCase__ ) for i in range(len(UpperCamelCase__ ) // batch_size ): SCREAMING_SNAKE_CASE__ = dataset[i * batch_size : (i + 1) * batch_size] yield dict(UpperCamelCase__ ) @partial(jax.pmap , axis_name="""batch""" ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Dict , UpperCamelCase__: Optional[int] , **UpperCamelCase__: Optional[int] ): def loss_fn(UpperCamelCase__: List[Any] ): SCREAMING_SNAKE_CASE__ = model_inputs.pop("""start_labels""" ) SCREAMING_SNAKE_CASE__ = model_inputs.pop("""end_labels""" ) SCREAMING_SNAKE_CASE__ = model_inputs.pop("""pooled_labels""" ) SCREAMING_SNAKE_CASE__ = state.apply_fn(**UpperCamelCase__ , params=UpperCamelCase__ , dropout_rng=UpperCamelCase__ , train=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = outputs return state.loss_fn( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = jax.random.split(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = jax.value_and_grad(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = grad_fn(state.params ) SCREAMING_SNAKE_CASE__ = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" ) SCREAMING_SNAKE_CASE__ = jax.lax.pmean(UpperCamelCase__ , """batch""" ) SCREAMING_SNAKE_CASE__ = state.apply_gradients(grads=UpperCamelCase__ ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name="""batch""" ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] , **UpperCamelCase__: Dict ): SCREAMING_SNAKE_CASE__ = model_inputs.pop("""start_labels""" ) SCREAMING_SNAKE_CASE__ = model_inputs.pop("""end_labels""" ) SCREAMING_SNAKE_CASE__ = model_inputs.pop("""pooled_labels""" ) SCREAMING_SNAKE_CASE__ = state.apply_fn(**UpperCamelCase__ , params=state.params , train=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = outputs SCREAMING_SNAKE_CASE__ = state.loss_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" ) return metrics class UpperCamelCase_ ( train_state.TrainState ): lowerCamelCase_ = struct.field(pytree_node=UpperCamelCase__ ) @dataclass class UpperCamelCase_ : lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = None def _snake_case ( self :List[Any] , __A :str , __A :str , __A :str , __A 
:Tuple=None ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = model.params SCREAMING_SNAKE_CASE__ = TrainState.create( apply_fn=model.__call__ , params=__A , tx=__A , loss_fn=__A , ) if ckpt_dir is not None: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = restore_checkpoint(__A , __A ) SCREAMING_SNAKE_CASE__ = { """lr""": args.lr, """init_lr""": args.init_lr, """warmup_steps""": args.warmup_steps, """num_train_steps""": num_train_steps, """weight_decay""": args.weight_decay, } SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = build_tx(**__A ) SCREAMING_SNAKE_CASE__ = train_state.TrainState( step=__A , apply_fn=model.__call__ , params=__A , tx=__A , opt_state=__A , ) SCREAMING_SNAKE_CASE__ = args SCREAMING_SNAKE_CASE__ = data_collator SCREAMING_SNAKE_CASE__ = lr SCREAMING_SNAKE_CASE__ = params SCREAMING_SNAKE_CASE__ = jax_utils.replicate(__A ) return state def _snake_case ( self :Optional[Any] , __A :Optional[int] , __A :int , __A :int ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.args SCREAMING_SNAKE_CASE__ = len(__A ) // args.batch_size SCREAMING_SNAKE_CASE__ = jax.random.PRNGKey(0 ) SCREAMING_SNAKE_CASE__ = jax.random.split(__A , jax.device_count() ) for epoch in range(args.max_epochs ): SCREAMING_SNAKE_CASE__ = jnp.array(0 , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE__ = get_batched_dataset(__A , args.batch_size , seed=__A ) SCREAMING_SNAKE_CASE__ = 0 for batch in tqdm(__A , total=__A , desc=f'''Running EPOCH-{epoch}''' ): SCREAMING_SNAKE_CASE__ = self.data_collator(__A ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.train_step_fn(__A , __A , **__A ) running_loss += jax_utils.unreplicate(metrics["""loss"""] ) i += 1 if i % args.logging_steps == 0: SCREAMING_SNAKE_CASE__ = jax_utils.unreplicate(state.step ) SCREAMING_SNAKE_CASE__ = running_loss.item() / i SCREAMING_SNAKE_CASE__ = self.scheduler_fn(state_step - 1 ) SCREAMING_SNAKE_CASE__ = self.evaluate(__A , __A ) SCREAMING_SNAKE_CASE__ = { """step""": state_step.item(), """eval_loss""": eval_loss.item(), """tr_loss""": tr_loss, """lr""": lr.item(), } tqdm.write(str(__A ) ) self.logger.log(__A , commit=__A ) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + f'''-e{epoch}-s{i}''' , state=__A ) def _snake_case ( self :List[str] , __A :Dict , __A :str ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = get_batched_dataset(__A , self.args.batch_size ) SCREAMING_SNAKE_CASE__ = len(__A ) // self.args.batch_size SCREAMING_SNAKE_CASE__ = jnp.array(0 , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE__ = 0 for batch in tqdm(__A , total=__A , desc="""Evaluating ... """ ): SCREAMING_SNAKE_CASE__ = self.data_collator(__A ) SCREAMING_SNAKE_CASE__ = self.val_step_fn(__A , **__A ) running_loss += jax_utils.unreplicate(metrics["""loss"""] ) i += 1 return running_loss / i def _snake_case ( self :List[Any] , __A :Any , __A :Dict ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = jax_utils.unreplicate(__A ) print(f'''SAVING CHECKPOINT IN {save_dir}''' , end=""" ... 
""" ) self.model_save_fn(__A , params=state.params ) with open(os.path.join(__A , """opt_state.msgpack""" ) , """wb""" ) as f: f.write(to_bytes(state.opt_state ) ) joblib.dump(self.args , os.path.join(__A , """args.joblib""" ) ) joblib.dump(self.data_collator , os.path.join(__A , """data_collator.joblib""" ) ) with open(os.path.join(__A , """training_state.json""" ) , """w""" ) as f: json.dump({"""step""": state.step.item()} , __A ) print("""DONE""" ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Optional[Any] ): print(f'''RESTORING CHECKPOINT FROM {save_dir}''' , end=""" ... """ ) with open(os.path.join(UpperCamelCase__ , """flax_model.msgpack""" ) , """rb""" ) as f: SCREAMING_SNAKE_CASE__ = from_bytes(state.params , f.read() ) with open(os.path.join(UpperCamelCase__ , """opt_state.msgpack""" ) , """rb""" ) as f: SCREAMING_SNAKE_CASE__ = from_bytes(state.opt_state , f.read() ) SCREAMING_SNAKE_CASE__ = joblib.load(os.path.join(UpperCamelCase__ , """args.joblib""" ) ) SCREAMING_SNAKE_CASE__ = joblib.load(os.path.join(UpperCamelCase__ , """data_collator.joblib""" ) ) with open(os.path.join(UpperCamelCase__ , """training_state.json""" ) , """r""" ) as f: SCREAMING_SNAKE_CASE__ = json.load(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = training_state["""step"""] print("""DONE""" ) return params, opt_state, step, args, data_collator def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict ): SCREAMING_SNAKE_CASE__ = num_train_steps - warmup_steps SCREAMING_SNAKE_CASE__ = optax.linear_schedule(init_value=UpperCamelCase__ , end_value=UpperCamelCase__ , transition_steps=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = optax.linear_schedule(init_value=UpperCamelCase__ , end_value=1e-7 , transition_steps=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple , UpperCamelCase__: Tuple , UpperCamelCase__: Tuple ): def weight_decay_mask(UpperCamelCase__: Any ): SCREAMING_SNAKE_CASE__ = traverse_util.flatten_dict(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()} return traverse_util.unflatten_dict(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = scheduler_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = optax.adamw(learning_rate=UpperCamelCase__ , weight_decay=UpperCamelCase__ , mask=UpperCamelCase__ ) return tx, lr
6
0
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort a list in place by exchanging any pair found out of order."""
    n = len(numbers)
    for i in range(n):
        for j in range(i + 1, n):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
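# A quick sanity check for exchange_sort (illustrative values). The algorithm
# compares every pair, so it runs in O(n^2), like selection sort.
assert exchange_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
assert exchange_sort([-2, -45, -5]) == [-45, -5, -2]
assert exchange_sort([]) == []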
229
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    """Map an activation-function name to the corresponding torch.nn module."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
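# A small usage sketch for the helper above; `get_activation` is the name chosen
# in the cleaned-up version, since the original identifier was obfuscated.
import torch

act = get_activation("gelu")  # returns nn.GELU()
y = act(torch.randn(2, 3))    # output has the same shape as the input
# get_activation("tanh") would raise ValueError: Unsupported activation function: tanh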
6
0
from scipy.stats import pearsonr

import datasets


_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted class labels, as returned by a model.
    references (`list` of `int`): Ground truth labels.
    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.

Returns:
    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.

Examples:

    Example 1-A simple example using only predictions and references.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
        >>> print(round(results['pearsonr'], 2))
        -0.74

    Example 2-The same as Example 1, but that also returns the `p-value`.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
        >>> print(sorted(list(results.keys())))
        ['p-value', 'pearsonr']
        >>> print(round(results['pearsonr'], 2))
        -0.74
        >>> print(round(results['p-value'], 2))
        0.15
"""

_CITATION = """
@article{2020SciPy-NMeth,
author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
           Haberland, Matt and Reddy, Tyler and Cournapeau, David and
           Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
           Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
           Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
           Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
           Kern, Robert and Larson, Eric and Carey, C J and
           Polat, Ilhan and Feng, Yu and Moore, Eric W. and
           {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
           Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
           Harris, Charles R. and Archibald, Anne M. and
           Ribeiro, Antonio H. and Pedregosa, Fabian and
           {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
           Computing in Python}},
journal = {Nature Methods},
year    = {2020},
volume  = {17},
pages   = {261--272},
adsurl  = {https://rdcu.be/b08Wh},
doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    """Thin `datasets.Metric` wrapper around `scipy.stats.pearsonr`."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
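# The wrapped scipy call can be exercised directly; the expected values below
# come from the docstring examples above.
from scipy.stats import pearsonr

coeff, p_value = pearsonr([10, 9, 2.5, 6, 4], [1, 2, 3, 4, 5])
print(round(coeff, 2))    # -0.74
print(round(p_value, 2))  # 0.15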
697
import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ): SCREAMING_SNAKE_CASE__ = SwinConfig.from_pretrained( """microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) SCREAMING_SNAKE_CASE__ = MaskFormerConfig(backbone_config=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = """huggingface/label-files""" if "ade20k-full" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 847 SCREAMING_SNAKE_CASE__ = """maskformer-ade20k-full-id2label.json""" elif "ade" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 150 SCREAMING_SNAKE_CASE__ = """ade20k-id2label.json""" elif "coco-stuff" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 171 SCREAMING_SNAKE_CASE__ = """maskformer-coco-stuff-id2label.json""" elif "coco" in model_name: # TODO SCREAMING_SNAKE_CASE__ = 133 SCREAMING_SNAKE_CASE__ = """coco-panoptic-id2label.json""" elif "cityscapes" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 19 SCREAMING_SNAKE_CASE__ = """cityscapes-id2label.json""" elif "vistas" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 65 SCREAMING_SNAKE_CASE__ = """mapillary-vistas-id2label.json""" SCREAMING_SNAKE_CASE__ = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE__ = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} return config def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] ): SCREAMING_SNAKE_CASE__ = [] # stem # fmt: off rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) 
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((f'''backbone.layers.{i}.downsample.reduction.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((f'''backbone.layers.{i}.downsample.norm.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((f'''backbone.layers.{i}.downsample.norm.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') ) # FPN rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((f'''sem_seg_head.adapter_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') ) rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') ) rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') ) rename_keys.append((f'''sem_seg_head.layer_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') ) rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') ) rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') ) rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") ) rename_keys.append(("""sem_seg_head.mask_features.bias""", 
"""model.pixel_level_module.decoder.mask_projection.bias""") ) # Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') ) # cross-attention out projection rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') ) # MLP 1 rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') ) # MLP 2 rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') ) # layernorm 1 (self-attention layernorm) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') ) # layernorm 2 (cross-attention layernorm) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') ) # layernorm 3 (final layernorm) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") ) # heads on top rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") ) 
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") ) rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") ) for i in range(3 ): rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', f'''mask_embedder.{i}.0.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', f'''mask_embedder.{i}.0.bias''') ) # fmt: on return rename_keys def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[int] ): SCREAMING_SNAKE_CASE__ = dct.pop(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = val def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: Union[str, Any] ): SCREAMING_SNAKE_CASE__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): SCREAMING_SNAKE_CASE__ = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' ) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ = in_proj_weight[:dim, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[: dim] SCREAMING_SNAKE_CASE__ = in_proj_weight[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE__ = in_proj_bias[ dim : dim * 2 ] SCREAMING_SNAKE_CASE__ = in_proj_weight[ -dim :, : ] SCREAMING_SNAKE_CASE__ = in_proj_bias[-dim :] # fmt: on def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Optional[Any] ): # fmt: off SCREAMING_SNAKE_CASE__ = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' ) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ = in_proj_weight[: hidden_size, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[:config.hidden_size] SCREAMING_SNAKE_CASE__ = in_proj_weight[hidden_size : hidden_size * 2, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[hidden_size : hidden_size * 2] SCREAMING_SNAKE_CASE__ = in_proj_weight[-hidden_size :, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' ) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ = in_proj_weight[: hidden_size, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[:config.hidden_size] SCREAMING_SNAKE_CASE__ = in_proj_weight[hidden_size : hidden_size * 2, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[hidden_size : hidden_size * 2] SCREAMING_SNAKE_CASE__ = in_proj_weight[-hidden_size :, :] 
SCREAMING_SNAKE_CASE__ = in_proj_bias[-hidden_size :] # fmt: on def SCREAMING_SNAKE_CASE__ ( ): SCREAMING_SNAKE_CASE__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" SCREAMING_SNAKE_CASE__ = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ) return im @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: str , UpperCamelCase__: str , UpperCamelCase__: bool = False ): SCREAMING_SNAKE_CASE__ = get_maskformer_config(UpperCamelCase__ ) # load original state_dict with open(UpperCamelCase__ , """rb""" ) as f: SCREAMING_SNAKE_CASE__ = pickle.load(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = data["""model"""] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys SCREAMING_SNAKE_CASE__ = create_rename_keys(UpperCamelCase__ ) for src, dest in rename_keys: rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) read_in_swin_q_k_v(UpperCamelCase__ , config.backbone_config ) read_in_decoder_q_k_v(UpperCamelCase__ , UpperCamelCase__ ) # update to torch tensors for key, value in state_dict.items(): SCREAMING_SNAKE_CASE__ = torch.from_numpy(UpperCamelCase__ ) # load 🤗 model SCREAMING_SNAKE_CASE__ = MaskFormerForInstanceSegmentation(UpperCamelCase__ ) model.eval() for name, param in model.named_parameters(): print(UpperCamelCase__ , param.shape ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(UpperCamelCase__ ) == 0, f'''Unexpected keys: {unexpected_keys}''' # verify results SCREAMING_SNAKE_CASE__ = prepare_img() if "vistas" in model_name: SCREAMING_SNAKE_CASE__ = 65 elif "cityscapes" in model_name: SCREAMING_SNAKE_CASE__ = 65_535 else: SCREAMING_SNAKE_CASE__ = 255 SCREAMING_SNAKE_CASE__ = True if """ade""" in model_name else False SCREAMING_SNAKE_CASE__ = MaskFormerImageProcessor(ignore_index=UpperCamelCase__ , reduce_labels=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = image_processor(UpperCamelCase__ , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE__ = model(**UpperCamelCase__ ) print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": SCREAMING_SNAKE_CASE__ = torch.tensor( [[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' ) Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) model.save_pretrained(UpperCamelCase__ ) image_processor.save_pretrained(UpperCamelCase__ ) if push_to_hub: print("""Pushing model and image processor to the hub...""" ) model.push_to_hub(f'''nielsr/{model_name}''' ) image_processor.push_to_hub(f'''nielsr/{model_name}''' ) if __name__ == "__main__": _lowerCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='maskformer-swin-tiny-ade', type=str, help=('Name of the MaskFormer model you\'d like to convert',), ) parser.add_argument( '--checkpoint_path', default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl', type=str, help='Path to the original state dict (.pth file).', ) parser.add_argument( 
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) _lowerCamelCase = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
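# Toy illustration (not from the source) of the pop-and-reassign renaming the
# converter applies to every checkpoint key listed above.
def rename_key_demo(dct: dict, old: str, new: str) -> None:
    dct[new] = dct.pop(old)


toy_state_dict = {"backbone.patch_embed.proj.weight": 0}
rename_key_demo(
    toy_state_dict,
    "backbone.patch_embed.proj.weight",
    "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight",
)
print(list(toy_state_dict))  # only the renamed key remains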
6
0
"""simple docstring""" import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset UpperCAmelCase : Tuple = pd.read_csv( "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/" "position_salaries.csv" ) UpperCAmelCase : Optional[int] = dataset.iloc[:, 1:2].values UpperCAmelCase : List[str] = dataset.iloc[:, 2].values UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = train_test_split(X, y, test_size=0.2, random_state=0) UpperCAmelCase : List[str] = PolynomialFeatures(degree=4) UpperCAmelCase : Tuple = poly_reg.fit_transform(X) UpperCAmelCase : Any = LinearRegression() pol_reg.fit(X_poly, y) def _SCREAMING_SNAKE_CASE () -> str: '''simple docstring''' plt.scatter(UpperCamelCase__ , UpperCamelCase__ , color="""red""" ) plt.plot(UpperCamelCase__ , pol_reg.predict(poly_reg.fit_transform(UpperCamelCase__ ) ) , color="""blue""" ) plt.title("""Truth or Bluff (Linear Regression)""" ) plt.xlabel("""Position level""" ) plt.ylabel("""Salary""" ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
567
from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowerCamelCase = logging.get_logger(__name__) _lowerCamelCase = { 'nielsr/canine-s': 2048, } # Unicode defines 1,114,112 total “codepoints” _lowerCamelCase = 1114112 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py _lowerCamelCase = 0 _lowerCamelCase = 0XE0_00 _lowerCamelCase = 0XE0_01 _lowerCamelCase = 0XE0_02 _lowerCamelCase = 0XE0_03 _lowerCamelCase = 0XE0_04 # Maps special codepoints to human-readable names. _lowerCamelCase = { # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. _lowerCamelCase = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self :str , __A :str=chr(__A ) , __A :str=chr(__A ) , __A :Dict=chr(__A ) , __A :str=chr(__A ) , __A :Union[str, Any]=chr(__A ) , __A :str=chr(__A ) , __A :int=False , __A :int=2048 , **__A :Dict , ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token # Mask token behave like a normal word, i.e. include the space before it SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token super().__init__( bos_token=__A , eos_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , model_max_length=__A , **__A , ) # Creates a mapping for looking up the IDs of special symbols. SCREAMING_SNAKE_CASE__ = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): SCREAMING_SNAKE_CASE__ = codepoint # Creates a mapping for looking up the string forms of special symbol IDs. 
SCREAMING_SNAKE_CASE__ = { codepoint: name for name, codepoint in self._special_codepoints.items() } SCREAMING_SNAKE_CASE__ = UNICODE_VOCAB_SIZE SCREAMING_SNAKE_CASE__ = len(self._special_codepoints ) @property def _snake_case ( self :Optional[Any] ) -> int: """simple docstring""" return self._unicode_vocab_size def _snake_case ( self :Tuple , __A :str ) -> List[str]: """simple docstring""" return list(__A ) def _snake_case ( self :Optional[Any] , __A :str ) -> int: """simple docstring""" try: return ord(__A ) except TypeError: raise ValueError(f'''invalid token: \'{token}\'''' ) def _snake_case ( self :str , __A :int ) -> str: """simple docstring""" try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(__A ) except TypeError: raise ValueError(f'''invalid id: {index}''' ) def _snake_case ( self :Union[str, Any] , __A :Optional[int] ) -> Any: """simple docstring""" return "".join(__A ) def _snake_case ( self :Optional[Any] , __A :List[int] , __A :Optional[List[int]] = None ) -> List[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [self.sep_token_id] SCREAMING_SNAKE_CASE__ = [self.cls_token_id] SCREAMING_SNAKE_CASE__ = cls + token_ids_a + sep if token_ids_a is not None: result += token_ids_a + sep return result def _snake_case ( self :List[Any] , __A :List[int] , __A :Optional[List[int]] = None , __A :bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A ) SCREAMING_SNAKE_CASE__ = [1] + ([0] * len(__A )) + [1] if token_ids_a is not None: result += ([0] * len(__A )) + [1] return result def _snake_case ( self :List[str] , __A :List[int] , __A :Optional[List[int]] = None ) -> List[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [self.sep_token_id] SCREAMING_SNAKE_CASE__ = [self.cls_token_id] SCREAMING_SNAKE_CASE__ = len(cls + token_ids_a + sep ) * [0] if token_ids_a is not None: result += len(token_ids_a + sep ) * [1] return result def _snake_case ( self :int , __A :str , __A :Optional[str] = None ) -> Any: """simple docstring""" return ()
6
0
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import List from unittest.mock import Mock import torch from torch.utils.data import DataLoader, IterableDataset, TensorDataset from accelerate.accelerator import Accelerator from accelerate.utils.dataclasses import DistributedType class _UpperCamelCase (UpperCamelCase__ ): def __init__( self , __UpperCamelCase )-> Tuple: __lowerCAmelCase = data def __iter__( self )-> int: for element in self.data: yield element def __lowerCAmelCase ( __snake_case=True ): __lowerCAmelCase = Accelerator(even_batches=UpperCamelCase__ ) assert accelerator.num_processes == 2, "this script expects that two GPUs are available" return accelerator def __lowerCAmelCase ( __snake_case , __snake_case , __snake_case , __snake_case = False ): if iterable: __lowerCAmelCase = DummyIterableDataset(torch.as_tensor(range(UpperCamelCase__ ) ) ) else: __lowerCAmelCase = TensorDataset(torch.as_tensor(range(UpperCamelCase__ ) ) ) __lowerCAmelCase = DataLoader(UpperCamelCase__ , batch_size=UpperCamelCase__ ) __lowerCAmelCase = accelerator.prepare(UpperCamelCase__ ) return dl def __lowerCAmelCase ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ): __lowerCAmelCase = create_dataloader(accelerator=UpperCamelCase__ , dataset_size=UpperCamelCase__ , batch_size=UpperCamelCase__ ) __lowerCAmelCase = [len(batch[0] ) for batch in dl] if accelerator.process_index == 0: assert batch_sizes == process_0_expected_batch_sizes elif accelerator.process_index == 1: assert batch_sizes == process_1_expected_batch_sizes def __lowerCAmelCase ( ): __lowerCAmelCase = create_accelerator() # without padding, we would expect a different number of batches verify_dataloader_batch_sizes( UpperCamelCase__ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , ) # without padding, we would expect the same number of batches, but different sizes verify_dataloader_batch_sizes( UpperCamelCase__ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , ) def __lowerCAmelCase ( ): __lowerCAmelCase = create_accelerator(even_batches=UpperCamelCase__ ) verify_dataloader_batch_sizes( UpperCamelCase__ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , ) verify_dataloader_batch_sizes( UpperCamelCase__ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , ) def __lowerCAmelCase ( ): __lowerCAmelCase = create_accelerator(even_batches=UpperCamelCase__ ) __lowerCAmelCase = torch.nn.Linear(1 , 1 ) __lowerCAmelCase = accelerator.prepare(UpperCamelCase__ ) __lowerCAmelCase = create_dataloader(UpperCamelCase__ , dataset_size=3 , batch_size=1 ) __lowerCAmelCase = [] with accelerator.join_uneven_inputs([ddp_model] ): for batch_idx, batch in enumerate(UpperCamelCase__ ): __lowerCAmelCase = 
ddp_model(batch[0].float() ) __lowerCAmelCase = output.sum() loss.backward() batch_idxs.append(UpperCamelCase__ ) accelerator.wait_for_everyone() if accelerator.process_index == 0: assert batch_idxs == [0, 1] elif accelerator.process_index == 1: assert batch_idxs == [0] def __lowerCAmelCase ( __snake_case ): with warnings.catch_warnings(record=UpperCamelCase__ ) as w: with accelerator.join_uneven_inputs([Mock()] ): pass assert issubclass(w[-1].category , UpperCamelCase__ ) assert "only supported for multi-GPU" in str(w[-1].message ) def __lowerCAmelCase ( ): __lowerCAmelCase = True __lowerCAmelCase = False __lowerCAmelCase = create_accelerator(even_batches=UpperCamelCase__ ) __lowerCAmelCase = torch.nn.Linear(1 , 1 ) __lowerCAmelCase = accelerator.prepare(UpperCamelCase__ ) __lowerCAmelCase = create_dataloader(UpperCamelCase__ , dataset_size=3 , batch_size=1 ) __lowerCAmelCase = create_dataloader(UpperCamelCase__ , dataset_size=3 , batch_size=1 ) with accelerator.join_uneven_inputs([ddp_model] , even_batches=UpperCamelCase__ ): __lowerCAmelCase = train_dl.batch_sampler.even_batches __lowerCAmelCase = valid_dl.batch_sampler.even_batches assert train_dl_overridden_value == overridden_even_batches assert valid_dl_overridden_value == overridden_even_batches assert train_dl.batch_sampler.even_batches == default_even_batches assert valid_dl.batch_sampler.even_batches == default_even_batches def __lowerCAmelCase ( ): __lowerCAmelCase = True __lowerCAmelCase = False __lowerCAmelCase = create_accelerator(even_batches=UpperCamelCase__ ) __lowerCAmelCase = torch.nn.Linear(1 , 1 ) __lowerCAmelCase = accelerator.prepare(UpperCamelCase__ ) create_dataloader(UpperCamelCase__ , dataset_size=3 , batch_size=1 , iterable=UpperCamelCase__ ) __lowerCAmelCase = create_dataloader(UpperCamelCase__ , dataset_size=3 , batch_size=1 ) with warnings.catch_warnings(): warnings.filterwarnings("ignore" ) try: with accelerator.join_uneven_inputs([ddp_model] , even_batches=UpperCamelCase__ ): __lowerCAmelCase = batch_dl.batch_sampler.even_batches except AttributeError: # ensure attribute error is not raised when processing iterable dl raise AssertionError assert batch_dl_overridden_value == overridden_even_batches assert batch_dl.batch_sampler.even_batches == default_even_batches def __lowerCAmelCase ( ): __lowerCAmelCase = create_accelerator() __lowerCAmelCase = torch.nn.Linear(1 , 1 ) __lowerCAmelCase = accelerator.prepare(UpperCamelCase__ ) create_dataloader(UpperCamelCase__ , dataset_size=3 , batch_size=1 , iterable=UpperCamelCase__ ) with warnings.catch_warnings(record=UpperCamelCase__ ) as w: with accelerator.join_uneven_inputs([ddp_model] , even_batches=UpperCamelCase__ ): pass assert issubclass(w[-1].category , UpperCamelCase__ ) assert "only supported for map-style datasets" in str(w[-1].message ) def __lowerCAmelCase ( ): __lowerCAmelCase = create_accelerator() accelerator.print("Test that even_batches variable ensures uniform batches across processes" ) test_default_ensures_even_batch_sizes() accelerator.print("Run tests with even_batches disabled" ) test_can_disable_even_batches() accelerator.print("Test joining uneven inputs" ) test_can_join_uneven_inputs() accelerator.print("Test overriding even_batches when joining uneven inputs" ) test_join_can_override_even_batches() accelerator.print("Test overriding even_batches for mixed dataloader types" ) test_join_can_override_for_mixed_type_dataloaders() accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" ) 
test_join_raises_warning_for_iterable_when_overriding_even_batches() accelerator.print("Test join with non DDP distributed raises warning" ) __lowerCAmelCase = accelerator.state.distributed_type __lowerCAmelCase = DistributedType.FSDP test_join_raises_warning_for_non_ddp_distributed(UpperCamelCase__ ) __lowerCAmelCase = original_state if __name__ == "__main__": main()
367
import inspect import os import torch from transformers import AutoModel from transformers.testing_utils import mockenv_context from transformers.trainer_utils import set_seed import accelerate from accelerate.accelerator import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils.testing import ( AccelerateTestCase, TempDirTestCase, execute_subprocess_async, require_cuda, require_fsdp, require_multi_gpu, slow, ) from accelerate.utils.constants import ( FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, ) from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin from accelerate.utils.other import patch_environment set_seed(42) _lowerCamelCase = 'bert-base-cased' _lowerCamelCase = 'fp16' _lowerCamelCase = 'bf16' _lowerCamelCase = [FPaa, BFaa] @require_fsdp @require_cuda class UpperCamelCase_ ( UpperCamelCase__ ): def _snake_case ( self :Optional[Any] ) -> Optional[Any]: """simple docstring""" super().setUp() SCREAMING_SNAKE_CASE__ = dict( ACCELERATE_USE_FSDP="""true""" , MASTER_ADDR="""localhost""" , MASTER_PORT="""10999""" , RANK="""0""" , LOCAL_RANK="""0""" , WORLD_SIZE="""1""" , ) def _snake_case ( self :List[Any] ) -> Tuple: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy for i, strategy in enumerate(__A ): SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = f'''{i + 1}''' SCREAMING_SNAKE_CASE__ = strategy with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) ) def _snake_case ( self :int ) -> List[str]: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch for i, prefetch_policy in enumerate(__A ): SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = prefetch_policy with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() if prefetch_policy == "NO_PREFETCH": self.assertIsNone(fsdp_plugin.backward_prefetch ) else: self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) ) def _snake_case ( self :List[str] ) -> List[str]: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType for i, state_dict_type in enumerate(__A ): SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = state_dict_type with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) ) if state_dict_type == "FULL_STATE_DICT": self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu ) self.assertTrue(fsdp_plugin.state_dict_config.ranka_only ) def _snake_case ( self :str ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ = AutoModel.from_pretrained(__A ) for policy in FSDP_AUTO_WRAP_POLICY: SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = policy if policy == "TRANSFORMER_BASED_WRAP": SCREAMING_SNAKE_CASE__ = """BertLayer""" elif policy == "SIZE_BASED_WRAP": SCREAMING_SNAKE_CASE__ = """2000""" with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__A ) if policy == "NO_WRAP": self.assertIsNone(fsdp_plugin.auto_wrap_policy ) else: self.assertIsNotNone(fsdp_plugin.auto_wrap_policy ) SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = """TRANSFORMER_BASED_WRAP""" 
SCREAMING_SNAKE_CASE__ = """T5Layer""" with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() with self.assertRaises(__A ) as cm: fsdp_plugin.set_auto_wrap_policy(__A ) self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) ) SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = """SIZE_BASED_WRAP""" SCREAMING_SNAKE_CASE__ = """0""" with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__A ) self.assertIsNone(fsdp_plugin.auto_wrap_policy ) def _snake_case ( self :Optional[Any] ) -> Optional[int]: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = mp_dtype with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = Accelerator() if mp_dtype == "fp16": SCREAMING_SNAKE_CASE__ = torch.floataa elif mp_dtype == "bf16": SCREAMING_SNAKE_CASE__ = torch.bfloataa SCREAMING_SNAKE_CASE__ = MixedPrecision(param_dtype=__A , reduce_dtype=__A , buffer_dtype=__A ) self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , __A ) if mp_dtype == FPaa: self.assertTrue(isinstance(accelerator.scaler , __A ) ) elif mp_dtype == BFaa: self.assertIsNone(accelerator.scaler ) AcceleratorState._reset_state(__A ) def _snake_case ( self :str ) -> str: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload for flag in [True, False]: SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = str(__A ).lower() with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=__A ) ) @require_fsdp @require_multi_gpu @slow class UpperCamelCase_ ( UpperCamelCase__ ): def _snake_case ( self :Any ) -> Any: """simple docstring""" super().setUp() SCREAMING_SNAKE_CASE__ = 0.8_2 SCREAMING_SNAKE_CASE__ = [ """fsdp_shard_grad_op_transformer_based_wrap""", """fsdp_full_shard_transformer_based_wrap""", ] SCREAMING_SNAKE_CASE__ = { """multi_gpu_fp16""": 3200, """fsdp_shard_grad_op_transformer_based_wrap_fp16""": 2000, """fsdp_full_shard_transformer_based_wrap_fp16""": 1900, # Disabling below test as it overwhelms the RAM memory usage # on CI self-hosted runner leading to tests getting killed. 
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang } SCREAMING_SNAKE_CASE__ = 160 SCREAMING_SNAKE_CASE__ = 160 SCREAMING_SNAKE_CASE__ = inspect.getfile(accelerate.test_utils ) SCREAMING_SNAKE_CASE__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] ) def _snake_case ( self :Union[str, Any] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_performance.py""" ) SCREAMING_SNAKE_CASE__ = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""] for config in self.performance_configs: SCREAMING_SNAKE_CASE__ = cmd.copy() for i, strategy in enumerate(__A ): if strategy.lower() in config: cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' ) break if "fp32" in config: cmd_config.append("""--mixed_precision=no""" ) else: cmd_config.append("""--mixed_precision=fp16""" ) if "cpu_offload" in config: cmd_config.append("""--fsdp_offload_params=True""" ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in config: cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" ) elif policy == "SIZE_BASED_WRAP": cmd_config.append("""--fsdp_min_num_params=2000""" ) cmd_config.extend( [ self.test_file_path, f'''--output_dir={self.tmpdir}''', f'''--performance_lower_bound={self.performance_lower_bound}''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__A , env=os.environ.copy() ) def _snake_case ( self :Dict ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_checkpointing.py""" ) SCREAMING_SNAKE_CASE__ = [ """accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp""", """--mixed_precision=fp16""", """--fsdp_transformer_layer_cls_to_wrap=BertLayer""", ] for i, strategy in enumerate(__A ): SCREAMING_SNAKE_CASE__ = cmd.copy() cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' ) if strategy != "FULL_SHARD": continue SCREAMING_SNAKE_CASE__ = len(__A ) for state_dict_type in FSDP_STATE_DICT_TYPE: SCREAMING_SNAKE_CASE__ = cmd_config[:state_dict_config_index] cmd_config.append(f'''--fsdp_state_dict_type={state_dict_type}''' ) cmd_config.extend( [ self.test_file_path, f'''--output_dir={self.tmpdir}''', """--partial_train_epoch=1""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__A , env=os.environ.copy() ) SCREAMING_SNAKE_CASE__ = cmd_config[:-1] SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdir , """epoch_0""" ) cmd_config.extend( [ f'''--resume_from_checkpoint={resume_from_checkpoint}''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__A , env=os.environ.copy() ) def _snake_case ( self :Tuple ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_peak_memory_usage.py""" ) SCREAMING_SNAKE_CASE__ = [ """accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", ] for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items(): SCREAMING_SNAKE_CASE__ = cmd.copy() if "fp16" in spec: cmd_config.extend(["""--mixed_precision=fp16"""] ) else: cmd_config.extend(["""--mixed_precision=no"""] ) if "multi_gpu" in spec: continue else: cmd_config.extend(["""--use_fsdp"""] ) for i, 
strategy in enumerate(__A ): if strategy.lower() in spec: cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' ) break if "cpu_offload" in spec: cmd_config.append("""--fsdp_offload_params=True""" ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in spec: cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" ) elif policy == "SIZE_BASED_WRAP": cmd_config.append("""--fsdp_min_num_params=2000""" ) cmd_config.extend( [ self.test_file_path, f'''--output_dir={self.tmpdir}''', f'''--peak_memory_upper_bound={peak_mem_upper_bound}''', f'''--n_train={self.n_train}''', f'''--n_val={self.n_val}''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__A , env=os.environ.copy() )
6
0
import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor


a__ = logging.get_logger(__name__)


class lowercase ( DPTImageProcessor ):
    """simple docstring"""

    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        warnings.warn(
            """The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use DPTImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
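# The file above is an instance of a common deprecation-shim pattern: keep the
# old class importable as a subclass of its replacement, emit a FutureWarning
# on construction, and defer everything else to the new implementation. A
# generic, runnable sketch with illustrative class names:
import warnings


class NewImageProcessor:
    def __init__(self, size=384):
        self.size = size


class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)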
165
import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig _lowerCamelCase = logging.get_logger(__name__) # General docstring _lowerCamelCase = 'PoolFormerConfig' # Base docstring _lowerCamelCase = 'sail/poolformer_s12' _lowerCamelCase = [1, 512, 7, 7] # Image classification docstring _lowerCamelCase = 'sail/poolformer_s12' _lowerCamelCase = 'tabby, tabby cat' _lowerCamelCase = [ 'sail/poolformer_s12', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] , UpperCamelCase__: float = 0.0 , UpperCamelCase__: bool = False ): if drop_prob == 0.0 or not training: return input SCREAMING_SNAKE_CASE__ = 1 - drop_prob SCREAMING_SNAKE_CASE__ = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets SCREAMING_SNAKE_CASE__ = keep_prob + torch.rand(UpperCamelCase__ , dtype=input.dtype , device=input.device ) random_tensor.floor_() # binarize SCREAMING_SNAKE_CASE__ = input.div(UpperCamelCase__ ) * random_tensor return output class UpperCamelCase_ ( nn.Module ): def __init__( self :Optional[Any] , __A :Optional[float] = None ) -> None: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = drop_prob def _snake_case ( self :Any , __A :torch.Tensor ) -> torch.Tensor: """simple docstring""" return drop_path(__A , self.drop_prob , self.training ) def _snake_case ( self :Dict ) -> str: """simple docstring""" return "p={}".format(self.drop_prob ) class UpperCamelCase_ ( nn.Module ): def __init__( self :Dict , __A :Optional[Any] , __A :Dict , __A :List[str] , __A :Optional[Any] , __A :Tuple , __A :Optional[Any]=None ) -> Union[str, Any]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = patch_size if isinstance(__A , collections.abc.Iterable ) else (patch_size, patch_size) SCREAMING_SNAKE_CASE__ = stride if isinstance(__A , collections.abc.Iterable ) else (stride, stride) SCREAMING_SNAKE_CASE__ = padding if isinstance(__A , collections.abc.Iterable ) else (padding, padding) SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , kernel_size=__A , stride=__A , padding=__A ) SCREAMING_SNAKE_CASE__ = norm_layer(__A ) if norm_layer else nn.Identity() def _snake_case ( self :Dict , __A :Union[str, Any] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.projection(__A ) SCREAMING_SNAKE_CASE__ = self.norm(__A ) return embeddings class UpperCamelCase_ ( nn.GroupNorm ): def __init__( self :Dict , __A :Tuple , **__A :Union[str, Any] ) -> Dict: """simple docstring""" super().__init__(1 , __A , **__A ) class UpperCamelCase_ ( nn.Module ): def __init__( self :List[str] , __A :Optional[int] ) -> Any: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = nn.AvgPoolad(__A , stride=1 , padding=pool_size // 2 , count_include_pad=__A ) def _snake_case ( self :Any , __A :Optional[Any] ) -> Optional[Any]: """simple docstring""" return self.pool(__A ) - hidden_states class UpperCamelCase_ ( nn.Module ): def __init__( self :Optional[Any] , __A :Tuple , __A :Dict , __A :int , 
__A :Any ) -> str: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , 1 ) SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , 1 ) SCREAMING_SNAKE_CASE__ = PoolFormerDropPath(__A ) if isinstance(config.hidden_act , __A ): SCREAMING_SNAKE_CASE__ = ACTaFN[config.hidden_act] else: SCREAMING_SNAKE_CASE__ = config.hidden_act def _snake_case ( self :Union[str, Any] , __A :Optional[int] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.conva(__A ) SCREAMING_SNAKE_CASE__ = self.act_fn(__A ) SCREAMING_SNAKE_CASE__ = self.drop(__A ) SCREAMING_SNAKE_CASE__ = self.conva(__A ) SCREAMING_SNAKE_CASE__ = self.drop(__A ) return hidden_states class UpperCamelCase_ ( nn.Module ): def __init__( self :Any , __A :str , __A :List[str] , __A :Tuple , __A :Dict , __A :Union[str, Any] , __A :int ) -> Optional[int]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = PoolFormerPooling(__A ) SCREAMING_SNAKE_CASE__ = PoolFormerOutput(__A , __A , __A , __A ) SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(__A ) SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(__A ) # Useful for training neural nets SCREAMING_SNAKE_CASE__ = PoolFormerDropPath(__A ) if drop_path > 0.0 else nn.Identity() SCREAMING_SNAKE_CASE__ = config.use_layer_scale if config.use_layer_scale: SCREAMING_SNAKE_CASE__ = nn.Parameter( config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A ) SCREAMING_SNAKE_CASE__ = nn.Parameter( config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A ) def _snake_case ( self :Optional[Any] , __A :Optional[int] ) -> str: """simple docstring""" if self.use_layer_scale: SCREAMING_SNAKE_CASE__ = self.pooling(self.before_norm(__A ) ) SCREAMING_SNAKE_CASE__ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection SCREAMING_SNAKE_CASE__ = hidden_states + self.drop_path(__A ) SCREAMING_SNAKE_CASE__ = () SCREAMING_SNAKE_CASE__ = self.output(self.after_norm(__A ) ) SCREAMING_SNAKE_CASE__ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection SCREAMING_SNAKE_CASE__ = hidden_states + self.drop_path(__A ) SCREAMING_SNAKE_CASE__ = (output,) + outputs return outputs else: SCREAMING_SNAKE_CASE__ = self.drop_path(self.pooling(self.before_norm(__A ) ) ) # First residual connection SCREAMING_SNAKE_CASE__ = pooling_output + hidden_states SCREAMING_SNAKE_CASE__ = () # Second residual connection inside the PoolFormerOutput block SCREAMING_SNAKE_CASE__ = self.drop_path(self.output(self.after_norm(__A ) ) ) SCREAMING_SNAKE_CASE__ = hidden_states + layer_output SCREAMING_SNAKE_CASE__ = (output,) + outputs return outputs class UpperCamelCase_ ( nn.Module ): def __init__( self :Union[str, Any] , __A :List[Any] ) -> Union[str, Any]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = config # stochastic depth decay rule SCREAMING_SNAKE_CASE__ = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings SCREAMING_SNAKE_CASE__ = [] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) SCREAMING_SNAKE_CASE__ = nn.ModuleList(__A ) # Transformer blocks SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = 0 for i in range(config.num_encoder_blocks ): # each block consists of layers 
SCREAMING_SNAKE_CASE__ = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( __A , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(__A ) ) SCREAMING_SNAKE_CASE__ = nn.ModuleList(__A ) def _snake_case ( self :str , __A :Tuple , __A :Dict=False , __A :Tuple=True ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = () if output_hidden_states else None SCREAMING_SNAKE_CASE__ = pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = layers # Get patch embeddings from hidden_states SCREAMING_SNAKE_CASE__ = embedding_layer(__A ) # Send the embeddings through the blocks for _, blk in enumerate(__A ): SCREAMING_SNAKE_CASE__ = blk(__A ) SCREAMING_SNAKE_CASE__ = layer_outputs[0] if output_hidden_states: SCREAMING_SNAKE_CASE__ = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=__A , hidden_states=__A ) class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = PoolFormerConfig lowerCamelCase_ = "poolformer" lowerCamelCase_ = "pixel_values" lowerCamelCase_ = True def _snake_case ( self :Optional[Any] , __A :Tuple ) -> Dict: """simple docstring""" if isinstance(__A , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__A , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def _snake_case ( self :str , __A :Optional[Any] , __A :Union[str, Any]=False ) -> Any: """simple docstring""" if isinstance(__A , __A ): SCREAMING_SNAKE_CASE__ = value _lowerCamelCase = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' _lowerCamelCase = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n' @add_start_docstrings( "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." 
, UpperCamelCase__ , ) class UpperCamelCase_ ( UpperCamelCase__ ): def __init__( self :Union[str, Any] , __A :Any ) -> int: """simple docstring""" super().__init__(__A ) SCREAMING_SNAKE_CASE__ = config SCREAMING_SNAKE_CASE__ = PoolFormerEncoder(__A ) # Initialize weights and apply final processing self.post_init() def _snake_case ( self :Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(__A ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__A , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _snake_case ( self :Dict , __A :Optional[torch.FloatTensor] = None , __A :Optional[bool] = None , __A :Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]: """simple docstring""" SCREAMING_SNAKE_CASE__ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) SCREAMING_SNAKE_CASE__ = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("""You have to specify pixel_values""" ) SCREAMING_SNAKE_CASE__ = self.encoder( __A , output_hidden_states=__A , return_dict=__A , ) SCREAMING_SNAKE_CASE__ = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=__A , hidden_states=encoder_outputs.hidden_states , ) class UpperCamelCase_ ( nn.Module ): def __init__( self :int , __A :Optional[int] ) -> Tuple: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = nn.Linear(config.hidden_size , config.hidden_size ) def _snake_case ( self :List[Any] , __A :Dict ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.dense(__A ) return output @add_start_docstrings( "\n PoolFormer Model transformer with an image classification head on top\n " , UpperCamelCase__ , ) class UpperCamelCase_ ( UpperCamelCase__ ): def __init__( self :str , __A :Union[str, Any] ) -> int: """simple docstring""" super().__init__(__A ) SCREAMING_SNAKE_CASE__ = config.num_labels SCREAMING_SNAKE_CASE__ = PoolFormerModel(__A ) # Final norm SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head SCREAMING_SNAKE_CASE__ = ( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__A ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _snake_case ( self :int , __A :Optional[torch.FloatTensor] = None , __A :Optional[torch.LongTensor] = None , __A :Optional[bool] = None , __A :Optional[bool] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]: """simple docstring""" SCREAMING_SNAKE_CASE__ = return_dict if return_dict is not None else self.config.use_return_dict SCREAMING_SNAKE_CASE__ = self.poolformer( __A , output_hidden_states=__A , return_dict=__A , ) SCREAMING_SNAKE_CASE__ = outputs[0] SCREAMING_SNAKE_CASE__ = self.classifier(self.norm(__A ).mean([-2, -1] ) ) SCREAMING_SNAKE_CASE__ = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: SCREAMING_SNAKE_CASE__ = """regression""" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): SCREAMING_SNAKE_CASE__ = 
"""single_label_classification""" else: SCREAMING_SNAKE_CASE__ = """multi_label_classification""" if self.config.problem_type == "regression": SCREAMING_SNAKE_CASE__ = MSELoss() if self.num_labels == 1: SCREAMING_SNAKE_CASE__ = loss_fct(logits.squeeze() , labels.squeeze() ) else: SCREAMING_SNAKE_CASE__ = loss_fct(__A , __A ) elif self.config.problem_type == "single_label_classification": SCREAMING_SNAKE_CASE__ = CrossEntropyLoss() SCREAMING_SNAKE_CASE__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": SCREAMING_SNAKE_CASE__ = BCEWithLogitsLoss() SCREAMING_SNAKE_CASE__ = loss_fct(__A , __A ) if not return_dict: SCREAMING_SNAKE_CASE__ = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=__A , logits=__A , hidden_states=outputs.hidden_states )
6
0
'''simple docstring''' import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class UpperCAmelCase : '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase=14 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=99 , __lowerCAmelCase=32 , __lowerCAmelCase=4 , __lowerCAmelCase=4 , __lowerCAmelCase=4 , __lowerCAmelCase=37 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=512 , __lowerCAmelCase=0.0_2 , ) -> Union[str, Any]: lowercase__ : Optional[Any] = parent lowercase__ : List[Any] = batch_size lowercase__ : int = seq_length lowercase__ : Any = is_training lowercase__ : Any = use_input_mask lowercase__ : Tuple = use_token_type_ids lowercase__ : List[Any] = use_labels lowercase__ : int = vocab_size lowercase__ : List[Any] = hidden_size lowercase__ : Any = rotary_dim lowercase__ : Optional[int] = num_hidden_layers lowercase__ : Union[str, Any] = num_attention_heads lowercase__ : Any = intermediate_size lowercase__ : int = hidden_act lowercase__ : Tuple = hidden_dropout_prob lowercase__ : Optional[int] = attention_probs_dropout_prob lowercase__ : Dict = max_position_embeddings lowercase__ : Optional[Any] = initializer_range lowercase__ : Optional[Any] = None lowercase__ : Optional[Any] = vocab_size - 1 lowercase__ : Optional[Any] = vocab_size - 1 lowercase__ : Optional[Any] = vocab_size - 1 def _lowerCAmelCase( self ) -> Any: lowercase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : List[str] = None if self.use_input_mask: lowercase__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : Union[str, Any] = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=__A , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def _lowerCAmelCase( self ) -> Dict: lowercase__ : str = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : List[str] = config_and_inputs lowercase__ : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': attention_mask} return config, inputs_dict def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple: lowercase__ : Tuple = 20 lowercase__ : Optional[Any] = model_class_name(__A ) lowercase__ : List[Any] = model.init_cache(input_ids.shape[0] , __A ) lowercase__ : Any = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) lowercase__ : int = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], 
input_ids.shape[-1] - 1) ) lowercase__ : int = model( input_ids[:, :-1] , attention_mask=__A , past_key_values=__A , position_ids=__A , ) lowercase__ : int = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' ) lowercase__ : Any = model( input_ids[:, -1:] , attention_mask=__A , past_key_values=outputs_cache.past_key_values , position_ids=__A , ) lowercase__ : Optional[Any] = model(__A ) lowercase__ : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]: lowercase__ : str = 20 lowercase__ : Dict = model_class_name(__A ) lowercase__ : int = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) lowercase__ : Optional[Any] = model.init_cache(input_ids.shape[0] , __A ) lowercase__ : Union[str, Any] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) lowercase__ : List[str] = model( input_ids[:, :-1] , attention_mask=__A , past_key_values=__A , position_ids=__A , ) lowercase__ : List[Any] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' ) lowercase__ : int = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=__A , position_ids=__A , ) lowercase__ : Any = model(__A , attention_mask=__A ) lowercase__ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) @require_flax class UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () SCREAMING_SNAKE_CASE = (FlaxGPTJForCausalLM,) if is_flax_available() else () def _lowerCAmelCase( self ) -> Tuple: lowercase__ : Union[str, Any] = FlaxGPTJModelTester(self ) def _lowerCAmelCase( self ) -> Optional[Any]: for model_class_name in self.all_model_classes: lowercase__ , lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(__A , __A , __A , __A ) def _lowerCAmelCase( self ) -> Optional[int]: for model_class_name in self.all_model_classes: lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( __A , __A , __A , __A ) @tooslow def _lowerCAmelCase( self ) -> Optional[Any]: lowercase__ : Tuple = GPTaTokenizer.from_pretrained('''gpt2''' , pad_token='''<|endoftext|>''' , padding_side='''left''' ) lowercase__ : Union[str, Any] = tokenizer(['''Hello this is a long string''', '''Hey'''] , return_tensors='''np''' , padding=__A , truncation=__A ) lowercase__ : Tuple = FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' ) lowercase__ : List[str] = False lowercase__ : Any = model.config.eos_token_id lowercase__ : Union[str, Any] = jax.jit(model.generate ) lowercase__ : Tuple = jit_generate( inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , pad_token_id=tokenizer.pad_token_id ).sequences lowercase__ : Any = tokenizer.batch_decode(__A , skip_special_tokens=__A ) lowercase__ : Any = [ '''Hello this is a long string of text.\n\nI\'m trying to get the text of the''', '''Hey, I\'m a little late to 
the party. I\'m going to''', ] self.assertListEqual(__A , __A ) @is_pt_flax_cross_test def _lowerCAmelCase( self ) -> List[str]: lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs lowercase__ : Any = self._prepare_for_class(__A , __A ) lowercase__ : Any = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class lowercase__ : Optional[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning lowercase__ : Union[str, Any] = getattr(__A , __A ) lowercase__ , lowercase__ : Union[str, Any] = pt_inputs['''input_ids'''].shape lowercase__ : List[Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(__A ): lowercase__ : Tuple = 0 lowercase__ : Union[str, Any] = 1 lowercase__ : Tuple = 0 lowercase__ : Tuple = 1 lowercase__ : List[str] = pt_model_class(__A ).eval() lowercase__ : Any = model_class(__A , dtype=jnp.floataa ) lowercase__ : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __A ) lowercase__ : Any = fx_state with torch.no_grad(): lowercase__ : Tuple = pt_model(**__A ).to_tuple() lowercase__ : Optional[Any] = fx_model(**__A ).to_tuple() self.assertEqual(len(__A ) , len(__A ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(__A , __A ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(__A ) lowercase__ : str = model_class.from_pretrained(__A , from_pt=__A ) lowercase__ : Tuple = fx_model_loaded(**__A ).to_tuple() self.assertEqual( len(__A ) , len(__A ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output_loaded, pt_output in zip(__A , __A ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @is_pt_flax_cross_test def _lowerCAmelCase( self ) -> Optional[int]: lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs lowercase__ : int = self._prepare_for_class(__A , __A ) lowercase__ : int = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class lowercase__ : Optional[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning lowercase__ : Tuple = getattr(__A , __A ) lowercase__ : List[str] = pt_model_class(__A ).eval() lowercase__ : int = model_class(__A , dtype=jnp.floataa ) lowercase__ : List[str] = load_flax_weights_in_pytorch_model(__A , fx_model.params ) lowercase__ , lowercase__ : Tuple = pt_inputs['''input_ids'''].shape lowercase__ : str = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(__A ): lowercase__ : List[str] = 0 lowercase__ : Optional[Any] = 1 lowercase__ : List[Any] = 0 lowercase__ : List[str] = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): lowercase__ : Any = pt_model(**__A ).to_tuple() lowercase__ : List[str] = fx_model(**__A ).to_tuple() self.assertEqual(len(__A ) , len(__A ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(__A , __A ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: 
fx_model.save_pretrained(__A ) lowercase__ : int = pt_model_class.from_pretrained(__A , from_flax=__A ) with torch.no_grad(): lowercase__ : Optional[int] = pt_model_loaded(**__A ).to_tuple() self.assertEqual( len(__A ) , len(__A ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(__A , __A ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @tooslow def _lowerCAmelCase( self ) -> str: for model_class_name in self.all_model_classes: lowercase__ : List[Any] = model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' ) lowercase__ : List[Any] = model(np.ones((1, 1) ) ) self.assertIsNotNone(__A )
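# Every equivalence test above reduces to the same assertion: run the model two
# ways (cached vs. uncached, Flax vs. PyTorch) and require the logits at the
# last position to agree within a tolerance. A framework-free sketch of that
# check, with random arrays standing in for the two sets of model outputs:
import numpy as np

outputs_a = np.random.randn(2, 50).astype("float32")
outputs_b = outputs_a + np.float32(1e-5)  # pretend the two code paths agree to ~1e-5

diff = np.max(np.abs(outputs_a[:, -5:] - outputs_b[:, -5:]))
assert diff < 1e-3, f"Max diff is {diff}"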
152
import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ ( UpperCamelCase__ ): def __init__( self :Union[str, Any] , __A :Optional[int] , __A :Tuple=13 , __A :Dict=7 , __A :Dict=True , __A :str=True , __A :Optional[Any]=True , __A :Optional[Any]=True , __A :Optional[Any]=True , __A :Any=False , __A :Dict=False , __A :Any=False , __A :Tuple=2 , __A :Dict=99 , __A :Optional[Any]=0 , __A :List[str]=32 , __A :Optional[int]=5 , __A :Dict=4 , __A :List[str]=0.1 , __A :Union[str, Any]=0.1 , __A :Tuple=512 , __A :Any=12 , __A :Optional[int]=2 , __A :Union[str, Any]=0.0_2 , __A :Dict=3 , __A :Optional[int]=4 , __A :Any="last" , __A :List[Any]=None , __A :Any=None , ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ = parent SCREAMING_SNAKE_CASE__ = batch_size SCREAMING_SNAKE_CASE__ = seq_length SCREAMING_SNAKE_CASE__ = is_training SCREAMING_SNAKE_CASE__ = use_input_lengths SCREAMING_SNAKE_CASE__ = use_token_type_ids SCREAMING_SNAKE_CASE__ = use_labels SCREAMING_SNAKE_CASE__ = gelu_activation SCREAMING_SNAKE_CASE__ = sinusoidal_embeddings SCREAMING_SNAKE_CASE__ = causal SCREAMING_SNAKE_CASE__ = asm SCREAMING_SNAKE_CASE__ = n_langs SCREAMING_SNAKE_CASE__ = vocab_size SCREAMING_SNAKE_CASE__ = n_special SCREAMING_SNAKE_CASE__ = hidden_size SCREAMING_SNAKE_CASE__ = num_hidden_layers SCREAMING_SNAKE_CASE__ = num_attention_heads SCREAMING_SNAKE_CASE__ = hidden_dropout_prob SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ = max_position_embeddings SCREAMING_SNAKE_CASE__ = type_vocab_size SCREAMING_SNAKE_CASE__ = type_sequence_label_size SCREAMING_SNAKE_CASE__ = initializer_range SCREAMING_SNAKE_CASE__ = num_labels SCREAMING_SNAKE_CASE__ = num_choices SCREAMING_SNAKE_CASE__ = summary_type SCREAMING_SNAKE_CASE__ = use_proj SCREAMING_SNAKE_CASE__ = scope def _snake_case ( self :Optional[Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE__ = None if self.use_input_lengths: SCREAMING_SNAKE_CASE__ = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length SCREAMING_SNAKE_CASE__ = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None if self.use_labels: SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , 2 ).float() SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_choices ) 
SCREAMING_SNAKE_CASE__ = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _snake_case ( self :List[str] ) -> Optional[int]: """simple docstring""" return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def _snake_case ( self :Tuple , __A :str , __A :int , __A :Optional[int] , __A :Any , __A :Union[str, Any] , __A :Optional[int] , __A :Union[str, Any] , __A :Union[str, Any] , __A :str , ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertModel(config=__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A , lengths=__A , langs=__A ) SCREAMING_SNAKE_CASE__ = model(__A , langs=__A ) SCREAMING_SNAKE_CASE__ = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case ( self :str , __A :Any , __A :str , __A :Union[str, Any] , __A :Optional[Any] , __A :Optional[int] , __A :Any , __A :Union[str, Any] , __A :Optional[Any] , __A :Union[str, Any] , ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertWithLMHeadModel(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self :Tuple , __A :Union[str, Any] , __A :Optional[Any] , __A :Dict , __A :Dict , __A :Union[str, Any] , __A :List[str] , __A :Optional[int] , __A :int , __A :str , ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertForQuestionAnsweringSimple(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A ) SCREAMING_SNAKE_CASE__ = model(__A , start_positions=__A , end_positions=__A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case ( self :List[str] , __A :Any , __A :int , __A :Tuple , __A :Optional[Any] , __A :Tuple , __A :Optional[int] , __A :str , __A :int , __A :str , ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertForQuestionAnswering(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A ) SCREAMING_SNAKE_CASE__ = model( __A , start_positions=__A , end_positions=__A , cls_index=__A , is_impossible=__A , p_mask=__A , ) SCREAMING_SNAKE_CASE__ = model( __A , start_positions=__A , end_positions=__A , cls_index=__A , is_impossible=__A , ) ((SCREAMING_SNAKE_CASE__) , ) = result_with_labels.to_tuple() SCREAMING_SNAKE_CASE__ = model(__A , start_positions=__A , end_positions=__A ) ((SCREAMING_SNAKE_CASE__) , ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, 
model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def _snake_case ( self :Optional[int] , __A :str , __A :Optional[int] , __A :Tuple , __A :Dict , __A :List[str] , __A :Tuple , __A :List[str] , __A :Dict , __A :List[str] , ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertForSequenceClassification(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A ) SCREAMING_SNAKE_CASE__ = model(__A , labels=__A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _snake_case ( self :Optional[Any] , __A :Optional[Any] , __A :Optional[Any] , __A :List[str] , __A :Optional[Any] , __A :int , __A :Tuple , __A :Optional[int] , __A :Union[str, Any] , __A :Dict , ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.num_labels SCREAMING_SNAKE_CASE__ = FlaubertForTokenClassification(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A , attention_mask=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case ( self :str , __A :Any , __A :Tuple , __A :List[str] , __A :Tuple , __A :Any , __A :int , __A :Dict , __A :List[str] , __A :Tuple , ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.num_choices SCREAMING_SNAKE_CASE__ = FlaubertForMultipleChoice(config=__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ = model( __A , attention_mask=__A , token_type_ids=__A , labels=__A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _snake_case ( self :Union[str, Any] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) = config_and_inputs SCREAMING_SNAKE_CASE__ = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class UpperCamelCase_ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): lowerCamelCase_ = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) lowerCamelCase_ = ( { "feature-extraction": FlaubertModel, "fill-mask": FlaubertWithLMHeadModel, "question-answering": FlaubertForQuestionAnsweringSimple, "text-classification": FlaubertForSequenceClassification, "token-classification": FlaubertForTokenClassification, "zero-shot": FlaubertForSequenceClassification, } if is_torch_available() else {} ) def 
_snake_case ( self :Any , __A :Optional[int] , __A :Optional[int] , __A :Dict , __A :List[Any] , __A :Tuple ) -> str: """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("""Fast""" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _snake_case ( self :Tuple , __A :List[str] , __A :Optional[int] , __A :Dict=False ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = super()._prepare_for_class(__A , __A , return_labels=__A ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": SCREAMING_SNAKE_CASE__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__A ) SCREAMING_SNAKE_CASE__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__A ) return inputs_dict def _snake_case ( self :str ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertModelTester(self ) SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=__A , emb_dim=37 ) def _snake_case ( self :int ) -> int: """simple docstring""" self.config_tester.run_common_tests() def _snake_case ( self :Optional[Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*__A ) def _snake_case ( self :Tuple ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*__A ) def _snake_case ( self :str ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*__A ) def _snake_case ( self :Union[str, Any] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*__A ) def _snake_case ( self :str ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*__A ) def _snake_case ( self :Any ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*__A ) def _snake_case ( self :Any ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*__A ) @slow def _snake_case ( self :Union[str, Any] ) -> List[str]: """simple docstring""" for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ = FlaubertModel.from_pretrained(__A ) self.assertIsNotNone(__A ) @slow @require_torch_gpu def _snake_case ( self :Tuple ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == FlaubertForMultipleChoice: return SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = model_class(config=__A ) SCREAMING_SNAKE_CASE__ = self._prepare_for_class(__A , __A ) SCREAMING_SNAKE_CASE__ = torch.jit.trace( __A , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(__A , os.path.join(__A , """traced_model.pt""" ) ) SCREAMING_SNAKE_CASE__ = torch.jit.load(os.path.join(__A , """traced_model.pt""" ) , map_location=__A ) loaded(inputs_dict["""input_ids"""].to(__A ) , inputs_dict["""attention_mask"""].to(__A ) ) @require_torch class UpperCamelCase_ ( unittest.TestCase ): @slow def _snake_case ( self :Dict ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" ) SCREAMING_SNAKE_CASE__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) with torch.no_grad(): SCREAMING_SNAKE_CASE__ = model(__A )[0] SCREAMING_SNAKE_CASE__ = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __A ) SCREAMING_SNAKE_CASE__ = torch.tensor( [[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=1E-4 ) )
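# The GPU test above boils down to a TorchScript round trip: trace a module on
# example inputs, save the trace, reload it, and call it again. The same steps
# on a toy module, as a sketch:
import os
import tempfile

import torch

model = torch.nn.Linear(4, 2).eval()
example = torch.randn(1, 4)
traced = torch.jit.trace(model, example)

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "traced_model.pt")
    torch.jit.save(traced, path)
    loaded = torch.jit.load(path, map_location="cpu")
    assert torch.allclose(loaded(example), model(example), atol=1e-6)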
6
0
"""simple docstring""" import inspect import unittest from transformers import DecisionTransformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DecisionTransformerModel from transformers.models.decision_transformer.modeling_decision_transformer import ( DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class SCREAMING_SNAKE_CASE__ : def __init__(self , _lowercase , _lowercase=13 , _lowercase=7 , _lowercase=6 , _lowercase=17 , _lowercase=23 , _lowercase=11 , _lowercase=True , ): '''simple docstring''' __a : Dict = parent __a : Tuple = batch_size __a : Any = seq_length __a : Optional[int] = act_dim __a : Optional[Any] = state_dim __a : Optional[int] = hidden_size __a : Optional[Any] = max_length __a : List[Any] = is_training def lowerCAmelCase__(self ): '''simple docstring''' __a : str = floats_tensor((self.batch_size, self.seq_length, self.state_dim) ) __a : Any = floats_tensor((self.batch_size, self.seq_length, self.act_dim) ) __a : Optional[Any] = floats_tensor((self.batch_size, self.seq_length, 1) ) __a : Dict = floats_tensor((self.batch_size, self.seq_length, 1) ) __a : List[str] = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 ) __a : Union[str, Any] = random_attention_mask((self.batch_size, self.seq_length) ) __a : Union[str, Any] = self.get_config() return ( config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) def lowerCAmelCase__(self ): '''simple docstring''' return DecisionTransformerConfig( batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , ) def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ): '''simple docstring''' __a : str = DecisionTransformerModel(config=__A ) model.to(__A ) model.eval() __a : Any = model(__A , __A , __A , __A , __A , __A ) self.parent.assertEqual(result.state_preds.shape , states.shape ) self.parent.assertEqual(result.action_preds.shape , actions.shape ) self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions def lowerCAmelCase__(self ): '''simple docstring''' __a : Optional[int] = self.prepare_config_and_inputs() ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : str = config_and_inputs __a : Dict = { """states""": states, """actions""": actions, """rewards""": rewards, """returns_to_go""": returns_to_go, """timesteps""": timesteps, """attention_mask""": attention_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): _lowerCAmelCase = (DecisionTransformerModel,) if is_torch_available() else () _lowerCAmelCase = () _lowerCAmelCase = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {} # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids 
_lowerCAmelCase = False # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False def lowerCAmelCase__(self ): '''simple docstring''' __a : List[Any] = DecisionTransformerModelTester(self ) __a : List[Any] = ConfigTester(self , config_class=__A , hidden_size=37 ) def lowerCAmelCase__(self ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase__(self ): '''simple docstring''' __a : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) @slow def lowerCAmelCase__(self ): '''simple docstring''' for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : Optional[int] = DecisionTransformerModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def lowerCAmelCase__(self ): '''simple docstring''' __a , __a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : Union[str, Any] = model_class(__A ) __a : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __a : int = [*signature.parameters.keys()] __a : Union[str, Any] = [ """states""", """actions""", """rewards""", """returns_to_go""", """timesteps""", """attention_mask""", ] self.assertListEqual(arg_names[: len(__A )] , __A ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @slow def lowerCAmelCase__(self ): '''simple docstring''' __a : Optional[int] = 2 # number of steps of autoregressive prediction we will perform __a : str = 10 # defined by the RL environment, may be normalized __a : str = DecisionTransformerModel.from_pretrained("""edbeeching/decision-transformer-gym-hopper-expert""" ) __a : List[Any] = model.to(__A ) __a : str = model.config torch.manual_seed(0 ) __a : List[Any] = torch.randn(1 , 1 , config.state_dim ).to(device=__A , dtype=torch.floataa ) # env.reset() __a : Any = torch.tensor( [[0.24_2793, -0.2869_3074, 0.874_2613], [0.6781_5274, -0.0810_1085, -0.1295_2147]] , device=__A ) __a : Union[str, Any] = torch.tensor(__A , device=__A , dtype=torch.floataa ).reshape(1 , 1 , 1 ) __a : Union[str, Any] = state __a : Any = torch.zeros(1 , 0 , config.act_dim , device=__A , dtype=torch.floataa ) __a : List[str] = torch.zeros(1 , 0 , device=__A , dtype=torch.floataa ) __a : List[Any] = torch.tensor(0 , device=__A , dtype=torch.long ).reshape(1 , 1 ) for step in range(__A ): __a : List[str] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=__A )] , dim=1 ) __a : List[Any] = torch.cat([rewards, torch.zeros(1 , 1 , device=__A )] , dim=1 ) __a : Optional[Any] = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device ) with torch.no_grad(): __a , __a , __a : Any = model( states=__A , actions=__A , rewards=__A , returns_to_go=__A , timesteps=__A , attention_mask=__A , return_dict=__A , ) self.assertEqual(action_pred.shape , actions.shape ) self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) ) __a , __a , __a , __a : List[str] = ( # env.step(action) torch.randn(1 , 1 , config.state_dim ).to(device=__A , dtype=torch.floataa ), 1.0, False, {}, ) __a : List[Any] = action_pred[0, -1] __a : Any = torch.cat([states, state] , dim=1 ) __a : Optional[Any] = returns_to_go[0, -1] - 
reward __a : List[str] = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 ) __a : Dict = torch.cat( [timesteps, torch.ones((1, 1) , device=__A , dtype=torch.long ) * (step + 1)] , dim=1 )
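# The integration test above keeps one growing (1, T, dim) tensor per modality
# (states, actions, returns-to-go, timesteps) and extends each of them every
# environment step. A stripped-down sketch of that bookkeeping, with random
# tensors standing in for both the model's prediction and env.step():
import torch

state_dim, act_dim, target_return = 17, 6, 10.0
states = torch.randn(1, 1, state_dim)
actions = torch.zeros(1, 0, act_dim)
returns_to_go = torch.full((1, 1, 1), target_return)
timesteps = torch.zeros(1, 1, dtype=torch.long)

for step in range(2):
    actions = torch.cat([actions, torch.zeros(1, 1, act_dim)], dim=1)
    action_pred = torch.randn(act_dim)            # stands in for the model's prediction
    actions[0, -1] = action_pred
    state = torch.randn(1, 1, state_dim)          # stands in for env.step(action)
    reward = 1.0
    states = torch.cat([states, state], dim=1)
    pred_return = returns_to_go[0, -1] - reward
    returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
    timesteps = torch.cat(
        [timesteps, torch.ones(1, 1, dtype=torch.long) * (step + 1)], dim=1
    )

assert states.shape == (1, 3, state_dim)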
581
from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] , UpperCamelCase__: str , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Union[str, Any] ): for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})''' def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Any , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Any , UpperCamelCase__: List[str] , UpperCamelCase__: Tuple=True ): model.train() SCREAMING_SNAKE_CASE__ = model(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = F.mse_loss(UpperCamelCase__ , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple , UpperCamelCase__: List[Any]=False ): set_seed(42 ) SCREAMING_SNAKE_CASE__ = RegressionModel() SCREAMING_SNAKE_CASE__ = deepcopy(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = RegressionDataset(length=80 ) SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 ) model.to(accelerator.device ) if sched: SCREAMING_SNAKE_CASE__ = AdamW(params=model.parameters() , lr=1e-3 ) SCREAMING_SNAKE_CASE__ = AdamW(params=ddp_model.parameters() , lr=1e-3 ) SCREAMING_SNAKE_CASE__ = LambdaLR(UpperCamelCase__ , lr_lambda=lambda UpperCamelCase__ : epoch**0.6_5 ) SCREAMING_SNAKE_CASE__ = LambdaLR(UpperCamelCase__ , lr_lambda=lambda UpperCamelCase__ : epoch**0.6_5 ) # Make a copy of `model` if sched: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple ): # Test when on a single CPU or GPU that the context manager does nothing SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ ) # Use a single batch SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = next(iter(UpperCamelCase__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCamelCase__ , 
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: # Sync grads step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )] def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] ): # Test on distributed setup that context manager behaves properly SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ ) # Use a single batch SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = next(iter(UpperCamelCase__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: # Sync grads step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )] def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int=False , UpperCamelCase__: Union[str, Any]=False ): SCREAMING_SNAKE_CASE__ = Accelerator( split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ ) for iteration, batch in enumerate(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = batch.values() # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) ) 
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Do "gradient accumulation" (noop) with accelerator.accumulate(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(UpperCamelCase__ ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )] GradientState._reset_state() def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple=False , UpperCamelCase__: List[str]=False ): SCREAMING_SNAKE_CASE__ = Accelerator( split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ , UpperCamelCase__ ) for iteration, batch in enumerate(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = batch.values() # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(UpperCamelCase__ )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n''' SCREAMING_SNAKE_CASE__ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(UpperCamelCase__ )) if accelerator.num_processes > 1: check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) GradientState._reset_state() def SCREAMING_SNAKE_CASE__ ( ): SCREAMING_SNAKE_CASE__ = Accelerator() SCREAMING_SNAKE_CASE__ = 
RegressionDataset(length=80 ) SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 ) SCREAMING_SNAKE_CASE__ = RegressionDataset(length=96 ) SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(UpperCamelCase__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ ) if iteration < len(UpperCamelCase__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(UpperCamelCase__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ ) if batch_num < len(UpperCamelCase__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def SCREAMING_SNAKE_CASE__ ( ): SCREAMING_SNAKE_CASE__ = Accelerator() SCREAMING_SNAKE_CASE__ = accelerator.state if state.local_process_index == 0: print("""**Test `accumulate` gradient accumulation with dataloader break**""" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("""**Test NOOP `no_sync` context manager**""" ) test_noop_sync(UpperCamelCase__ ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("""**Test Distributed `no_sync` context manager**""" ) test_distributed_sync(UpperCamelCase__ ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation, """ , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation(UpperCamelCase__ , UpperCamelCase__ ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation_with_opt_and_scheduler(UpperCamelCase__ , UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
6
0
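The tests above exercise Accelerator.accumulate; the following is a minimal, runnable sketch of that pattern outside the test harness. The model, optimizer, and dataloader here are placeholders of my own, not objects from the test file.

import torch
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(8, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
dataloader = torch.utils.data.DataLoader(
    [(torch.randn(8), torch.randn(1)) for _ in range(8)], batch_size=2
)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for inputs, targets in dataloader:
    # Under accumulate(), gradients are synchronized across processes only on
    # the step that completes an accumulation window of 2 batches.
    with accelerator.accumulate(model):
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()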
import argparse import torch from transformers import ( UniSpeechSatConfig, UniSpeechSatForAudioFrameClassification, UniSpeechSatForSequenceClassification, UniSpeechSatForXVector, WavaVecaFeatureExtractor, logging, ) logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) def lowercase_ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[Any] ): """simple docstring""" snake_case__ : Union[str, Any] =UniSpeechSatForSequenceClassification.from_pretrained(UpperCamelCase__ , config=UpperCamelCase__ ) snake_case__ : Tuple =downstream_dict['''projector.weight'''] snake_case__ : int =downstream_dict['''projector.bias'''] snake_case__ : Any =downstream_dict['''model.post_net.linear.weight'''] snake_case__ : Union[str, Any] =downstream_dict['''model.post_net.linear.bias'''] return model def lowercase_ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ): """simple docstring""" snake_case__ : List[Any] =UniSpeechSatForAudioFrameClassification.from_pretrained(UpperCamelCase__ , config=UpperCamelCase__ ) snake_case__ : Optional[Any] =downstream_dict['''model.linear.weight'''] snake_case__ : Optional[Any] =downstream_dict['''model.linear.bias'''] return model def lowercase_ ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" snake_case__ : Optional[Any] =UniSpeechSatForXVector.from_pretrained(UpperCamelCase__ , config=UpperCamelCase__ ) snake_case__ : int =downstream_dict['''connector.weight'''] snake_case__ : Optional[Any] =downstream_dict['''connector.bias'''] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): snake_case__ : Optional[int] =downstream_dict[ F'''model.framelevel_feature_extractor.module.{i}.kernel.weight''' ] snake_case__ : Dict =downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias'''] snake_case__ : Tuple =downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight'''] snake_case__ : Any =downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias'''] snake_case__ : Dict =downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight'''] snake_case__ : str =downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias'''] snake_case__ : List[str] =downstream_dict['''objective.W'''] return model @torch.no_grad() def lowercase_ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] ): """simple docstring""" snake_case__ : int =torch.load(UpperCamelCase__ , map_location='''cpu''' ) snake_case__ : Optional[Any] =checkpoint['''Downstream'''] snake_case__ : List[str] =UniSpeechSatConfig.from_pretrained(UpperCamelCase__ ) snake_case__ : int =WavaVecaFeatureExtractor.from_pretrained( UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , do_normalize=UpperCamelCase__ ) snake_case__ : Tuple =hf_config.architectures[0] if arch.endswith('''ForSequenceClassification''' ): snake_case__ : List[str] =convert_classification(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) elif arch.endswith('''ForAudioFrameClassification''' ): snake_case__ : Optional[int] =convert_diarization(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) elif arch.endswith('''ForXVector''' ): snake_case__ : str =convert_xvector(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' ) if 
hf_config.use_weighted_layer_sum: snake_case__ : Any =checkpoint['''Featurizer''']['''weights'''] hf_feature_extractor.save_pretrained(UpperCamelCase__ ) hf_model.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument( '''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.''' ) parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''') parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''') lowerCamelCase__ = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
381
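As a hedged follow-up to the conversion script above: once --model_dump_path has been written, the converted checkpoint can be loaded back through the standard from_pretrained API. The "./converted_model" path and the dummy audio are illustrative assumptions, and the snippet's WavaVecaFeatureExtractor appears to correspond to the public Wav2Vec2FeatureExtractor.

import torch
from transformers import UniSpeechSatForSequenceClassification, Wav2Vec2FeatureExtractor

model = UniSpeechSatForSequenceClassification.from_pretrained("./converted_model")
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("./converted_model")

dummy_audio = torch.randn(16000).numpy()  # one second of fake 16 kHz audio
inputs = feature_extractor(dummy_audio, sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits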
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = ["image_processor", "tokenizer"] lowerCamelCase_ = "AutoImageProcessor" lowerCamelCase_ = "AutoTokenizer" def __init__( self :Optional[int] , __A :Optional[Any] , __A :Dict ) -> Dict: """simple docstring""" super().__init__(__A , __A ) SCREAMING_SNAKE_CASE__ = self.image_processor def __call__( self :int , __A :str=None , __A :int=None , __A :Union[str, Any]=None , **__A :str ) -> Optional[Any]: """simple docstring""" if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: SCREAMING_SNAKE_CASE__ = self.tokenizer(__A , return_tensors=__A , **__A ) if images is not None: SCREAMING_SNAKE_CASE__ = self.image_processor(__A , return_tensors=__A , **__A ) if text is not None and images is not None: SCREAMING_SNAKE_CASE__ = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__A ) , tensor_type=__A ) def _snake_case ( self :str , *__A :List[str] , **__A :List[str] ) -> List[Any]: """simple docstring""" return self.tokenizer.batch_decode(*__A , **__A ) def _snake_case ( self :List[str] , *__A :Any , **__A :Any ) -> Tuple: """simple docstring""" return self.tokenizer.decode(*__A , **__A ) @property def _snake_case ( self :Dict ) -> List[Any]: """simple docstring""" return ["input_ids", "attention_mask", "pixel_values"]
6
0
"""simple docstring""" import argparse import torch from huggingface_hub import hf_hub_download from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM from transformers.utils import logging logging.set_verbosity_info() __lowerCamelCase = logging.get_logger(__name__) def a ( __snake_case : str, __snake_case : str ): '''simple docstring''' UpperCAmelCase_ :Tuple = RobertaPreLayerNormConfig.from_pretrained( UpperCamelCase__, architectures=['''RobertaPreLayerNormForMaskedLM'''] ) # convert state_dict UpperCAmelCase_ :Dict = torch.load(hf_hub_download(repo_id=UpperCamelCase__, filename='''pytorch_model.bin''' ) ) UpperCAmelCase_ :List[str] = {} for tensor_key, tensor_value in original_state_dict.items(): # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta' if tensor_key.startswith('''roberta.''' ): UpperCAmelCase_ :List[str] = '''roberta_prelayernorm.''' + tensor_key[len('''roberta.''' ) :] # The original implementation contains weights which are not used, remove them from the state_dict if tensor_key.endswith('''.self.LayerNorm.weight''' ) or tensor_key.endswith('''.self.LayerNorm.bias''' ): continue UpperCAmelCase_ :Optional[int] = tensor_value UpperCAmelCase_ :Union[str, Any] = RobertaPreLayerNormForMaskedLM.from_pretrained( pretrained_model_name_or_path=UpperCamelCase__, config=UpperCamelCase__, state_dict=UpperCamelCase__ ) model.save_pretrained(UpperCamelCase__ ) # convert tokenizer UpperCAmelCase_ :str = AutoTokenizer.from_pretrained(UpperCamelCase__ ) tokenizer.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": __lowerCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint-repo", default=None, type=str, required=True, help="Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) __lowerCamelCase = parser.parse_args() convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
608
def find_min_partition_difference(arr):
    """Split `arr` into two subsets so the difference of their sums is minimal."""
    n = len(arr)
    s = sum(arr)
    # dp[i][j] is True when some subset of the first i elements sums to j
    dp = [[False for _ in range(s + 1)] for _ in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True
    for j in range(1, s + 1):
        dp[0][j] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    diff = s
    # The best split keeps one side as close to s / 2 as possible
    for j in range(s // 2, -1, -1):
        if dp[n][j]:
            diff = s - 2 * j
            break
    return diff
6
0
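A quick usage example for find_min_partition_difference:

# The closest two-way split of [1, 6, 11, 5] is {1, 5, 6} vs {11},
# so the minimum difference between subset sums is 1.
print(find_min_partition_difference([1, 6, 11, 5]))  # -> 1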
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" @slow def __lowercase ( self :str ): __lowerCamelCase : Any =XLMRobertaModel.from_pretrained('''xlm-roberta-base''' ) __lowerCamelCase : List[Any] =torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] ) # The dog is cute and lives in the garden house __lowerCamelCase : Dict =torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim __lowerCamelCase : int =torch.tensor( [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): __lowerCamelCase : Optional[int] =model(__A )['''last_hidden_state'''].detach() self.assertEqual(output.shape , __A ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , __A , atol=1e-3 ) ) @slow def __lowercase ( self :Any ): __lowerCamelCase : List[str] =XLMRobertaModel.from_pretrained('''xlm-roberta-large''' ) __lowerCamelCase : Dict =torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] ) # The dog is cute and lives in the garden house __lowerCamelCase : int =torch.Size((1, 12, 1024) ) # batch_size, sequence_length, embedding_vector_dim __lowerCamelCase : Optional[int] =torch.tensor( [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): __lowerCamelCase : List[Any] =model(__A )['''last_hidden_state'''].detach() self.assertEqual(output.shape , __A ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , __A , atol=1e-3 ) )
179
def kinetic_energy(mass: float, velocity: float) -> float:
    """Return the kinetic energy 0.5 * m * v**2 of a body."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
6
0
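Worked example for kinetic_energy:

# A 10 kg body moving at 10 m/s carries 0.5 * 10 * 10**2 = 500 J.
print(kinetic_energy(10, 10))  # -> 500.0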
import unittest

from transformers import load_tool
from transformers.utils import is_torch_available


if is_torch_available():
    import torch

from transformers.testing_utils import require_torch

from .test_tools_common import ToolTesterMixin


@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        # Same expectation, with the prompt passed as a keyword argument.
        torch.manual_seed(0)
        result = self.tool(text="hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
428
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase = logging.get_logger(__name__) class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = "encoder-decoder" lowerCamelCase_ = True def __init__( self :Optional[int] , **__A :str ) -> int: """simple docstring""" super().__init__(**__A ) assert ( "encoder" in kwargs and "decoder" in kwargs ), "Config has to be initialized with encoder and decoder config" SCREAMING_SNAKE_CASE__ = kwargs.pop("""encoder""" ) SCREAMING_SNAKE_CASE__ = encoder_config.pop("""model_type""" ) SCREAMING_SNAKE_CASE__ = kwargs.pop("""decoder""" ) SCREAMING_SNAKE_CASE__ = decoder_config.pop("""model_type""" ) from ..auto.configuration_auto import AutoConfig SCREAMING_SNAKE_CASE__ = AutoConfig.for_model(__A , **__A ) SCREAMING_SNAKE_CASE__ = AutoConfig.for_model(__A , **__A ) SCREAMING_SNAKE_CASE__ = True @classmethod def _snake_case ( cls :str , __A :PretrainedConfig , __A :PretrainedConfig , **__A :List[str] ) -> PretrainedConfig: """simple docstring""" logger.info("""Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" ) SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__A ) def _snake_case ( self :str ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE__ = self.encoder.to_dict() SCREAMING_SNAKE_CASE__ = self.decoder.to_dict() SCREAMING_SNAKE_CASE__ = self.__class__.model_type return output
6
0
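The config class above mirrors transformers.EncoderDecoderConfig; here is a short sketch of composing one from two sub-configs (the BERT model choice and sizes are illustrative):

from transformers import BertConfig, EncoderDecoderConfig

encoder_config = BertConfig(hidden_size=128, num_hidden_layers=2)
decoder_config = BertConfig(hidden_size=128, num_hidden_layers=2)
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)

# from_encoder_decoder_configs flips the decoder flags, as logged above
assert config.decoder.is_decoder and config.decoder.add_cross_attention
print(config.model_type)  # -> "encoder-decoder"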
'''simple docstring''' from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax __lowerCAmelCase = logging.get_logger(__name__) @add_end_docstrings(UpperCamelCase__ ) class UpperCAmelCase__ ( UpperCamelCase__ ): """simple docstring""" def __init__( self : List[str] ,**_a : Optional[int] ): '''simple docstring''' super().__init__(**__A ) requires_backends(self ,'vision' ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : Optional[Any] ,_a : Union[str, List[str], "Image", List["Image"]] ,**_a : Any ): '''simple docstring''' return super().__call__(__A ,**__A ) def __lowercase ( self : Tuple ,**_a : List[Any] ): '''simple docstring''' _a : Optional[Any] = {} if "candidate_labels" in kwargs: _a : int = kwargs['candidate_labels'] if "hypothesis_template" in kwargs: _a : Union[str, Any] = kwargs['hypothesis_template'] return preprocess_params, {}, {} def __lowercase ( self : List[Any] ,_a : Optional[Any] ,_a : str=None ,_a : Dict="This is a photo of {}." ): '''simple docstring''' _a : Union[str, Any] = load_image(__A ) _a : Union[str, Any] = self.image_processor(images=[image] ,return_tensors=self.framework ) _a : str = candidate_labels _a : Union[str, Any] = [hypothesis_template.format(__A ) for x in candidate_labels] _a : Optional[Any] = self.tokenizer(__A ,return_tensors=self.framework ,padding=__A ) _a : Any = [text_inputs] return inputs def __lowercase ( self : str ,_a : Dict ): '''simple docstring''' _a : Tuple = model_inputs.pop('candidate_labels' ) _a : str = model_inputs.pop('text_inputs' ) if isinstance(text_inputs[0] ,__A ): _a : str = text_inputs[0] else: # Batching case. _a : int = text_inputs[0][0] _a : List[Any] = self.model(**__A ,**__A ) _a : str = { 'candidate_labels': candidate_labels, 'logits': outputs.logits_per_image, } return model_outputs def __lowercase ( self : Dict ,_a : Union[str, Any] ): '''simple docstring''' _a : Any = model_outputs.pop('candidate_labels' ) _a : Optional[Any] = model_outputs['logits'][0] if self.framework == "pt": _a : Optional[int] = logits.softmax(dim=-1 ).squeeze(-1 ) _a : Optional[Any] = probs.tolist() if not isinstance(__A ,__A ): _a : int = [scores] elif self.framework == "tf": _a : List[Any] = stable_softmax(__A ,axis=-1 ) _a : str = probs.numpy().tolist() else: raise ValueError(F"""Unsupported framework: {self.framework}""" ) _a : List[Any] = [ {'score': score, 'label': candidate_label} for score, candidate_label in sorted(zip(__A ,__A ) ,key=lambda _a : -x[0] ) ] return result
229
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Value from .base import TaskTemplate @dataclass(frozen=UpperCamelCase__ ) class UpperCamelCase_ ( UpperCamelCase__ ): # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization lowerCamelCase_ = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} ) lowerCamelCase_ = Features({"text": Value("string" )} ) lowerCamelCase_ = Features({"labels": ClassLabel} ) lowerCamelCase_ = "text" lowerCamelCase_ = "labels" def _snake_case ( self :Any , __A :Dict ) -> Optional[Any]: """simple docstring""" if self.label_column not in features: raise ValueError(f'''Column {self.label_column} is not present in features.''' ) if not isinstance(features[self.label_column] , __A ): raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' ) SCREAMING_SNAKE_CASE__ = copy.deepcopy(self ) SCREAMING_SNAKE_CASE__ = self.label_schema.copy() SCREAMING_SNAKE_CASE__ = features[self.label_column] SCREAMING_SNAKE_CASE__ = label_schema return task_template @property def _snake_case ( self :str ) -> Dict[str, str]: """simple docstring""" return { self.text_column: "text", self.label_column: "labels", }
6
0
'''simple docstring''' from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __lowerCAmelCase =logging.get_logger(__name__) __lowerCAmelCase ={ "nielsr/canine-s": 2048, } # Unicode defines 1,114,112 total “codepoints” __lowerCAmelCase =111_4112 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py __lowerCAmelCase =0 __lowerCAmelCase =0xe_0_0_0 __lowerCAmelCase =0xe_0_0_1 __lowerCAmelCase =0xe_0_0_2 __lowerCAmelCase =0xe_0_0_3 __lowerCAmelCase =0xe_0_0_4 # Maps special codepoints to human-readable names. __lowerCAmelCase ={ # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. __lowerCAmelCase ={name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class _snake_case ( UpperCamelCase__ ): """simple docstring""" _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , UpperCAmelCase__=chr(__A ) , UpperCAmelCase__=chr(__A ) , UpperCAmelCase__=chr(__A ) , UpperCAmelCase__=chr(__A ) , UpperCAmelCase__=chr(__A ) , UpperCAmelCase__=chr(__A ) , UpperCAmelCase__=False , UpperCAmelCase__=2048 , **UpperCAmelCase__ , ) -> Union[str, Any]: a_ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token a_ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token a_ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token a_ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token a_ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token # Mask token behave like a normal word, i.e. include the space before it a_ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token super().__init__( bos_token=__A , eos_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , model_max_length=__A , **__A , ) # Creates a mapping for looking up the IDs of special symbols. a_ = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): a_ = codepoint # Creates a mapping for looking up the string forms of special symbol IDs. 
a_ = { codepoint: name for name, codepoint in self._special_codepoints.items() } a_ = UNICODE_VOCAB_SIZE a_ = len(self._special_codepoints ) @property def __SCREAMING_SNAKE_CASE ( self ) -> int: return self._unicode_vocab_size def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> List[str]: return list(__A ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> int: try: return ord(__A ) except TypeError: raise ValueError(F'''invalid token: \'{token}\'''' ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> str: try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(__A ) except TypeError: raise ValueError(F'''invalid id: {index}''' ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> Any: return "".join(__A ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> List[int]: a_ = [self.sep_token_id] a_ = [self.cls_token_id] a_ = cls + token_ids_a + sep if token_ids_a is not None: result += token_ids_a + sep return result def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A ) a_ = [1] + ([0] * len(__A )) + [1] if token_ids_a is not None: result += ([0] * len(__A )) + [1] return result def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> List[int]: a_ = [self.sep_token_id] a_ = [self.cls_token_id] a_ = len(cls + token_ids_a + sep ) * [0] if token_ids_a is not None: result += len(token_ids_a + sep ) * [1] return result def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> Any: return ()
697
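Because the tokenizer above operates on raw Unicode codepoints, ordinary token ids are simply ord() values; a tiny illustration of that scheme:

# Outside the tokenizer's special pseudo-characters, a token id is just the
# character's Unicode codepoint, and decoding is chr() in reverse.
text = "héllo"
token_ids = [ord(ch) for ch in text]
print(token_ids)                           # -> [104, 233, 108, 108, 111]
print("".join(chr(i) for i in token_ids))  # -> héllo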
import argparse import torch from datasets import load_dataset from donut import DonutModel from transformers import ( DonutImageProcessor, DonutProcessor, DonutSwinConfig, DonutSwinModel, MBartConfig, MBartForCausalLM, VisionEncoderDecoderModel, XLMRobertaTokenizerFast, ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ): SCREAMING_SNAKE_CASE__ = model.config SCREAMING_SNAKE_CASE__ = DonutSwinConfig( image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , ) SCREAMING_SNAKE_CASE__ = MBartConfig( is_decoder=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , add_cross_attention=UpperCamelCase__ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len( model.decoder.tokenizer ) , scale_embedding=UpperCamelCase__ , add_final_layer_norm=UpperCamelCase__ , ) return encoder_config, decoder_config def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ): if "encoder.model" in name: SCREAMING_SNAKE_CASE__ = name.replace("""encoder.model""" , """encoder""" ) if "decoder.model" in name: SCREAMING_SNAKE_CASE__ = name.replace("""decoder.model""" , """decoder""" ) if "patch_embed.proj" in name: SCREAMING_SNAKE_CASE__ = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: SCREAMING_SNAKE_CASE__ = name.replace("""patch_embed.norm""" , """embeddings.norm""" ) if name.startswith("""encoder""" ): if "layers" in name: SCREAMING_SNAKE_CASE__ = """encoder.""" + name if "attn.proj" in name: SCREAMING_SNAKE_CASE__ = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name and "mask" not in name: SCREAMING_SNAKE_CASE__ = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: SCREAMING_SNAKE_CASE__ = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: SCREAMING_SNAKE_CASE__ = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: SCREAMING_SNAKE_CASE__ = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: SCREAMING_SNAKE_CASE__ = name.replace("""mlp.fc2""" , """output.dense""" ) if name == "encoder.norm.weight": SCREAMING_SNAKE_CASE__ = """encoder.layernorm.weight""" if name == "encoder.norm.bias": SCREAMING_SNAKE_CASE__ = """encoder.layernorm.bias""" return name def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: Optional[int] ): for key in orig_state_dict.copy().keys(): SCREAMING_SNAKE_CASE__ = orig_state_dict.pop(UpperCamelCase__ ) if "qkv" in key: SCREAMING_SNAKE_CASE__ = key.split(""".""" ) SCREAMING_SNAKE_CASE__ = int(key_split[3] ) SCREAMING_SNAKE_CASE__ = int(key_split[5] ) SCREAMING_SNAKE_CASE__ = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: SCREAMING_SNAKE_CASE__ = val[:dim, :] SCREAMING_SNAKE_CASE__ = val[dim : dim * 2, :] SCREAMING_SNAKE_CASE__ = val[-dim:, :] else: SCREAMING_SNAKE_CASE__ = val[:dim] SCREAMING_SNAKE_CASE__ = val[dim : dim * 2] SCREAMING_SNAKE_CASE__ = val[-dim:] elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]: # HuggingFace implementation doesn't use attn_mask buffer # and model doesn't use final LayerNorms for the encoder pass else: SCREAMING_SNAKE_CASE__ = val return orig_state_dict def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] , UpperCamelCase__: int=None , UpperCamelCase__: 
str=False ): # load original model SCREAMING_SNAKE_CASE__ = DonutModel.from_pretrained(UpperCamelCase__ ).eval() # load HuggingFace model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_configs(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = DonutSwinModel(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = MBartForCausalLM(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = VisionEncoderDecoderModel(encoder=UpperCamelCase__ , decoder=UpperCamelCase__ ) model.eval() SCREAMING_SNAKE_CASE__ = original_model.state_dict() SCREAMING_SNAKE_CASE__ = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ ) model.load_state_dict(UpperCamelCase__ ) # verify results on scanned document SCREAMING_SNAKE_CASE__ = load_dataset("""hf-internal-testing/example-documents""" ) SCREAMING_SNAKE_CASE__ = dataset["""test"""][0]["""image"""].convert("""RGB""" ) SCREAMING_SNAKE_CASE__ = XLMRobertaTokenizerFast.from_pretrained(UpperCamelCase__ , from_slow=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = DonutImageProcessor( do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] ) SCREAMING_SNAKE_CASE__ = DonutProcessor(UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = processor(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values if model_name == "naver-clova-ix/donut-base-finetuned-docvqa": SCREAMING_SNAKE_CASE__ = """<s_docvqa><s_question>{user_input}</s_question><s_answer>""" SCREAMING_SNAKE_CASE__ = """When is the coffee break?""" SCREAMING_SNAKE_CASE__ = task_prompt.replace("""{user_input}""" , UpperCamelCase__ ) elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip": SCREAMING_SNAKE_CASE__ = """<s_rvlcdip>""" elif model_name in [ "naver-clova-ix/donut-base-finetuned-cord-v1", "naver-clova-ix/donut-base-finetuned-cord-v1-2560", ]: SCREAMING_SNAKE_CASE__ = """<s_cord>""" elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2": SCREAMING_SNAKE_CASE__ = """s_cord-v2>""" elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket": SCREAMING_SNAKE_CASE__ = """<s_zhtrainticket>""" elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]: # use a random prompt SCREAMING_SNAKE_CASE__ = """hello world""" else: raise ValueError("""Model name not supported""" ) SCREAMING_SNAKE_CASE__ = original_model.decoder.tokenizer(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors="""pt""" )[ """input_ids""" ] SCREAMING_SNAKE_CASE__ = original_model.encoder.model.patch_embed(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model.encoder.embeddings(UpperCamelCase__ ) assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) # verify encoder hidden states SCREAMING_SNAKE_CASE__ = original_model.encoder(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = model.encoder(UpperCamelCase__ ).last_hidden_state assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-2 ) # verify decoder hidden states SCREAMING_SNAKE_CASE__ = original_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).logits SCREAMING_SNAKE_CASE__ = model(UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ ).logits assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'''Saving model and processor to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCamelCase__ ) processor.save_pretrained(UpperCamelCase__ ) if push_to_hub: model.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update 
model""" ) processor.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" ) if __name__ == "__main__": _lowerCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='naver-clova-ix/donut-base-finetuned-docvqa', required=False, type=str, help='Name of the original model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, required=False, type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model and processor to the 🤗 hub.', ) _lowerCamelCase = parser.parse_args() convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
6
0
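A hedged sketch of running DocVQA inference with a converted Donut checkpoint; the "./donut-docvqa" path is a placeholder, `image` is a PIL image you supply, and the prompt format is the one used in the verification step above.

import torch
from transformers import DonutProcessor, VisionEncoderDecoderModel

processor = DonutProcessor.from_pretrained("./donut-docvqa")
model = VisionEncoderDecoderModel.from_pretrained("./donut-docvqa")

task_prompt = "<s_docvqa><s_question>When is the coffee break?</s_question><s_answer>"
decoder_input_ids = processor.tokenizer(
    task_prompt, add_special_tokens=False, return_tensors="pt"
).input_ids
pixel_values = processor(image, return_tensors="pt").pixel_values  # image: PIL.Image
outputs = model.generate(pixel_values, decoder_input_ids=decoder_input_ids, max_length=128)
print(processor.batch_decode(outputs)[0])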
"""simple docstring""" import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCAmelCase : Any = logging.get_logger(__name__) UpperCAmelCase : List[Any] = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } UpperCAmelCase : Optional[Any] = { "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } UpperCAmelCase : Optional[Any] = {"facebook/blenderbot-3B": 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def _SCREAMING_SNAKE_CASE () -> str: '''simple docstring''' lowercase_ = ( list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) ) ) lowercase_ = bs[:] lowercase_ = 0 for b in range(2**8 ): if b not in bs: bs.append(UpperCamelCase__ ) cs.append(2**8 + n ) n += 1 lowercase_ = [chr(UpperCamelCase__ ) for n in cs] return dict(zip(UpperCamelCase__ , UpperCamelCase__ ) ) def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Optional[int]: '''simple docstring''' lowercase_ = set() lowercase_ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowercase_ = char return pairs class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ): lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = ["input_ids", "attention_mask"] def __init__( self : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int="replace" , lowerCAmelCase_ : int="<s>" , lowerCAmelCase_ : str="</s>" , lowerCAmelCase_ : Dict="</s>" , lowerCAmelCase_ : List[str]="<s>" , lowerCAmelCase_ : Dict="<unk>" , lowerCAmelCase_ : Optional[Any]="<pad>" , lowerCAmelCase_ : Tuple="<mask>" , lowerCAmelCase_ : List[Any]=False , **lowerCAmelCase_ : List[Any] , ): """simple docstring""" lowercase_ = AddedToken(__A , lstrip=__A , rstrip=__A) if isinstance(__A , __A) else bos_token lowercase_ = AddedToken(__A , lstrip=__A , rstrip=__A) if isinstance(__A , __A) else eos_token lowercase_ = AddedToken(__A , lstrip=__A , rstrip=__A) if isinstance(__A , __A) else sep_token lowercase_ = AddedToken(__A , lstrip=__A , rstrip=__A) if isinstance(__A , __A) else cls_token lowercase_ = AddedToken(__A , lstrip=__A , rstrip=__A) if isinstance(__A , __A) else unk_token lowercase_ = AddedToken(__A , lstrip=__A , rstrip=__A) if isinstance(__A , __A) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it lowercase_ = AddedToken(__A , lstrip=__A , rstrip=__A) if isinstance(__A , __A) else mask_token super().__init__( errors=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , **__A , ) with open(__A , encoding="""utf-8""") as vocab_handle: lowercase_ = json.load(__A) lowercase_ = {v: k for k, v in self.encoder.items()} lowercase_ = errors # how to handle errors in decoding lowercase_ = bytes_to_unicode() lowercase_ = {v: k for k, v in self.byte_encoder.items()} with open(__A , encoding="""utf-8""") as merges_handle: lowercase_ = merges_handle.read().split("""\n""")[1:-1] lowercase_ = [tuple(merge.split()) for merge in bpe_merges] lowercase_ = dict(zip(__A , range(len(__A)))) lowercase_ = {} lowercase_ = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowercase_ = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""") @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _UpperCAmelCase ( self : Dict): """simple docstring""" return len(self.encoder) def _UpperCAmelCase ( self : Tuple): """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder) def _UpperCAmelCase ( self : int , lowerCAmelCase_ : List[Any]): """simple docstring""" if token in self.cache: return self.cache[token] lowercase_ = tuple(__A) lowercase_ = get_pairs(__A) if not pairs: return token while True: lowercase_ = min(__A , key=lambda lowerCAmelCase_: self.bpe_ranks.get(__A , float("""inf"""))) if bigram not in self.bpe_ranks: break lowercase_ , lowercase_ = bigram lowercase_ = [] lowercase_ = 0 while i < len(__A): try: lowercase_ = word.index(__A , __A) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) lowercase_ = j if word[i] == first and i < len(__A) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 lowercase_ = tuple(__A) lowercase_ = new_word if len(__A) == 1: break else: lowercase_ = get_pairs(__A) lowercase_ = """ """.join(__A) lowercase_ = word return word def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : List[str]): """simple docstring""" lowercase_ = [] for token in re.findall(self.pat , __A): lowercase_ = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""")) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__A).split(""" """)) return bpe_tokens def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : Union[str, Any]): """simple docstring""" return self.encoder.get(__A , self.encoder.get(self.unk_token)) def _UpperCAmelCase ( self : int , lowerCAmelCase_ : Tuple): """simple docstring""" return self.decoder.get(__A) def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : Tuple): """simple docstring""" lowercase_ = """""".join(__A) lowercase_ = bytearray([self.byte_decoder[c] for c in text]).decode("""utf-8""" , errors=self.errors) return text def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None): """simple docstring""" if not os.path.isdir(__A): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''') return lowercase_ = os.path.join( __A , (filename_prefix + """-""" if 
filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]) lowercase_ = os.path.join( __A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""]) with open(__A , """w""" , encoding="""utf-8""") as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__A , ensure_ascii=__A) + """\n""") lowercase_ = 0 with open(__A , """w""" , encoding="""utf-8""") as writer: writer.write("""#version: 0.2\n""") for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase_: kv[1]): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' """ Please check that the tokenizer is not corrupted!""") lowercase_ = token_index writer.write(""" """.join(__A) + """\n""") index += 1 return vocab_file, merge_file def _UpperCAmelCase ( self : str , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None , lowerCAmelCase_ : bool = False): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A) if token_ids_a is None: return [1] + ([0] * len(__A)) + [1] return [1] + ([0] * len(__A)) + [1, 1] + ([0] * len(__A)) + [1] def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None): """simple docstring""" lowercase_ = [self.sep_token_id] lowercase_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int=False , **lowerCAmelCase_ : Optional[Any]): """simple docstring""" lowercase_ = kwargs.pop("""add_prefix_space""" , self.add_prefix_space) if (is_split_into_words or add_prefix_space) and (len(__A) > 0 and not text[0].isspace()): lowercase_ = """ """ + text return (text, kwargs) def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None): """simple docstring""" return token_ids_a + [self.eos_token_id] def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : "Conversation"): """simple docstring""" lowercase_ = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(""" """ + text) else: # Generated responses should contain them already. inputs.append(__A) lowercase_ = """ """.join(__A) lowercase_ = self.encode(__A) if len(__A) > self.model_max_length: lowercase_ = input_ids[-self.model_max_length :] logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''') return input_ids
567
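The BPE loop above repeatedly extracts adjacent symbol pairs as merge candidates; here is a standalone copy of that helper with a small demonstration (the function name is mine):

def get_symbol_pairs(word):
    # Every adjacent pair of symbols in the current word is a merge candidate.
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

print(sorted(get_symbol_pairs(("h", "e", "l", "l", "o"))))
# -> [('e', 'l'), ('h', 'e'), ('l', 'l'), ('l', 'o')]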
import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class UpperCamelCase_ ( unittest.TestCase ): def _snake_case ( self :Union[str, Any] ) -> List[str]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self :Any ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = StableDiffusionKDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" ) SCREAMING_SNAKE_CASE__ = sd_pipe.to(__A ) sd_pipe.set_progress_bar_config(disable=__A ) sd_pipe.set_scheduler("""sample_euler""" ) SCREAMING_SNAKE_CASE__ = """A painting of a squirrel eating a burger""" SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ = sd_pipe([prompt] , generator=__A , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" ) SCREAMING_SNAKE_CASE__ = output.images SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE__ = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _snake_case ( self :str ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) SCREAMING_SNAKE_CASE__ = sd_pipe.to(__A ) sd_pipe.set_progress_bar_config(disable=__A ) sd_pipe.set_scheduler("""sample_euler""" ) SCREAMING_SNAKE_CASE__ = """A painting of a squirrel eating a burger""" SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ = sd_pipe([prompt] , generator=__A , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" ) SCREAMING_SNAKE_CASE__ = output.images SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE__ = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1 def _snake_case ( self :Tuple ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) SCREAMING_SNAKE_CASE__ = sd_pipe.to(__A ) sd_pipe.set_progress_bar_config(disable=__A ) sd_pipe.set_scheduler("""sample_dpmpp_2m""" ) SCREAMING_SNAKE_CASE__ = """A painting of a squirrel eating a burger""" SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ = sd_pipe( [prompt] , generator=__A , guidance_scale=7.5 , num_inference_steps=15 , output_type="""np""" , use_karras_sigmas=__A , ) SCREAMING_SNAKE_CASE__ = output.images SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE__ = np.array( [0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
6
0
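A minimal usage sketch of the pipeline the tests above exercise; it requires a CUDA GPU and the k-diffusion package, with the model id and scheduler name taken from the first test:

import torch
from diffusers import StableDiffusionKDiffusionPipeline

pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
pipe = pipe.to("cuda")
pipe.set_scheduler("sample_euler")

generator = torch.manual_seed(0)
image = pipe(
    "A painting of a squirrel eating a burger",
    generator=generator,
    guidance_scale=9.0,
    num_inference_steps=20,
).images[0]
image.save("squirrel.png")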
def is_automorphic_number(number: int) -> bool:
    """Return True when `number` squared ends in `number` itself (an automorphic number)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
367
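Example calls for is_automorphic_number:

# 76 is automorphic because 76**2 = 5776 ends in 76; 7 is not (7**2 = 49).
print(is_automorphic_number(76))  # -> True
print(is_automorphic_number(7))   # -> False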
def solution(n: int = 600_851_475_143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        # Whatever remains after trial division is itself prime.
        ans = n
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
6
0
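Example for solution: 13195 = 5 * 7 * 13 * 29, so its largest prime factor is 29.

print(solution(13195))  # -> 29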
from typing import Dict, Optional import numpy as np import datasets a__ : Union[str, Any] = """\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n""" a__ : Optional[Any] = """\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. 
])}\n""" a__ : Optional[Any] = """\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}""" def snake_case (UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : bool , UpperCamelCase : Optional[Dict[int, int]] = None , UpperCamelCase : bool = False , ): '''simple docstring''' if label_map is not None: for old_id, new_id in label_map.items(): lowerCamelCase__ = new_id # turn into Numpy arrays lowerCamelCase__ = np.array(UpperCamelCase__ ) lowerCamelCase__ = np.array(UpperCamelCase__ ) if reduce_labels: lowerCamelCase__ = 255 lowerCamelCase__ = label - 1 lowerCamelCase__ = 255 lowerCamelCase__ = label != ignore_index lowerCamelCase__ = np.not_equal(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ = pred_label[mask] lowerCamelCase__ = np.array(UpperCamelCase__ )[mask] lowerCamelCase__ = pred_label[pred_label == label] lowerCamelCase__ = np.histogram(UpperCamelCase__ , bins=UpperCamelCase__ , range=(0, num_labels - 1) )[0] lowerCamelCase__ = np.histogram(UpperCamelCase__ , bins=UpperCamelCase__ , range=(0, num_labels - 1) )[0] lowerCamelCase__ = np.histogram(UpperCamelCase__ , bins=UpperCamelCase__ , range=(0, num_labels - 1) )[0] lowerCamelCase__ = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def snake_case (UpperCamelCase : int , UpperCamelCase : Tuple , UpperCamelCase : List[str] , UpperCamelCase : bool , UpperCamelCase : Optional[Dict[int, int]] = None , UpperCamelCase : bool = False , ): '''simple docstring''' lowerCamelCase__ = np.zeros((num_labels,) , dtype=np.floataa ) lowerCamelCase__ = np.zeros((num_labels,) , dtype=np.floataa ) lowerCamelCase__ = np.zeros((num_labels,) , dtype=np.floataa ) lowerCamelCase__ = np.zeros((num_labels,) , dtype=np.floataa ) for result, gt_seg_map in zip(UpperCamelCase__ , UpperCamelCase__ ): lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = intersect_and_union( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def snake_case (UpperCamelCase : int , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : bool , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[Dict[int, int]] = None , UpperCamelCase : bool = False , ): '''simple docstring''' lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = total_intersect_and_union( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # compute metrics lowerCamelCase__ = {} lowerCamelCase__ = total_area_intersect.sum() / total_area_label.sum() lowerCamelCase__ = total_area_intersect / total_area_union lowerCamelCase__ = total_area_intersect / total_area_label lowerCamelCase__ = np.nanmean(UpperCamelCase__ ) lowerCamelCase__ = np.nanmean(UpperCamelCase__ ) lowerCamelCase__ = all_acc lowerCamelCase__ = iou lowerCamelCase__ = acc if nan_to_num is not None: lowerCamelCase__ = {metric: np.nan_to_num(UpperCamelCase__ , 
nan=UpperCamelCase__ ) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase ( datasets.Metric ): """simple docstring""" def _UpperCamelCase ( self : Any ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( # 1st Seq - height dim, 2nd - width dim { """predictions""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ), """references""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ), } ) , reference_urls=[ """https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py""" ] , ) def _UpperCamelCase ( self : Dict , a_ : Any , a_ : Dict , a_ : int , a_ : bool , a_ : Optional[int] = None , a_ : Optional[Dict[int, int]] = None , a_ : bool = False , ): """simple docstring""" lowerCamelCase__ = mean_iou( results=__A , gt_seg_maps=__A , num_labels=__A , ignore_index=__A , nan_to_num=__A , label_map=__A , reduce_labels=__A , ) return iou_result
165
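A worked toy example of the per-class intersection-over-union the metric above averages (the maps and class choice are illustrative):

import numpy as np

pred = np.array([[1, 1], [1, 0]])
label = np.array([[1, 1], [0, 0]])

# Prediction and ground truth agree on 2 of the 3 pixels marked class 1.
intersect = np.logical_and(pred == 1, label == 1).sum()  # 2
union = np.logical_or(pred == 1, label == 1).sum()       # 3
print(intersect / union)  # -> 0.666...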
import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
'''simple docstring''' import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase( self , __lowerCAmelCase ) -> Union[str, Any]: for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ): lowercase__ : List[Any] = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(__A ) def _lowerCAmelCase( self ) -> Tuple: lowercase__ : Dict = '''sshleifer/tiny-gpt2''' lowercase__ : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__A , multi_process=__A , ) lowercase__ : str = TensorFlowBenchmark(__A ) lowercase__ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCAmelCase( self ) -> Union[str, Any]: lowercase__ : Any = '''sgugger/tiny-distilbert-classification''' lowercase__ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__A , only_pretrain_model=__A , ) lowercase__ : str = TensorFlowBenchmark(__A ) lowercase__ : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCAmelCase( self ) -> List[str]: lowercase__ : int = '''sshleifer/tiny-gpt2''' lowercase__ : List[str] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__A , ) lowercase__ : Dict = TensorFlowBenchmark(__A ) lowercase__ : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCAmelCase( self ) -> Optional[Any]: lowercase__ : str = '''sshleifer/tiny-gpt2''' lowercase__ : Optional[Any] = AutoConfig.from_pretrained(__A ) lowercase__ : str = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__A , multi_process=__A , ) lowercase__ : List[str] = TensorFlowBenchmark(__A , [config] ) lowercase__ : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCAmelCase( self ) -> List[str]: lowercase__ : Union[str, Any] = '''sshleifer/tiny-gpt2''' lowercase__ : Tuple = AutoConfig.from_pretrained(__A ) lowercase__ : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__A , ) lowercase__ : Dict = TensorFlowBenchmark(__A , [config] ) lowercase__ : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCAmelCase( self ) -> Optional[Any]: lowercase__ : Dict = '''sshleifer/tiny-gpt2''' lowercase__ : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , 
batch_sizes=[1] , multi_process=__A , ) lowercase__ : List[Any] = TensorFlowBenchmark(__A ) lowercase__ : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _lowerCAmelCase( self ) -> Union[str, Any]: lowercase__ : Dict = '''sshleifer/tiny-gpt2''' lowercase__ : str = AutoConfig.from_pretrained(__A ) lowercase__ : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__A , ) lowercase__ : Union[str, Any] = TensorFlowBenchmark(__A , [config] ) lowercase__ : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _lowerCAmelCase( self ) -> Tuple: lowercase__ : Union[str, Any] = '''patrickvonplaten/t5-tiny-random''' lowercase__ : List[Any] = AutoConfig.from_pretrained(__A ) lowercase__ : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__A , ) lowercase__ : Optional[Any] = TensorFlowBenchmark(__A , configs=[config] ) lowercase__ : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' ) def _lowerCAmelCase( self ) -> List[str]: lowercase__ : int = '''sshleifer/tiny-gpt2''' lowercase__ : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__A , multi_process=__A , ) lowercase__ : List[Any] = TensorFlowBenchmark(__A ) lowercase__ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCAmelCase( self ) -> Any: lowercase__ : Dict = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: lowercase__ : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=__A , save_to_csv=__A , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__A , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(__A , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(__A , '''env.csv''' ) , multi_process=__A , ) lowercase__ : List[str] = TensorFlowBenchmark(__A ) benchmark.run() self.assertTrue(Path(os.path.join(__A , '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(__A , '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(__A , '''env.csv''' ) ).exists() ) def _lowerCAmelCase( self ) -> Any: lowercase__ : Optional[int] = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(__lowerCAmelCase ): self.assertTrue(hasattr(__A , '''sequential''' ) ) self.assertTrue(hasattr(__A , '''cumulative''' ) ) self.assertTrue(hasattr(__A , '''current''' ) ) self.assertTrue(hasattr(__A , '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase__ : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__A , '''log.txt''' ) , log_print=__A , trace_memory_line_by_line=__A , eager_mode=__A , multi_process=__A , ) lowercase__ : Optional[Any] = TensorFlowBenchmark(__A ) lowercase__ : Any = benchmark.run() 
_check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(__A , '''log.txt''' ) ).exists() )
import argparse
import datetime


def zeller(date_input: str) -> str:
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
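A quick sanity check of the function above; the date is an arbitrary example (31 January 2010 fell on a Sunday, and the function raises if its own arithmetic ever disagrees with `datetime`):

# Example usage of zeller():
print(zeller("01-31-2010"))  # -> Your date 01-31-2010, is a Sunday!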
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ): _lowerCAmelCase = 4_2 class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ , UpperCamelCase__ ): @register_to_config def __init__(self , _lowercase = 3 , _lowercase = 3 , _lowercase = ("DownEncoderBlock2D",) , _lowercase = ("UpDecoderBlock2D",) , _lowercase = (64,) , _lowercase = 1 , _lowercase = "silu" , _lowercase = 3 , _lowercase = 32 , _lowercase = 256 , _lowercase = 32 , _lowercase = None , _lowercase = 0.1_8215 , _lowercase = "group" , ): '''simple docstring''' super().__init__() # pass init params to Encoder __a : Optional[int] = Encoder( in_channels=__A , out_channels=__A , down_block_types=__A , block_out_channels=__A , layers_per_block=__A , act_fn=__A , norm_num_groups=__A , double_z=__A , ) __a : List[Any] = vq_embed_dim if vq_embed_dim is not None else latent_channels __a : int = nn.Convad(__A , __A , 1 ) __a : Optional[int] = VectorQuantizer(__A , __A , beta=0.25 , remap=__A , sane_index_shape=__A ) __a : Union[str, Any] = nn.Convad(__A , __A , 1 ) # pass init params to Decoder __a : int = Decoder( in_channels=__A , out_channels=__A , up_block_types=__A , block_out_channels=__A , layers_per_block=__A , act_fn=__A , norm_num_groups=__A , norm_type=__A , ) @apply_forward_hook def lowerCAmelCase__(self , _lowercase , _lowercase = True ): '''simple docstring''' __a : Dict = self.encoder(__A ) __a : Any = self.quant_conv(__A ) if not return_dict: return (h,) return VQEncoderOutput(latents=__A ) @apply_forward_hook def lowerCAmelCase__(self , _lowercase , _lowercase = False , _lowercase = True ): '''simple docstring''' if not force_not_quantize: __a , __a , __a : Union[str, Any] = self.quantize(__A ) else: __a : Union[str, Any] = h __a : Tuple = self.post_quant_conv(__A ) __a : List[Any] = self.decoder(__A , quant if self.config.norm_type == """spatial""" else None ) if not return_dict: return (dec,) return DecoderOutput(sample=__A ) def lowerCAmelCase__(self , _lowercase , _lowercase = True ): '''simple docstring''' __a : Dict = sample __a : Any = self.encode(__A ).latents __a : Union[str, Any] = self.decode(__A ).sample if not return_dict: return (dec,) return DecoderOutput(sample=__A )
import argparse
import logging
import pickle
from collections import Counter


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
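Assuming the script is saved as token_counts.py (the filename is not given in this excerpt), a typical run and a quick check of the resulting dump might look like this:

# python token_counts.py --data_file data/dump.bert-base-uncased.pickle \
#     --token_counts_dump data/token_counts.bert-base-uncased.pickle --vocab_size 30522
import pickle

with open("data/token_counts.bert-base-uncased.pickle", "rb") as f:
    counts = pickle.load(f)
print(len(counts))   # vocab_size entries
print(counts[2000])  # occurrences of token id 2000 in the binarized corpus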
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    """Map an activation name from a config string to the corresponding torch module."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
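Usage is a one-liner; unknown names fail fast, which surfaces config typos early:

from torch import nn

act = get_activation("silu")
assert isinstance(act, nn.SiLU)
# get_activation("tanh")  # -> ValueError: Unsupported activation function: tanh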
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring""" __lowerCamelCase = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []} __lowerCamelCase = ["a", "b", "c", "d", "e"] def a ( __snake_case : str, __snake_case : Union[str, Any], __snake_case : Optional[Any] ): '''simple docstring''' UpperCAmelCase_ :Optional[Any] = start # add current to visited visited.append(UpperCamelCase__ ) UpperCAmelCase_ :List[str] = edges[current] for neighbor in neighbors: # if neighbor not in visited, visit if neighbor not in visited: UpperCAmelCase_ :Optional[int] = topological_sort(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ) # if all neighbors visited add current to sort sort.append(UpperCamelCase__ ) # if all vertices haven't been visited select a new one to visit if len(UpperCamelCase__ ) != len(UpperCamelCase__ ): for vertice in vertices: if vertice not in visited: UpperCAmelCase_ :Dict = topological_sort(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ) # return sort return sort if __name__ == "__main__": __lowerCamelCase = topological_sort("a", [], []) print(sort)
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
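As a usage sketch for the processor above (the checkpoint name and image URL are the usual documentation examples, not taken from this file):

import requests
from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
# Nested list of queries: one list of text queries per image in the batch.
inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
print(inputs.keys())  # dict_keys(['input_ids', 'attention_mask', 'pixel_values'])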
"""simple docstring""" import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ ): """simple docstring""" __snake_case : int = ["""image_processor""", """tokenizer"""] __snake_case : Union[str, Any] = """OwlViTImageProcessor""" __snake_case : Tuple = ("""CLIPTokenizer""", """CLIPTokenizerFast""") def __init__( self :Optional[Any] , __lowercase :int=None , __lowercase :Optional[int]=None , **__lowercase :str ): __lowerCamelCase : str =None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __A , ) __lowerCamelCase : str =kwargs.pop('''feature_extractor''' ) __lowerCamelCase : Optional[Any] =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(__A , __A ) def __call__( self :str , __lowercase :Dict=None , __lowercase :List[str]=None , __lowercase :str=None , __lowercase :Optional[int]="max_length" , __lowercase :Tuple="np" , **__lowercase :int ): if text is None and query_images is None and images is None: raise ValueError( '''You have to specify at least one text or query image or image. All three cannot be none.''' ) if text is not None: if isinstance(__A , __A ) or (isinstance(__A , __A ) and not isinstance(text[0] , __A )): __lowerCamelCase : int =[self.tokenizer(__A , padding=__A , return_tensors=__A , **__A )] elif isinstance(__A , __A ) and isinstance(text[0] , __A ): __lowerCamelCase : List[str] =[] # Maximum number of queries across batch __lowerCamelCase : Dict =max([len(__A ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(__A ) != max_num_queries: __lowerCamelCase : List[str] =t + [''' '''] * (max_num_queries - len(__A )) __lowerCamelCase : int =self.tokenizer(__A , padding=__A , return_tensors=__A , **__A ) encodings.append(__A ) else: raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' ) if return_tensors == "np": __lowerCamelCase : str =np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) __lowerCamelCase : Any =np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp __lowerCamelCase : Union[str, Any] =jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) __lowerCamelCase : List[str] =jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch __lowerCamelCase : Dict =torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0 ) __lowerCamelCase : int =torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf __lowerCamelCase : List[Any] =tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) __lowerCamelCase : Union[str, Any] =tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) else: raise ValueError('''Target return tensor type could not be 
returned''' ) __lowerCamelCase : Optional[Any] =BatchEncoding() __lowerCamelCase : str =input_ids __lowerCamelCase : List[Any] =attention_mask if query_images is not None: __lowerCamelCase : str =BatchEncoding() __lowerCamelCase : Tuple =self.image_processor( __A , return_tensors=__A , **__A ).pixel_values __lowerCamelCase : List[str] =query_pixel_values if images is not None: __lowerCamelCase : Union[str, Any] =self.image_processor(__A , return_tensors=__A , **__A ) if text is not None and images is not None: __lowerCamelCase : Optional[int] =image_features.pixel_values return encoding elif query_images is not None and images is not None: __lowerCamelCase : Optional[Any] =image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__A ) , tensor_type=__A ) def __lowercase ( self :List[Any] , *__lowercase :Dict , **__lowercase :Dict ): return self.image_processor.post_process(*__A , **__A ) def __lowercase ( self :Optional[int] , *__lowercase :Dict , **__lowercase :List[str] ): return self.image_processor.post_process_object_detection(*__A , **__A ) def __lowercase ( self :str , *__lowercase :List[str] , **__lowercase :Union[str, Any] ): return self.image_processor.post_process_image_guided_detection(*__A , **__A ) def __lowercase ( self :Dict , *__lowercase :List[str] , **__lowercase :List[str] ): return self.tokenizer.batch_decode(*__A , **__A ) def __lowercase ( self :Dict , *__lowercase :Dict , **__lowercase :List[str] ): return self.tokenizer.decode(*__A , **__A ) @property def __lowercase ( self :List[Any] ): warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __A , ) return self.image_processor_class @property def __lowercase ( self :Any ): warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __A , ) return self.image_processor
import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __SCREAMING_SNAKE_CASE : List[str] =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple ='''▁''' __SCREAMING_SNAKE_CASE : Dict ={ '''vocab_file''': '''vocab.json''', '''spm_file''': '''sentencepiece.bpe.model''', } __SCREAMING_SNAKE_CASE : int ={ '''vocab_file''': { '''facebook/s2t-small-librispeech-asr''': ( '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json''' ), }, '''spm_file''': { '''facebook/s2t-small-librispeech-asr''': ( '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model''' ) }, } __SCREAMING_SNAKE_CASE : Optional[int] ={ '''facebook/s2t-small-librispeech-asr''': 1_024, } __SCREAMING_SNAKE_CASE : Optional[int] =['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de'''] __SCREAMING_SNAKE_CASE : Optional[Any] ={'''mustc''': MUSTC_LANGS} class A_ ( UpperCamelCase__ ): _A :Optional[int] = VOCAB_FILES_NAMES _A :Dict = PRETRAINED_VOCAB_FILES_MAP _A :List[str] = MAX_MODEL_INPUT_SIZES _A :Union[str, Any] = ['''input_ids''', '''attention_mask'''] _A :Optional[int] = [] def __init__( self : Union[str, Any] , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : List[str]="<s>" , snake_case__ : str="</s>" , snake_case__ : List[str]="<pad>" , snake_case__ : Union[str, Any]="<unk>" , snake_case__ : List[Any]=False , snake_case__ : Tuple=False , snake_case__ : Optional[int]=None , snake_case__ : Dict=None , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : int , ): lowercase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__A , eos_token=__A , unk_token=__A , pad_token=__A , do_upper_case=__A , do_lower_case=__A , tgt_lang=__A , lang_codes=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , ) lowercase = do_upper_case lowercase = do_lower_case lowercase = load_json(__A ) lowercase = {v: k for k, v in self.encoder.items()} lowercase = spm_file lowercase = load_spm(__A , self.sp_model_kwargs ) if lang_codes is not None: lowercase = lang_codes lowercase = LANGUAGES[lang_codes] lowercase = [F"""<lang:{lang}>""" for lang in self.langs] lowercase = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs} lowercase = self.lang_tokens lowercase = tgt_lang if tgt_lang is not None else self.langs[0] self.set_tgt_lang_special_tokens(self._tgt_lang ) else: lowercase = {} @property def SCREAMING_SNAKE_CASE__ ( self : Dict ): return len(self.encoder ) @property def SCREAMING_SNAKE_CASE__ ( self : str ): return self._tgt_lang @tgt_lang.setter def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case__ : Optional[int] ): lowercase = new_tgt_lang self.set_tgt_lang_special_tokens(__A ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case__ : str ): lowercase = self.lang_code_to_id[tgt_lang] lowercase = [lang_code_id] def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case__ : str ): return self.sp_model.encode(__A , out_type=__A ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case__ : Optional[Any] ): return self.encoder.get(__A , self.encoder[self.unk_token] ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case__ : int ): return self.decoder.get(__A , self.unk_token ) def SCREAMING_SNAKE_CASE__ ( self : str , snake_case__ : List[str] ): lowercase = [] 
lowercase = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: lowercase = self.sp_model.decode(__A ) out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " " lowercase = [] else: current_sub_tokens.append(__A ) lowercase = self.sp_model.decode(__A ) out_string += decoded.upper() if self.do_upper_case else decoded return out_string.strip() def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case__ : Optional[int] , snake_case__ : Dict=None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE__ ( self : int , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A ) lowercase = [1] * len(self.prefix_tokens ) lowercase = [1] if token_ids_a is None: return prefix_ones + ([0] * len(__A )) + suffix_ones return prefix_ones + ([0] * len(__A )) + ([0] * len(__A )) + suffix_ones def SCREAMING_SNAKE_CASE__ ( self : str ): lowercase = self.encoder.copy() vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Dict ): lowercase = self.__dict__.copy() lowercase = None return state def __setstate__( self : Tuple , snake_case__ : Dict ): lowercase = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): lowercase = {} lowercase = load_spm(self.spm_file , self.sp_model_kwargs ) def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case__ : str , snake_case__ : Optional[str] = None ): lowercase = Path(__A ) assert save_dir.is_dir(), F"""{save_directory} should be a directory""" lowercase = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""] ) lowercase = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""] ) save_json(self.encoder , __A ) if os.path.abspath(self.spm_file ) != os.path.abspath(__A ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , __A ) elif not os.path.isfile(self.spm_file ): with open(__A , """wb""" ) as fi: lowercase = self.sp_model.serialized_model_proto() fi.write(__A ) return (str(__A ), str(__A )) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = sentencepiece.SentencePieceProcessor(**UpperCamelCase__ ) spm.Load(str(UpperCamelCase__ ) ) return spm def UpperCamelCase__ ( lowerCAmelCase__ ): with open(UpperCamelCase__ ,"""r""" ) as f: return json.load(UpperCamelCase__ ) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): with open(UpperCamelCase__ ,"""w""" ) as f: json.dump(UpperCamelCase__ ,UpperCamelCase__ ,indent=2 )
import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = 42 lowerCamelCase_ = jnp.floataa lowerCamelCase_ = True def _snake_case ( self :Tuple ) -> Optional[Any]: """simple docstring""" super().setup() SCREAMING_SNAKE_CASE__ = nn.Dense(5 , dtype=self.dtype ) def __call__( self :List[Any] , *__A :int , **__A :Optional[Any] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = super().__call__(*__A , **__A ) SCREAMING_SNAKE_CASE__ = self.cls(outputs[2] ) return outputs[:2] + (cls_out,) class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = FlaxBigBirdForNaturalQuestionsModule def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Tuple , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple ): def cross_entropy(UpperCamelCase__: List[str] , UpperCamelCase__: List[str] , UpperCamelCase__: List[str]=None ): SCREAMING_SNAKE_CASE__ = logits.shape[-1] SCREAMING_SNAKE_CASE__ = (labels[..., None] == jnp.arange(UpperCamelCase__ )[None]).astype("""f4""" ) SCREAMING_SNAKE_CASE__ = jax.nn.log_softmax(UpperCamelCase__ , axis=-1 ) SCREAMING_SNAKE_CASE__ = -jnp.sum(labels * logits , axis=-1 ) if reduction is not None: SCREAMING_SNAKE_CASE__ = reduction(UpperCamelCase__ ) return loss SCREAMING_SNAKE_CASE__ = partial(UpperCamelCase__ , reduction=jnp.mean ) SCREAMING_SNAKE_CASE__ = cross_entropy(UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = cross_entropy(UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = cross_entropy(UpperCamelCase__ , UpperCamelCase__ ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class UpperCamelCase_ : lowerCamelCase_ = "google/bigbird-roberta-base" lowerCamelCase_ = 30_00 lowerCamelCase_ = 1_05_00 lowerCamelCase_ = 1_28 lowerCamelCase_ = 3 lowerCamelCase_ = 1 lowerCamelCase_ = 5 # tx_args lowerCamelCase_ = 3e-5 lowerCamelCase_ = 0.0 lowerCamelCase_ = 2_00_00 lowerCamelCase_ = 0.0095 lowerCamelCase_ = "bigbird-roberta-natural-questions" lowerCamelCase_ = "training-expt" lowerCamelCase_ = "data/nq-training.jsonl" lowerCamelCase_ = "data/nq-validation.jsonl" def _snake_case ( self :str ) -> Optional[int]: """simple docstring""" os.makedirs(self.base_dir , exist_ok=__A ) SCREAMING_SNAKE_CASE__ = os.path.join(self.base_dir , self.save_dir ) SCREAMING_SNAKE_CASE__ = self.batch_size_per_device * jax.device_count() @dataclass class UpperCamelCase_ : lowerCamelCase_ = 42 lowerCamelCase_ = 40_96 # no dynamic padding on TPUs def __call__( self :Optional[Any] , __A :Optional[int] ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.collate_fn(__A ) SCREAMING_SNAKE_CASE__ = jax.tree_util.tree_map(__A , __A ) return batch def _snake_case ( self :List[Any] , __A :Union[str, Any] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.fetch_inputs(features["""input_ids"""] ) SCREAMING_SNAKE_CASE__ = { 
"""input_ids""": jnp.array(__A , dtype=jnp.intaa ), """attention_mask""": jnp.array(__A , dtype=jnp.intaa ), """start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.intaa ), """end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.intaa ), """pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.intaa ), } return batch def _snake_case ( self :Tuple , __A :list ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ = [self._fetch_inputs(__A ) for ids in input_ids] return zip(*__A ) def _snake_case ( self :List[str] , __A :list ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [1 for _ in range(len(__A ) )] while len(__A ) < self.max_length: input_ids.append(self.pad_id ) attention_mask.append(0 ) return input_ids, attention_mask def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any]=None ): if seed is not None: SCREAMING_SNAKE_CASE__ = dataset.shuffle(seed=UpperCamelCase__ ) for i in range(len(UpperCamelCase__ ) // batch_size ): SCREAMING_SNAKE_CASE__ = dataset[i * batch_size : (i + 1) * batch_size] yield dict(UpperCamelCase__ ) @partial(jax.pmap , axis_name="""batch""" ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Dict , UpperCamelCase__: Optional[int] , **UpperCamelCase__: Optional[int] ): def loss_fn(UpperCamelCase__: List[Any] ): SCREAMING_SNAKE_CASE__ = model_inputs.pop("""start_labels""" ) SCREAMING_SNAKE_CASE__ = model_inputs.pop("""end_labels""" ) SCREAMING_SNAKE_CASE__ = model_inputs.pop("""pooled_labels""" ) SCREAMING_SNAKE_CASE__ = state.apply_fn(**UpperCamelCase__ , params=UpperCamelCase__ , dropout_rng=UpperCamelCase__ , train=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = outputs return state.loss_fn( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = jax.random.split(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = jax.value_and_grad(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = grad_fn(state.params ) SCREAMING_SNAKE_CASE__ = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" ) SCREAMING_SNAKE_CASE__ = jax.lax.pmean(UpperCamelCase__ , """batch""" ) SCREAMING_SNAKE_CASE__ = state.apply_gradients(grads=UpperCamelCase__ ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name="""batch""" ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] , **UpperCamelCase__: Dict ): SCREAMING_SNAKE_CASE__ = model_inputs.pop("""start_labels""" ) SCREAMING_SNAKE_CASE__ = model_inputs.pop("""end_labels""" ) SCREAMING_SNAKE_CASE__ = model_inputs.pop("""pooled_labels""" ) SCREAMING_SNAKE_CASE__ = state.apply_fn(**UpperCamelCase__ , params=state.params , train=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = outputs SCREAMING_SNAKE_CASE__ = state.loss_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" ) return metrics class UpperCamelCase_ ( train_state.TrainState ): lowerCamelCase_ = struct.field(pytree_node=UpperCamelCase__ ) @dataclass class UpperCamelCase_ : lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = None def _snake_case ( self :List[Any] , __A :str , __A :str , __A :str , __A 
:Tuple=None ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = model.params SCREAMING_SNAKE_CASE__ = TrainState.create( apply_fn=model.__call__ , params=__A , tx=__A , loss_fn=__A , ) if ckpt_dir is not None: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = restore_checkpoint(__A , __A ) SCREAMING_SNAKE_CASE__ = { """lr""": args.lr, """init_lr""": args.init_lr, """warmup_steps""": args.warmup_steps, """num_train_steps""": num_train_steps, """weight_decay""": args.weight_decay, } SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = build_tx(**__A ) SCREAMING_SNAKE_CASE__ = train_state.TrainState( step=__A , apply_fn=model.__call__ , params=__A , tx=__A , opt_state=__A , ) SCREAMING_SNAKE_CASE__ = args SCREAMING_SNAKE_CASE__ = data_collator SCREAMING_SNAKE_CASE__ = lr SCREAMING_SNAKE_CASE__ = params SCREAMING_SNAKE_CASE__ = jax_utils.replicate(__A ) return state def _snake_case ( self :Optional[Any] , __A :Optional[int] , __A :int , __A :int ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.args SCREAMING_SNAKE_CASE__ = len(__A ) // args.batch_size SCREAMING_SNAKE_CASE__ = jax.random.PRNGKey(0 ) SCREAMING_SNAKE_CASE__ = jax.random.split(__A , jax.device_count() ) for epoch in range(args.max_epochs ): SCREAMING_SNAKE_CASE__ = jnp.array(0 , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE__ = get_batched_dataset(__A , args.batch_size , seed=__A ) SCREAMING_SNAKE_CASE__ = 0 for batch in tqdm(__A , total=__A , desc=f'''Running EPOCH-{epoch}''' ): SCREAMING_SNAKE_CASE__ = self.data_collator(__A ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.train_step_fn(__A , __A , **__A ) running_loss += jax_utils.unreplicate(metrics["""loss"""] ) i += 1 if i % args.logging_steps == 0: SCREAMING_SNAKE_CASE__ = jax_utils.unreplicate(state.step ) SCREAMING_SNAKE_CASE__ = running_loss.item() / i SCREAMING_SNAKE_CASE__ = self.scheduler_fn(state_step - 1 ) SCREAMING_SNAKE_CASE__ = self.evaluate(__A , __A ) SCREAMING_SNAKE_CASE__ = { """step""": state_step.item(), """eval_loss""": eval_loss.item(), """tr_loss""": tr_loss, """lr""": lr.item(), } tqdm.write(str(__A ) ) self.logger.log(__A , commit=__A ) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + f'''-e{epoch}-s{i}''' , state=__A ) def _snake_case ( self :List[str] , __A :Dict , __A :str ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = get_batched_dataset(__A , self.args.batch_size ) SCREAMING_SNAKE_CASE__ = len(__A ) // self.args.batch_size SCREAMING_SNAKE_CASE__ = jnp.array(0 , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE__ = 0 for batch in tqdm(__A , total=__A , desc="""Evaluating ... """ ): SCREAMING_SNAKE_CASE__ = self.data_collator(__A ) SCREAMING_SNAKE_CASE__ = self.val_step_fn(__A , **__A ) running_loss += jax_utils.unreplicate(metrics["""loss"""] ) i += 1 return running_loss / i def _snake_case ( self :List[Any] , __A :Any , __A :Dict ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = jax_utils.unreplicate(__A ) print(f'''SAVING CHECKPOINT IN {save_dir}''' , end=""" ... 
""" ) self.model_save_fn(__A , params=state.params ) with open(os.path.join(__A , """opt_state.msgpack""" ) , """wb""" ) as f: f.write(to_bytes(state.opt_state ) ) joblib.dump(self.args , os.path.join(__A , """args.joblib""" ) ) joblib.dump(self.data_collator , os.path.join(__A , """data_collator.joblib""" ) ) with open(os.path.join(__A , """training_state.json""" ) , """w""" ) as f: json.dump({"""step""": state.step.item()} , __A ) print("""DONE""" ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Optional[Any] ): print(f'''RESTORING CHECKPOINT FROM {save_dir}''' , end=""" ... """ ) with open(os.path.join(UpperCamelCase__ , """flax_model.msgpack""" ) , """rb""" ) as f: SCREAMING_SNAKE_CASE__ = from_bytes(state.params , f.read() ) with open(os.path.join(UpperCamelCase__ , """opt_state.msgpack""" ) , """rb""" ) as f: SCREAMING_SNAKE_CASE__ = from_bytes(state.opt_state , f.read() ) SCREAMING_SNAKE_CASE__ = joblib.load(os.path.join(UpperCamelCase__ , """args.joblib""" ) ) SCREAMING_SNAKE_CASE__ = joblib.load(os.path.join(UpperCamelCase__ , """data_collator.joblib""" ) ) with open(os.path.join(UpperCamelCase__ , """training_state.json""" ) , """r""" ) as f: SCREAMING_SNAKE_CASE__ = json.load(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = training_state["""step"""] print("""DONE""" ) return params, opt_state, step, args, data_collator def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict ): SCREAMING_SNAKE_CASE__ = num_train_steps - warmup_steps SCREAMING_SNAKE_CASE__ = optax.linear_schedule(init_value=UpperCamelCase__ , end_value=UpperCamelCase__ , transition_steps=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = optax.linear_schedule(init_value=UpperCamelCase__ , end_value=1e-7 , transition_steps=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple , UpperCamelCase__: Tuple , UpperCamelCase__: Tuple ): def weight_decay_mask(UpperCamelCase__: Any ): SCREAMING_SNAKE_CASE__ = traverse_util.flatten_dict(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()} return traverse_util.unflatten_dict(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = scheduler_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = optax.adamw(learning_rate=UpperCamelCase__ , weight_decay=UpperCamelCase__ , mask=UpperCamelCase__ ) return tx, lr
'''simple docstring''' import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING __lowerCAmelCase = { """facebook/mask2former-swin-small-coco-instance""": ( """https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json""" ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } __lowerCAmelCase = logging.get_logger(__name__) class UpperCAmelCase__ ( UpperCamelCase__ ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = '''mask2former''' __UpperCAmelCase : int = ['''swin'''] __UpperCAmelCase : Dict = {'''hidden_size''': '''hidden_dim'''} def __init__( self : List[str] ,_a : Optional[Dict] = None ,_a : int = 256 ,_a : int = 256 ,_a : int = 256 ,_a : int = 1024 ,_a : str = "relu" ,_a : int = 6 ,_a : int = 10 ,_a : int = 8 ,_a : float = 0.0 ,_a : int = 2048 ,_a : bool = False ,_a : bool = False ,_a : int = 4 ,_a : int = 255 ,_a : int = 100 ,_a : float = 0.1 ,_a : float = 2.0 ,_a : float = 5.0 ,_a : float = 5.0 ,_a : int = 1_2544 ,_a : float = 3.0 ,_a : float = 0.75 ,_a : float = 0.02 ,_a : float = 1.0 ,_a : bool = True ,_a : List[int] = [4, 8, 16, 32] ,_a : bool = None ,**_a : Tuple ,): '''simple docstring''' if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' ) _a : Any = CONFIG_MAPPING['swin']( image_size=224 ,in_channels=3 ,patch_size=4 ,embed_dim=96 ,depths=[2, 2, 18, 2] ,num_heads=[3, 6, 12, 24] ,window_size=7 ,drop_path_rate=0.3 ,use_absolute_embeddings=__A ,out_features=['stage1', 'stage2', 'stage3', 'stage4'] ,) if isinstance(__A ,__A ): _a : str = backbone_config.pop('model_type' ) _a : Tuple = CONFIG_MAPPING[backbone_model_type] _a : Union[str, Any] = config_class.from_dict(__A ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """ F"""Supported model types: {','.join(self.backbones_supported )}""" ) _a : Optional[int] = backbone_config _a : Optional[int] = feature_size _a : Tuple = mask_feature_size _a : Union[str, Any] = hidden_dim _a : Tuple = encoder_feedforward_dim _a : List[Any] = activation_function _a : Any = encoder_layers _a : List[str] = decoder_layers _a : Dict = num_attention_heads _a : Dict = dropout _a : Optional[int] = dim_feedforward _a : Dict = pre_norm _a : List[Any] = enforce_input_projection _a : List[str] = common_stride _a : Tuple = ignore_value _a : List[Any] = num_queries _a : str = no_object_weight _a : Union[str, Any] = class_weight _a : Union[str, Any] = mask_weight _a : List[str] = dice_weight _a : Union[str, Any] = train_num_points _a : Optional[Any] = oversample_ratio _a : int = importance_sample_ratio _a : List[Any] = init_std _a : str = init_xavier_std _a : Dict = use_auxiliary_loss _a : Dict = feature_strides _a : List[str] = output_auxiliary_logits _a : List[str] = decoder_layers super().__init__(**__A ) @classmethod def __lowercase ( cls : str ,_a : PretrainedConfig ,**_a : int ): '''simple docstring''' return cls( backbone_config=__A ,**__A ,) def __lowercase ( self : Tuple ): '''simple docstring''' _a : int = copy.deepcopy(self.__dict__ ) _a : Union[str, Any] = self.backbone_config.to_dict() _a : int = self.__class__.model_type return output
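A brief usage sketch for the configuration above, assuming the standard transformers class names (instantiating the model this way gives randomly initialized weights):

from transformers import Mask2FormerConfig, Mask2FormerModel

config = Mask2FormerConfig()  # falls back to the default Swin backbone, as the init above shows
model = Mask2FormerModel(config)
print(config.hidden_dim, config.num_queries)  # 256 100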
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline else: from .camera import create_pan_cameras from .pipeline_shap_e import ShapEPipeline from .pipeline_shap_e_img2img import ShapEImgaImgPipeline from .renderer import ( BoundingBoxVolume, ImportanceRaySampler, MLPNeRFModelOutput, MLPNeRSTFModel, ShapEParamsProjModel, ShapERenderer, StratifiedRaySampler, VoidNeRFModel, )
import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ): SCREAMING_SNAKE_CASE__ = SwinConfig.from_pretrained( """microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) SCREAMING_SNAKE_CASE__ = MaskFormerConfig(backbone_config=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = """huggingface/label-files""" if "ade20k-full" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 847 SCREAMING_SNAKE_CASE__ = """maskformer-ade20k-full-id2label.json""" elif "ade" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 150 SCREAMING_SNAKE_CASE__ = """ade20k-id2label.json""" elif "coco-stuff" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 171 SCREAMING_SNAKE_CASE__ = """maskformer-coco-stuff-id2label.json""" elif "coco" in model_name: # TODO SCREAMING_SNAKE_CASE__ = 133 SCREAMING_SNAKE_CASE__ = """coco-panoptic-id2label.json""" elif "cityscapes" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 19 SCREAMING_SNAKE_CASE__ = """cityscapes-id2label.json""" elif "vistas" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 65 SCREAMING_SNAKE_CASE__ = """mapillary-vistas-id2label.json""" SCREAMING_SNAKE_CASE__ = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE__ = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} return config def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] ): SCREAMING_SNAKE_CASE__ = [] # stem # fmt: off rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) 
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((f'''backbone.layers.{i}.downsample.reduction.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((f'''backbone.layers.{i}.downsample.norm.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((f'''backbone.layers.{i}.downsample.norm.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') ) # FPN rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((f'''sem_seg_head.adapter_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') ) rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') ) rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') ) rename_keys.append((f'''sem_seg_head.layer_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') ) rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') ) rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') ) rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") ) rename_keys.append(("""sem_seg_head.mask_features.bias""", 
"""model.pixel_level_module.decoder.mask_projection.bias""") ) # Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') ) # cross-attention out projection rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') ) # MLP 1 rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') ) # MLP 2 rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') ) # layernorm 1 (self-attention layernorm) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') ) # layernorm 2 (cross-attention layernorm) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') ) # layernorm 3 (final layernorm) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") ) # heads on top rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") ) 
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") ) rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") ) for i in range(3 ): rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', f'''mask_embedder.{i}.0.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', f'''mask_embedder.{i}.0.bias''') ) # fmt: on return rename_keys def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[int] ): SCREAMING_SNAKE_CASE__ = dct.pop(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = val def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: Union[str, Any] ): SCREAMING_SNAKE_CASE__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): SCREAMING_SNAKE_CASE__ = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' ) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ = in_proj_weight[:dim, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[: dim] SCREAMING_SNAKE_CASE__ = in_proj_weight[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE__ = in_proj_bias[ dim : dim * 2 ] SCREAMING_SNAKE_CASE__ = in_proj_weight[ -dim :, : ] SCREAMING_SNAKE_CASE__ = in_proj_bias[-dim :] # fmt: on def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Optional[Any] ): # fmt: off SCREAMING_SNAKE_CASE__ = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' ) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ = in_proj_weight[: hidden_size, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[:config.hidden_size] SCREAMING_SNAKE_CASE__ = in_proj_weight[hidden_size : hidden_size * 2, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[hidden_size : hidden_size * 2] SCREAMING_SNAKE_CASE__ = in_proj_weight[-hidden_size :, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' ) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ = in_proj_weight[: hidden_size, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[:config.hidden_size] SCREAMING_SNAKE_CASE__ = in_proj_weight[hidden_size : hidden_size * 2, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[hidden_size : hidden_size * 2] SCREAMING_SNAKE_CASE__ = in_proj_weight[-hidden_size :, :] 
SCREAMING_SNAKE_CASE__ = in_proj_bias[-hidden_size :] # fmt: on def SCREAMING_SNAKE_CASE__ ( ): SCREAMING_SNAKE_CASE__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" SCREAMING_SNAKE_CASE__ = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ) return im @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: str , UpperCamelCase__: str , UpperCamelCase__: bool = False ): SCREAMING_SNAKE_CASE__ = get_maskformer_config(UpperCamelCase__ ) # load original state_dict with open(UpperCamelCase__ , """rb""" ) as f: SCREAMING_SNAKE_CASE__ = pickle.load(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = data["""model"""] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys SCREAMING_SNAKE_CASE__ = create_rename_keys(UpperCamelCase__ ) for src, dest in rename_keys: rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) read_in_swin_q_k_v(UpperCamelCase__ , config.backbone_config ) read_in_decoder_q_k_v(UpperCamelCase__ , UpperCamelCase__ ) # update to torch tensors for key, value in state_dict.items(): SCREAMING_SNAKE_CASE__ = torch.from_numpy(UpperCamelCase__ ) # load 🤗 model SCREAMING_SNAKE_CASE__ = MaskFormerForInstanceSegmentation(UpperCamelCase__ ) model.eval() for name, param in model.named_parameters(): print(UpperCamelCase__ , param.shape ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(UpperCamelCase__ ) == 0, f'''Unexpected keys: {unexpected_keys}''' # verify results SCREAMING_SNAKE_CASE__ = prepare_img() if "vistas" in model_name: SCREAMING_SNAKE_CASE__ = 65 elif "cityscapes" in model_name: SCREAMING_SNAKE_CASE__ = 65_535 else: SCREAMING_SNAKE_CASE__ = 255 SCREAMING_SNAKE_CASE__ = True if """ade""" in model_name else False SCREAMING_SNAKE_CASE__ = MaskFormerImageProcessor(ignore_index=UpperCamelCase__ , reduce_labels=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = image_processor(UpperCamelCase__ , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE__ = model(**UpperCamelCase__ ) print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": SCREAMING_SNAKE_CASE__ = torch.tensor( [[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' ) Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) model.save_pretrained(UpperCamelCase__ ) image_processor.save_pretrained(UpperCamelCase__ ) if push_to_hub: print("""Pushing model and image processor to the hub...""" ) model.push_to_hub(f'''nielsr/{model_name}''' ) image_processor.push_to_hub(f'''nielsr/{model_name}''' ) if __name__ == "__main__": _lowerCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='maskformer-swin-tiny-ade', type=str, help=('Name of the MaskFormer model you\'d like to convert',), ) parser.add_argument( '--checkpoint_path', default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl', type=str, help='Path to the original state dict (.pth file).', ) parser.add_argument( 
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) _lowerCamelCase = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
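# A minimal, self-contained sketch of the key-renaming pattern the conversion
# script above applies hundreds of times: pop the tensor out of the source
# state dict under its old key and reinsert it under the Transformers key.
# The target key below is one real pair from the rename table.
import torch


def rename_key_demo(state_dict: dict, old: str, new: str) -> None:
    state_dict[new] = state_dict.pop(old)


sd = {"backbone.patch_embed.proj.weight": torch.zeros(96, 3, 4, 4)}
rename_key_demo(
    sd,
    "backbone.patch_embed.proj.weight",
    "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight",
)
print(list(sd))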
"""simple docstring""" UpperCAmelCase : List[str] = 8.31_44_62 # Unit - J mol-1 K-1 def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int: '''simple docstring''' if moles < 0 or kelvin < 0 or volume < 0: raise ValueError("""Invalid inputs. Enter positive value.""" ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> str: '''simple docstring''' if moles < 0 or kelvin < 0 or pressure < 0: raise ValueError("""Invalid inputs. Enter positive value.""" ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure if __name__ == "__main__": from doctest import testmod testmod()
from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowerCamelCase = logging.get_logger(__name__) _lowerCamelCase = { 'nielsr/canine-s': 2048, } # Unicode defines 1,114,112 total “codepoints” _lowerCamelCase = 1114112 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py _lowerCamelCase = 0 _lowerCamelCase = 0XE0_00 _lowerCamelCase = 0XE0_01 _lowerCamelCase = 0XE0_02 _lowerCamelCase = 0XE0_03 _lowerCamelCase = 0XE0_04 # Maps special codepoints to human-readable names. _lowerCamelCase = { # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. _lowerCamelCase = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self :str , __A :str=chr(__A ) , __A :str=chr(__A ) , __A :Dict=chr(__A ) , __A :str=chr(__A ) , __A :Union[str, Any]=chr(__A ) , __A :str=chr(__A ) , __A :int=False , __A :int=2048 , **__A :Dict , ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token # Mask token behave like a normal word, i.e. include the space before it SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token super().__init__( bos_token=__A , eos_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , model_max_length=__A , **__A , ) # Creates a mapping for looking up the IDs of special symbols. SCREAMING_SNAKE_CASE__ = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): SCREAMING_SNAKE_CASE__ = codepoint # Creates a mapping for looking up the string forms of special symbol IDs. 
SCREAMING_SNAKE_CASE__ = { codepoint: name for name, codepoint in self._special_codepoints.items() } SCREAMING_SNAKE_CASE__ = UNICODE_VOCAB_SIZE SCREAMING_SNAKE_CASE__ = len(self._special_codepoints ) @property def _snake_case ( self :Optional[Any] ) -> int: """simple docstring""" return self._unicode_vocab_size def _snake_case ( self :Tuple , __A :str ) -> List[str]: """simple docstring""" return list(__A ) def _snake_case ( self :Optional[Any] , __A :str ) -> int: """simple docstring""" try: return ord(__A ) except TypeError: raise ValueError(f'''invalid token: \'{token}\'''' ) def _snake_case ( self :str , __A :int ) -> str: """simple docstring""" try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(__A ) except TypeError: raise ValueError(f'''invalid id: {index}''' ) def _snake_case ( self :Union[str, Any] , __A :Optional[int] ) -> Any: """simple docstring""" return "".join(__A ) def _snake_case ( self :Optional[Any] , __A :List[int] , __A :Optional[List[int]] = None ) -> List[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [self.sep_token_id] SCREAMING_SNAKE_CASE__ = [self.cls_token_id] SCREAMING_SNAKE_CASE__ = cls + token_ids_a + sep if token_ids_a is not None: result += token_ids_a + sep return result def _snake_case ( self :List[Any] , __A :List[int] , __A :Optional[List[int]] = None , __A :bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A ) SCREAMING_SNAKE_CASE__ = [1] + ([0] * len(__A )) + [1] if token_ids_a is not None: result += ([0] * len(__A )) + [1] return result def _snake_case ( self :List[str] , __A :List[int] , __A :Optional[List[int]] = None ) -> List[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [self.sep_token_id] SCREAMING_SNAKE_CASE__ = [self.cls_token_id] SCREAMING_SNAKE_CASE__ = len(cls + token_ids_a + sep ) * [0] if token_ids_a is not None: result += len(token_ids_a + sep ) * [1] return result def _snake_case ( self :int , __A :str , __A :Optional[str] = None ) -> Any: """simple docstring""" return ()
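# A minimal sketch (not the Transformers API) of the character-level scheme the
# tokenizer above implements: each token is one character, its id is the
# Unicode codepoint, and special symbols live in the Private Use Area so they
# can never collide with real text.
CLS_ID, SEP_ID = 0xE000, 0xE001


def to_ids(text: str) -> list[int]:
    return [CLS_ID] + [ord(ch) for ch in text] + [SEP_ID]


ids = to_ids("héllo")
print(ids)                                 # [57344, 104, 233, 108, 108, 111, 57345]
print("".join(chr(i) for i in ids[1:-1]))  # héllo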
def binary_multiply(a: int, b: int) -> int:
    """Multiply a by b with the binary ("Russian peasant") method."""
    res = 0
    while b > 0:
        if b & 1:  # low bit of b is set: this power-of-two multiple of a contributes
            res += a
        a += a     # double a
        b >>= 1    # halve b
    return res


def binary_mod_multiply(a: int, b: int, c: int) -> int:
    """Multiply a by b modulo c with the same binary method."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
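# Both loops are "Russian peasant" multiplication: halve b, double a, and
# accumulate a whenever the current low bit of b is set.
assert binary_multiply(7, 13) == 91
assert binary_mod_multiply(7, 13, 10) == 91 % 10  # == 1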
import inspect import os import torch from transformers import AutoModel from transformers.testing_utils import mockenv_context from transformers.trainer_utils import set_seed import accelerate from accelerate.accelerator import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils.testing import ( AccelerateTestCase, TempDirTestCase, execute_subprocess_async, require_cuda, require_fsdp, require_multi_gpu, slow, ) from accelerate.utils.constants import ( FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, ) from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin from accelerate.utils.other import patch_environment set_seed(42) _lowerCamelCase = 'bert-base-cased' _lowerCamelCase = 'fp16' _lowerCamelCase = 'bf16' _lowerCamelCase = [FPaa, BFaa] @require_fsdp @require_cuda class UpperCamelCase_ ( UpperCamelCase__ ): def _snake_case ( self :Optional[Any] ) -> Optional[Any]: """simple docstring""" super().setUp() SCREAMING_SNAKE_CASE__ = dict( ACCELERATE_USE_FSDP="""true""" , MASTER_ADDR="""localhost""" , MASTER_PORT="""10999""" , RANK="""0""" , LOCAL_RANK="""0""" , WORLD_SIZE="""1""" , ) def _snake_case ( self :List[Any] ) -> Tuple: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy for i, strategy in enumerate(__A ): SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = f'''{i + 1}''' SCREAMING_SNAKE_CASE__ = strategy with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) ) def _snake_case ( self :int ) -> List[str]: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch for i, prefetch_policy in enumerate(__A ): SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = prefetch_policy with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() if prefetch_policy == "NO_PREFETCH": self.assertIsNone(fsdp_plugin.backward_prefetch ) else: self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) ) def _snake_case ( self :List[str] ) -> List[str]: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType for i, state_dict_type in enumerate(__A ): SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = state_dict_type with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) ) if state_dict_type == "FULL_STATE_DICT": self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu ) self.assertTrue(fsdp_plugin.state_dict_config.ranka_only ) def _snake_case ( self :str ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ = AutoModel.from_pretrained(__A ) for policy in FSDP_AUTO_WRAP_POLICY: SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = policy if policy == "TRANSFORMER_BASED_WRAP": SCREAMING_SNAKE_CASE__ = """BertLayer""" elif policy == "SIZE_BASED_WRAP": SCREAMING_SNAKE_CASE__ = """2000""" with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__A ) if policy == "NO_WRAP": self.assertIsNone(fsdp_plugin.auto_wrap_policy ) else: self.assertIsNotNone(fsdp_plugin.auto_wrap_policy ) SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = """TRANSFORMER_BASED_WRAP""" 
SCREAMING_SNAKE_CASE__ = """T5Layer""" with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() with self.assertRaises(__A ) as cm: fsdp_plugin.set_auto_wrap_policy(__A ) self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) ) SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = """SIZE_BASED_WRAP""" SCREAMING_SNAKE_CASE__ = """0""" with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__A ) self.assertIsNone(fsdp_plugin.auto_wrap_policy ) def _snake_case ( self :Optional[Any] ) -> Optional[int]: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = mp_dtype with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = Accelerator() if mp_dtype == "fp16": SCREAMING_SNAKE_CASE__ = torch.floataa elif mp_dtype == "bf16": SCREAMING_SNAKE_CASE__ = torch.bfloataa SCREAMING_SNAKE_CASE__ = MixedPrecision(param_dtype=__A , reduce_dtype=__A , buffer_dtype=__A ) self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , __A ) if mp_dtype == FPaa: self.assertTrue(isinstance(accelerator.scaler , __A ) ) elif mp_dtype == BFaa: self.assertIsNone(accelerator.scaler ) AcceleratorState._reset_state(__A ) def _snake_case ( self :str ) -> str: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload for flag in [True, False]: SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = str(__A ).lower() with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=__A ) ) @require_fsdp @require_multi_gpu @slow class UpperCamelCase_ ( UpperCamelCase__ ): def _snake_case ( self :Any ) -> Any: """simple docstring""" super().setUp() SCREAMING_SNAKE_CASE__ = 0.8_2 SCREAMING_SNAKE_CASE__ = [ """fsdp_shard_grad_op_transformer_based_wrap""", """fsdp_full_shard_transformer_based_wrap""", ] SCREAMING_SNAKE_CASE__ = { """multi_gpu_fp16""": 3200, """fsdp_shard_grad_op_transformer_based_wrap_fp16""": 2000, """fsdp_full_shard_transformer_based_wrap_fp16""": 1900, # Disabling below test as it overwhelms the RAM memory usage # on CI self-hosted runner leading to tests getting killed. 
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang } SCREAMING_SNAKE_CASE__ = 160 SCREAMING_SNAKE_CASE__ = 160 SCREAMING_SNAKE_CASE__ = inspect.getfile(accelerate.test_utils ) SCREAMING_SNAKE_CASE__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] ) def _snake_case ( self :Union[str, Any] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_performance.py""" ) SCREAMING_SNAKE_CASE__ = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""] for config in self.performance_configs: SCREAMING_SNAKE_CASE__ = cmd.copy() for i, strategy in enumerate(__A ): if strategy.lower() in config: cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' ) break if "fp32" in config: cmd_config.append("""--mixed_precision=no""" ) else: cmd_config.append("""--mixed_precision=fp16""" ) if "cpu_offload" in config: cmd_config.append("""--fsdp_offload_params=True""" ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in config: cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" ) elif policy == "SIZE_BASED_WRAP": cmd_config.append("""--fsdp_min_num_params=2000""" ) cmd_config.extend( [ self.test_file_path, f'''--output_dir={self.tmpdir}''', f'''--performance_lower_bound={self.performance_lower_bound}''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__A , env=os.environ.copy() ) def _snake_case ( self :Dict ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_checkpointing.py""" ) SCREAMING_SNAKE_CASE__ = [ """accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp""", """--mixed_precision=fp16""", """--fsdp_transformer_layer_cls_to_wrap=BertLayer""", ] for i, strategy in enumerate(__A ): SCREAMING_SNAKE_CASE__ = cmd.copy() cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' ) if strategy != "FULL_SHARD": continue SCREAMING_SNAKE_CASE__ = len(__A ) for state_dict_type in FSDP_STATE_DICT_TYPE: SCREAMING_SNAKE_CASE__ = cmd_config[:state_dict_config_index] cmd_config.append(f'''--fsdp_state_dict_type={state_dict_type}''' ) cmd_config.extend( [ self.test_file_path, f'''--output_dir={self.tmpdir}''', """--partial_train_epoch=1""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__A , env=os.environ.copy() ) SCREAMING_SNAKE_CASE__ = cmd_config[:-1] SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdir , """epoch_0""" ) cmd_config.extend( [ f'''--resume_from_checkpoint={resume_from_checkpoint}''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__A , env=os.environ.copy() ) def _snake_case ( self :Tuple ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_peak_memory_usage.py""" ) SCREAMING_SNAKE_CASE__ = [ """accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", ] for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items(): SCREAMING_SNAKE_CASE__ = cmd.copy() if "fp16" in spec: cmd_config.extend(["""--mixed_precision=fp16"""] ) else: cmd_config.extend(["""--mixed_precision=no"""] ) if "multi_gpu" in spec: continue else: cmd_config.extend(["""--use_fsdp"""] ) for i, 
strategy in enumerate(__A ): if strategy.lower() in spec: cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' ) break if "cpu_offload" in spec: cmd_config.append("""--fsdp_offload_params=True""" ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in spec: cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" ) elif policy == "SIZE_BASED_WRAP": cmd_config.append("""--fsdp_min_num_params=2000""" ) cmd_config.extend( [ self.test_file_path, f'''--output_dir={self.tmpdir}''', f'''--peak_memory_upper_bound={peak_mem_upper_bound}''', f'''--n_train={self.n_train}''', f'''--n_val={self.n_val}''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__A , env=os.environ.copy() )
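# A minimal sketch (not accelerate's implementation) of the env-patching
# context manager the FSDP tests above lean on: set variables for the duration
# of a `with` block, then restore whatever was there before.
import os
from contextlib import contextmanager


@contextmanager
def mockenv_sketch(**env):
    saved = {key: os.environ.get(key) for key in env}
    os.environ.update({key: str(value) for key, value in env.items()})
    try:
        yield
    finally:
        for key, value in saved.items():
            if value is None:
                os.environ.pop(key, None)
            else:
                os.environ[key] = value


with mockenv_sketch(ACCELERATE_USE_FSDP="true", FSDP_SHARDING_STRATEGY="1"):
    assert os.environ["ACCELERATE_USE_FSDP"] == "true"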
from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(
    sequence: list[Any], current_subsequence: list[Any], index: int
) -> None:
    """Print every subsequence of `sequence` via a binary include/exclude tree."""
    if index == len(sequence):
        print(current_subsequence)
        return
    # branch 1: leave sequence[index] out
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # branch 2: take sequence[index], recurse, then backtrack
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
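# The include/exclude recursion enumerates all 2^n subsequences; for a
# two-element input the printed order is:
#
#   generate_all_subsequences([1, 2])
#   -> []
#      [2]
#      [1]
#      [1, 2]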
import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig _lowerCamelCase = logging.get_logger(__name__) # General docstring _lowerCamelCase = 'PoolFormerConfig' # Base docstring _lowerCamelCase = 'sail/poolformer_s12' _lowerCamelCase = [1, 512, 7, 7] # Image classification docstring _lowerCamelCase = 'sail/poolformer_s12' _lowerCamelCase = 'tabby, tabby cat' _lowerCamelCase = [ 'sail/poolformer_s12', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] , UpperCamelCase__: float = 0.0 , UpperCamelCase__: bool = False ): if drop_prob == 0.0 or not training: return input SCREAMING_SNAKE_CASE__ = 1 - drop_prob SCREAMING_SNAKE_CASE__ = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets SCREAMING_SNAKE_CASE__ = keep_prob + torch.rand(UpperCamelCase__ , dtype=input.dtype , device=input.device ) random_tensor.floor_() # binarize SCREAMING_SNAKE_CASE__ = input.div(UpperCamelCase__ ) * random_tensor return output class UpperCamelCase_ ( nn.Module ): def __init__( self :Optional[Any] , __A :Optional[float] = None ) -> None: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = drop_prob def _snake_case ( self :Any , __A :torch.Tensor ) -> torch.Tensor: """simple docstring""" return drop_path(__A , self.drop_prob , self.training ) def _snake_case ( self :Dict ) -> str: """simple docstring""" return "p={}".format(self.drop_prob ) class UpperCamelCase_ ( nn.Module ): def __init__( self :Dict , __A :Optional[Any] , __A :Dict , __A :List[str] , __A :Optional[Any] , __A :Tuple , __A :Optional[Any]=None ) -> Union[str, Any]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = patch_size if isinstance(__A , collections.abc.Iterable ) else (patch_size, patch_size) SCREAMING_SNAKE_CASE__ = stride if isinstance(__A , collections.abc.Iterable ) else (stride, stride) SCREAMING_SNAKE_CASE__ = padding if isinstance(__A , collections.abc.Iterable ) else (padding, padding) SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , kernel_size=__A , stride=__A , padding=__A ) SCREAMING_SNAKE_CASE__ = norm_layer(__A ) if norm_layer else nn.Identity() def _snake_case ( self :Dict , __A :Union[str, Any] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.projection(__A ) SCREAMING_SNAKE_CASE__ = self.norm(__A ) return embeddings class UpperCamelCase_ ( nn.GroupNorm ): def __init__( self :Dict , __A :Tuple , **__A :Union[str, Any] ) -> Dict: """simple docstring""" super().__init__(1 , __A , **__A ) class UpperCamelCase_ ( nn.Module ): def __init__( self :List[str] , __A :Optional[int] ) -> Any: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = nn.AvgPoolad(__A , stride=1 , padding=pool_size // 2 , count_include_pad=__A ) def _snake_case ( self :Any , __A :Optional[Any] ) -> Optional[Any]: """simple docstring""" return self.pool(__A ) - hidden_states class UpperCamelCase_ ( nn.Module ): def __init__( self :Optional[Any] , __A :Tuple , __A :Dict , __A :int , 
__A :Any ) -> str: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , 1 ) SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , 1 ) SCREAMING_SNAKE_CASE__ = PoolFormerDropPath(__A ) if isinstance(config.hidden_act , __A ): SCREAMING_SNAKE_CASE__ = ACTaFN[config.hidden_act] else: SCREAMING_SNAKE_CASE__ = config.hidden_act def _snake_case ( self :Union[str, Any] , __A :Optional[int] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.conva(__A ) SCREAMING_SNAKE_CASE__ = self.act_fn(__A ) SCREAMING_SNAKE_CASE__ = self.drop(__A ) SCREAMING_SNAKE_CASE__ = self.conva(__A ) SCREAMING_SNAKE_CASE__ = self.drop(__A ) return hidden_states class UpperCamelCase_ ( nn.Module ): def __init__( self :Any , __A :str , __A :List[str] , __A :Tuple , __A :Dict , __A :Union[str, Any] , __A :int ) -> Optional[int]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = PoolFormerPooling(__A ) SCREAMING_SNAKE_CASE__ = PoolFormerOutput(__A , __A , __A , __A ) SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(__A ) SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(__A ) # Useful for training neural nets SCREAMING_SNAKE_CASE__ = PoolFormerDropPath(__A ) if drop_path > 0.0 else nn.Identity() SCREAMING_SNAKE_CASE__ = config.use_layer_scale if config.use_layer_scale: SCREAMING_SNAKE_CASE__ = nn.Parameter( config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A ) SCREAMING_SNAKE_CASE__ = nn.Parameter( config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A ) def _snake_case ( self :Optional[Any] , __A :Optional[int] ) -> str: """simple docstring""" if self.use_layer_scale: SCREAMING_SNAKE_CASE__ = self.pooling(self.before_norm(__A ) ) SCREAMING_SNAKE_CASE__ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection SCREAMING_SNAKE_CASE__ = hidden_states + self.drop_path(__A ) SCREAMING_SNAKE_CASE__ = () SCREAMING_SNAKE_CASE__ = self.output(self.after_norm(__A ) ) SCREAMING_SNAKE_CASE__ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection SCREAMING_SNAKE_CASE__ = hidden_states + self.drop_path(__A ) SCREAMING_SNAKE_CASE__ = (output,) + outputs return outputs else: SCREAMING_SNAKE_CASE__ = self.drop_path(self.pooling(self.before_norm(__A ) ) ) # First residual connection SCREAMING_SNAKE_CASE__ = pooling_output + hidden_states SCREAMING_SNAKE_CASE__ = () # Second residual connection inside the PoolFormerOutput block SCREAMING_SNAKE_CASE__ = self.drop_path(self.output(self.after_norm(__A ) ) ) SCREAMING_SNAKE_CASE__ = hidden_states + layer_output SCREAMING_SNAKE_CASE__ = (output,) + outputs return outputs class UpperCamelCase_ ( nn.Module ): def __init__( self :Union[str, Any] , __A :List[Any] ) -> Union[str, Any]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = config # stochastic depth decay rule SCREAMING_SNAKE_CASE__ = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings SCREAMING_SNAKE_CASE__ = [] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) SCREAMING_SNAKE_CASE__ = nn.ModuleList(__A ) # Transformer blocks SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = 0 for i in range(config.num_encoder_blocks ): # each block consists of layers 
SCREAMING_SNAKE_CASE__ = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( __A , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(__A ) ) SCREAMING_SNAKE_CASE__ = nn.ModuleList(__A ) def _snake_case ( self :str , __A :Tuple , __A :Dict=False , __A :Tuple=True ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = () if output_hidden_states else None SCREAMING_SNAKE_CASE__ = pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = layers # Get patch embeddings from hidden_states SCREAMING_SNAKE_CASE__ = embedding_layer(__A ) # Send the embeddings through the blocks for _, blk in enumerate(__A ): SCREAMING_SNAKE_CASE__ = blk(__A ) SCREAMING_SNAKE_CASE__ = layer_outputs[0] if output_hidden_states: SCREAMING_SNAKE_CASE__ = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=__A , hidden_states=__A ) class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = PoolFormerConfig lowerCamelCase_ = "poolformer" lowerCamelCase_ = "pixel_values" lowerCamelCase_ = True def _snake_case ( self :Optional[Any] , __A :Tuple ) -> Dict: """simple docstring""" if isinstance(__A , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__A , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def _snake_case ( self :str , __A :Optional[Any] , __A :Union[str, Any]=False ) -> Any: """simple docstring""" if isinstance(__A , __A ): SCREAMING_SNAKE_CASE__ = value _lowerCamelCase = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' _lowerCamelCase = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n' @add_start_docstrings( "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." 
, UpperCamelCase__ , ) class UpperCamelCase_ ( UpperCamelCase__ ): def __init__( self :Union[str, Any] , __A :Any ) -> int: """simple docstring""" super().__init__(__A ) SCREAMING_SNAKE_CASE__ = config SCREAMING_SNAKE_CASE__ = PoolFormerEncoder(__A ) # Initialize weights and apply final processing self.post_init() def _snake_case ( self :Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(__A ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__A , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _snake_case ( self :Dict , __A :Optional[torch.FloatTensor] = None , __A :Optional[bool] = None , __A :Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]: """simple docstring""" SCREAMING_SNAKE_CASE__ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) SCREAMING_SNAKE_CASE__ = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("""You have to specify pixel_values""" ) SCREAMING_SNAKE_CASE__ = self.encoder( __A , output_hidden_states=__A , return_dict=__A , ) SCREAMING_SNAKE_CASE__ = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=__A , hidden_states=encoder_outputs.hidden_states , ) class UpperCamelCase_ ( nn.Module ): def __init__( self :int , __A :Optional[int] ) -> Tuple: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = nn.Linear(config.hidden_size , config.hidden_size ) def _snake_case ( self :List[Any] , __A :Dict ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.dense(__A ) return output @add_start_docstrings( "\n PoolFormer Model transformer with an image classification head on top\n " , UpperCamelCase__ , ) class UpperCamelCase_ ( UpperCamelCase__ ): def __init__( self :str , __A :Union[str, Any] ) -> int: """simple docstring""" super().__init__(__A ) SCREAMING_SNAKE_CASE__ = config.num_labels SCREAMING_SNAKE_CASE__ = PoolFormerModel(__A ) # Final norm SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head SCREAMING_SNAKE_CASE__ = ( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__A ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _snake_case ( self :int , __A :Optional[torch.FloatTensor] = None , __A :Optional[torch.LongTensor] = None , __A :Optional[bool] = None , __A :Optional[bool] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]: """simple docstring""" SCREAMING_SNAKE_CASE__ = return_dict if return_dict is not None else self.config.use_return_dict SCREAMING_SNAKE_CASE__ = self.poolformer( __A , output_hidden_states=__A , return_dict=__A , ) SCREAMING_SNAKE_CASE__ = outputs[0] SCREAMING_SNAKE_CASE__ = self.classifier(self.norm(__A ).mean([-2, -1] ) ) SCREAMING_SNAKE_CASE__ = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: SCREAMING_SNAKE_CASE__ = """regression""" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): SCREAMING_SNAKE_CASE__ = 
"""single_label_classification""" else: SCREAMING_SNAKE_CASE__ = """multi_label_classification""" if self.config.problem_type == "regression": SCREAMING_SNAKE_CASE__ = MSELoss() if self.num_labels == 1: SCREAMING_SNAKE_CASE__ = loss_fct(logits.squeeze() , labels.squeeze() ) else: SCREAMING_SNAKE_CASE__ = loss_fct(__A , __A ) elif self.config.problem_type == "single_label_classification": SCREAMING_SNAKE_CASE__ = CrossEntropyLoss() SCREAMING_SNAKE_CASE__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": SCREAMING_SNAKE_CASE__ = BCEWithLogitsLoss() SCREAMING_SNAKE_CASE__ = loss_fct(__A , __A ) if not return_dict: SCREAMING_SNAKE_CASE__ = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=__A , logits=__A , hidden_states=outputs.hidden_states )
'''simple docstring'''


def find_min(arr: list[int]) -> int:
    """Return the minimum difference between the sums of two subsets of `arr`."""
    n = len(arr)
    s = sum(arr)
    # dp[i][j] is True when some subset of the first i elements sums to j
    dp = [[False for _ in range(s + 1)] for _ in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = True  # the empty subset always reaches sum 0
    for j in range(1, s + 1):
        dp[0][j] = False  # with no elements, only sum 0 is reachable
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # skip arr[i - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]  # take arr[i - 1]
    diff = 0
    # the best split puts the largest reachable sum j <= s / 2 on one side
    for j in range(s // 2, -1, -1):
        if dp[n][j]:
            diff = s - 2 * j
            break
    return diff
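# Example: [1, 6, 11, 5] sums to 23; the best split is {1, 5, 6} vs {11},
# so the minimum achievable difference is 12 - 11 = 1.
assert find_min([1, 6, 11, 5]) == 1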
import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ ( UpperCamelCase__ ): def __init__( self :Union[str, Any] , __A :Optional[int] , __A :Tuple=13 , __A :Dict=7 , __A :Dict=True , __A :str=True , __A :Optional[Any]=True , __A :Optional[Any]=True , __A :Optional[Any]=True , __A :Any=False , __A :Dict=False , __A :Any=False , __A :Tuple=2 , __A :Dict=99 , __A :Optional[Any]=0 , __A :List[str]=32 , __A :Optional[int]=5 , __A :Dict=4 , __A :List[str]=0.1 , __A :Union[str, Any]=0.1 , __A :Tuple=512 , __A :Any=12 , __A :Optional[int]=2 , __A :Union[str, Any]=0.0_2 , __A :Dict=3 , __A :Optional[int]=4 , __A :Any="last" , __A :List[Any]=None , __A :Any=None , ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ = parent SCREAMING_SNAKE_CASE__ = batch_size SCREAMING_SNAKE_CASE__ = seq_length SCREAMING_SNAKE_CASE__ = is_training SCREAMING_SNAKE_CASE__ = use_input_lengths SCREAMING_SNAKE_CASE__ = use_token_type_ids SCREAMING_SNAKE_CASE__ = use_labels SCREAMING_SNAKE_CASE__ = gelu_activation SCREAMING_SNAKE_CASE__ = sinusoidal_embeddings SCREAMING_SNAKE_CASE__ = causal SCREAMING_SNAKE_CASE__ = asm SCREAMING_SNAKE_CASE__ = n_langs SCREAMING_SNAKE_CASE__ = vocab_size SCREAMING_SNAKE_CASE__ = n_special SCREAMING_SNAKE_CASE__ = hidden_size SCREAMING_SNAKE_CASE__ = num_hidden_layers SCREAMING_SNAKE_CASE__ = num_attention_heads SCREAMING_SNAKE_CASE__ = hidden_dropout_prob SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ = max_position_embeddings SCREAMING_SNAKE_CASE__ = type_vocab_size SCREAMING_SNAKE_CASE__ = type_sequence_label_size SCREAMING_SNAKE_CASE__ = initializer_range SCREAMING_SNAKE_CASE__ = num_labels SCREAMING_SNAKE_CASE__ = num_choices SCREAMING_SNAKE_CASE__ = summary_type SCREAMING_SNAKE_CASE__ = use_proj SCREAMING_SNAKE_CASE__ = scope def _snake_case ( self :Optional[Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE__ = None if self.use_input_lengths: SCREAMING_SNAKE_CASE__ = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length SCREAMING_SNAKE_CASE__ = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None if self.use_labels: SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , 2 ).float() SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_choices ) 
SCREAMING_SNAKE_CASE__ = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _snake_case ( self :List[str] ) -> Optional[int]: """simple docstring""" return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def _snake_case ( self :Tuple , __A :str , __A :int , __A :Optional[int] , __A :Any , __A :Union[str, Any] , __A :Optional[int] , __A :Union[str, Any] , __A :Union[str, Any] , __A :str , ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertModel(config=__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A , lengths=__A , langs=__A ) SCREAMING_SNAKE_CASE__ = model(__A , langs=__A ) SCREAMING_SNAKE_CASE__ = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case ( self :str , __A :Any , __A :str , __A :Union[str, Any] , __A :Optional[Any] , __A :Optional[int] , __A :Any , __A :Union[str, Any] , __A :Optional[Any] , __A :Union[str, Any] , ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertWithLMHeadModel(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self :Tuple , __A :Union[str, Any] , __A :Optional[Any] , __A :Dict , __A :Dict , __A :Union[str, Any] , __A :List[str] , __A :Optional[int] , __A :int , __A :str , ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertForQuestionAnsweringSimple(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A ) SCREAMING_SNAKE_CASE__ = model(__A , start_positions=__A , end_positions=__A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case ( self :List[str] , __A :Any , __A :int , __A :Tuple , __A :Optional[Any] , __A :Tuple , __A :Optional[int] , __A :str , __A :int , __A :str , ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertForQuestionAnswering(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A ) SCREAMING_SNAKE_CASE__ = model( __A , start_positions=__A , end_positions=__A , cls_index=__A , is_impossible=__A , p_mask=__A , ) SCREAMING_SNAKE_CASE__ = model( __A , start_positions=__A , end_positions=__A , cls_index=__A , is_impossible=__A , ) ((SCREAMING_SNAKE_CASE__) , ) = result_with_labels.to_tuple() SCREAMING_SNAKE_CASE__ = model(__A , start_positions=__A , end_positions=__A ) ((SCREAMING_SNAKE_CASE__) , ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, 
model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def _snake_case ( self :Optional[int] , __A :str , __A :Optional[int] , __A :Tuple , __A :Dict , __A :List[str] , __A :Tuple , __A :List[str] , __A :Dict , __A :List[str] , ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertForSequenceClassification(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A ) SCREAMING_SNAKE_CASE__ = model(__A , labels=__A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _snake_case ( self :Optional[Any] , __A :Optional[Any] , __A :Optional[Any] , __A :List[str] , __A :Optional[Any] , __A :int , __A :Tuple , __A :Optional[int] , __A :Union[str, Any] , __A :Dict , ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.num_labels SCREAMING_SNAKE_CASE__ = FlaubertForTokenClassification(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A , attention_mask=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case ( self :str , __A :Any , __A :Tuple , __A :List[str] , __A :Tuple , __A :Any , __A :int , __A :Dict , __A :List[str] , __A :Tuple , ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.num_choices SCREAMING_SNAKE_CASE__ = FlaubertForMultipleChoice(config=__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ = model( __A , attention_mask=__A , token_type_ids=__A , labels=__A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _snake_case ( self :Union[str, Any] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) = config_and_inputs SCREAMING_SNAKE_CASE__ = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class UpperCamelCase_ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): lowerCamelCase_ = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) lowerCamelCase_ = ( { "feature-extraction": FlaubertModel, "fill-mask": FlaubertWithLMHeadModel, "question-answering": FlaubertForQuestionAnsweringSimple, "text-classification": FlaubertForSequenceClassification, "token-classification": FlaubertForTokenClassification, "zero-shot": FlaubertForSequenceClassification, } if is_torch_available() else {} ) def 
_snake_case ( self :Any , __A :Optional[int] , __A :Optional[int] , __A :Dict , __A :List[Any] , __A :Tuple ) -> str: """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("""Fast""" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _snake_case ( self :Tuple , __A :List[str] , __A :Optional[int] , __A :Dict=False ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = super()._prepare_for_class(__A , __A , return_labels=__A ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": SCREAMING_SNAKE_CASE__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__A ) SCREAMING_SNAKE_CASE__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__A ) return inputs_dict def _snake_case ( self :str ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertModelTester(self ) SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=__A , emb_dim=37 ) def _snake_case ( self :int ) -> int: """simple docstring""" self.config_tester.run_common_tests() def _snake_case ( self :Optional[Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*__A ) def _snake_case ( self :Tuple ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*__A ) def _snake_case ( self :str ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*__A ) def _snake_case ( self :Union[str, Any] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*__A ) def _snake_case ( self :str ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*__A ) def _snake_case ( self :Any ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*__A ) def _snake_case ( self :Any ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*__A ) @slow def _snake_case ( self :Union[str, Any] ) -> List[str]: """simple docstring""" for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ = FlaubertModel.from_pretrained(__A ) self.assertIsNotNone(__A ) @slow @require_torch_gpu def _snake_case ( self :Tuple ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == FlaubertForMultipleChoice: return SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = model_class(config=__A ) SCREAMING_SNAKE_CASE__ = self._prepare_for_class(__A , __A ) SCREAMING_SNAKE_CASE__ = torch.jit.trace( __A , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(__A , os.path.join(__A , """traced_model.pt""" ) ) SCREAMING_SNAKE_CASE__ = torch.jit.load(os.path.join(__A , """traced_model.pt""" ) , map_location=__A ) loaded(inputs_dict["""input_ids"""].to(__A ) , inputs_dict["""attention_mask"""].to(__A ) ) @require_torch class UpperCamelCase_ ( unittest.TestCase ): @slow def _snake_case ( self :Dict ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" ) SCREAMING_SNAKE_CASE__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) with torch.no_grad(): SCREAMING_SNAKE_CASE__ = model(__A )[0] SCREAMING_SNAKE_CASE__ = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __A ) SCREAMING_SNAKE_CASE__ = torch.tensor( [[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=1E-4 ) )
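# A minimal standalone sketch of the slow integration check above, assuming the
# `flaubert/flaubert_base_cased` checkpoint is reachable and `transformers` plus
# `torch` are installed; the input ids and expected shape are copied from the test.
import torch
from transformers import FlaubertModel


def flaubert_smoke_test() -> None:  # hypothetical helper, not part of the test suite
    model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
    input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
    with torch.no_grad():
        output = model(input_ids)[0]
    assert output.shape == torch.Size((1, 11, 768))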
6
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowercase__ = { "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"], "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = ["VisionTextDualEncoderModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = ["FlaxVisionTextDualEncoderModel"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = ["TFVisionTextDualEncoderModel"] if TYPE_CHECKING: from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel else: import sys lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure)
581
from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] , UpperCamelCase__: str , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Union[str, Any] ): for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})''' def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Any , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Any , UpperCamelCase__: List[str] , UpperCamelCase__: Tuple=True ): model.train() SCREAMING_SNAKE_CASE__ = model(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = F.mse_loss(UpperCamelCase__ , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple , UpperCamelCase__: List[Any]=False ): set_seed(42 ) SCREAMING_SNAKE_CASE__ = RegressionModel() SCREAMING_SNAKE_CASE__ = deepcopy(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = RegressionDataset(length=80 ) SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 ) model.to(accelerator.device ) if sched: SCREAMING_SNAKE_CASE__ = AdamW(params=model.parameters() , lr=1e-3 ) SCREAMING_SNAKE_CASE__ = AdamW(params=ddp_model.parameters() , lr=1e-3 ) SCREAMING_SNAKE_CASE__ = LambdaLR(UpperCamelCase__ , lr_lambda=lambda UpperCamelCase__ : epoch**0.6_5 ) SCREAMING_SNAKE_CASE__ = LambdaLR(UpperCamelCase__ , lr_lambda=lambda UpperCamelCase__ : epoch**0.6_5 ) # Make a copy of `model` if sched: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple ): # Test when on a single CPU or GPU that the context manager does nothing SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ ) # Use a single batch SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = next(iter(UpperCamelCase__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCamelCase__ , 
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: # Sync grads step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )] def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] ): # Test on distributed setup that context manager behaves properly SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ ) # Use a single batch SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = next(iter(UpperCamelCase__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: # Sync grads step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )] def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int=False , UpperCamelCase__: Union[str, Any]=False ): SCREAMING_SNAKE_CASE__ = Accelerator( split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ ) for iteration, batch in enumerate(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = batch.values() # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) ) 
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Do "gradient accumulation" (noop) with accelerator.accumulate(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(UpperCamelCase__ ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )] GradientState._reset_state() def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple=False , UpperCamelCase__: List[str]=False ): SCREAMING_SNAKE_CASE__ = Accelerator( split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ , UpperCamelCase__ ) for iteration, batch in enumerate(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = batch.values() # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(UpperCamelCase__ )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n''' SCREAMING_SNAKE_CASE__ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(UpperCamelCase__ )) if accelerator.num_processes > 1: check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) GradientState._reset_state() def SCREAMING_SNAKE_CASE__ ( ): SCREAMING_SNAKE_CASE__ = Accelerator() SCREAMING_SNAKE_CASE__ = 
RegressionDataset(length=80 ) SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 ) SCREAMING_SNAKE_CASE__ = RegressionDataset(length=96 ) SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(UpperCamelCase__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ ) if iteration < len(UpperCamelCase__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(UpperCamelCase__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ ) if batch_num < len(UpperCamelCase__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def SCREAMING_SNAKE_CASE__ ( ): SCREAMING_SNAKE_CASE__ = Accelerator() SCREAMING_SNAKE_CASE__ = accelerator.state if state.local_process_index == 0: print("""**Test `accumulate` gradient accumulation with dataloader break**""" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("""**Test NOOP `no_sync` context manager**""" ) test_noop_sync(UpperCamelCase__ ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("""**Test Distributed `no_sync` context manager**""" ) test_distributed_sync(UpperCamelCase__ ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation, """ , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation(UpperCamelCase__ , UpperCamelCase__ ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation_with_opt_and_scheduler(UpperCamelCase__ , UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
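# A condensed sketch of the `accumulate` pattern the tests above exercise, using the
# same accelerate test utilities; the batch keys "x"/"y" follow accelerate's
# RegressionDataset and are an assumption of this sketch.
import torch.nn.functional as F
from torch.optim import AdamW
from torch.utils.data import DataLoader

from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel


def accumulate_demo() -> None:  # illustrative helper, not part of the test file
    accelerator = Accelerator(gradient_accumulation_steps=2)
    model = RegressionModel()
    opt = AdamW(model.parameters(), lr=1e-3)
    dataloader = DataLoader(RegressionDataset(length=80), batch_size=16)
    model, opt, dataloader = accelerator.prepare(model, opt, dataloader)
    for batch in dataloader:
        with accelerator.accumulate(model):  # gradients sync only every 2nd step
            loss = F.mse_loss(model(batch["x"]), batch["y"])
            accelerator.backward(loss)
            opt.step()
            opt.zero_grad()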
6
0
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _lowerCAmelCase ( UpperCamelCase__ ): """simple docstring""" lowerCAmelCase__ =['''image_processor''', '''tokenizer'''] lowerCAmelCase__ ='''LayoutLMv3ImageProcessor''' lowerCAmelCase__ =('''LayoutLMv3Tokenizer''', '''LayoutLMv3TokenizerFast''') def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" snake_case__ : Any =None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __A , ) snake_case__ : str =kwargs.pop('''feature_extractor''' ) snake_case__ : List[str] =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(__A , __A ) def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 0 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) -> BatchEncoding: """simple docstring""" if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( '''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''' ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( '''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' ) # first, apply the image processor snake_case__ : Union[str, Any] =self.image_processor(images=__A , return_tensors=__A ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(__A , __A ): snake_case__ : List[str] =[text] # add batch dimension (as the image processor always adds a batch dimension) snake_case__ : str =features['''words'''] snake_case__ : Tuple =self.tokenizer( text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_token_type_ids=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , ) # add pixel values snake_case__ : Dict =features.pop('''pixel_values''' ) if return_overflowing_tokens is True: snake_case__ : Union[str, Any] =self.get_overflowing_images(__A , encoded_inputs['''overflow_to_sample_mapping'''] ) snake_case__ : str =images return encoded_inputs def UpperCAmelCase ( 
self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" snake_case__ : List[str] =[] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(__A ) != len(__A ): raise ValueError( '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got''' f''' {len(__A )} and {len(__A )}''' ) return images_with_overflow def UpperCAmelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" return self.tokenizer.batch_decode(*__A , **__A ) def UpperCAmelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" return self.tokenizer.decode(*__A , **__A ) @property def UpperCAmelCase ( self ) -> Tuple: """simple docstring""" return ["input_ids", "bbox", "attention_mask", "pixel_values"] @property def UpperCAmelCase ( self ) -> Optional[Any]: """simple docstring""" warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __A , ) return self.image_processor_class @property def UpperCAmelCase ( self ) -> Any: """simple docstring""" warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __A , ) return self.image_processor
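# Usage sketch for the processor above, assuming the `microsoft/layoutlmv3-base`
# checkpoint, a Tesseract install (the image processor defaults to apply_ocr=True),
# and a local file "document.png" (placeholder path).
from PIL import Image

from transformers import LayoutLMv3Processor

processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
image = Image.open("document.png").convert("RGB")
encoding = processor(image, return_tensors="pt")
print(sorted(encoding.keys()))  # ['attention_mask', 'bbox', 'input_ids', 'pixel_values']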
381
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = ["image_processor", "tokenizer"] lowerCamelCase_ = "AutoImageProcessor" lowerCamelCase_ = "AutoTokenizer" def __init__( self :Optional[int] , __A :Optional[Any] , __A :Dict ) -> Dict: """simple docstring""" super().__init__(__A , __A ) SCREAMING_SNAKE_CASE__ = self.image_processor def __call__( self :int , __A :str=None , __A :int=None , __A :Union[str, Any]=None , **__A :str ) -> Optional[Any]: """simple docstring""" if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: SCREAMING_SNAKE_CASE__ = self.tokenizer(__A , return_tensors=__A , **__A ) if images is not None: SCREAMING_SNAKE_CASE__ = self.image_processor(__A , return_tensors=__A , **__A ) if text is not None and images is not None: SCREAMING_SNAKE_CASE__ = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__A ) , tensor_type=__A ) def _snake_case ( self :str , *__A :List[str] , **__A :List[str] ) -> List[Any]: """simple docstring""" return self.tokenizer.batch_decode(*__A , **__A ) def _snake_case ( self :List[str] , *__A :Any , **__A :Any ) -> Tuple: """simple docstring""" return self.tokenizer.decode(*__A , **__A ) @property def _snake_case ( self :Dict ) -> List[Any]: """simple docstring""" return ["input_ids", "attention_mask", "pixel_values"]
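# Usage sketch. The class above mirrors transformers' GitProcessor (an AutoImageProcessor
# and an AutoTokenizer composed via ProcessorMixin); the checkpoint name and image path
# are illustrative only, not fixed by this file.
from PIL import Image

from transformers import GitProcessor

processor = GitProcessor.from_pretrained("microsoft/git-base")
inputs = processor(text="a photo of", images=Image.open("cat.png"), return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']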
6
0
"""simple docstring""" import string def a ( __snake_case : str ): '''simple docstring''' UpperCAmelCase_ :Optional[int] = '''''' for i in sequence: UpperCAmelCase_ :str = ord(UpperCamelCase__ ) if 65 <= extract <= 90: output += chr(155 - extract ) elif 97 <= extract <= 122: output += chr(219 - extract ) else: output += i return output def a ( __snake_case : str ): '''simple docstring''' UpperCAmelCase_ :Union[str, Any] = string.ascii_letters UpperCAmelCase_ :List[str] = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1] return "".join( letters_reversed[letters.index(UpperCamelCase__ )] if c in letters else c for c in sequence ) def a ( ): '''simple docstring''' from timeit import timeit print('''Running performance benchmarks...''' ) UpperCAmelCase_ :Optional[int] = '''from string import printable ; from __main__ import atbash, atbash_slow''' print(f'> atbash_slow(): {timeit("atbash_slow(printable)", setup=UpperCamelCase__ )} seconds' ) print(f'> atbash(): {timeit("atbash(printable)", setup=UpperCamelCase__ )} seconds' ) if __name__ == "__main__": for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"): print(f'''{example} encrypted in atbash: {atbash(example)}''') benchmark()
608
def find_min(arr):
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # carry over "sum j reachable without arr[i - 1]"; the earlier dp[i][j - 1]
            # marked every sum reachable once dp[i][0] was True, which is incorrect
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
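# Worked example: [1, 6, 11, 5] sums to 23 and splits into {1, 5, 6} vs {11}
# (12 vs 11), so the minimum partition difference is 1.
assert find_min([1, 6, 11, 5]) == 1
assert find_min([3, 100]) == 97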
6
0
"""simple docstring""" import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType _UpperCamelCase = None _UpperCamelCase = '<' if sys.byteorder == 'little' else '>' # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image _UpperCamelCase = [ np.dtype('|b1'), np.dtype('|u1'), np.dtype('<u2'), np.dtype('>u2'), np.dtype('<i2'), np.dtype('>i2'), np.dtype('<u4'), np.dtype('>u4'), np.dtype('<i4'), np.dtype('>i4'), np.dtype('<f4'), np.dtype('>f4'), np.dtype('<f8'), np.dtype('>f8'), ] @dataclass class SCREAMING_SNAKE_CASE_ : """simple docstring""" __snake_case : List[str] = True __snake_case : str = None # Automatically constructed __snake_case : Optional[Any] = """PIL.Image.Image""" __snake_case : str = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} ) __snake_case : Optional[Any] = field(default="""Image""" , init=UpperCamelCase__ , repr=UpperCamelCase__ ) def __call__( self :Optional[Any] ): return self.pa_type def __lowercase ( self :int , __lowercase :Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) if isinstance(__A , __A ): __lowerCamelCase : Any =np.array(__A ) if isinstance(__A , __A ): return {"path": value, "bytes": None} elif isinstance(__A , __A ): return {"path": None, "bytes": value} elif isinstance(__A , np.ndarray ): # convert the image array to PNG/TIFF bytes return encode_np_array(__A ) elif isinstance(__A , PIL.Image.Image ): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(__A ) elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get('''path''' )} elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )} else: raise ValueError( f'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' ) def __lowercase ( self :Tuple , __lowercase :dict , __lowercase :Optional[Any]=None ): if not self.decode: raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''' ) if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support decoding images, please install \'Pillow\'.''' ) if token_per_repo_id is None: __lowerCamelCase : Optional[Any] ={} __lowerCamelCase , __lowerCamelCase : List[Any] =value['''path'''], value['''bytes'''] if bytes_ is None: if path is None: raise ValueError(f'An image should have one of \'path\' or \'bytes\' but both are None in {value}.' 
) else: if is_local_path(__A ): __lowerCamelCase : Dict =PIL.Image.open(__A ) else: __lowerCamelCase : Union[str, Any] =path.split('''::''' )[-1] try: __lowerCamelCase : Dict =string_to_dict(__A , config.HUB_DATASETS_URL )['''repo_id'''] __lowerCamelCase : Optional[int] =token_per_repo_id.get(__A ) except ValueError: __lowerCamelCase : Optional[Any] =None with xopen(__A , '''rb''' , use_auth_token=__A ) as f: __lowerCamelCase : Any =BytesIO(f.read() ) __lowerCamelCase : List[str] =PIL.Image.open(bytes_ ) else: __lowerCamelCase : Optional[int] =PIL.Image.open(BytesIO(bytes_ ) ) image.load() # to avoid "Too many open files" errors return image def __lowercase ( self :str ): from .features import Value return ( self if self.decode else { "bytes": Value('''binary''' ), "path": Value('''string''' ), } ) def __lowercase ( self :List[Any] , __lowercase :Union[pa.StringArray, pa.StructArray, pa.ListArray] ): if pa.types.is_string(storage.type ): __lowerCamelCase : Tuple =pa.array([None] * len(__A ) , type=pa.binary() ) __lowerCamelCase : List[str] =pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): __lowerCamelCase : str =pa.array([None] * len(__A ) , type=pa.string() ) __lowerCamelCase : List[str] =pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index('''bytes''' ) >= 0: __lowerCamelCase : str =storage.field('''bytes''' ) else: __lowerCamelCase : List[str] =pa.array([None] * len(__A ) , type=pa.binary() ) if storage.type.get_field_index('''path''' ) >= 0: __lowerCamelCase : Union[str, Any] =storage.field('''path''' ) else: __lowerCamelCase : str =pa.array([None] * len(__A ) , type=pa.string() ) __lowerCamelCase : str =pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() ) elif pa.types.is_list(storage.type ): __lowerCamelCase : Optional[Any] =pa.array( [encode_np_array(np.array(__A ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , ) __lowerCamelCase : str =pa.array([None] * len(__A ) , type=pa.string() ) __lowerCamelCase : Any =pa.StructArray.from_arrays( [bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() ) return array_cast(__A , self.pa_type ) def __lowercase ( self :Tuple , __lowercase :pa.StructArray ): @no_op_if_value_is_null def path_to_bytes(__lowercase :Tuple ): with xopen(__A , '''rb''' ) as f: __lowerCamelCase : Tuple =f.read() return bytes_ __lowerCamelCase : Optional[int] =pa.array( [ (path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) __lowerCamelCase : int =pa.array( [os.path.basename(__A ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , ) __lowerCamelCase : str =pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() ) return array_cast(__A , self.pa_type ) def lowerCAmelCase_ ( ): '''simple docstring''' if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() __lowerCamelCase : Optional[int] =list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) ) return 
_IMAGE_COMPRESSION_FORMATS def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : "PIL.Image.Image" ): '''simple docstring''' __lowerCamelCase : List[Any] =BytesIO() if image.format in list_image_compression_formats(): __lowerCamelCase : Optional[Any] =image.format else: __lowerCamelCase : Optional[Any] ='''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF''' image.save(UpperCamelCase__ , format=UpperCamelCase__ ) return buffer.getvalue() def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : "PIL.Image.Image" ): '''simple docstring''' if hasattr(UpperCamelCase__ , '''filename''' ) and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(UpperCamelCase__ )} def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) __lowerCamelCase : str =array.dtype __lowerCamelCase : Tuple =dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER __lowerCamelCase : Optional[Any] =dtype.kind __lowerCamelCase : Tuple =dtype.itemsize __lowerCamelCase : Optional[Any] =None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: __lowerCamelCase : str =np.dtype('''|u1''' ) if dtype_kind not in ["u", "i"]: raise TypeError( F'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.' ) if dtype is not dest_dtype: warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: __lowerCamelCase : List[Any] =dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: __lowerCamelCase : Optional[Any] =dtype_byteorder + dtype_kind + str(UpperCamelCase__ ) __lowerCamelCase : List[Any] =np.dtype(UpperCamelCase__ ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( F'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}' ) __lowerCamelCase : str =PIL.Image.fromarray(array.astype(UpperCamelCase__ ) ) return {"path": None, "bytes": image_to_bytes(UpperCamelCase__ )} def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ): '''simple docstring''' if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) if objs: __lowerCamelCase , __lowerCamelCase : Union[str, Any] =first_non_null_value(UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(UpperCamelCase__ , np.ndarray ): __lowerCamelCase : List[Any] =no_op_if_value_is_null(UpperCamelCase__ ) return [obj_to_image_dict_func(UpperCamelCase__ ) for obj in objs] elif isinstance(UpperCamelCase__ , PIL.Image.Image ): __lowerCamelCase : List[str] =no_op_if_value_is_null(UpperCamelCase__ ) return [obj_to_image_dict_func(UpperCamelCase__ ) for obj in objs] else: return objs else: return objs
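# Round-trip sketch, assuming this class corresponds to `datasets.Image` and that
# Pillow is installed: a uint8 array is encoded to PNG bytes and decoded back to a
# PIL image.
import numpy as np
from datasets import Image

feature = Image()
encoded = feature.encode_example(np.zeros((4, 4, 3), dtype=np.uint8))  # {"bytes": ..., "path": None}
decoded = feature.decode_example(encoded)
print(decoded.size)  # (4, 4)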
179
def kinetic_energy(mass: float, velocity: float) -> float:
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
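# Worked example: 0.5 * 10 kg * (10 m/s)^2 = 500 J; the sign of the velocity is
# irrelevant because it enters through abs() twice.
assert kinetic_energy(10, 10) == 500.0
assert kinetic_energy(10, -10) == 500.0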
6
0
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( UniSpeechConfig, UniSpeechForCTC, UniSpeechForPreTraining, WavaVecaFeatureExtractor, WavaVecaPhonemeCTCTokenizer, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Any ={ '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''ctc_proj''', '''mask_emb''': '''masked_spec_embed''', } __SCREAMING_SNAKE_CASE : Tuple =[ '''ctc_proj''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): for attribute in key.split(""".""" ): if is_finetuned: if attribute in ["quantizer", "project_q", "project_hid"]: # those layers are only relevant for pretraining and should be dropped return if attribute == "ctc_proj": # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models lowercase = """lm_head""" lowercase = getattr(UpperCamelCase__ ,UpperCamelCase__ ) if weight_type is not None: lowercase = getattr(UpperCamelCase__ ,UpperCamelCase__ ).shape else: lowercase = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": lowercase = value elif weight_type == "weight_g": lowercase = value elif weight_type == "weight_v": lowercase = value elif weight_type == "bias": lowercase = value else: lowercase = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = [] lowercase = fairseq_model.state_dict() lowercase = hf_model.unispeech.feature_extractor for name, value in fairseq_dict.items(): lowercase = False if "conv_layers" in name: load_conv_layer( UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,hf_model.config.feat_extract_norm == """group""" ,) lowercase = True else: for key, mapped_key in MAPPING.items(): lowercase = """unispeech.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: lowercase = True if "*" in mapped_key: lowercase = name.split(UpperCamelCase__ )[0].split(""".""" )[-2] lowercase = mapped_key.replace("""*""" ,UpperCamelCase__ ) if "weight_g" in name: lowercase = """weight_g""" elif "weight_v" in name: lowercase = """weight_v""" elif "bias" in name: lowercase = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj lowercase = """weight""" else: lowercase = None set_recursively(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ) continue if not is_used: unused_weights.append(UpperCamelCase__ ) logger.warning(f"""Unused weights: {unused_weights}""" ) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = full_name.split("""conv_layers.""" )[-1] lowercase = name.split(""".""" ) lowercase = int(items[0] ) lowercase = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) lowercase = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) lowercase = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) lowercase = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) lowercase = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(UpperCamelCase__ ) @torch.no_grad() def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=None ,lowerCAmelCase__=True ): if config_path is not None: lowercase = UniSpeechConfig.from_pretrained(UpperCamelCase__ ) else: lowercase = UniSpeechConfig() if is_finetuned: if dict_path: lowercase = Dictionary.load_from_json(UpperCamelCase__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowercase = target_dict.pad_index lowercase = target_dict.bos_index lowercase = target_dict.eos_index lowercase = len(target_dict.symbols ) lowercase = os.path.join(UpperCamelCase__ ,"""vocab.json""" ) if not os.path.isdir(UpperCamelCase__ ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(UpperCamelCase__ ) ) return os.makedirs(UpperCamelCase__ ,exist_ok=UpperCamelCase__ ) lowercase = target_dict.indices # fairseq has the <pad> and <s> switched lowercase = 42 lowercase = 43 with open(UpperCamelCase__ ,"""w""" ,encoding="""utf-8""" ) as vocab_handle: json.dump(UpperCamelCase__ ,UpperCamelCase__ ) lowercase = WavaVecaPhonemeCTCTokenizer( UpperCamelCase__ ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="""|""" ,do_lower_case=UpperCamelCase__ ,) lowercase = True if config.feat_extract_norm == """layer""" else False lowercase = WavaVecaFeatureExtractor( feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=UpperCamelCase__ ,return_attention_mask=UpperCamelCase__ ,) lowercase = WavaVecaProcessor(feature_extractor=UpperCamelCase__ ,tokenizer=UpperCamelCase__ ) processor.save_pretrained(UpperCamelCase__ ) lowercase = UniSpeechForCTC(UpperCamelCase__ ) else: lowercase = UniSpeechForPreTraining(UpperCamelCase__ ) if is_finetuned: lowercase , lowercase , lowercase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] ), """w2v_path""": checkpoint_path} ) else: lowercase , lowercase , lowercase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) lowercase = model[0].eval() recursively_load_weights(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ) hf_unispeech.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : List[Any] =argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or 
not''' ) __SCREAMING_SNAKE_CASE : Any =parser.parse_args() convert_unispeech_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
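# Example invocation (paths are placeholders; the script name follows the usual
# transformers convention for this converter and may differ in your checkout):
#
#   python convert_unispeech_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/unispeech_checkpoint.pt \
#       --pytorch_dump_folder_path ./unispeech-converted \
#       --not_finetuned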
428
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        """simple docstring"""
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
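# Usage sketch: compose two configs through the classmethod above. BertConfig is
# just an illustrative choice of encoder/decoder.
from transformers import BertConfig, EncoderDecoderConfig

config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
assert config.decoder.is_decoder and config.decoder.add_cross_attention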
6
0
'''simple docstring'''
import operator


def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    """simple docstring"""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution


if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
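# Note that the recursion consumes `arr` in place via arr.pop, so pass a copy when
# the input must survive the call.
data = [5, 9, 1, 7]
assert strand_sort(list(data)) == [1, 5, 7, 9]
assert data == [5, 9, 1, 7]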
229
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        """simple docstring"""
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # bypass the frozen dataclass when swapping in the dataset's ClassLabel
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """simple docstring"""
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
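# Sketch of aligning the template with concrete dataset features, assuming this file
# corresponds to `datasets.tasks.TextClassification`.
from datasets import ClassLabel, Features, Value
from datasets.tasks import TextClassification

features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
task = TextClassification(text_column="text", label_column="labels")
aligned = task.align_with_features(features)
print(aligned.column_mapping)  # {'text': 'text', 'labels': 'labels'}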
6
0
'''simple docstring'''
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    """simple docstring"""
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """simple docstring"""
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
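# The state-space tree prints every ordering, n! in total; a quick cross-check of
# the count against itertools for the 4-element example above:
import itertools

assert sum(1 for _ in itertools.permutations([3, 1, 2, 4])) == 24  # 4! orderings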
697
import argparse import torch from datasets import load_dataset from donut import DonutModel from transformers import ( DonutImageProcessor, DonutProcessor, DonutSwinConfig, DonutSwinModel, MBartConfig, MBartForCausalLM, VisionEncoderDecoderModel, XLMRobertaTokenizerFast, ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ): SCREAMING_SNAKE_CASE__ = model.config SCREAMING_SNAKE_CASE__ = DonutSwinConfig( image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , ) SCREAMING_SNAKE_CASE__ = MBartConfig( is_decoder=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , add_cross_attention=UpperCamelCase__ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len( model.decoder.tokenizer ) , scale_embedding=UpperCamelCase__ , add_final_layer_norm=UpperCamelCase__ , ) return encoder_config, decoder_config def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ): if "encoder.model" in name: SCREAMING_SNAKE_CASE__ = name.replace("""encoder.model""" , """encoder""" ) if "decoder.model" in name: SCREAMING_SNAKE_CASE__ = name.replace("""decoder.model""" , """decoder""" ) if "patch_embed.proj" in name: SCREAMING_SNAKE_CASE__ = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: SCREAMING_SNAKE_CASE__ = name.replace("""patch_embed.norm""" , """embeddings.norm""" ) if name.startswith("""encoder""" ): if "layers" in name: SCREAMING_SNAKE_CASE__ = """encoder.""" + name if "attn.proj" in name: SCREAMING_SNAKE_CASE__ = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name and "mask" not in name: SCREAMING_SNAKE_CASE__ = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: SCREAMING_SNAKE_CASE__ = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: SCREAMING_SNAKE_CASE__ = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: SCREAMING_SNAKE_CASE__ = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: SCREAMING_SNAKE_CASE__ = name.replace("""mlp.fc2""" , """output.dense""" ) if name == "encoder.norm.weight": SCREAMING_SNAKE_CASE__ = """encoder.layernorm.weight""" if name == "encoder.norm.bias": SCREAMING_SNAKE_CASE__ = """encoder.layernorm.bias""" return name def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: Optional[int] ): for key in orig_state_dict.copy().keys(): SCREAMING_SNAKE_CASE__ = orig_state_dict.pop(UpperCamelCase__ ) if "qkv" in key: SCREAMING_SNAKE_CASE__ = key.split(""".""" ) SCREAMING_SNAKE_CASE__ = int(key_split[3] ) SCREAMING_SNAKE_CASE__ = int(key_split[5] ) SCREAMING_SNAKE_CASE__ = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: SCREAMING_SNAKE_CASE__ = val[:dim, :] SCREAMING_SNAKE_CASE__ = val[dim : dim * 2, :] SCREAMING_SNAKE_CASE__ = val[-dim:, :] else: SCREAMING_SNAKE_CASE__ = val[:dim] SCREAMING_SNAKE_CASE__ = val[dim : dim * 2] SCREAMING_SNAKE_CASE__ = val[-dim:] elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]: # HuggingFace implementation doesn't use attn_mask buffer # and model doesn't use final LayerNorms for the encoder pass else: SCREAMING_SNAKE_CASE__ = val return orig_state_dict def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] , UpperCamelCase__: int=None , UpperCamelCase__: 
str = False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "<s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")

    decoder_input_ids = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    # verify patch embeddings
    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, decoder_input_ids, None).logits
    logits = model(pixel_values, decoder_input_ids=decoder_input_ids).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="naver-clova-ix/donut-base-finetuned-docvqa",
        required=False,
        type=str,
        help="Name of the original model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        required=False,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub.",
    )

    args = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring""" import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase : List[Any] = logging.get_logger(__name__) UpperCAmelCase : List[str] = [ ["attention", "attn"], ["encoder_attention", "encoder_attn"], ["q_lin", "q_proj"], ["k_lin", "k_proj"], ["v_lin", "v_proj"], ["out_lin", "out_proj"], ["norm_embeddings", "layernorm_embedding"], ["position_embeddings", "embed_positions"], ["embeddings", "embed_tokens"], ["ffn.lin", "fc"], ] def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Any: '''simple docstring''' if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: lowercase_ = k.replace(UpperCamelCase__ , UpperCamelCase__ ) if k.startswith("""encoder""" ): lowercase_ = k.replace(""".attn""" , """.self_attn""" ) lowercase_ = k.replace("""norm1""" , """self_attn_layer_norm""" ) lowercase_ = k.replace("""norm2""" , """final_layer_norm""" ) elif k.startswith("""decoder""" ): lowercase_ = k.replace("""norm1""" , """self_attn_layer_norm""" ) lowercase_ = k.replace("""norm2""" , """encoder_attn_layer_norm""" ) lowercase_ = k.replace("""norm3""" , """final_layer_norm""" ) return k def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> str: '''simple docstring''' lowercase_ = [ """model.encoder.layernorm_embedding.weight""", """model.encoder.layernorm_embedding.bias""", """model.decoder.layernorm_embedding.weight""", """model.decoder.layernorm_embedding.bias""", ] for k in keys: lowercase_ = sd.pop(UpperCamelCase__ ) lowercase_ = k.replace("""layernorm_embedding""" , """layer_norm""" ) assert new_k not in sd lowercase_ = v UpperCAmelCase : List[str] = ["START"] @torch.no_grad() def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> str: '''simple docstring''' lowercase_ = torch.load(UpperCamelCase__ , map_location="""cpu""" ) lowercase_ = model["""model"""] lowercase_ = BlenderbotConfig.from_json_file(UpperCamelCase__ ) lowercase_ = BlenderbotForConditionalGeneration(UpperCamelCase__ ) lowercase_ = m.model.state_dict().keys() lowercase_ = [] lowercase_ = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue lowercase_ = rename_state_dict_key(UpperCamelCase__ ) if new_k not in valid_keys: failures.append([k, new_k] ) else: lowercase_ = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(UpperCamelCase__ ) m.model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ ) m.half() m.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": UpperCAmelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin") parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.") parser.add_argument( "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use" ) UpperCAmelCase : int = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
import gc
import unittest

import numpy as np
import torch

from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import evaluate import numpy as np from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''') lowerCamelCase : Optional[Any] = logging.getLogger(__name__) @dataclass class _UpperCamelCase : snake_case_ = field( default=128 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) snake_case_ = field( default=UpperCamelCase__ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} ) snake_case_ = field( default=UpperCamelCase__ , metadata={ """help""": ( """Whether to pad all samples to `max_seq_length`. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch.""" ) } , ) snake_case_ = field( default=UpperCamelCase__ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) snake_case_ = field( default=UpperCamelCase__ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) snake_case_ = field( default=UpperCamelCase__ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of prediction examples to this """ """value if set.""" ) } , ) @dataclass class _UpperCamelCase : snake_case_ = field( default=UpperCamelCase__ , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) snake_case_ = field( default=UpperCamelCase__ , metadata={"""help""": """Evaluation language. 
Also train language if `train_language` is set to None."""} ) snake_case_ = field( default=UpperCamelCase__ , metadata={"""help""": """Train language if it is different from the evaluation language."""} ) snake_case_ = field( default=UpperCamelCase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) snake_case_ = field( default=UpperCamelCase__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) snake_case_ = field( default=UpperCamelCase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) snake_case_ = field( default=UpperCamelCase__ , metadata={"""help""": """arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"""} , ) snake_case_ = field( default=UpperCamelCase__ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) snake_case_ = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) snake_case_ = field( default=UpperCamelCase__ , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) snake_case_ = field( default=UpperCamelCase__ , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , ) def __lowerCAmelCase ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_xnli" , UpperCamelCase__ ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() __lowerCAmelCase = training_args.get_process_log_level() logger.setLevel(UpperCamelCase__ ) datasets.utils.logging.set_verbosity(UpperCamelCase__ ) transformers.utils.logging.set_verbosity(UpperCamelCase__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. 
__lowerCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __lowerCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. # Downloading and loading xnli dataset from the hub. if training_args.do_train: if model_args.train_language is None: __lowerCAmelCase = load_dataset( "xnli" , model_args.language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: __lowerCAmelCase = load_dataset( "xnli" , model_args.train_language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) __lowerCAmelCase = train_dataset.features["label"].names if training_args.do_eval: __lowerCAmelCase = load_dataset( "xnli" , model_args.language , split="validation" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) __lowerCAmelCase = eval_dataset.features["label"].names if training_args.do_predict: __lowerCAmelCase = load_dataset( "xnli" , model_args.language , split="test" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) __lowerCAmelCase = predict_dataset.features["label"].names # Labels __lowerCAmelCase = len(UpperCamelCase__ ) # Load pretrained model and tokenizer # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__lowerCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCamelCase__ , idalabel={str(UpperCamelCase__ ): label for i, label in enumerate(UpperCamelCase__ )} , labelaid={label: i for i, label in enumerate(UpperCamelCase__ )} , finetuning_task="xnli" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) __lowerCAmelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) __lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # Preprocessing the datasets # Padding strategy if data_args.pad_to_max_length: __lowerCAmelCase = "max_length" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch __lowerCAmelCase = False def preprocess_function(__snake_case ): # Tokenize the texts return tokenizer( examples["premise"] , examples["hypothesis"] , padding=UpperCamelCase__ , max_length=data_args.max_seq_length , truncation=UpperCamelCase__ , ) if training_args.do_train: if data_args.max_train_samples is not None: __lowerCAmelCase = min(len(UpperCamelCase__ ) , data_args.max_train_samples ) __lowerCAmelCase = train_dataset.select(range(UpperCamelCase__ ) ) with training_args.main_process_first(desc="train dataset map pre-processing" ): __lowerCAmelCase = train_dataset.map( UpperCamelCase__ , batched=UpperCamelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on train dataset" , ) # Log a few random samples from the training set: for index in random.sample(range(len(UpperCamelCase__ ) ) , 3 ): logger.info(F"""Sample {index} of the training set: {train_dataset[index]}.""" ) if training_args.do_eval: if data_args.max_eval_samples is not None: __lowerCAmelCase = min(len(UpperCamelCase__ ) , data_args.max_eval_samples ) __lowerCAmelCase = eval_dataset.select(range(UpperCamelCase__ ) ) with training_args.main_process_first(desc="validation dataset map pre-processing" ): __lowerCAmelCase = eval_dataset.map( UpperCamelCase__ , batched=UpperCamelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on validation dataset" , ) if training_args.do_predict: if data_args.max_predict_samples is not None: __lowerCAmelCase = min(len(UpperCamelCase__ ) , data_args.max_predict_samples ) __lowerCAmelCase = predict_dataset.select(range(UpperCamelCase__ ) ) with training_args.main_process_first(desc="prediction dataset map pre-processing" ): __lowerCAmelCase = predict_dataset.map( UpperCamelCase__ , batched=UpperCamelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on prediction dataset" , ) # Get the metric function __lowerCAmelCase = evaluate.load("xnli" ) # You can define your custom compute_metrics function. 
It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(__snake_case ): __lowerCAmelCase = p.predictions[0] if isinstance(p.predictions , UpperCamelCase__ ) else p.predictions __lowerCAmelCase = np.argmax(UpperCamelCase__ , axis=1 ) return metric.compute(predictions=UpperCamelCase__ , references=p.label_ids ) # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: __lowerCAmelCase = default_data_collator elif training_args.fpaa: __lowerCAmelCase = DataCollatorWithPadding(UpperCamelCase__ , pad_to_multiple_of=8 ) else: __lowerCAmelCase = None # Initialize our Trainer __lowerCAmelCase = Trainer( model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=UpperCamelCase__ , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , ) # Training if training_args.do_train: __lowerCAmelCase = None if training_args.resume_from_checkpoint is not None: __lowerCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: __lowerCAmelCase = last_checkpoint __lowerCAmelCase = trainer.train(resume_from_checkpoint=UpperCamelCase__ ) __lowerCAmelCase = train_result.metrics __lowerCAmelCase = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase__ ) ) __lowerCAmelCase = min(UpperCamelCase__ , len(UpperCamelCase__ ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("train" , UpperCamelCase__ ) trainer.save_metrics("train" , UpperCamelCase__ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) __lowerCAmelCase = trainer.evaluate(eval_dataset=UpperCamelCase__ ) __lowerCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase__ ) __lowerCAmelCase = min(UpperCamelCase__ , len(UpperCamelCase__ ) ) trainer.log_metrics("eval" , UpperCamelCase__ ) trainer.save_metrics("eval" , UpperCamelCase__ ) # Prediction if training_args.do_predict: logger.info("*** Predict ***" ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = trainer.predict(UpperCamelCase__ , metric_key_prefix="predict" ) __lowerCAmelCase = ( data_args.max_predict_samples if data_args.max_predict_samples is not None else len(UpperCamelCase__ ) ) __lowerCAmelCase = min(UpperCamelCase__ , len(UpperCamelCase__ ) ) trainer.log_metrics("predict" , UpperCamelCase__ ) trainer.save_metrics("predict" , UpperCamelCase__ ) __lowerCAmelCase = np.argmax(UpperCamelCase__ , axis=1 ) __lowerCAmelCase = os.path.join(training_args.output_dir , "predictions.txt" ) if trainer.is_world_process_zero(): with open(UpperCamelCase__ , "w" ) as writer: writer.write("index\tprediction\n" ) for index, item in enumerate(UpperCamelCase__ ): __lowerCAmelCase = label_list[item] writer.write(F"""{index}\t{item}\n""" ) if __name__ == "__main__": main()
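# A tiny sketch of the metric plumbing used above: the "xnli" metric loaded via
# `evaluate.load` reduces to plain accuracy over predicted vs. reference labels.
# Requires the `evaluate` package; the label values below are illustrative.
import evaluate

metric = evaluate.load("xnli")
print(metric.compute(predictions=[0, 2, 1], references=[0, 1, 1]))  # expect accuracy of about 0.667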
def solution(n: int = 600_851_475_143) -> int:
    """Return the largest prime factor of n, found by simple trial division."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    max_factor = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            max_factor = i
            n //= i
        i += 1
    if n > 1:
        max_factor = n
    return int(max_factor)


if __name__ == "__main__":
    print(f"{solution() = }")
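# Spot checks for the trial-division routine above (run with `solution` in scope):
# 13195 = 5 * 7 * 13 * 29, and 6857 is the well-known answer for 600851475143.
assert solution(13195) == 29
assert solution(600_851_475_143) == 6857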
import argparse

import torch

from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
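# A hedged sketch of calling the converter above programmatically; all three paths
# are hypothetical and a matching TensorFlow checkpoint plus config JSON must exist.
convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="funnel/model.ckpt",        # hypothetical
    config_file="funnel/config.json",              # hypothetical
    pytorch_dump_path="funnel/pytorch_model.bin",  # hypothetical
    base_model=True,
)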
import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # Removed: 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging __a: Optional[Any] = logging.get_logger(__name__) __a: Union[str, Any] = { """Helsinki-NLP/opus-mt-en-de""": """https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json""", # See all Marian models at https://huggingface.co/models?filter=marian } class UpperCAmelCase ( UpperCamelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE = "marian" SCREAMING_SNAKE_CASE = ["past_key_values"] SCREAMING_SNAKE_CASE = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self , __lowerCAmelCase=58101 , __lowerCAmelCase=None , __lowerCAmelCase=1024 , __lowerCAmelCase=12 , __lowerCAmelCase=4096 , __lowerCAmelCase=16 , __lowerCAmelCase=12 , __lowerCAmelCase=4096 , __lowerCAmelCase=16 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase="gelu" , __lowerCAmelCase=1024 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=58100 , __lowerCAmelCase=False , __lowerCAmelCase=58100 , __lowerCAmelCase=0 , __lowerCAmelCase=0 , __lowerCAmelCase=True , **__lowerCAmelCase , ) -> Dict: lowercase__ : Dict = vocab_size lowercase__ : Tuple = decoder_vocab_size or vocab_size lowercase__ : Dict = max_position_embeddings lowercase__ : Tuple = d_model lowercase__ : Tuple = encoder_ffn_dim lowercase__ : List[Any] = encoder_layers lowercase__ : Dict = encoder_attention_heads lowercase__ : List[str] = decoder_ffn_dim lowercase__ : str = decoder_layers lowercase__ : str = decoder_attention_heads lowercase__ : List[Any] = dropout lowercase__ : Optional[Any] = attention_dropout lowercase__ : Union[str, Any] = activation_dropout lowercase__ : str = activation_function lowercase__ : Any = init_std lowercase__ : Optional[Any] = encoder_layerdrop lowercase__ : str = decoder_layerdrop lowercase__ : int = use_cache lowercase__ : List[Any] = encoder_layers lowercase__ : Optional[Any] = scale_embedding # scale factor will be sqrt(d_model) if True lowercase__ : Optional[Any] = share_encoder_decoder_embeddings super().__init__( pad_token_id=__A , eos_token_id=__A , is_encoder_decoder=__A , decoder_start_token_id=__A , forced_eos_token_id=__A , **__A , ) class UpperCAmelCase ( UpperCamelCase__ ): '''simple docstring''' @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def _lowerCAmelCase( self ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: lowercase__ : Tuple = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: lowercase__ : List[str] = {0: '''batch'''} lowercase__ : Optional[Any] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: lowercase__ : Tuple = {0: '''batch''', 1: '''decoder_sequence'''} lowercase__ : List[Any] = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(__A , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. 
lowercase__ : str = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: lowercase__ , lowercase__ : int = self.num_layers for i in range(__A ): lowercase__ : Dict = {0: '''batch''', 2: '''past_sequence + sequence'''} lowercase__ : int = {0: '''batch''', 2: '''past_sequence + sequence'''} else: lowercase__ : Union[str, Any] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def _lowerCAmelCase( self ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: lowercase__ : Any = super().outputs else: lowercase__ : str = super(__A , self ).outputs if self.use_past: lowercase__ , lowercase__ : Optional[Any] = self.num_layers for i in range(__A ): lowercase__ : Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} lowercase__ : Tuple = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = -1 , __lowerCAmelCase = -1 , __lowerCAmelCase = False , __lowerCAmelCase = None , ) -> Mapping[str, Any]: lowercase__ : str = self._generate_dummy_inputs_for_encoder_and_decoder( __A , __A , __A , __A , __A ) # Generate decoder inputs lowercase__ : Any = seq_length if not self.use_past else 1 lowercase__ : List[Any] = self._generate_dummy_inputs_for_encoder_and_decoder( __A , __A , __A , __A , __A ) lowercase__ : str = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()} lowercase__ : List[str] = dict(**__A , **__A ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowercase__ , lowercase__ : Tuple = common_inputs['''input_ids'''].shape lowercase__ : Any = common_inputs['''decoder_input_ids'''].shape[1] lowercase__ , lowercase__ : Any = self.num_attention_heads lowercase__ : List[Any] = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) lowercase__ : Union[str, Any] = decoder_seq_length + 3 lowercase__ : str = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) lowercase__ : Any = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(__A , __A )] , dim=1 ) lowercase__ : Union[str, Any] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered lowercase__ , lowercase__ : Dict = self.num_layers lowercase__ : Union[str, Any] = min(__A , __A ) lowercase__ : str = max(__A , __A ) - min_num_layers lowercase__ : str = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(__A ): common_inputs["past_key_values"].append( ( torch.zeros(__A ), torch.zeros(__A ), torch.zeros(__A ), torch.zeros(__A ), ) ) # TODO: test this. 
lowercase__ : Any = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(__A , __A ): common_inputs["past_key_values"].append((torch.zeros(__A ), torch.zeros(__A )) ) return common_inputs def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = -1 , __lowerCAmelCase = -1 , __lowerCAmelCase = False , __lowerCAmelCase = None , ) -> Mapping[str, Any]: lowercase__ : Any = self._generate_dummy_inputs_for_encoder_and_decoder( __A , __A , __A , __A , __A ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowercase__ , lowercase__ : int = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values lowercase__ : str = seqlen + 2 lowercase__ , lowercase__ : List[str] = self.num_layers lowercase__ , lowercase__ : Optional[int] = self.num_attention_heads lowercase__ : List[str] = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) lowercase__ : Dict = common_inputs['''attention_mask'''].dtype lowercase__ : Optional[int] = torch.cat( [common_inputs['''attention_mask'''], torch.ones(__A , __A , dtype=__A )] , dim=1 ) lowercase__ : str = [ (torch.zeros(__A ), torch.zeros(__A )) for _ in range(__A ) ] return common_inputs def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = -1 , __lowerCAmelCase = -1 , __lowerCAmelCase = False , __lowerCAmelCase = None , ) -> Mapping[str, Any]: lowercase__ : Dict = compute_effective_axis_dimension( __A , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowercase__ : Tuple = tokenizer.num_special_tokens_to_add(__A ) lowercase__ : Tuple = compute_effective_axis_dimension( __A , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__A ) # Generate dummy inputs according to compute batch and sequence lowercase__ : List[Any] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size lowercase__ : Any = dict(tokenizer(__A , return_tensors=__A ) ) return common_inputs def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = -1 , __lowerCAmelCase = -1 , __lowerCAmelCase = False , __lowerCAmelCase = None , ) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: lowercase__ : Any = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __A , batch_size=__A , seq_length=__A , is_pair=__A , framework=__A ) else: lowercase__ : List[Any] = self._generate_dummy_inputs_for_causal_lm( __A , batch_size=__A , seq_length=__A , is_pair=__A , framework=__A ) return common_inputs def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> str: if self.task in ["default", "seq2seq-lm"]: lowercase__ : Optional[int] = super()._flatten_past_key_values_(__A , __A , __A , __A ) else: lowercase__ : Optional[Any] = super(__A , self )._flatten_past_key_values_( __A , __A , __A , __A ) @property def _lowerCAmelCase( self ) -> float: return 1E-4
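# A brief sketch of instantiating the Marian configuration described above from the
# public checkpoint it references; requires `transformers` and network access.
from transformers import MarianConfig

config = MarianConfig.from_pretrained("Helsinki-NLP/opus-mt-en-de")
print(config.d_model, config.encoder_layers, config.decoder_start_token_id)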
import argparse
import datetime


def zeller(date_input: str) -> str:
    """Apply Zeller's congruence to a date given as mm-dd-yyyy or mm/dd/yyyy."""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8_500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
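# Two spot checks for the routine above (run with `zeller` in scope): 01-01-2000
# fell on a Saturday and 07-04-1776 on a Thursday.
print(zeller("01-01-2000"))  # Your date 01-01-2000, is a Saturday!
print(zeller("07-04-1776"))  # Your date 07-04-1776, is a Thursday!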
"""simple docstring""" # This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to the check the quality of the outcomes. # # This version creates a tiny model through reduction of a normal pre-trained model, but keeping the # full vocab, merges file, and thus also resulting in a larger model due to a large vocab size. # This gives ~3MB in total for all files. # # If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated # # # It will be used then as "stas/tiny-wmt19-en-de" # Build from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration lowercase__ = "facebook/wmt19-en-de" lowercase__ = FSMTTokenizer.from_pretrained(mname) # get the correct vocab sizes, etc. from the master model lowercase__ = FSMTConfig.from_pretrained(mname) config.update( dict( d_model=4, encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) ) lowercase__ = FSMTForConditionalGeneration(config) print(f'num of params {tiny_model.num_parameters()}') # Test lowercase__ = tokenizer(["Making tiny model"], return_tensors="pt") lowercase__ = tiny_model(**batch) print("test output:", len(outputs.logits[0])) # Save lowercase__ = "tiny-wmt19-en-de" tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(f'Generated {mname_tiny}') # Upload # transformers-cli upload tiny-wmt19-en-de
import argparse
import logging
import pickle
from collections import Counter


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
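# What the dumped counts feed downstream: smoothed MLM masking probabilities, as in
# DistilBERT's training loop. The 0.7 smoothing exponent and the toy counts are
# illustrative; real counts come from the pickle written above.
import numpy as np

toy_counts = [0, 150, 12, 3, 0]  # illustrative per-token frequencies
token_probs = np.maximum(np.array(toy_counts, dtype=np.float64), 1.0) ** -0.7
token_probs /= token_probs.sum()
print(token_probs)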
import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available from transformers.models.gpta.tokenization_gpta import GPTaTokenizer from transformers.testing_utils import require_keras_nlp, require_tf, slow if is_tf_available(): import tensorflow as tf if is_keras_nlp_available(): from transformers.models.gpta import TFGPTaTokenizer lowerCamelCase__ = ['''gpt2'''] lowerCamelCase__ = '''gpt2''' if is_tf_available(): class _lowerCAmelCase ( tf.Module ): """simple docstring""" def __init__( self , __SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" super().__init__() snake_case__ : str =tokenizer snake_case__ : List[Any] =AutoConfig.from_pretrained(__A ) snake_case__ : Union[str, Any] =TFGPTaLMHeadModel.from_config(__A ) @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='''text''' ),) ) def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" snake_case__ : List[Any] =self.tokenizer(__A ) snake_case__ : Optional[Any] =tokenized['''input_ids'''].to_tensor() snake_case__ : Optional[int] =tf.cast(input_ids_dense > 0 , tf.intaa ) # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN]) snake_case__ : Union[str, Any] =self.model(input_ids=__A , attention_mask=__A )['''logits'''] return outputs @require_tf @require_keras_nlp class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase ( self ) -> Dict: """simple docstring""" super().setUp() snake_case__ : Union[str, Any] =[GPTaTokenizer.from_pretrained(__A ) for checkpoint in (TOKENIZER_CHECKPOINTS)] snake_case__ : List[str] =[TFGPTaTokenizer.from_pretrained(__A ) for checkpoint in TOKENIZER_CHECKPOINTS] assert len(self.tokenizers ) == len(self.tf_tokenizers ) snake_case__ : str =[ '''This is a straightforward English test sentence.''', '''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''', '''Now we\'re going to add some Chinese: 一 二 三 一二三''', '''And some much more rare Chinese: 齉 堃 齉堃''', '''Je vais aussi écrire en français pour tester les accents''', '''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''', ] snake_case__ : List[Any] =list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def UpperCAmelCase ( self ) -> Any: """simple docstring""" for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in self.test_sentences: snake_case__ : Any =tokenizer([test_inputs] , return_tensors='''tf''' ) snake_case__ : List[Any] =tf_tokenizer([test_inputs] ) for key in python_outputs.keys(): # convert them to numpy to avoid messing with ragged tensors snake_case__ : List[Any] =python_outputs[key].numpy() snake_case__ : Tuple =tf_outputs[key].numpy() self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) ) self.assertTrue(tf.reduce_all(tf.cast(__A , tf.intaa ) == tf_outputs_values ) ) @slow def UpperCAmelCase ( self ) -> int: """simple docstring""" for tf_tokenizer in self.tf_tokenizers: snake_case__ : Tuple =tf.function(__A ) for test_inputs in self.test_sentences: snake_case__ : Tuple =tf.constant(__A ) snake_case__ : Tuple =compiled_tokenizer(__A ) snake_case__ : str =tf_tokenizer(__A ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def UpperCAmelCase ( self ) -> Any: """simple docstring""" for tf_tokenizer in self.tf_tokenizers: snake_case__ : List[str] 
=ModelToSave(tokenizer=__A ) snake_case__ : Union[str, Any] =tf.convert_to_tensor([self.test_sentences[0]] ) snake_case__ : List[str] =model.serving(__A ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: snake_case__ : Optional[int] =Path(__A ) / '''saved.model''' tf.saved_model.save(__A , __A , signatures={'''serving_default''': model.serving} ) snake_case__ : Tuple =tf.saved_model.load(__A ) snake_case__ : Union[str, Any] =loaded_model.signatures['''serving_default'''](__A )['''output_0'''] # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertTrue(tf.reduce_all(out == loaded_output ) ) @slow def UpperCAmelCase ( self ) -> int: """simple docstring""" for tf_tokenizer in self.tf_tokenizers: snake_case__ : Optional[Any] =tf.convert_to_tensor([self.test_sentences[0]] ) snake_case__ : Dict =tf_tokenizer(__A ) # Build model with some sample inputs snake_case__ : List[Any] =tf_tokenizer.get_config() snake_case__ : Any =TFGPTaTokenizer.from_config(__A ) snake_case__ : Optional[Any] =model_from_config(__A ) for key in from_config_output.keys(): self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) ) @slow def UpperCAmelCase ( self ) -> Any: """simple docstring""" for tf_tokenizer in self.tf_tokenizers: # for the test to run snake_case__ : Union[str, Any] =12_3123 for max_length in [3, 5, 1024]: snake_case__ : Union[str, Any] =tf.convert_to_tensor([self.test_sentences[0]] ) snake_case__ : Optional[int] =tf_tokenizer(__A , max_length=__A ) snake_case__ : Optional[int] =out['''input_ids'''].numpy().shape[1] assert out_length == max_length
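# A minimal eager-use sketch of the in-graph tokenizer exercised above; requires
# TensorFlow plus keras-nlp and downloads the public `gpt2` tokenizer files.
from transformers.models.gpt2 import TFGPT2Tokenizer

tf_tok = TFGPT2Tokenizer.from_pretrained("gpt2")
print(tf_tok(["This is a straightforward English test sentence."])["input_ids"])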
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
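# A small sketch of what the lazy pattern above buys: importing the subpackage is
# cheap, and the heavy modeling module is only imported on first attribute access.
import transformers.models.speech_encoder_decoder as speech_encoder_decoder

config_cls = speech_encoder_decoder.SpeechEncoderDecoderConfig  # triggers the real import here
print(config_cls.model_type)  # "speech-encoder-decoder"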
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class _snake_case ( unittest.TestCase ): '''simple docstring''' def snake_case_ ( self : Optional[Any] ): UpperCAmelCase_ :Optional[Any] = tempfile.mkdtemp() # fmt: off UpperCAmelCase_ :Optional[int] = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on UpperCAmelCase_ :int = dict(zip(__A , range(len(__A ) ) ) ) UpperCAmelCase_ :List[Any] = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] UpperCAmelCase_ :Optional[Any] = {'''unk_token''': '''<unk>'''} UpperCAmelCase_ :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase_ :List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__A ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__A ) ) UpperCAmelCase_ :Tuple = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.48_145_466, 0.4_578_275, 0.40_821_073], '''image_std''': [0.26_862_954, 0.26_130_258, 0.27_577_711], } UpperCAmelCase_ :Optional[int] = os.path.join(self.tmpdirname , __A ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(__A , __A ) def snake_case_ ( self : Optional[Any] , **snake_case : Optional[int] ): return CLIPTokenizer.from_pretrained(self.tmpdirname , **__A ) def snake_case_ ( self : Optional[int] , **snake_case : List[str] ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__A ) def snake_case_ ( self : str , **snake_case : Dict ): return CLIPImageProcessor.from_pretrained(self.tmpdirname , **__A ) def snake_case_ ( self : Union[str, Any] ): shutil.rmtree(self.tmpdirname ) def snake_case_ ( self : str ): UpperCAmelCase_ :Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] UpperCAmelCase_ :Optional[int] = [Image.fromarray(np.moveaxis(__A , 0 , -1 ) ) for x in image_inputs] return image_inputs def snake_case_ ( self : Dict ): UpperCAmelCase_ :Optional[int] = self.get_tokenizer() UpperCAmelCase_ :List[str] = self.get_rust_tokenizer() UpperCAmelCase_ :Union[str, Any] = self.get_image_processor() UpperCAmelCase_ :Any = CLIPProcessor(tokenizer=__A , image_processor=__A ) processor_slow.save_pretrained(self.tmpdirname ) UpperCAmelCase_ :List[Any] = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__A ) UpperCAmelCase_ :Optional[int] = CLIPProcessor(tokenizer=__A , image_processor=__A ) processor_fast.save_pretrained(self.tmpdirname ) UpperCAmelCase_ :int = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , 
tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , __A )
        self.assertIsInstance(processor_fast.tokenizer , __A )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , __A )
        self.assertIsInstance(processor_fast.image_processor , __A )

    def snake_case_ ( self : Tuple ):
        UpperCAmelCase_ :Tuple = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        UpperCAmelCase_ :int = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        UpperCAmelCase_ :Optional[Any] = self.get_image_processor(do_normalize=__A , padding_value=1.0 )
        UpperCAmelCase_ :Dict = CLIPProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__A , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __A )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __A )

    def snake_case_ ( self : Any ):
        UpperCAmelCase_ :Any = self.get_image_processor()
        UpperCAmelCase_ :Any = self.get_tokenizer()
        UpperCAmelCase_ :Optional[Any] = CLIPProcessor(tokenizer=__A , image_processor=__A )
        UpperCAmelCase_ :List[Any] = self.prepare_image_inputs()
        UpperCAmelCase_ :Optional[int] = image_processor(__A , return_tensors='''np''' )
        UpperCAmelCase_ :Union[str, Any] = processor(images=__A , return_tensors='''np''' )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def snake_case_ ( self : Optional[Any] ):
        UpperCAmelCase_ :Optional[int] = self.get_image_processor()
        UpperCAmelCase_ :List[str] = self.get_tokenizer()
        UpperCAmelCase_ :List[str] = CLIPProcessor(tokenizer=__A , image_processor=__A )
        UpperCAmelCase_ :Tuple = '''lower newer'''
        UpperCAmelCase_ :Tuple = processor(text=__A )
        UpperCAmelCase_ :str = tokenizer(__A )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def snake_case_ ( self : Any ):
        UpperCAmelCase_ :Dict = self.get_image_processor()
        UpperCAmelCase_ :Any = self.get_tokenizer()
        UpperCAmelCase_ :str = CLIPProcessor(tokenizer=__A , image_processor=__A )
        UpperCAmelCase_ :List[Any] = '''lower newer'''
        UpperCAmelCase_ :Optional[Any] = self.prepare_image_inputs()
        UpperCAmelCase_ :Tuple = processor(text=__A , images=__A )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with pytest.raises(__A ):
            processor()

    def snake_case_ ( self : List[Any] ):
        UpperCAmelCase_ :List[str] = self.get_image_processor()
        UpperCAmelCase_ :int = self.get_tokenizer()
        UpperCAmelCase_ :int = CLIPProcessor(tokenizer=__A , image_processor=__A )
        UpperCAmelCase_ :List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        UpperCAmelCase_ :List[str] = processor.batch_decode(__A )
        UpperCAmelCase_ :Dict = tokenizer.batch_decode(__A )
        self.assertListEqual(__A , __A )

    def snake_case_ ( self : Optional[Any] ):
        UpperCAmelCase_ :Any = self.get_image_processor()
        UpperCAmelCase_ :Optional[int] = self.get_tokenizer()
        UpperCAmelCase_ :str = CLIPProcessor(tokenizer=__A , image_processor=__A )
        UpperCAmelCase_ :Any = '''lower newer'''
        UpperCAmelCase_ :Optional[Any] = self.prepare_image_inputs()
        UpperCAmelCase_ :Optional[Any] = processor(text=__A , images=__A )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
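# A minimal usage sketch of the text+image round trip the tests above exercise,
# assuming the public "openai/clip-vit-base-patch32" checkpoint is available.
import numpy as np
from PIL import Image
from transformers import CLIPProcessor

clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
example_image = Image.fromarray(np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8))
example_inputs = clip_processor(text="lower newer", images=example_image, return_tensors="np")
print(sorted(example_inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']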
608
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class UpperCamelCase_ ( UpperCamelCase__ ):
    lowerCamelCase_ = ["image_processor", "tokenizer"]
    lowerCamelCase_ = "OwlViTImageProcessor"
    lowerCamelCase_ = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__( self :Optional[Any] , __A :int=None , __A :Optional[int]=None , **__A :str ) -> Optional[int]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , __A , )
            SCREAMING_SNAKE_CASE__ = kwargs.pop("""feature_extractor""" )
        SCREAMING_SNAKE_CASE__ = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(__A , __A )

    def __call__( self :str , __A :Dict=None , __A :List[str]=None , __A :str=None , __A :Optional[int]="max_length" , __A :Tuple="np" , **__A :int ) -> Tuple:
        """simple docstring"""
        if text is None and query_images is None and images is None:
            raise ValueError(
                """You have to specify at least one text or query image or image. All three cannot be none.""" )
        if text is not None:
            if isinstance(__A , __A ) or (isinstance(__A , __A ) and not isinstance(text[0] , __A )):
                SCREAMING_SNAKE_CASE__ = [self.tokenizer(__A , padding=__A , return_tensors=__A , **__A )]
            elif isinstance(__A , __A ) and isinstance(text[0] , __A ):
                SCREAMING_SNAKE_CASE__ = []
                # Maximum number of queries across batch
                SCREAMING_SNAKE_CASE__ = max([len(__A ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(__A ) != max_num_queries:
                        SCREAMING_SNAKE_CASE__ = t + [""" """] * (max_num_queries - len(__A ))
                    SCREAMING_SNAKE_CASE__ = self.tokenizer(__A , padding=__A , return_tensors=__A , **__A )
                    encodings.append(__A )
            else:
                raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" )
            if return_tensors == "np":
                SCREAMING_SNAKE_CASE__ = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
                SCREAMING_SNAKE_CASE__ = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                SCREAMING_SNAKE_CASE__ = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
                SCREAMING_SNAKE_CASE__ = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch

                SCREAMING_SNAKE_CASE__ = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 )
                SCREAMING_SNAKE_CASE__ = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                SCREAMING_SNAKE_CASE__ = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
                SCREAMING_SNAKE_CASE__ = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
            else:
                raise ValueError("""Target return tensor type could not be returned""" )
            SCREAMING_SNAKE_CASE__ = BatchEncoding()
            SCREAMING_SNAKE_CASE__ = input_ids
            SCREAMING_SNAKE_CASE__ = attention_mask
        if query_images is not None:
            SCREAMING_SNAKE_CASE__ = BatchEncoding()
            SCREAMING_SNAKE_CASE__ = self.image_processor(
                __A , return_tensors=__A , **__A ).pixel_values
            SCREAMING_SNAKE_CASE__ = query_pixel_values
        if images is not None:
            SCREAMING_SNAKE_CASE__ = self.image_processor(__A , return_tensors=__A , **__A )
        if text is not None and images is not None:
            SCREAMING_SNAKE_CASE__ = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            SCREAMING_SNAKE_CASE__ = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**__A ) , tensor_type=__A )

    def _snake_case ( self :List[Any] , *__A :Dict , **__A :Dict ) -> Optional[int]:
        """simple docstring"""
        return self.image_processor.post_process(*__A , **__A )

    def _snake_case ( self :Optional[int] , *__A :Dict , **__A :List[str] ) -> Optional[Any]:
        """simple docstring"""
        return self.image_processor.post_process_object_detection(*__A , **__A )

    def _snake_case ( self :str , *__A :List[str] , **__A :Union[str, Any] ) -> Any:
        """simple docstring"""
        return self.image_processor.post_process_image_guided_detection(*__A , **__A )

    def _snake_case ( self :Dict , *__A :List[str] , **__A :List[str] ) -> int:
        """simple docstring"""
        return self.tokenizer.batch_decode(*__A , **__A )

    def _snake_case ( self :Dict , *__A :Dict , **__A :List[str] ) -> str:
        """simple docstring"""
        return self.tokenizer.decode(*__A , **__A )

    @property
    def _snake_case ( self :List[Any] ) -> Optional[int]:
        """simple docstring"""
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __A , )
        return self.image_processor_class

    @property
    def _snake_case ( self :Any ) -> Optional[Any]:
        """simple docstring"""
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __A , )
        return self.image_processor
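# A minimal sketch of the nested-query padding performed in __call__ above, assuming
# the public "google/owlvit-base-patch32" checkpoint; shorter query lists are padded
# with " " so every image contributes the same number of tokenized rows.
import numpy as np
from PIL import Image
from transformers import OwlViTProcessor

owlvit_processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
demo_image = Image.fromarray(np.random.randint(0, 255, (768, 768, 3), dtype=np.uint8))
queries = [["a photo of a cat", "a photo of a dog"], ["a remote"]]  # ragged on purpose
owlvit_inputs = owlvit_processor(text=queries, images=[demo_image, demo_image], return_tensors="np")
print(owlvit_inputs.input_ids.shape[0])  # 4 rows: 2 images x 2 queries after padding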
6
0
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: _UpperCamelCase = None _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} _UpperCamelCase = { 'vocab_file': { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model', 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model' ), }, 'tokenizer_file': { 'google/bigbird-roberta-base': ( 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json' ), 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json' ), }, } _UpperCamelCase = { 'google/bigbird-roberta-base': 4096, 'google/bigbird-roberta-large': 4096, 'google/bigbird-base-trivia-itc': 4096, } _UpperCamelCase = '▁' class SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ ): """simple docstring""" __snake_case : str = VOCAB_FILES_NAMES __snake_case : int = PRETRAINED_VOCAB_FILES_MAP __snake_case : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __snake_case : Dict = BigBirdTokenizer __snake_case : int = ["""input_ids""", """attention_mask"""] __snake_case : int = [] def __init__( self :str , __lowercase :Tuple=None , __lowercase :Optional[Any]=None , __lowercase :Any="<unk>" , __lowercase :Any="<s>" , __lowercase :Union[str, Any]="</s>" , __lowercase :List[str]="<pad>" , __lowercase :Optional[Any]="[SEP]" , __lowercase :str="[MASK]" , __lowercase :Optional[Any]="[CLS]" , **__lowercase :Union[str, Any] , ): __lowerCamelCase : Optional[Any] =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token __lowerCamelCase : Any =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token __lowerCamelCase : Any =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else unk_token __lowerCamelCase : str =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token __lowerCamelCase : int =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token __lowerCamelCase : Optional[int] =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it __lowerCamelCase : Optional[int] =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token super().__init__( __A , tokenizer_file=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , **__A , ) __lowerCamelCase : Optional[int] =vocab_file __lowerCamelCase : Optional[Any] =False if not self.vocab_file else True def __lowercase ( self :List[Any] , __lowercase :List[int] , __lowercase :Optional[List[int]] = None ): __lowerCamelCase : int =[self.sep_token_id] __lowerCamelCase : Optional[int] =[self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __lowercase ( self :Optional[int] , __lowercase :List[int] , __lowercase :Optional[List[int]] = None , __lowercase :bool = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(__A )) + [1] return [1] + ([0] * len(__A )) + [1] + ([0] * len(__A )) + [1] def __lowercase ( self :Optional[Any] , __lowercase :List[int] , __lowercase :Optional[List[int]] = None ): __lowerCamelCase : Dict =[self.sep_token_id] __lowerCamelCase : Tuple =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowercase ( self :int , __lowercase :str , __lowercase :Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(__A ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __lowerCamelCase : Tuple =os.path.join( __A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ): copyfile(self.vocab_file , __A ) return (out_vocab_file,)
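# A small sketch of the special-token layout the methods above produce, assuming the
# public "google/bigbird-roberta-base" checkpoint: [CLS] A [SEP] for one sequence,
# [CLS] A [SEP] B [SEP] for a pair, with token type ids 0 / 1 per segment.
from transformers import BigBirdTokenizerFast

bb_tok = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
pair_ids = bb_tok.build_inputs_with_special_tokens([5, 6], [7, 8])
print(pair_ids == [bb_tok.cls_token_id, 5, 6, bb_tok.sep_token_id, 7, 8, bb_tok.sep_token_id])  # True
print(bb_tok.create_token_type_ids_from_sequences([5, 6], [7, 8]))  # [0, 0, 0, 0, 1, 1, 1]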
179
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class UpperCamelCase_ ( UpperCamelCase__ ):
    lowerCamelCase_ = 42


class UpperCamelCase_ ( UpperCamelCase__ , UpperCamelCase__ ):
    @register_to_config
    def __init__( self :Union[str, Any] , __A :int = 3 , __A :int = 3 , __A :Tuple[str] = ("DownEncoderBlock2D",) , __A :Tuple[str] = ("UpDecoderBlock2D",) , __A :Tuple[int] = (64,) , __A :int = 1 , __A :str = "silu" , __A :int = 3 , __A :int = 32 , __A :int = 256 , __A :int = 32 , __A :Optional[int] = None , __A :float = 0.1_8_2_1_5 , __A :str = "group" , ) -> Any:
        """simple docstring"""
        super().__init__()
        # pass init params to Encoder
        SCREAMING_SNAKE_CASE__ = Encoder(
            in_channels=__A , out_channels=__A , down_block_types=__A , block_out_channels=__A , layers_per_block=__A , act_fn=__A , norm_num_groups=__A , double_z=__A , )
        SCREAMING_SNAKE_CASE__ = vq_embed_dim if vq_embed_dim is not None else latent_channels
        SCREAMING_SNAKE_CASE__ = nn.Conv2d(__A , __A , 1 )
        SCREAMING_SNAKE_CASE__ = VectorQuantizer(__A , __A , beta=0.2_5 , remap=__A , sane_index_shape=__A )
        SCREAMING_SNAKE_CASE__ = nn.Conv2d(__A , __A , 1 )
        # pass init params to Decoder
        SCREAMING_SNAKE_CASE__ = Decoder(
            in_channels=__A , out_channels=__A , up_block_types=__A , block_out_channels=__A , layers_per_block=__A , act_fn=__A , norm_num_groups=__A , norm_type=__A , )

    @apply_forward_hook
    def _snake_case ( self :Union[str, Any] , __A :torch.FloatTensor , __A :bool = True ) -> VQEncoderOutput:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = self.encoder(__A )
        SCREAMING_SNAKE_CASE__ = self.quant_conv(__A )
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=__A )

    @apply_forward_hook
    def _snake_case ( self :Tuple , __A :torch.FloatTensor , __A :bool = False , __A :bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        """simple docstring"""
        if not force_not_quantize:
            SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.quantize(__A )
        else:
            SCREAMING_SNAKE_CASE__ = h
        SCREAMING_SNAKE_CASE__ = self.post_quant_conv(__A )
        SCREAMING_SNAKE_CASE__ = self.decoder(__A , quant if self.config.norm_type == """spatial""" else None )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=__A )

    def _snake_case ( self :int , __A :torch.FloatTensor , __A :bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = sample
        SCREAMING_SNAKE_CASE__ = self.encode(__A ).latents
        SCREAMING_SNAKE_CASE__ = self.decode(__A ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=__A )
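# A minimal encode/decode round-trip sketch for the VQ autoencoder above, assuming it
# matches diffusers' VQModel API (encode -> .latents, decode -> .sample). With the
# default single "DownEncoderBlock2D" there is no downsampling, so shapes are preserved.
import torch
from diffusers import VQModel

vq_model = VQModel()  # defaults mirror the __init__ signature above
sample_in = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    vq_latents = vq_model.encode(sample_in).latents
    reconstruction = vq_model.decode(vq_latents).sample
print(reconstruction.shape)  # torch.Size([1, 3, 32, 32])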
6
0
from collections import OrderedDict
from typing import List, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


__SCREAMING_SNAKE_CASE : List[Any] =logging.get_logger(__name__)

__SCREAMING_SNAKE_CASE : Any ={
    '''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}


class A_ ( UpperCamelCase__ ):
    _A :int = '''efficientnet'''

    def __init__( self : Tuple , snake_case__ : int = 3 , snake_case__ : int = 6_00 , snake_case__ : float = 2.0 , snake_case__ : float = 3.1 , snake_case__ : int = 8 , snake_case__ : List[int] = [3, 3, 5, 3, 5, 5, 3] , snake_case__ : List[int] = [32, 16, 24, 40, 80, 1_12, 1_92] , snake_case__ : List[int] = [16, 24, 40, 80, 1_12, 1_92, 3_20] , snake_case__ : List[int] = [] , snake_case__ : List[int] = [1, 2, 2, 2, 1, 2, 1] , snake_case__ : List[int] = [1, 2, 2, 3, 3, 4, 1] , snake_case__ : List[int] = [1, 6, 6, 6, 6, 6, 6] , snake_case__ : float = 0.25 , snake_case__ : str = "swish" , snake_case__ : int = 25_60 , snake_case__ : str = "mean" , snake_case__ : float = 0.02 , snake_case__ : float = 0.001 , snake_case__ : float = 0.99 , snake_case__ : float = 0.5 , snake_case__ : float = 0.2 , **snake_case__ : int , ):
        super().__init__(**__A )
        lowercase = num_channels
        lowercase = image_size
        lowercase = width_coefficient
        lowercase = depth_coefficient
        lowercase = depth_divisor
        lowercase = kernel_sizes
        lowercase = in_channels
        lowercase = out_channels
        lowercase = depthwise_padding
        lowercase = strides
        lowercase = num_block_repeats
        lowercase = expand_ratios
        lowercase = squeeze_expansion_ratio
        lowercase = hidden_act
        lowercase = hidden_dim
        lowercase = pooling_type
        lowercase = initializer_range
        lowercase = batch_norm_eps
        lowercase = batch_norm_momentum
        lowercase = dropout_rate
        lowercase = drop_connect_rate
        lowercase = sum(__A ) * 4


class A_ ( UpperCamelCase__ ):
    _A :Optional[Any] = version.parse('''1.11''' )

    @property
    def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )

    @property
    def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
        return 1E-5
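# A short sketch of the derived field computed at the end of __init__ above, assuming
# the class corresponds to transformers' EfficientNetConfig (where the final obfuscated
# assignment is num_hidden_layers = sum(num_block_repeats) * 4).
from transformers import EfficientNetConfig

effnet_config = EfficientNetConfig()  # defaults match the B7-sized signature above
print(effnet_config.num_block_repeats)           # [1, 2, 2, 3, 3, 4, 1]
print(sum(effnet_config.num_block_repeats) * 4)  # 64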
428
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable

import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm

from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule


class UpperCamelCase_ ( UpperCamelCase__ ):
    lowerCamelCase_ = 42
    lowerCamelCase_ = jnp.float32
    lowerCamelCase_ = True

    def _snake_case ( self :Tuple ) -> Optional[Any]:
        """simple docstring"""
        super().setup()
        SCREAMING_SNAKE_CASE__ = nn.Dense(5 , dtype=self.dtype )

    def __call__( self :List[Any] , *__A :int , **__A :Optional[Any] ) -> Optional[int]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = super().__call__(*__A , **__A )
        SCREAMING_SNAKE_CASE__ = self.cls(outputs[2] )
        return outputs[:2] + (cls_out,)


class UpperCamelCase_ ( UpperCamelCase__ ):
    lowerCamelCase_ = FlaxBigBirdForNaturalQuestionsModule


def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Tuple , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple ):
    def cross_entropy(UpperCamelCase__: List[str] , UpperCamelCase__: List[str] , UpperCamelCase__: List[str]=None ):
        SCREAMING_SNAKE_CASE__ = logits.shape[-1]
        SCREAMING_SNAKE_CASE__ = (labels[..., None] == jnp.arange(UpperCamelCase__ )[None]).astype("""f4""" )
        SCREAMING_SNAKE_CASE__ = jax.nn.log_softmax(UpperCamelCase__ , axis=-1 )
        SCREAMING_SNAKE_CASE__ = -jnp.sum(labels * logits , axis=-1 )
        if reduction is not None:
            SCREAMING_SNAKE_CASE__ = reduction(UpperCamelCase__ )
        return loss

    SCREAMING_SNAKE_CASE__ = partial(UpperCamelCase__ , reduction=jnp.mean )
    SCREAMING_SNAKE_CASE__ = cross_entropy(UpperCamelCase__ , UpperCamelCase__ )
    SCREAMING_SNAKE_CASE__ = cross_entropy(UpperCamelCase__ , UpperCamelCase__ )
    SCREAMING_SNAKE_CASE__ = cross_entropy(UpperCamelCase__ , UpperCamelCase__ )
    return (start_loss + end_loss + pooled_loss) / 3


@dataclass
class UpperCamelCase_ :
    lowerCamelCase_ = "google/bigbird-roberta-base"
    lowerCamelCase_ = 30_00
    lowerCamelCase_ = 1_05_00
    lowerCamelCase_ = 1_28
    lowerCamelCase_ = 3
    lowerCamelCase_ = 1
    lowerCamelCase_ = 5
    # tx_args
    lowerCamelCase_ = 3e-5
    lowerCamelCase_ = 0.0
    lowerCamelCase_ = 2_00_00
    lowerCamelCase_ = 0.0095
    lowerCamelCase_ = "bigbird-roberta-natural-questions"
    lowerCamelCase_ = "training-expt"
    lowerCamelCase_ = "data/nq-training.jsonl"
    lowerCamelCase_ = "data/nq-validation.jsonl"

    def _snake_case ( self :str ) -> Optional[int]:
        """simple docstring"""
        os.makedirs(self.base_dir , exist_ok=__A )
        SCREAMING_SNAKE_CASE__ = os.path.join(self.base_dir , self.save_dir )
        SCREAMING_SNAKE_CASE__ = self.batch_size_per_device * jax.device_count()


@dataclass
class UpperCamelCase_ :
    lowerCamelCase_ = 42
    lowerCamelCase_ = 40_96  # no dynamic padding on TPUs

    def __call__( self :Optional[Any] , __A :Optional[int] ) -> Tuple:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = self.collate_fn(__A )
        SCREAMING_SNAKE_CASE__ = jax.tree_util.tree_map(__A , __A )
        return batch

    def _snake_case ( self :List[Any] , __A :Union[str, Any] ) -> int:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.fetch_inputs(features["""input_ids"""] )
        SCREAMING_SNAKE_CASE__ = {
            """input_ids""": jnp.array(__A , dtype=jnp.int32 ),
            """attention_mask""": jnp.array(__A , dtype=jnp.int32 ),
            """start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.int32 ),
            """end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.int32 ),
            """pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.int32 ),
        }
        return batch

    def _snake_case ( self :Tuple , __A :list ) -> Dict:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = [self._fetch_inputs(__A ) for ids in input_ids]
        return zip(*__A )

    def _snake_case ( self :List[str] , __A :list ) -> Union[str, Any]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = [1 for _ in range(len(__A ) )]
        while len(__A ) < self.max_length:
            input_ids.append(self.pad_id )
            attention_mask.append(0 )
        return input_ids, attention_mask


def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any]=None ):
    if seed is not None:
        SCREAMING_SNAKE_CASE__ = dataset.shuffle(seed=UpperCamelCase__ )
    for i in range(len(UpperCamelCase__ ) // batch_size ):
        SCREAMING_SNAKE_CASE__ = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(UpperCamelCase__ )


@partial(jax.pmap , axis_name="""batch""" )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Dict , UpperCamelCase__: Optional[int] , **UpperCamelCase__: Optional[int] ):
    def loss_fn(UpperCamelCase__: List[Any] ):
        SCREAMING_SNAKE_CASE__ = model_inputs.pop("""start_labels""" )
        SCREAMING_SNAKE_CASE__ = model_inputs.pop("""end_labels""" )
        SCREAMING_SNAKE_CASE__ = model_inputs.pop("""pooled_labels""" )
        SCREAMING_SNAKE_CASE__ = state.apply_fn(**UpperCamelCase__ , params=UpperCamelCase__ , dropout_rng=UpperCamelCase__ , train=UpperCamelCase__ )
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = outputs
        return state.loss_fn(
            UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )

    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = jax.random.split(UpperCamelCase__ )
    SCREAMING_SNAKE_CASE__ = jax.value_and_grad(UpperCamelCase__ )
    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = grad_fn(state.params )
    SCREAMING_SNAKE_CASE__ = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
    SCREAMING_SNAKE_CASE__ = jax.lax.pmean(UpperCamelCase__ , """batch""" )
    SCREAMING_SNAKE_CASE__ = state.apply_gradients(grads=UpperCamelCase__ )
    return state, metrics, new_drp_rng


@partial(jax.pmap , axis_name="""batch""" )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] , **UpperCamelCase__: Dict ):
    SCREAMING_SNAKE_CASE__ = model_inputs.pop("""start_labels""" )
    SCREAMING_SNAKE_CASE__ = model_inputs.pop("""end_labels""" )
    SCREAMING_SNAKE_CASE__ = model_inputs.pop("""pooled_labels""" )
    SCREAMING_SNAKE_CASE__ = state.apply_fn(**UpperCamelCase__ , params=state.params , train=UpperCamelCase__ )
    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = outputs
    SCREAMING_SNAKE_CASE__ = state.loss_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
    SCREAMING_SNAKE_CASE__ = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
    return metrics


class UpperCamelCase_ ( train_state.TrainState ):
    lowerCamelCase_ = struct.field(pytree_node=UpperCamelCase__ )


@dataclass
class UpperCamelCase_ :
    lowerCamelCase_ = 42
    lowerCamelCase_ = 42
    lowerCamelCase_ = 42
    lowerCamelCase_ = 42
    lowerCamelCase_ = 42
    lowerCamelCase_ = 42
    lowerCamelCase_ = None

    def _snake_case ( self :List[Any] , __A :str , __A :str , __A :str , __A :Tuple=None ) -> int:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = model.params
        SCREAMING_SNAKE_CASE__ = TrainState.create(
            apply_fn=model.__call__ , params=__A , tx=__A , loss_fn=__A , )
        if ckpt_dir is not None:
            SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = restore_checkpoint(__A , __A )
            SCREAMING_SNAKE_CASE__ = {
                """lr""": args.lr,
                """init_lr""": args.init_lr,
                """warmup_steps""": args.warmup_steps,
                """num_train_steps""": num_train_steps,
                """weight_decay""": args.weight_decay,
            }
            SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = build_tx(**__A )
            SCREAMING_SNAKE_CASE__ = train_state.TrainState(
                step=__A , apply_fn=model.__call__ , params=__A , tx=__A , opt_state=__A , )
            SCREAMING_SNAKE_CASE__ = args
            SCREAMING_SNAKE_CASE__ = data_collator
            SCREAMING_SNAKE_CASE__ = lr
            SCREAMING_SNAKE_CASE__ = params
        SCREAMING_SNAKE_CASE__ = jax_utils.replicate(__A )
        return state

    def _snake_case ( self :Optional[Any] , __A :Optional[int] , __A :int , __A :int ) -> str:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = self.args
        SCREAMING_SNAKE_CASE__ = len(__A ) // args.batch_size
        SCREAMING_SNAKE_CASE__ = jax.random.PRNGKey(0 )
        SCREAMING_SNAKE_CASE__ = jax.random.split(__A , jax.device_count() )
        for epoch in range(args.max_epochs ):
            SCREAMING_SNAKE_CASE__ = jnp.array(0 , dtype=jnp.float32 )
            SCREAMING_SNAKE_CASE__ = get_batched_dataset(__A , args.batch_size , seed=__A )
            SCREAMING_SNAKE_CASE__ = 0
            for batch in tqdm(__A , total=__A , desc=f'''Running EPOCH-{epoch}''' ):
                SCREAMING_SNAKE_CASE__ = self.data_collator(__A )
                SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.train_step_fn(__A , __A , **__A )
                running_loss += jax_utils.unreplicate(metrics["""loss"""] )
                i += 1
                if i % args.logging_steps == 0:
                    SCREAMING_SNAKE_CASE__ = jax_utils.unreplicate(state.step )
                    SCREAMING_SNAKE_CASE__ = running_loss.item() / i
                    SCREAMING_SNAKE_CASE__ = self.scheduler_fn(state_step - 1 )
                    SCREAMING_SNAKE_CASE__ = self.evaluate(__A , __A )
                    SCREAMING_SNAKE_CASE__ = {
                        """step""": state_step.item(),
                        """eval_loss""": eval_loss.item(),
                        """tr_loss""": tr_loss,
                        """lr""": lr.item(),
                    }
                    tqdm.write(str(__A ) )
                    self.logger.log(__A , commit=__A )
                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f'''-e{epoch}-s{i}''' , state=__A )

    def _snake_case ( self :List[str] , __A :Dict , __A :str ) -> Union[str, Any]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = get_batched_dataset(__A , self.args.batch_size )
        SCREAMING_SNAKE_CASE__ = len(__A ) // self.args.batch_size
        SCREAMING_SNAKE_CASE__ = jnp.array(0 , dtype=jnp.float32 )
        SCREAMING_SNAKE_CASE__ = 0
        for batch in tqdm(__A , total=__A , desc="""Evaluating ... """ ):
            SCREAMING_SNAKE_CASE__ = self.data_collator(__A )
            SCREAMING_SNAKE_CASE__ = self.val_step_fn(__A , **__A )
            running_loss += jax_utils.unreplicate(metrics["""loss"""] )
            i += 1
        return running_loss / i

    def _snake_case ( self :List[Any] , __A :Any , __A :Dict ) -> Union[str, Any]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = jax_utils.unreplicate(__A )
        print(f'''SAVING CHECKPOINT IN {save_dir}''' , end=""" ... """ )
        self.model_save_fn(__A , params=state.params )
        with open(os.path.join(__A , """opt_state.msgpack""" ) , """wb""" ) as f:
            f.write(to_bytes(state.opt_state ) )
        joblib.dump(self.args , os.path.join(__A , """args.joblib""" ) )
        joblib.dump(self.data_collator , os.path.join(__A , """data_collator.joblib""" ) )
        with open(os.path.join(__A , """training_state.json""" ) , """w""" ) as f:
            json.dump({"""step""": state.step.item()} , __A )
        print("""DONE""" )


def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Optional[Any] ):
    print(f'''RESTORING CHECKPOINT FROM {save_dir}''' , end=""" ... """ )
    with open(os.path.join(UpperCamelCase__ , """flax_model.msgpack""" ) , """rb""" ) as f:
        SCREAMING_SNAKE_CASE__ = from_bytes(state.params , f.read() )
    with open(os.path.join(UpperCamelCase__ , """opt_state.msgpack""" ) , """rb""" ) as f:
        SCREAMING_SNAKE_CASE__ = from_bytes(state.opt_state , f.read() )
    SCREAMING_SNAKE_CASE__ = joblib.load(os.path.join(UpperCamelCase__ , """args.joblib""" ) )
    SCREAMING_SNAKE_CASE__ = joblib.load(os.path.join(UpperCamelCase__ , """data_collator.joblib""" ) )
    with open(os.path.join(UpperCamelCase__ , """training_state.json""" ) , """r""" ) as f:
        SCREAMING_SNAKE_CASE__ = json.load(UpperCamelCase__ )
    SCREAMING_SNAKE_CASE__ = training_state["""step"""]
    print("""DONE""" )
    return params, opt_state, step, args, data_collator


def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict ):
    SCREAMING_SNAKE_CASE__ = num_train_steps - warmup_steps
    SCREAMING_SNAKE_CASE__ = optax.linear_schedule(init_value=UpperCamelCase__ , end_value=UpperCamelCase__ , transition_steps=UpperCamelCase__ )
    SCREAMING_SNAKE_CASE__ = optax.linear_schedule(init_value=UpperCamelCase__ , end_value=1e-7 , transition_steps=UpperCamelCase__ )
    SCREAMING_SNAKE_CASE__ = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
    return lr


def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple , UpperCamelCase__: Tuple , UpperCamelCase__: Tuple ):
    def weight_decay_mask(UpperCamelCase__: Any ):
        SCREAMING_SNAKE_CASE__ = traverse_util.flatten_dict(UpperCamelCase__ )
        SCREAMING_SNAKE_CASE__ = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()}
        return traverse_util.unflatten_dict(UpperCamelCase__ )

    SCREAMING_SNAKE_CASE__ = scheduler_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
    SCREAMING_SNAKE_CASE__ = optax.adamw(learning_rate=UpperCamelCase__ , weight_decay=UpperCamelCase__ , mask=UpperCamelCase__ )
    return tx, lr
6
0
'''simple docstring''' import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class UpperCAmelCase__ ( UpperCamelCase__ ): """simple docstring""" __UpperCAmelCase : str = 42 __UpperCAmelCase : Union[str, Any] = jnp.floataa __UpperCAmelCase : List[Any] = True def __lowercase ( self : Tuple ): '''simple docstring''' super().setup() _a : List[str] = nn.Dense(5 ,dtype=self.dtype ) def __call__( self : List[Any] ,*_a : int ,**_a : Optional[Any] ): '''simple docstring''' _a : Optional[Any] = super().__call__(*__A ,**__A ) _a : Tuple = self.cls(outputs[2] ) return outputs[:2] + (cls_out,) class UpperCAmelCase__ ( UpperCamelCase__ ): """simple docstring""" __UpperCAmelCase : List[Any] = FlaxBigBirdForNaturalQuestionsModule def UpperCAmelCase_ (__a : List[Any] , __a : List[Any] , __a : Optional[int] , __a : Tuple , __a : Union[str, Any] , __a : Tuple ): """simple docstring""" def cross_entropy(__a : List[str] , __a : List[str] , __a : List[str]=None ): _a : List[Any] = logits.shape[-1] _a : Dict = (labels[..., None] == jnp.arange(UpperCamelCase__ )[None]).astype('f4' ) _a : str = jax.nn.log_softmax(UpperCamelCase__ , axis=-1 ) _a : Optional[Any] = -jnp.sum(labels * logits , axis=-1 ) if reduction is not None: _a : List[Any] = reduction(UpperCamelCase__ ) return loss _a : Optional[Any] = partial(UpperCamelCase__ , reduction=jnp.mean ) _a : Optional[Any] = cross_entropy(UpperCamelCase__ , UpperCamelCase__ ) _a : str = cross_entropy(UpperCamelCase__ , UpperCamelCase__ ) _a : List[str] = cross_entropy(UpperCamelCase__ , UpperCamelCase__ ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class UpperCAmelCase__ : """simple docstring""" __UpperCAmelCase : str = '''google/bigbird-roberta-base''' __UpperCAmelCase : Optional[Any] = 3000 __UpperCAmelCase : List[Any] = 1_0500 __UpperCAmelCase : Optional[int] = 128 __UpperCAmelCase : int = 3 __UpperCAmelCase : Any = 1 __UpperCAmelCase : List[Any] = 5 # tx_args __UpperCAmelCase : Optional[int] = 3e-5 __UpperCAmelCase : List[Any] = 0.0 __UpperCAmelCase : Tuple = 2_0000 __UpperCAmelCase : Any = 0.0_0_9_5 __UpperCAmelCase : int = '''bigbird-roberta-natural-questions''' __UpperCAmelCase : Any = '''training-expt''' __UpperCAmelCase : int = '''data/nq-training.jsonl''' __UpperCAmelCase : int = '''data/nq-validation.jsonl''' def __lowercase ( self : str ): '''simple docstring''' os.makedirs(self.base_dir ,exist_ok=__A ) _a : int = os.path.join(self.base_dir ,self.save_dir ) _a : str = self.batch_size_per_device * jax.device_count() @dataclass class UpperCAmelCase__ : """simple docstring""" __UpperCAmelCase : Optional[int] = 42 __UpperCAmelCase : int = 4096 # no dynamic padding on TPUs def __call__( self : Optional[Any] ,_a : Optional[int] ): '''simple docstring''' _a : str = self.collate_fn(__A ) _a : List[str] = jax.tree_util.tree_map(__A ,__A ) return batch def __lowercase ( self : List[Any] ,_a : Union[str, Any] ): '''simple docstring''' _a, _a : Optional[int] = self.fetch_inputs(features['input_ids'] ) _a : List[str] = { 
'input_ids': jnp.array(__A ,dtype=jnp.intaa ), 'attention_mask': jnp.array(__A ,dtype=jnp.intaa ), 'start_labels': jnp.array(features['start_token'] ,dtype=jnp.intaa ), 'end_labels': jnp.array(features['end_token'] ,dtype=jnp.intaa ), 'pooled_labels': jnp.array(features['category'] ,dtype=jnp.intaa ), } return batch def __lowercase ( self : Tuple ,_a : list ): '''simple docstring''' _a : Dict = [self._fetch_inputs(__A ) for ids in input_ids] return zip(*__A ) def __lowercase ( self : List[str] ,_a : list ): '''simple docstring''' _a : str = [1 for _ in range(len(__A ) )] while len(__A ) < self.max_length: input_ids.append(self.pad_id ) attention_mask.append(0 ) return input_ids, attention_mask def UpperCAmelCase_ (__a : str , __a : List[str] , __a : Optional[Any]=None ): """simple docstring""" if seed is not None: _a : Union[str, Any] = dataset.shuffle(seed=UpperCamelCase__ ) for i in range(len(UpperCamelCase__ ) // batch_size ): _a : Dict = dataset[i * batch_size : (i + 1) * batch_size] yield dict(UpperCamelCase__ ) @partial(jax.pmap , axis_name='batch' ) def UpperCAmelCase_ (__a : Dict , __a : Optional[int] , **__a : Optional[int] ): """simple docstring""" def loss_fn(__a : List[Any] ): _a : Optional[int] = model_inputs.pop('start_labels' ) _a : int = model_inputs.pop('end_labels' ) _a : Optional[int] = model_inputs.pop('pooled_labels' ) _a : int = state.apply_fn(**UpperCamelCase__ , params=UpperCamelCase__ , dropout_rng=UpperCamelCase__ , train=UpperCamelCase__ ) _a, _a, _a : str = outputs return state.loss_fn( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) _a, _a : Dict = jax.random.split(UpperCamelCase__ ) _a : int = jax.value_and_grad(UpperCamelCase__ ) _a, _a : str = grad_fn(state.params ) _a : Any = jax.lax.pmean({'loss': loss} , axis_name='batch' ) _a : int = jax.lax.pmean(UpperCamelCase__ , 'batch' ) _a : Optional[int] = state.apply_gradients(grads=UpperCamelCase__ ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name='batch' ) def UpperCAmelCase_ (__a : Optional[Any] , **__a : Dict ): """simple docstring""" _a : Dict = model_inputs.pop('start_labels' ) _a : Dict = model_inputs.pop('end_labels' ) _a : Union[str, Any] = model_inputs.pop('pooled_labels' ) _a : int = state.apply_fn(**UpperCamelCase__ , params=state.params , train=UpperCamelCase__ ) _a, _a, _a : str = outputs _a : str = state.loss_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) _a : Optional[int] = jax.lax.pmean({'loss': loss} , axis_name='batch' ) return metrics class UpperCAmelCase__ ( train_state.TrainState ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = struct.field(pytree_node=UpperCamelCase__ ) @dataclass class UpperCAmelCase__ : """simple docstring""" __UpperCAmelCase : Dict = 42 __UpperCAmelCase : Optional[int] = 42 __UpperCAmelCase : List[str] = 42 __UpperCAmelCase : Tuple = 42 __UpperCAmelCase : Any = 42 __UpperCAmelCase : Tuple = 42 __UpperCAmelCase : Any = None def __lowercase ( self : List[Any] ,_a : str ,_a : str ,_a : str ,_a : Tuple=None ): '''simple docstring''' _a : Tuple = model.params _a : Union[str, Any] = TrainState.create( apply_fn=model.__call__ ,params=__A ,tx=__A ,loss_fn=__A ,) if ckpt_dir is not None: _a, _a, _a, _a, _a : Tuple = restore_checkpoint(__A ,__A ) _a : List[Any] = { 'lr': args.lr, 'init_lr': args.init_lr, 'warmup_steps': args.warmup_steps, 'num_train_steps': num_train_steps, 'weight_decay': args.weight_decay, } _a, 
_a : str = build_tx(**__A ) _a : Union[str, Any] = train_state.TrainState( step=__A ,apply_fn=model.__call__ ,params=__A ,tx=__A ,opt_state=__A ,) _a : Tuple = args _a : Optional[int] = data_collator _a : Optional[Any] = lr _a : Union[str, Any] = params _a : Any = jax_utils.replicate(__A ) return state def __lowercase ( self : Optional[Any] ,_a : Optional[int] ,_a : int ,_a : int ): '''simple docstring''' _a : Union[str, Any] = self.args _a : str = len(__A ) // args.batch_size _a : Optional[Any] = jax.random.PRNGKey(0 ) _a : str = jax.random.split(__A ,jax.device_count() ) for epoch in range(args.max_epochs ): _a : List[Any] = jnp.array(0 ,dtype=jnp.floataa ) _a : int = get_batched_dataset(__A ,args.batch_size ,seed=__A ) _a : Optional[int] = 0 for batch in tqdm(__A ,total=__A ,desc=F"""Running EPOCH-{epoch}""" ): _a : List[str] = self.data_collator(__A ) _a, _a, _a : List[str] = self.train_step_fn(__A ,__A ,**__A ) running_loss += jax_utils.unreplicate(metrics['loss'] ) i += 1 if i % args.logging_steps == 0: _a : Optional[int] = jax_utils.unreplicate(state.step ) _a : Tuple = running_loss.item() / i _a : int = self.scheduler_fn(state_step - 1 ) _a : Dict = self.evaluate(__A ,__A ) _a : Dict = { 'step': state_step.item(), 'eval_loss': eval_loss.item(), 'tr_loss': tr_loss, 'lr': lr.item(), } tqdm.write(str(__A ) ) self.logger.log(__A ,commit=__A ) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + F"""-e{epoch}-s{i}""" ,state=__A ) def __lowercase ( self : List[str] ,_a : Dict ,_a : str ): '''simple docstring''' _a : str = get_batched_dataset(__A ,self.args.batch_size ) _a : int = len(__A ) // self.args.batch_size _a : Optional[int] = jnp.array(0 ,dtype=jnp.floataa ) _a : str = 0 for batch in tqdm(__A ,total=__A ,desc='Evaluating ... ' ): _a : Dict = self.data_collator(__A ) _a : int = self.val_step_fn(__A ,**__A ) running_loss += jax_utils.unreplicate(metrics['loss'] ) i += 1 return running_loss / i def __lowercase ( self : List[Any] ,_a : Any ,_a : Dict ): '''simple docstring''' _a : int = jax_utils.unreplicate(__A ) print(F"""SAVING CHECKPOINT IN {save_dir}""" ,end=' ... ' ) self.model_save_fn(__A ,params=state.params ) with open(os.path.join(__A ,'opt_state.msgpack' ) ,'wb' ) as f: f.write(to_bytes(state.opt_state ) ) joblib.dump(self.args ,os.path.join(__A ,'args.joblib' ) ) joblib.dump(self.data_collator ,os.path.join(__A ,'data_collator.joblib' ) ) with open(os.path.join(__A ,'training_state.json' ) ,'w' ) as f: json.dump({'step': state.step.item()} ,__A ) print('DONE' ) def UpperCAmelCase_ (__a : int , __a : Optional[Any] ): """simple docstring""" print(f"""RESTORING CHECKPOINT FROM {save_dir}""" , end=' ... 
' ) with open(os.path.join(UpperCamelCase__ , 'flax_model.msgpack' ) , 'rb' ) as f: _a : int = from_bytes(state.params , f.read() ) with open(os.path.join(UpperCamelCase__ , 'opt_state.msgpack' ) , 'rb' ) as f: _a : int = from_bytes(state.opt_state , f.read() ) _a : List[str] = joblib.load(os.path.join(UpperCamelCase__ , 'args.joblib' ) ) _a : Union[str, Any] = joblib.load(os.path.join(UpperCamelCase__ , 'data_collator.joblib' ) ) with open(os.path.join(UpperCamelCase__ , 'training_state.json' ) , 'r' ) as f: _a : Dict = json.load(UpperCamelCase__ ) _a : Dict = training_state['step'] print('DONE' ) return params, opt_state, step, args, data_collator def UpperCAmelCase_ (__a : int , __a : Union[str, Any] , __a : List[Any] , __a : Dict ): """simple docstring""" _a : List[Any] = num_train_steps - warmup_steps _a : List[str] = optax.linear_schedule(init_value=UpperCamelCase__ , end_value=UpperCamelCase__ , transition_steps=UpperCamelCase__ ) _a : List[Any] = optax.linear_schedule(init_value=UpperCamelCase__ , end_value=1e-7 , transition_steps=UpperCamelCase__ ) _a : Any = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def UpperCAmelCase_ (__a : List[str] , __a : Union[str, Any] , __a : Tuple , __a : Tuple , __a : Tuple ): """simple docstring""" def weight_decay_mask(__a : Any ): _a : Tuple = traverse_util.flatten_dict(UpperCamelCase__ ) _a : Tuple = {k: (v[-1] != 'bias' and v[-2:] != ('LayerNorm', 'scale')) for k, v in params.items()} return traverse_util.unflatten_dict(UpperCamelCase__ ) _a : Any = scheduler_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) _a : Union[str, Any] = optax.adamw(learning_rate=UpperCamelCase__ , weight_decay=UpperCamelCase__ , mask=UpperCamelCase__ ) return tx, lr
229
from torch import nn


def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ):
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f'''Unsupported activation function: {act_fn}''' )
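# A self-contained, runnable restatement of the helper above (whose identifiers are
# obfuscated placeholders); the local name get_act is ours, and in diffusers the
# equivalent utility is get_activation.
import torch
from torch import nn


def get_act(act_fn: str) -> nn.Module:
    # "swish" and "silu" map to the same op; any other name raises, as above.
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    if act_fn == "mish":
        return nn.Mish()
    if act_fn == "gelu":
        return nn.GELU()
    raise ValueError(f"Unsupported activation function: {act_fn}")


print(get_act("gelu")(torch.tensor([1.0])))  # tensor([0.8413])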
6
0
'''simple docstring'''
import warnings
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class _snake_case ( UpperCamelCase__ ):
    """simple docstring"""

    _UpperCamelCase = ["image_processor", "tokenizer"]
    _UpperCamelCase = "FlavaImageProcessor"
    _UpperCamelCase = ("BertTokenizer", "BertTokenizerFast")

    def __init__( self , UpperCAmelCase__=None , UpperCAmelCase__=None , **UpperCAmelCase__ ) -> str:
        a_ = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , __A , )
            a_ = kwargs.pop('feature_extractor' )
        a_ = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(__A , __A )
        a_ = self.image_processor

    def __call__( self , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = True , UpperCAmelCase__ = False , UpperCAmelCase__ = False , UpperCAmelCase__ = None , UpperCAmelCase__ = 0 , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = False , UpperCAmelCase__ = False , UpperCAmelCase__ = False , UpperCAmelCase__ = False , UpperCAmelCase__ = True , UpperCAmelCase__ = None , **UpperCAmelCase__ , ) -> Optional[Any]:
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            a_ = self.tokenizer(
                text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_token_type_ids=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , )
        if images is not None:
            a_ = self.image_processor(
                __A , return_image_mask=__A , return_codebook_pixels=__A , return_tensors=__A , **__A , )
        if text is not None and images is not None:
            encoding.update(__A )
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**__A ) , tensor_type=__A )

    def __SCREAMING_SNAKE_CASE ( self , *UpperCAmelCase__ , **UpperCAmelCase__ ) -> Dict:
        return self.tokenizer.batch_decode(*__A , **__A )

    def __SCREAMING_SNAKE_CASE ( self , *UpperCAmelCase__ , **UpperCAmelCase__ ) -> Optional[int]:
        return self.tokenizer.decode(*__A , **__A )

    @property
    def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        a_ = self.tokenizer.model_input_names
        a_ = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __A , )
        return self.image_processor_class

    @property
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __A , )
        return self.image_processor
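# A minimal usage sketch for the processor above, assuming the public
# "facebook/flava-full" checkpoint; FLAVA can additionally return a masked-image
# pattern (bool_masked_pos) when return_image_mask=True, as in the __call__ above.
import numpy as np
from PIL import Image
from transformers import FlavaProcessor

flava_processor = FlavaProcessor.from_pretrained("facebook/flava-full")
flava_image = Image.fromarray(np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8))
flava_inputs = flava_processor(
    text=["a photo"], images=[flava_image], return_image_mask=True, return_tensors="pt"
)
print(sorted(flava_inputs.keys()))  # includes input_ids, pixel_values, bool_masked_pos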
697
import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ): SCREAMING_SNAKE_CASE__ = SwinConfig.from_pretrained( """microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) SCREAMING_SNAKE_CASE__ = MaskFormerConfig(backbone_config=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = """huggingface/label-files""" if "ade20k-full" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 847 SCREAMING_SNAKE_CASE__ = """maskformer-ade20k-full-id2label.json""" elif "ade" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 150 SCREAMING_SNAKE_CASE__ = """ade20k-id2label.json""" elif "coco-stuff" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 171 SCREAMING_SNAKE_CASE__ = """maskformer-coco-stuff-id2label.json""" elif "coco" in model_name: # TODO SCREAMING_SNAKE_CASE__ = 133 SCREAMING_SNAKE_CASE__ = """coco-panoptic-id2label.json""" elif "cityscapes" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 19 SCREAMING_SNAKE_CASE__ = """cityscapes-id2label.json""" elif "vistas" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 65 SCREAMING_SNAKE_CASE__ = """mapillary-vistas-id2label.json""" SCREAMING_SNAKE_CASE__ = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE__ = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} return config def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] ): SCREAMING_SNAKE_CASE__ = [] # stem # fmt: off rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) 
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((f'''backbone.layers.{i}.downsample.reduction.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((f'''backbone.layers.{i}.downsample.norm.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((f'''backbone.layers.{i}.downsample.norm.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') ) # FPN rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((f'''sem_seg_head.adapter_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') ) rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') ) rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') ) rename_keys.append((f'''sem_seg_head.layer_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') ) rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') ) rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') ) rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") ) rename_keys.append(("""sem_seg_head.mask_features.bias""", 
"""model.pixel_level_module.decoder.mask_projection.bias""") ) # Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') ) # cross-attention out projection rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') ) # MLP 1 rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') ) # MLP 2 rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') ) # layernorm 1 (self-attention layernorm) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') ) # layernorm 2 (cross-attention layernorm) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') ) # layernorm 3 (final layernorm) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") ) # heads on top rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") ) 
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") ) rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") ) for i in range(3 ): rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', f'''mask_embedder.{i}.0.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', f'''mask_embedder.{i}.0.bias''') ) # fmt: on return rename_keys def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[int] ): SCREAMING_SNAKE_CASE__ = dct.pop(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = val def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: Union[str, Any] ): SCREAMING_SNAKE_CASE__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): SCREAMING_SNAKE_CASE__ = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' ) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ = in_proj_weight[:dim, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[: dim] SCREAMING_SNAKE_CASE__ = in_proj_weight[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE__ = in_proj_bias[ dim : dim * 2 ] SCREAMING_SNAKE_CASE__ = in_proj_weight[ -dim :, : ] SCREAMING_SNAKE_CASE__ = in_proj_bias[-dim :] # fmt: on def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Optional[Any] ): # fmt: off SCREAMING_SNAKE_CASE__ = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' ) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ = in_proj_weight[: hidden_size, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[:config.hidden_size] SCREAMING_SNAKE_CASE__ = in_proj_weight[hidden_size : hidden_size * 2, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[hidden_size : hidden_size * 2] SCREAMING_SNAKE_CASE__ = in_proj_weight[-hidden_size :, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' ) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ = in_proj_weight[: hidden_size, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[:config.hidden_size] SCREAMING_SNAKE_CASE__ = in_proj_weight[hidden_size : hidden_size * 2, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[hidden_size : hidden_size * 2] SCREAMING_SNAKE_CASE__ = in_proj_weight[-hidden_size :, :] 
SCREAMING_SNAKE_CASE__ = in_proj_bias[-hidden_size :] # fmt: on def SCREAMING_SNAKE_CASE__ ( ): SCREAMING_SNAKE_CASE__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" SCREAMING_SNAKE_CASE__ = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ) return im @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: str , UpperCamelCase__: str , UpperCamelCase__: bool = False ): SCREAMING_SNAKE_CASE__ = get_maskformer_config(UpperCamelCase__ ) # load original state_dict with open(UpperCamelCase__ , """rb""" ) as f: SCREAMING_SNAKE_CASE__ = pickle.load(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = data["""model"""] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys SCREAMING_SNAKE_CASE__ = create_rename_keys(UpperCamelCase__ ) for src, dest in rename_keys: rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) read_in_swin_q_k_v(UpperCamelCase__ , config.backbone_config ) read_in_decoder_q_k_v(UpperCamelCase__ , UpperCamelCase__ ) # update to torch tensors for key, value in state_dict.items(): SCREAMING_SNAKE_CASE__ = torch.from_numpy(UpperCamelCase__ ) # load 🤗 model SCREAMING_SNAKE_CASE__ = MaskFormerForInstanceSegmentation(UpperCamelCase__ ) model.eval() for name, param in model.named_parameters(): print(UpperCamelCase__ , param.shape ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(UpperCamelCase__ ) == 0, f'''Unexpected keys: {unexpected_keys}''' # verify results SCREAMING_SNAKE_CASE__ = prepare_img() if "vistas" in model_name: SCREAMING_SNAKE_CASE__ = 65 elif "cityscapes" in model_name: SCREAMING_SNAKE_CASE__ = 65_535 else: SCREAMING_SNAKE_CASE__ = 255 SCREAMING_SNAKE_CASE__ = True if """ade""" in model_name else False SCREAMING_SNAKE_CASE__ = MaskFormerImageProcessor(ignore_index=UpperCamelCase__ , reduce_labels=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = image_processor(UpperCamelCase__ , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE__ = model(**UpperCamelCase__ ) print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": SCREAMING_SNAKE_CASE__ = torch.tensor( [[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' ) Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) model.save_pretrained(UpperCamelCase__ ) image_processor.save_pretrained(UpperCamelCase__ ) if push_to_hub: print("""Pushing model and image processor to the hub...""" ) model.push_to_hub(f'''nielsr/{model_name}''' ) image_processor.push_to_hub(f'''nielsr/{model_name}''' ) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='maskformer-swin-tiny-ade', type=str, help='Name of the MaskFormer model you\'d like to convert', ) parser.add_argument( '--checkpoint_path', default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl', type=str, help='Path to the original state dict (.pth file).', ) parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) args = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
6
0
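The conversion script above boils down to one pattern: build a list of (old_key, new_key) pairs, then move each tensor in the checkpoint's state dict to its new name. A minimal, self-contained sketch of that pattern follows; the key names here are illustrative, not the real MaskFormer keys.

import torch


def rename_key(state_dict, old_key, new_key):
    # pop the tensor under its original name and re-insert it under the new one
    state_dict[new_key] = state_dict.pop(old_key)


state_dict = {"backbone.patch_embed.proj.weight": torch.zeros(4, 3)}
rename_keys = [
    ("backbone.patch_embed.proj.weight", "model.encoder.embeddings.projection.weight"),
]
for src, dest in rename_keys:
    rename_key(state_dict, src, dest)
print(sorted(state_dict))  # only the renamed key remains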
"""simple docstring""" import warnings from ...utils import logging from .image_processing_donut import DonutImageProcessor UpperCAmelCase : Any = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ): def __init__( self : int , *lowerCAmelCase_ : int , **lowerCAmelCase_ : Any): """simple docstring""" warnings.warn( """The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use DonutImageProcessor instead.""" , __A , ) super().__init__(*__A , **__A)
567
from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowerCamelCase = logging.get_logger(__name__) _lowerCamelCase = { 'nielsr/canine-s': 2048, } # Unicode defines 1,114,112 total “codepoints” _lowerCamelCase = 1114112 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py _lowerCamelCase = 0 _lowerCamelCase = 0XE0_00 _lowerCamelCase = 0XE0_01 _lowerCamelCase = 0XE0_02 _lowerCamelCase = 0XE0_03 _lowerCamelCase = 0XE0_04 # Maps special codepoints to human-readable names. _lowerCamelCase = { # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. _lowerCamelCase = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self :str , __A :str=chr(__A ) , __A :str=chr(__A ) , __A :Dict=chr(__A ) , __A :str=chr(__A ) , __A :Union[str, Any]=chr(__A ) , __A :str=chr(__A ) , __A :int=False , __A :int=2048 , **__A :Dict , ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token # Mask token behave like a normal word, i.e. include the space before it SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token super().__init__( bos_token=__A , eos_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , model_max_length=__A , **__A , ) # Creates a mapping for looking up the IDs of special symbols. SCREAMING_SNAKE_CASE__ = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): SCREAMING_SNAKE_CASE__ = codepoint # Creates a mapping for looking up the string forms of special symbol IDs. 
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens) -> str:
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocabulary file to save.
        return ()
6
0
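The CANINE-style tokenizer above needs no vocabulary file because a character's id is simply its Unicode code point. A toy round-trip of that idea, assuming nothing beyond the Python standard library:

text = "héllo"
ids = [ord(ch) for ch in text]  # _tokenize + _convert_token_to_id
assert ids == [104, 233, 108, 108, 111]
decoded = "".join(chr(i) for i in ids)  # _convert_id_to_token + convert_tokens_to_string
assert decoded == text
print(ids)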
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Value from .base import TaskTemplate @dataclass(frozen=UpperCamelCase__ ) class _UpperCamelCase (UpperCamelCase__ ): # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization snake_case_ = field(default="""text-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} ) snake_case_ = Features({"""text""": Value("""string""" )} ) snake_case_ = Features({"""labels""": ClassLabel} ) snake_case_ = """text""" snake_case_ = """labels""" def __UpperCAmelCase ( self , __UpperCamelCase )-> Optional[Any]: if self.label_column not in features: raise ValueError(F"""Column {self.label_column} is not present in features.""" ) if not isinstance(features[self.label_column] , __A ): raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" ) __lowerCAmelCase = copy.deepcopy(self ) __lowerCAmelCase = self.label_schema.copy() __lowerCAmelCase = features[self.label_column] __lowerCAmelCase = label_schema return task_template @property def __UpperCAmelCase ( self )-> Dict[str, str]: return { self.text_column: "text", self.label_column: "labels", }
367
import inspect import os import torch from transformers import AutoModel from transformers.testing_utils import mockenv_context from transformers.trainer_utils import set_seed import accelerate from accelerate.accelerator import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils.testing import ( AccelerateTestCase, TempDirTestCase, execute_subprocess_async, require_cuda, require_fsdp, require_multi_gpu, slow, ) from accelerate.utils.constants import ( FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, ) from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin from accelerate.utils.other import patch_environment set_seed(42) _lowerCamelCase = 'bert-base-cased' _lowerCamelCase = 'fp16' _lowerCamelCase = 'bf16' _lowerCamelCase = [FPaa, BFaa] @require_fsdp @require_cuda class UpperCamelCase_ ( UpperCamelCase__ ): def _snake_case ( self :Optional[Any] ) -> Optional[Any]: """simple docstring""" super().setUp() SCREAMING_SNAKE_CASE__ = dict( ACCELERATE_USE_FSDP="""true""" , MASTER_ADDR="""localhost""" , MASTER_PORT="""10999""" , RANK="""0""" , LOCAL_RANK="""0""" , WORLD_SIZE="""1""" , ) def _snake_case ( self :List[Any] ) -> Tuple: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy for i, strategy in enumerate(__A ): SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = f'''{i + 1}''' SCREAMING_SNAKE_CASE__ = strategy with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) ) def _snake_case ( self :int ) -> List[str]: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch for i, prefetch_policy in enumerate(__A ): SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = prefetch_policy with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() if prefetch_policy == "NO_PREFETCH": self.assertIsNone(fsdp_plugin.backward_prefetch ) else: self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) ) def _snake_case ( self :List[str] ) -> List[str]: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType for i, state_dict_type in enumerate(__A ): SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = state_dict_type with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) ) if state_dict_type == "FULL_STATE_DICT": self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu ) self.assertTrue(fsdp_plugin.state_dict_config.ranka_only ) def _snake_case ( self :str ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ = AutoModel.from_pretrained(__A ) for policy in FSDP_AUTO_WRAP_POLICY: SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = policy if policy == "TRANSFORMER_BASED_WRAP": SCREAMING_SNAKE_CASE__ = """BertLayer""" elif policy == "SIZE_BASED_WRAP": SCREAMING_SNAKE_CASE__ = """2000""" with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__A ) if policy == "NO_WRAP": self.assertIsNone(fsdp_plugin.auto_wrap_policy ) else: self.assertIsNotNone(fsdp_plugin.auto_wrap_policy ) SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = """TRANSFORMER_BASED_WRAP""" 
SCREAMING_SNAKE_CASE__ = """T5Layer""" with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() with self.assertRaises(__A ) as cm: fsdp_plugin.set_auto_wrap_policy(__A ) self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) ) SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = """SIZE_BASED_WRAP""" SCREAMING_SNAKE_CASE__ = """0""" with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__A ) self.assertIsNone(fsdp_plugin.auto_wrap_policy ) def _snake_case ( self :Optional[Any] ) -> Optional[int]: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = mp_dtype with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = Accelerator() if mp_dtype == "fp16": SCREAMING_SNAKE_CASE__ = torch.floataa elif mp_dtype == "bf16": SCREAMING_SNAKE_CASE__ = torch.bfloataa SCREAMING_SNAKE_CASE__ = MixedPrecision(param_dtype=__A , reduce_dtype=__A , buffer_dtype=__A ) self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , __A ) if mp_dtype == FPaa: self.assertTrue(isinstance(accelerator.scaler , __A ) ) elif mp_dtype == BFaa: self.assertIsNone(accelerator.scaler ) AcceleratorState._reset_state(__A ) def _snake_case ( self :str ) -> str: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload for flag in [True, False]: SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = str(__A ).lower() with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=__A ) ) @require_fsdp @require_multi_gpu @slow class UpperCamelCase_ ( UpperCamelCase__ ): def _snake_case ( self :Any ) -> Any: """simple docstring""" super().setUp() SCREAMING_SNAKE_CASE__ = 0.8_2 SCREAMING_SNAKE_CASE__ = [ """fsdp_shard_grad_op_transformer_based_wrap""", """fsdp_full_shard_transformer_based_wrap""", ] SCREAMING_SNAKE_CASE__ = { """multi_gpu_fp16""": 3200, """fsdp_shard_grad_op_transformer_based_wrap_fp16""": 2000, """fsdp_full_shard_transformer_based_wrap_fp16""": 1900, # Disabling below test as it overwhelms the RAM memory usage # on CI self-hosted runner leading to tests getting killed. 
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang } SCREAMING_SNAKE_CASE__ = 160 SCREAMING_SNAKE_CASE__ = 160 SCREAMING_SNAKE_CASE__ = inspect.getfile(accelerate.test_utils ) SCREAMING_SNAKE_CASE__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] ) def _snake_case ( self :Union[str, Any] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_performance.py""" ) SCREAMING_SNAKE_CASE__ = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""] for config in self.performance_configs: SCREAMING_SNAKE_CASE__ = cmd.copy() for i, strategy in enumerate(__A ): if strategy.lower() in config: cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' ) break if "fp32" in config: cmd_config.append("""--mixed_precision=no""" ) else: cmd_config.append("""--mixed_precision=fp16""" ) if "cpu_offload" in config: cmd_config.append("""--fsdp_offload_params=True""" ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in config: cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" ) elif policy == "SIZE_BASED_WRAP": cmd_config.append("""--fsdp_min_num_params=2000""" ) cmd_config.extend( [ self.test_file_path, f'''--output_dir={self.tmpdir}''', f'''--performance_lower_bound={self.performance_lower_bound}''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__A , env=os.environ.copy() ) def _snake_case ( self :Dict ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_checkpointing.py""" ) SCREAMING_SNAKE_CASE__ = [ """accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp""", """--mixed_precision=fp16""", """--fsdp_transformer_layer_cls_to_wrap=BertLayer""", ] for i, strategy in enumerate(__A ): SCREAMING_SNAKE_CASE__ = cmd.copy() cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' ) if strategy != "FULL_SHARD": continue SCREAMING_SNAKE_CASE__ = len(__A ) for state_dict_type in FSDP_STATE_DICT_TYPE: SCREAMING_SNAKE_CASE__ = cmd_config[:state_dict_config_index] cmd_config.append(f'''--fsdp_state_dict_type={state_dict_type}''' ) cmd_config.extend( [ self.test_file_path, f'''--output_dir={self.tmpdir}''', """--partial_train_epoch=1""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__A , env=os.environ.copy() ) SCREAMING_SNAKE_CASE__ = cmd_config[:-1] SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdir , """epoch_0""" ) cmd_config.extend( [ f'''--resume_from_checkpoint={resume_from_checkpoint}''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__A , env=os.environ.copy() ) def _snake_case ( self :Tuple ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_peak_memory_usage.py""" ) SCREAMING_SNAKE_CASE__ = [ """accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", ] for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items(): SCREAMING_SNAKE_CASE__ = cmd.copy() if "fp16" in spec: cmd_config.extend(["""--mixed_precision=fp16"""] ) else: cmd_config.extend(["""--mixed_precision=no"""] ) if "multi_gpu" in spec: continue else: cmd_config.extend(["""--use_fsdp"""] ) for i, 
strategy in enumerate(__A ): if strategy.lower() in spec: cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' ) break if "cpu_offload" in spec: cmd_config.append("""--fsdp_offload_params=True""" ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in spec: cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" ) elif policy == "SIZE_BASED_WRAP": cmd_config.append("""--fsdp_min_num_params=2000""" ) cmd_config.extend( [ self.test_file_path, f'''--output_dir={self.tmpdir}''', f'''--peak_memory_upper_bound={peak_mem_upper_bound}''', f'''--n_train={self.n_train}''', f'''--n_val={self.n_val}''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__A , env=os.environ.copy() )
6
0
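The text-classification task template above swaps its generic label feature for the dataset's concrete ClassLabel. A small sketch of the ClassLabel feature it expects; the label names here are made up for illustration:

from datasets import ClassLabel

labels = ClassLabel(names=["negative", "positive"])  # hypothetical label set
print(labels.num_classes)          # 2
print(labels.str2int("positive"))  # 1
print(labels.int2str(0))           # 'negative'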
from __future__ import annotations

RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """Sort a list of non-negative integers with least-significant-digit radix sort.

    >>> radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
    [2, 24, 45, 66, 75, 90, 170, 802]
    """
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list[int]] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
165
import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig _lowerCamelCase = logging.get_logger(__name__) # General docstring _lowerCamelCase = 'PoolFormerConfig' # Base docstring _lowerCamelCase = 'sail/poolformer_s12' _lowerCamelCase = [1, 512, 7, 7] # Image classification docstring _lowerCamelCase = 'sail/poolformer_s12' _lowerCamelCase = 'tabby, tabby cat' _lowerCamelCase = [ 'sail/poolformer_s12', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] , UpperCamelCase__: float = 0.0 , UpperCamelCase__: bool = False ): if drop_prob == 0.0 or not training: return input SCREAMING_SNAKE_CASE__ = 1 - drop_prob SCREAMING_SNAKE_CASE__ = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets SCREAMING_SNAKE_CASE__ = keep_prob + torch.rand(UpperCamelCase__ , dtype=input.dtype , device=input.device ) random_tensor.floor_() # binarize SCREAMING_SNAKE_CASE__ = input.div(UpperCamelCase__ ) * random_tensor return output class UpperCamelCase_ ( nn.Module ): def __init__( self :Optional[Any] , __A :Optional[float] = None ) -> None: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = drop_prob def _snake_case ( self :Any , __A :torch.Tensor ) -> torch.Tensor: """simple docstring""" return drop_path(__A , self.drop_prob , self.training ) def _snake_case ( self :Dict ) -> str: """simple docstring""" return "p={}".format(self.drop_prob ) class UpperCamelCase_ ( nn.Module ): def __init__( self :Dict , __A :Optional[Any] , __A :Dict , __A :List[str] , __A :Optional[Any] , __A :Tuple , __A :Optional[Any]=None ) -> Union[str, Any]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = patch_size if isinstance(__A , collections.abc.Iterable ) else (patch_size, patch_size) SCREAMING_SNAKE_CASE__ = stride if isinstance(__A , collections.abc.Iterable ) else (stride, stride) SCREAMING_SNAKE_CASE__ = padding if isinstance(__A , collections.abc.Iterable ) else (padding, padding) SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , kernel_size=__A , stride=__A , padding=__A ) SCREAMING_SNAKE_CASE__ = norm_layer(__A ) if norm_layer else nn.Identity() def _snake_case ( self :Dict , __A :Union[str, Any] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.projection(__A ) SCREAMING_SNAKE_CASE__ = self.norm(__A ) return embeddings class UpperCamelCase_ ( nn.GroupNorm ): def __init__( self :Dict , __A :Tuple , **__A :Union[str, Any] ) -> Dict: """simple docstring""" super().__init__(1 , __A , **__A ) class UpperCamelCase_ ( nn.Module ): def __init__( self :List[str] , __A :Optional[int] ) -> Any: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = nn.AvgPoolad(__A , stride=1 , padding=pool_size // 2 , count_include_pad=__A ) def _snake_case ( self :Any , __A :Optional[Any] ) -> Optional[Any]: """simple docstring""" return self.pool(__A ) - hidden_states class UpperCamelCase_ ( nn.Module ): def __init__( self :Optional[Any] , __A :Tuple , __A :Dict , __A :int , 
__A :Any ) -> str: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , 1 ) SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , 1 ) SCREAMING_SNAKE_CASE__ = PoolFormerDropPath(__A ) if isinstance(config.hidden_act , __A ): SCREAMING_SNAKE_CASE__ = ACTaFN[config.hidden_act] else: SCREAMING_SNAKE_CASE__ = config.hidden_act def _snake_case ( self :Union[str, Any] , __A :Optional[int] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.conva(__A ) SCREAMING_SNAKE_CASE__ = self.act_fn(__A ) SCREAMING_SNAKE_CASE__ = self.drop(__A ) SCREAMING_SNAKE_CASE__ = self.conva(__A ) SCREAMING_SNAKE_CASE__ = self.drop(__A ) return hidden_states class UpperCamelCase_ ( nn.Module ): def __init__( self :Any , __A :str , __A :List[str] , __A :Tuple , __A :Dict , __A :Union[str, Any] , __A :int ) -> Optional[int]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = PoolFormerPooling(__A ) SCREAMING_SNAKE_CASE__ = PoolFormerOutput(__A , __A , __A , __A ) SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(__A ) SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(__A ) # Useful for training neural nets SCREAMING_SNAKE_CASE__ = PoolFormerDropPath(__A ) if drop_path > 0.0 else nn.Identity() SCREAMING_SNAKE_CASE__ = config.use_layer_scale if config.use_layer_scale: SCREAMING_SNAKE_CASE__ = nn.Parameter( config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A ) SCREAMING_SNAKE_CASE__ = nn.Parameter( config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A ) def _snake_case ( self :Optional[Any] , __A :Optional[int] ) -> str: """simple docstring""" if self.use_layer_scale: SCREAMING_SNAKE_CASE__ = self.pooling(self.before_norm(__A ) ) SCREAMING_SNAKE_CASE__ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection SCREAMING_SNAKE_CASE__ = hidden_states + self.drop_path(__A ) SCREAMING_SNAKE_CASE__ = () SCREAMING_SNAKE_CASE__ = self.output(self.after_norm(__A ) ) SCREAMING_SNAKE_CASE__ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection SCREAMING_SNAKE_CASE__ = hidden_states + self.drop_path(__A ) SCREAMING_SNAKE_CASE__ = (output,) + outputs return outputs else: SCREAMING_SNAKE_CASE__ = self.drop_path(self.pooling(self.before_norm(__A ) ) ) # First residual connection SCREAMING_SNAKE_CASE__ = pooling_output + hidden_states SCREAMING_SNAKE_CASE__ = () # Second residual connection inside the PoolFormerOutput block SCREAMING_SNAKE_CASE__ = self.drop_path(self.output(self.after_norm(__A ) ) ) SCREAMING_SNAKE_CASE__ = hidden_states + layer_output SCREAMING_SNAKE_CASE__ = (output,) + outputs return outputs class UpperCamelCase_ ( nn.Module ): def __init__( self :Union[str, Any] , __A :List[Any] ) -> Union[str, Any]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = config # stochastic depth decay rule SCREAMING_SNAKE_CASE__ = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings SCREAMING_SNAKE_CASE__ = [] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) SCREAMING_SNAKE_CASE__ = nn.ModuleList(__A ) # Transformer blocks SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = 0 for i in range(config.num_encoder_blocks ): # each block consists of layers 
SCREAMING_SNAKE_CASE__ = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( __A , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(__A ) ) SCREAMING_SNAKE_CASE__ = nn.ModuleList(__A ) def _snake_case ( self :str , __A :Tuple , __A :Dict=False , __A :Tuple=True ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = () if output_hidden_states else None SCREAMING_SNAKE_CASE__ = pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = layers # Get patch embeddings from hidden_states SCREAMING_SNAKE_CASE__ = embedding_layer(__A ) # Send the embeddings through the blocks for _, blk in enumerate(__A ): SCREAMING_SNAKE_CASE__ = blk(__A ) SCREAMING_SNAKE_CASE__ = layer_outputs[0] if output_hidden_states: SCREAMING_SNAKE_CASE__ = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=__A , hidden_states=__A ) class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = PoolFormerConfig lowerCamelCase_ = "poolformer" lowerCamelCase_ = "pixel_values" lowerCamelCase_ = True def _snake_case ( self :Optional[Any] , __A :Tuple ) -> Dict: """simple docstring""" if isinstance(__A , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__A , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def _snake_case ( self :str , __A :Optional[Any] , __A :Union[str, Any]=False ) -> Any: """simple docstring""" if isinstance(__A , __A ): SCREAMING_SNAKE_CASE__ = value _lowerCamelCase = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' _lowerCamelCase = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n' @add_start_docstrings( "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." 
, UpperCamelCase__ , ) class UpperCamelCase_ ( UpperCamelCase__ ): def __init__( self :Union[str, Any] , __A :Any ) -> int: """simple docstring""" super().__init__(__A ) SCREAMING_SNAKE_CASE__ = config SCREAMING_SNAKE_CASE__ = PoolFormerEncoder(__A ) # Initialize weights and apply final processing self.post_init() def _snake_case ( self :Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(__A ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__A , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _snake_case ( self :Dict , __A :Optional[torch.FloatTensor] = None , __A :Optional[bool] = None , __A :Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]: """simple docstring""" SCREAMING_SNAKE_CASE__ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) SCREAMING_SNAKE_CASE__ = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("""You have to specify pixel_values""" ) SCREAMING_SNAKE_CASE__ = self.encoder( __A , output_hidden_states=__A , return_dict=__A , ) SCREAMING_SNAKE_CASE__ = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=__A , hidden_states=encoder_outputs.hidden_states , ) class UpperCamelCase_ ( nn.Module ): def __init__( self :int , __A :Optional[int] ) -> Tuple: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = nn.Linear(config.hidden_size , config.hidden_size ) def _snake_case ( self :List[Any] , __A :Dict ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.dense(__A ) return output @add_start_docstrings( "\n PoolFormer Model transformer with an image classification head on top\n " , UpperCamelCase__ , ) class UpperCamelCase_ ( UpperCamelCase__ ): def __init__( self :str , __A :Union[str, Any] ) -> int: """simple docstring""" super().__init__(__A ) SCREAMING_SNAKE_CASE__ = config.num_labels SCREAMING_SNAKE_CASE__ = PoolFormerModel(__A ) # Final norm SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head SCREAMING_SNAKE_CASE__ = ( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__A ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _snake_case ( self :int , __A :Optional[torch.FloatTensor] = None , __A :Optional[torch.LongTensor] = None , __A :Optional[bool] = None , __A :Optional[bool] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]: """simple docstring""" SCREAMING_SNAKE_CASE__ = return_dict if return_dict is not None else self.config.use_return_dict SCREAMING_SNAKE_CASE__ = self.poolformer( __A , output_hidden_states=__A , return_dict=__A , ) SCREAMING_SNAKE_CASE__ = outputs[0] SCREAMING_SNAKE_CASE__ = self.classifier(self.norm(__A ).mean([-2, -1] ) ) SCREAMING_SNAKE_CASE__ = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: SCREAMING_SNAKE_CASE__ = """regression""" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): SCREAMING_SNAKE_CASE__ = 
"""single_label_classification""" else: SCREAMING_SNAKE_CASE__ = """multi_label_classification""" if self.config.problem_type == "regression": SCREAMING_SNAKE_CASE__ = MSELoss() if self.num_labels == 1: SCREAMING_SNAKE_CASE__ = loss_fct(logits.squeeze() , labels.squeeze() ) else: SCREAMING_SNAKE_CASE__ = loss_fct(__A , __A ) elif self.config.problem_type == "single_label_classification": SCREAMING_SNAKE_CASE__ = CrossEntropyLoss() SCREAMING_SNAKE_CASE__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": SCREAMING_SNAKE_CASE__ = BCEWithLogitsLoss() SCREAMING_SNAKE_CASE__ = loss_fct(__A , __A ) if not return_dict: SCREAMING_SNAKE_CASE__ = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=__A , logits=__A , hidden_states=outputs.hidden_states )
6
0
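The PoolFormer code above relies on drop_path (stochastic depth): with probability drop_prob a sample's entire residual branch is zeroed, and survivors are rescaled by 1/keep_prob so the expected value is unchanged. A minimal standalone sketch mirroring the tensor shapes used above:

import torch


def drop_path(x, drop_prob: float, training: bool):
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1 - drop_prob
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)  # one mask entry per sample
    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
    random_tensor.floor_()  # binarize: 0 drops the sample, 1 keeps it
    return x.div(keep_prob) * random_tensor


x = torch.ones(4, 3)
print(drop_path(x, drop_prob=0.5, training=True))  # dropped rows are 0, kept rows are 2.0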
'''simple docstring''' # Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position __a: Optional[int] = """2.13.1""" import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse("""3.7"""): raise ImportWarning( """To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.""" ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( """To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n""" """If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.""" ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip from datasets.utils import download_manager as _deprecated_download_manager # isort:skip __a: Dict = concatenate_datasets __a: int = DownloadConfig __a: Dict = DownloadManager __a: Optional[Any] = DownloadMode __a: Tuple = DownloadConfig __a: Tuple = DownloadMode __a: List[str] = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
152
import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ ( UpperCamelCase__ ): def __init__( self :Union[str, Any] , __A :Optional[int] , __A :Tuple=13 , __A :Dict=7 , __A :Dict=True , __A :str=True , __A :Optional[Any]=True , __A :Optional[Any]=True , __A :Optional[Any]=True , __A :Any=False , __A :Dict=False , __A :Any=False , __A :Tuple=2 , __A :Dict=99 , __A :Optional[Any]=0 , __A :List[str]=32 , __A :Optional[int]=5 , __A :Dict=4 , __A :List[str]=0.1 , __A :Union[str, Any]=0.1 , __A :Tuple=512 , __A :Any=12 , __A :Optional[int]=2 , __A :Union[str, Any]=0.0_2 , __A :Dict=3 , __A :Optional[int]=4 , __A :Any="last" , __A :List[Any]=None , __A :Any=None , ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ = parent SCREAMING_SNAKE_CASE__ = batch_size SCREAMING_SNAKE_CASE__ = seq_length SCREAMING_SNAKE_CASE__ = is_training SCREAMING_SNAKE_CASE__ = use_input_lengths SCREAMING_SNAKE_CASE__ = use_token_type_ids SCREAMING_SNAKE_CASE__ = use_labels SCREAMING_SNAKE_CASE__ = gelu_activation SCREAMING_SNAKE_CASE__ = sinusoidal_embeddings SCREAMING_SNAKE_CASE__ = causal SCREAMING_SNAKE_CASE__ = asm SCREAMING_SNAKE_CASE__ = n_langs SCREAMING_SNAKE_CASE__ = vocab_size SCREAMING_SNAKE_CASE__ = n_special SCREAMING_SNAKE_CASE__ = hidden_size SCREAMING_SNAKE_CASE__ = num_hidden_layers SCREAMING_SNAKE_CASE__ = num_attention_heads SCREAMING_SNAKE_CASE__ = hidden_dropout_prob SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ = max_position_embeddings SCREAMING_SNAKE_CASE__ = type_vocab_size SCREAMING_SNAKE_CASE__ = type_sequence_label_size SCREAMING_SNAKE_CASE__ = initializer_range SCREAMING_SNAKE_CASE__ = num_labels SCREAMING_SNAKE_CASE__ = num_choices SCREAMING_SNAKE_CASE__ = summary_type SCREAMING_SNAKE_CASE__ = use_proj SCREAMING_SNAKE_CASE__ = scope def _snake_case ( self :Optional[Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE__ = None if self.use_input_lengths: SCREAMING_SNAKE_CASE__ = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length SCREAMING_SNAKE_CASE__ = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None if self.use_labels: SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , 2 ).float() SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_choices ) 
SCREAMING_SNAKE_CASE__ = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _snake_case ( self :List[str] ) -> Optional[int]: """simple docstring""" return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def _snake_case ( self :Tuple , __A :str , __A :int , __A :Optional[int] , __A :Any , __A :Union[str, Any] , __A :Optional[int] , __A :Union[str, Any] , __A :Union[str, Any] , __A :str , ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertModel(config=__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A , lengths=__A , langs=__A ) SCREAMING_SNAKE_CASE__ = model(__A , langs=__A ) SCREAMING_SNAKE_CASE__ = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case ( self :str , __A :Any , __A :str , __A :Union[str, Any] , __A :Optional[Any] , __A :Optional[int] , __A :Any , __A :Union[str, Any] , __A :Optional[Any] , __A :Union[str, Any] , ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertWithLMHeadModel(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self :Tuple , __A :Union[str, Any] , __A :Optional[Any] , __A :Dict , __A :Dict , __A :Union[str, Any] , __A :List[str] , __A :Optional[int] , __A :int , __A :str , ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertForQuestionAnsweringSimple(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A ) SCREAMING_SNAKE_CASE__ = model(__A , start_positions=__A , end_positions=__A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case ( self :List[str] , __A :Any , __A :int , __A :Tuple , __A :Optional[Any] , __A :Tuple , __A :Optional[int] , __A :str , __A :int , __A :str , ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertForQuestionAnswering(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A ) SCREAMING_SNAKE_CASE__ = model( __A , start_positions=__A , end_positions=__A , cls_index=__A , is_impossible=__A , p_mask=__A , ) SCREAMING_SNAKE_CASE__ = model( __A , start_positions=__A , end_positions=__A , cls_index=__A , is_impossible=__A , ) ((SCREAMING_SNAKE_CASE__) , ) = result_with_labels.to_tuple() SCREAMING_SNAKE_CASE__ = model(__A , start_positions=__A , end_positions=__A ) ((SCREAMING_SNAKE_CASE__) , ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, 
model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def _snake_case ( self :Optional[int] , __A :str , __A :Optional[int] , __A :Tuple , __A :Dict , __A :List[str] , __A :Tuple , __A :List[str] , __A :Dict , __A :List[str] , ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertForSequenceClassification(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A ) SCREAMING_SNAKE_CASE__ = model(__A , labels=__A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _snake_case ( self :Optional[Any] , __A :Optional[Any] , __A :Optional[Any] , __A :List[str] , __A :Optional[Any] , __A :int , __A :Tuple , __A :Optional[int] , __A :Union[str, Any] , __A :Dict , ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.num_labels SCREAMING_SNAKE_CASE__ = FlaubertForTokenClassification(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A , attention_mask=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case ( self :str , __A :Any , __A :Tuple , __A :List[str] , __A :Tuple , __A :Any , __A :int , __A :Dict , __A :List[str] , __A :Tuple , ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.num_choices SCREAMING_SNAKE_CASE__ = FlaubertForMultipleChoice(config=__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ = model( __A , attention_mask=__A , token_type_ids=__A , labels=__A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _snake_case ( self :Union[str, Any] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) = config_and_inputs SCREAMING_SNAKE_CASE__ = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class UpperCamelCase_ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): lowerCamelCase_ = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) lowerCamelCase_ = ( { "feature-extraction": FlaubertModel, "fill-mask": FlaubertWithLMHeadModel, "question-answering": FlaubertForQuestionAnsweringSimple, "text-classification": FlaubertForSequenceClassification, "token-classification": FlaubertForTokenClassification, "zero-shot": FlaubertForSequenceClassification, } if is_torch_available() else {} ) def 
_snake_case ( self :Any , __A :Optional[int] , __A :Optional[int] , __A :Dict , __A :List[Any] , __A :Tuple ) -> str: """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("""Fast""" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _snake_case ( self :Tuple , __A :List[str] , __A :Optional[int] , __A :Dict=False ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = super()._prepare_for_class(__A , __A , return_labels=__A ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": SCREAMING_SNAKE_CASE__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__A ) SCREAMING_SNAKE_CASE__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__A ) return inputs_dict def _snake_case ( self :str ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertModelTester(self ) SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=__A , emb_dim=37 ) def _snake_case ( self :int ) -> int: """simple docstring""" self.config_tester.run_common_tests() def _snake_case ( self :Optional[Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*__A ) def _snake_case ( self :Tuple ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*__A ) def _snake_case ( self :str ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*__A ) def _snake_case ( self :Union[str, Any] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*__A ) def _snake_case ( self :str ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*__A ) def _snake_case ( self :Any ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*__A ) def _snake_case ( self :Any ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*__A ) @slow def _snake_case ( self :Union[str, Any] ) -> List[str]: """simple docstring""" for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ = FlaubertModel.from_pretrained(__A ) self.assertIsNotNone(__A ) @slow @require_torch_gpu def _snake_case ( self :Tuple ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == FlaubertForMultipleChoice: return SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = model_class(config=__A ) SCREAMING_SNAKE_CASE__ = self._prepare_for_class(__A , __A ) SCREAMING_SNAKE_CASE__ = torch.jit.trace( __A , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(__A , os.path.join(__A , """traced_model.pt""" ) ) SCREAMING_SNAKE_CASE__ = torch.jit.load(os.path.join(__A , """traced_model.pt""" ) , map_location=__A ) loaded(inputs_dict["""input_ids"""].to(__A ) , inputs_dict["""attention_mask"""].to(__A ) ) @require_torch class UpperCamelCase_ ( unittest.TestCase ): @slow def _snake_case ( self :Dict ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" ) SCREAMING_SNAKE_CASE__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) with torch.no_grad(): SCREAMING_SNAKE_CASE__ = model(__A )[0] SCREAMING_SNAKE_CASE__ = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __A ) SCREAMING_SNAKE_CASE__ = torch.tensor( [[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=1E-4 ) )
6
0
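The datasets __init__ above fails fast at import time when required dependencies are too old. The same check pattern, as a minimal sketch with illustrative version bounds:

import platform

from packaging import version

if version.parse(platform.python_version()) < version.parse("3.7"):
    raise ImportWarning("Python>=3.7 is required.")

# version.parse compares release segments numerically, not lexically
assert version.parse("10.0") > version.parse("9.0")
print("version checks passed")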
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ = logging.get_logger(__name__) lowercase__ = { "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json", } class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ): _lowerCAmelCase = "open-llama" def __init__(self , _lowercase=100000 , _lowercase=4096 , _lowercase=11008 , _lowercase=32 , _lowercase=32 , _lowercase="silu" , _lowercase=2048 , _lowercase=0.02 , _lowercase=1e-6 , _lowercase=True , _lowercase=0 , _lowercase=1 , _lowercase=2 , _lowercase=False , _lowercase=True , _lowercase=0.1 , _lowercase=0.1 , _lowercase=True , _lowercase=True , _lowercase=None , **_lowercase , ): '''simple docstring''' __a : List[str] = vocab_size __a : Optional[Any] = max_position_embeddings __a : int = hidden_size __a : int = intermediate_size __a : Optional[Any] = num_hidden_layers __a : List[Any] = num_attention_heads __a : int = hidden_act __a : Any = initializer_range __a : str = rms_norm_eps __a : List[Any] = use_cache __a : List[str] = kwargs.pop( """use_memorry_efficient_attention""" , __A ) __a : int = hidden_dropout_prob __a : str = attention_dropout_prob __a : Optional[int] = use_stable_embedding __a : Dict = shared_input_output_embedding __a : List[str] = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , tie_word_embeddings=__A , **__A , ) def lowerCAmelCase__(self ): '''simple docstring''' if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __A ) or len(self.rope_scaling ) != 2: raise ValueError( """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """ F'''got {self.rope_scaling}''' ) __a : str = self.rope_scaling.get("""type""" , __A ) __a : Union[str, Any] = self.rope_scaling.get("""factor""" , __A ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(__A , __A ) or rope_scaling_factor <= 1.0: raise ValueError(F'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
581
from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] , UpperCamelCase__: str , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Union[str, Any] ): for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})''' def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Any , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Any , UpperCamelCase__: List[str] , UpperCamelCase__: Tuple=True ): model.train() SCREAMING_SNAKE_CASE__ = model(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = F.mse_loss(UpperCamelCase__ , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple , UpperCamelCase__: List[Any]=False ): set_seed(42 ) SCREAMING_SNAKE_CASE__ = RegressionModel() SCREAMING_SNAKE_CASE__ = deepcopy(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = RegressionDataset(length=80 ) SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 ) model.to(accelerator.device ) if sched: SCREAMING_SNAKE_CASE__ = AdamW(params=model.parameters() , lr=1e-3 ) SCREAMING_SNAKE_CASE__ = AdamW(params=ddp_model.parameters() , lr=1e-3 ) SCREAMING_SNAKE_CASE__ = LambdaLR(UpperCamelCase__ , lr_lambda=lambda UpperCamelCase__ : epoch**0.6_5 ) SCREAMING_SNAKE_CASE__ = LambdaLR(UpperCamelCase__ , lr_lambda=lambda UpperCamelCase__ : epoch**0.6_5 ) # Make a copy of `model` if sched: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple ): # Test when on a single CPU or GPU that the context manager does nothing SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ ) # Use a single batch SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = next(iter(UpperCamelCase__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCamelCase__ , 
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: # Sync grads step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )] def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] ): # Test on distributed setup that context manager behaves properly SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ ) # Use a single batch SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = next(iter(UpperCamelCase__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: # Sync grads step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )] def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int=False , UpperCamelCase__: Union[str, Any]=False ): SCREAMING_SNAKE_CASE__ = Accelerator( split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ ) for iteration, batch in enumerate(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = batch.values() # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) ) 
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Do "gradient accumulation" (noop) with accelerator.accumulate(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(UpperCamelCase__ ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )] GradientState._reset_state() def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple=False , UpperCamelCase__: List[str]=False ): SCREAMING_SNAKE_CASE__ = Accelerator( split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ , UpperCamelCase__ ) for iteration, batch in enumerate(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = batch.values() # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(UpperCamelCase__ )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n''' SCREAMING_SNAKE_CASE__ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(UpperCamelCase__ )) if accelerator.num_processes > 1: check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) GradientState._reset_state() def SCREAMING_SNAKE_CASE__ ( ): SCREAMING_SNAKE_CASE__ = Accelerator() SCREAMING_SNAKE_CASE__ = 
RegressionDataset(length=80 ) SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 ) SCREAMING_SNAKE_CASE__ = RegressionDataset(length=96 ) SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(UpperCamelCase__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ ) if iteration < len(UpperCamelCase__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(UpperCamelCase__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ ) if batch_num < len(UpperCamelCase__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def SCREAMING_SNAKE_CASE__ ( ): SCREAMING_SNAKE_CASE__ = Accelerator() SCREAMING_SNAKE_CASE__ = accelerator.state if state.local_process_index == 0: print("""**Test `accumulate` gradient accumulation with dataloader break**""" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("""**Test NOOP `no_sync` context manager**""" ) test_noop_sync(UpperCamelCase__ ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("""**Test Distributed `no_sync` context manager**""" ) test_distributed_sync(UpperCamelCase__ ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation, """ , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation(UpperCamelCase__ , UpperCamelCase__ ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation_with_opt_and_scheduler(UpperCamelCase__ , UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
6
0
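A minimal, runnable sketch of the pattern the gradient-accumulation tests above exercise, assuming only torch and accelerate are installed; the toy model and data are invented for illustration:

import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
dataset = TensorDataset(torch.randn(32, 4), torch.randn(32, 1))
dataloader = DataLoader(dataset, batch_size=8)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for x, y in dataloader:
    # inside `accumulate`, gradient sync and the optimizer step are skipped
    # on non-boundary batches; `backward` scales the loss accordingly
    with accelerator.accumulate(model):
        loss = torch.nn.functional.mse_loss(model(x), y)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()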
from __future__ import annotations

from collections.abc import Iterator
from typing import Generic, TypeVar

T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """Stack backed by a singly linked list; push, pop and peek are O(1)."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
381
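A brief usage example for the linked-list stack above (class names as restored):

stack = LinkedStack()
for item in (1, 2, 3):
    stack.push(item)
print(stack)         # 3->2->1
print(stack.peek())  # 3
print(stack.pop())   # 3
print(len(stack))    # 2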
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ImageTextProcessor(ProcessorMixin):
    # the original class name is not recoverable from this dump, so a
    # descriptive placeholder is used
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
6
0
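A hypothetical usage sketch for the processor above; the checkpoint name is invented, and any checkpoint that ships both an image processor and a tokenizer would do:

from PIL import Image
from transformers import AutoImageProcessor, AutoTokenizer

# "some-org/vision-text-checkpoint" is a placeholder, not a real repo
image_processor = AutoImageProcessor.from_pretrained("some-org/vision-text-checkpoint")
tokenizer = AutoTokenizer.from_pretrained("some-org/vision-text-checkpoint")
processor = ImageTextProcessor(image_processor, tokenizer)

inputs = processor(text="a photo", images=Image.new("RGB", (224, 224)), return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']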
"""simple docstring""" def a ( __snake_case : list, __snake_case : list, __snake_case : int ): '''simple docstring''' UpperCAmelCase_ :Dict = len(UpperCamelCase__ ) UpperCAmelCase_ :Union[str, Any] = [[0] * n for i in range(UpperCamelCase__ )] for i in range(UpperCamelCase__ ): UpperCAmelCase_ :List[Any] = y_points[i] for i in range(2, UpperCamelCase__ ): for j in range(UpperCamelCase__, UpperCamelCase__ ): UpperCAmelCase_ :Any = ( (xa - x_points[j - i + 1]) * q[j][i - 1] - (xa - x_points[j]) * q[j - 1][i - 1] ) / (x_points[j] - x_points[j - i + 1]) return [q[n - 1][n - 1], q] if __name__ == "__main__": import doctest doctest.testmod()
608
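For example, four samples of y = x**2 determine the cubic interpolant exactly, so evaluating at 5 recovers 25:

value, table = neville_interpolate([1, 2, 3, 4], [1, 4, 9, 16], 5)
print(value)  # 25.0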
def find_min(arr: list) -> int:
    """Return the minimum difference between the sums of a two-way partition of `arr`."""
    n = len(arr)
    s = sum(arr)
    # dp[i][j] is True when some subset of the first i items sums to j
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # carry over reachability without item i, then try including it
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    # the best split leaves one side as close to s/2 as possible
    for j in range(s // 2, -1, -1):
        if dp[n][j]:
            diff = s - 2 * j
            break
    return diff
6
0
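A quick check of the partition routine above: {1, 6, 11, 5} splits into {1, 5, 6} and {11}, so the minimum difference is 1:

print(find_min([1, 6, 11, 5]))  # 1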
"""simple docstring""" from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), F'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), F'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})' def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple=True ): '''simple docstring''' model.train() __lowerCamelCase : Dict =model(UpperCamelCase__ ) __lowerCamelCase : str =F.mse_loss(UpperCamelCase__ , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(UpperCamelCase__ ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[Any]=False ): '''simple docstring''' set_seed(42 ) __lowerCamelCase : Union[str, Any] =RegressionModel() __lowerCamelCase : Optional[Any] =deepcopy(UpperCamelCase__ ) __lowerCamelCase : str =RegressionDataset(length=80 ) __lowerCamelCase : str =DataLoader(UpperCamelCase__ , batch_size=16 ) model.to(accelerator.device ) if sched: __lowerCamelCase : str =AdamW(params=model.parameters() , lr=1E-3 ) __lowerCamelCase : List[str] =AdamW(params=ddp_model.parameters() , lr=1E-3 ) __lowerCamelCase : str =LambdaLR(UpperCamelCase__ , lr_lambda=lambda SCREAMING_SNAKE_CASE : epoch**0.65 ) __lowerCamelCase : Union[str, Any] =LambdaLR(UpperCamelCase__ , lr_lambda=lambda SCREAMING_SNAKE_CASE : epoch**0.65 ) # Make a copy of `model` if sched: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Dict =accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: __lowerCamelCase , __lowerCamelCase : Optional[Any] =accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[int] =get_training_setup(UpperCamelCase__ ) # Use a single batch __lowerCamelCase , __lowerCamelCase : int =next(iter(UpperCamelCase__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model __lowerCamelCase , __lowerCamelCase : Optional[int] =accelerator.gather((ddp_input, ddp_target) ) __lowerCamelCase , __lowerCamelCase : Tuple =input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial 
ground truth step in non "DDP" step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: # Sync grads step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) __lowerCamelCase : Optional[Any] =ddp_input[torch.randperm(len(UpperCamelCase__ ) )] def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Tuple =get_training_setup(UpperCamelCase__ ) # Use a single batch __lowerCamelCase , __lowerCamelCase : Tuple =next(iter(UpperCamelCase__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model __lowerCamelCase , __lowerCamelCase : Optional[Any] =accelerator.gather((ddp_input, ddp_target) ) __lowerCamelCase , __lowerCamelCase : Any =input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: # Sync grads step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) __lowerCamelCase : Dict =ddp_input[torch.randperm(len(UpperCamelCase__ ) )] def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : int=False , SCREAMING_SNAKE_CASE : Union[str, Any]=False ): '''simple docstring''' __lowerCamelCase : List[str] =Accelerator( split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[Any] =get_training_setup(UpperCamelCase__ ) for iteration, batch in enumerate(UpperCamelCase__ ): __lowerCamelCase , __lowerCamelCase : Any =batch.values() # Gather the distributed inputs and targs for the base model __lowerCamelCase , __lowerCamelCase : List[Any] =accelerator.gather((ddp_input, ddp_target) 
) __lowerCamelCase , __lowerCamelCase : Optional[Any] =input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Do "gradient accumulation" (noop) with accelerator.accumulate(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(UpperCamelCase__ ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) __lowerCamelCase : List[Any] =ddp_input[torch.randperm(len(UpperCamelCase__ ) )] GradientState._reset_state() def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : Tuple=False , SCREAMING_SNAKE_CASE : List[str]=False ): '''simple docstring''' __lowerCamelCase : Any =Accelerator( split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[int] =get_training_setup(UpperCamelCase__ , UpperCamelCase__ ) for iteration, batch in enumerate(UpperCamelCase__ ): __lowerCamelCase , __lowerCamelCase : Any =batch.values() # Gather the distributed inputs and targs for the base model __lowerCamelCase , __lowerCamelCase : Tuple =accelerator.gather((ddp_input, ddp_target) ) __lowerCamelCase , __lowerCamelCase : List[Any] =input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(UpperCamelCase__ )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), F'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n' __lowerCamelCase : Union[str, Any] =(((iteration + 1) % 2) == 0) or ((iteration + 1) == len(UpperCamelCase__ )) if accelerator.num_processes > 1: check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) GradientState._reset_state() def lowerCAmelCase_ ( ): '''simple docstring''' __lowerCamelCase : Optional[int] =Accelerator() 
__lowerCamelCase : Dict =RegressionDataset(length=80 ) __lowerCamelCase : Optional[int] =DataLoader(UpperCamelCase__ , batch_size=16 ) __lowerCamelCase : int =RegressionDataset(length=96 ) __lowerCamelCase : List[str] =DataLoader(UpperCamelCase__ , batch_size=16 ) __lowerCamelCase , __lowerCamelCase : int =accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(UpperCamelCase__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ ) if iteration < len(UpperCamelCase__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(UpperCamelCase__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ ) if batch_num < len(UpperCamelCase__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def lowerCAmelCase_ ( ): '''simple docstring''' __lowerCamelCase : List[str] =Accelerator() __lowerCamelCase : Tuple =accelerator.state if state.local_process_index == 0: print('''**Test `accumulate` gradient accumulation with dataloader break**''' ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print('''**Test NOOP `no_sync` context manager**''' ) test_noop_sync(UpperCamelCase__ ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print('''**Test Distributed `no_sync` context manager**''' ) test_distributed_sync(UpperCamelCase__ ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation, ''' , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , ) test_gradient_accumulation(UpperCamelCase__ , UpperCamelCase__ ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version('''<''' , '''2.0''' ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , '''`split_batches=False`, `dispatch_batches=False`**''' , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , ) test_gradient_accumulation_with_opt_and_scheduler(UpperCamelCase__ , UpperCamelCase__ ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' main() if __name__ == "__main__": main()
179
def kinetic_energy(mass: float, velocity: float) -> float:
    """Compute kinetic energy, 0.5 * m * v**2; the sign of the velocity is irrelevant."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
6
0
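For example:

print(kinetic_energy(10, 10))  # 500.0
print(kinetic_energy(2, -10))  # 100.0, the sign of the velocity drops out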
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """
    Project Euler 65: digit sum of the numerator of the `max_n`-th convergent
    of the continued fraction for e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...].
    """
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"{solution() = }")
428
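A quick check against the worked example in the problem statement, which gives 1457/536 as the tenth convergent of e:

print(sum_digits(1457))  # 17
print(solution(10))      # 17
print(solution())        # 272 for the hundredth convergent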
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
6
0
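A brief sketch of the classmethod above using the released transformers package; BERT is an arbitrary choice of encoder and decoder:

from transformers import BertConfig, EncoderDecoderConfig

config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
print(config.decoder.is_decoder, config.decoder.add_cross_attention)  # True True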
'''simple docstring''' import argparse import struct import unittest class UpperCAmelCase__ : """simple docstring""" def __init__( self : Dict ,_a : bytes ): '''simple docstring''' _a : Optional[int] = data # Initialize hash values _a : Dict = [ 0X6a09_e667, 0Xbb67_ae85, 0X3c6e_f372, 0Xa54f_f53a, 0X510e_527f, 0X9b05_688c, 0X1f83_d9ab, 0X5be0_cd19, ] # Initialize round constants _a : int = [ 0X428a_2f98, 0X7137_4491, 0Xb5c0_fbcf, 0Xe9b5_dba5, 0X3956_c25b, 0X59f1_11f1, 0X923f_82a4, 0Xab1c_5ed5, 0Xd807_aa98, 0X1283_5b01, 0X2431_85be, 0X550c_7dc3, 0X72be_5d74, 0X80de_b1fe, 0X9bdc_06a7, 0Xc19b_f174, 0Xe49b_69c1, 0Xefbe_4786, 0X0fc1_9dc6, 0X240c_a1cc, 0X2de9_2c6f, 0X4a74_84aa, 0X5cb0_a9dc, 0X76f9_88da, 0X983e_5152, 0Xa831_c66d, 0Xb003_27c8, 0Xbf59_7fc7, 0Xc6e0_0bf3, 0Xd5a7_9147, 0X06ca_6351, 0X1429_2967, 0X27b7_0a85, 0X2e1b_2138, 0X4d2c_6dfc, 0X5338_0d13, 0X650a_7354, 0X766a_0abb, 0X81c2_c92e, 0X9272_2c85, 0Xa2bf_e8a1, 0Xa81a_664b, 0Xc24b_8b70, 0Xc76c_51a3, 0Xd192_e819, 0Xd699_0624, 0Xf40e_3585, 0X106a_a070, 0X19a4_c116, 0X1e37_6c08, 0X2748_774c, 0X34b0_bcb5, 0X391c_0cb3, 0X4ed8_aa4a, 0X5b9c_ca4f, 0X682e_6ff3, 0X748f_82ee, 0X78a5_636f, 0X84c8_7814, 0X8cc7_0208, 0X90be_fffa, 0Xa450_6ceb, 0Xbef9_a3f7, 0Xc671_78f2, ] _a : Union[str, Any] = self.preprocessing(self.data ) self.final_hash() @staticmethod def __lowercase ( _a : bytes ): '''simple docstring''' _a : Any = B'\x80' + (B'\x00' * (63 - (len(__A ) + 8) % 64)) _a : Optional[int] = struct.pack('>Q' ,(len(__A ) * 8) ) return data + padding + big_endian_integer def __lowercase ( self : Dict ): '''simple docstring''' _a : Tuple = [ self.preprocessed_data[x : x + 64] for x in range(0 ,len(self.preprocessed_data ) ,64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers _a : List[Any] = list(struct.unpack('>16L' ,__A ) ) # add 48 0-ed integers words += [0] * 48 _a, _a, _a, _a, _a, _a, _a, _a : Optional[int] = self.hashes for index in range(0 ,64 ): if index > 15: # modify the zero-ed indexes at the end of the array _a : Dict = ( self.ror(words[index - 15] ,7 ) ^ self.ror(words[index - 15] ,18 ) ^ (words[index - 15] >> 3) ) _a : List[str] = ( self.ror(words[index - 2] ,17 ) ^ self.ror(words[index - 2] ,19 ) ^ (words[index - 2] >> 10) ) _a : List[Any] = ( words[index - 16] + sa + words[index - 7] + sa ) % 0X1_0000_0000 # Compression _a : Any = self.ror(__A ,6 ) ^ self.ror(__A ,11 ) ^ self.ror(__A ,25 ) _a : Optional[Any] = (e & f) ^ ((~e & 0Xffff_ffff) & g) _a : List[Any] = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0X1_0000_0000 _a : str = self.ror(__A ,2 ) ^ self.ror(__A ,13 ) ^ self.ror(__A ,22 ) _a : Any = (a & b) ^ (a & c) ^ (b & c) _a : Tuple = (sa + maj) % 0X1_0000_0000 _a, _a, _a, _a, _a, _a, _a, _a : List[Any] = ( g, f, e, ((d + tempa) % 0X1_0000_0000), c, b, a, ((tempa + tempa) % 0X1_0000_0000), ) _a : str = [a, b, c, d, e, f, g, h] # Modify final values _a : Tuple = [ ((element + mutated_hash_values[index]) % 0X1_0000_0000) for index, element in enumerate(self.hashes ) ] _a : int = ''.join([hex(__A )[2:].zfill(8 ) for value in self.hashes] ) def __lowercase ( self : Any ,_a : int ,_a : int ): '''simple docstring''' return 0Xffff_ffff & (value << (32 - rotations)) | (value >> rotations) class UpperCAmelCase__ ( unittest.TestCase ): """simple docstring""" def __lowercase ( self : Tuple ): '''simple docstring''' import hashlib _a : int = bytes('Test String' ,'utf-8' ) self.assertEqual(SHAaaa(__A ).hash ,hashlib.shaaaa(__A ).hexdigest() ) def UpperCAmelCase_ (): """simple docstring""" import 
doctest doctest.testmod() _a : Any = argparse.ArgumentParser() parser.add_argument( '-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , ) parser.add_argument( '-f' , '--file' , dest='input_file' , help='Hash contents of a file' ) _a : str = parser.parse_args() _a : List[Any] = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , 'rb' ) as f: _a : Dict = f.read() else: _a : List[Any] = bytes(UpperCamelCase__ , 'utf-8' ) print(SHAaaa(UpperCamelCase__ ).hash ) if __name__ == "__main__": main()
229
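The unit test embedded in the snippet above already compares the hand-rolled digest against the standard library; the same parity check as a two-liner, assuming the snippet's SHAaaa class (its name for SHA-256) stores the hex digest in .hash:

import hashlib

message = b"Test String"
assert SHAaaa(message).hash == hashlib.sha256(message).hexdigest()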
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # the dataclass is frozen, so the refreshed schema goes through __dict__
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
6
0
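A short usage sketch for the task template above, assuming the surrounding datasets package:

template = TextClassification(text_column="sentence", label_column="label")
print(template.column_mapping)  # {'sentence': 'text', 'label': 'labels'}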
'''simple docstring''' from pathlib import Path from typing import List from transformers import is_torch_available, is_vision_available from transformers.testing_utils import get_tests_dir, is_tool_test from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText if is_torch_available(): import torch if is_vision_available(): from PIL import Image __lowerCAmelCase =["text", "image", "audio"] def a ( _UpperCAmelCase ) -> Dict: """simple docstring""" a_ = [] for input_type in input_types: if input_type == "text": inputs.append('Text input' ) elif input_type == "image": inputs.append( Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' ).resize((5_1_2, 5_1_2) ) ) elif input_type == "audio": inputs.append(torch.ones(3_0_0_0 ) ) elif isinstance(UpperCamelCase__ , UpperCamelCase__ ): inputs.append(create_inputs(UpperCamelCase__ ) ) else: raise ValueError(F'''Invalid type requested: {input_type}''' ) return inputs def a ( _UpperCAmelCase ) -> Tuple: """simple docstring""" a_ = [] for output in outputs: if isinstance(UpperCamelCase__ , (str, AgentText) ): output_types.append('text' ) elif isinstance(UpperCamelCase__ , (Image.Image, AgentImage) ): output_types.append('image' ) elif isinstance(UpperCamelCase__ , (torch.Tensor, AgentAudio) ): output_types.append('audio' ) else: raise ValueError(F'''Invalid output: {output}''' ) return output_types @is_tool_test class _snake_case : """simple docstring""" def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: self.assertTrue(hasattr(self.tool , 'inputs' ) ) self.assertTrue(hasattr(self.tool , 'outputs' ) ) a_ = self.tool.inputs for _input in inputs: if isinstance(_input , __A ): for __input in _input: self.assertTrue(__input in authorized_types ) else: self.assertTrue(_input in authorized_types ) a_ = self.tool.outputs for _output in outputs: self.assertTrue(_output in authorized_types ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: a_ = create_inputs(self.tool.inputs ) a_ = self.tool(*__A ) # There is a single output if len(self.tool.outputs ) == 1: a_ = [outputs] self.assertListEqual(output_types(__A ) , self.tool.outputs ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: self.assertTrue(hasattr(self.tool , 'description' ) ) self.assertTrue(hasattr(self.tool , 'default_checkpoint' ) ) self.assertTrue(self.tool.description.startswith('This is a tool that' ) ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: a_ = create_inputs(self.tool.inputs ) a_ = self.tool(*__A ) if not isinstance(__A , __A ): a_ = [outputs] self.assertEqual(len(__A ) , len(self.tool.outputs ) ) for output, output_type in zip(__A , self.tool.outputs ): a_ = AGENT_TYPE_MAPPING[output_type] self.assertTrue(isinstance(__A , __A ) ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: a_ = create_inputs(self.tool.inputs ) a_ = [] for _input, input_type in zip(__A , self.tool.inputs ): if isinstance(__A , __A ): _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] ) else: _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) ) # Should not raise an error a_ = self.tool(*__A ) if not isinstance(__A , __A ): a_ = [outputs] self.assertEqual(len(__A ) , len(self.tool.outputs ) )
697
import argparse import torch from datasets import load_dataset from donut import DonutModel from transformers import ( DonutImageProcessor, DonutProcessor, DonutSwinConfig, DonutSwinModel, MBartConfig, MBartForCausalLM, VisionEncoderDecoderModel, XLMRobertaTokenizerFast, ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ): SCREAMING_SNAKE_CASE__ = model.config SCREAMING_SNAKE_CASE__ = DonutSwinConfig( image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , ) SCREAMING_SNAKE_CASE__ = MBartConfig( is_decoder=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , add_cross_attention=UpperCamelCase__ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len( model.decoder.tokenizer ) , scale_embedding=UpperCamelCase__ , add_final_layer_norm=UpperCamelCase__ , ) return encoder_config, decoder_config def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ): if "encoder.model" in name: SCREAMING_SNAKE_CASE__ = name.replace("""encoder.model""" , """encoder""" ) if "decoder.model" in name: SCREAMING_SNAKE_CASE__ = name.replace("""decoder.model""" , """decoder""" ) if "patch_embed.proj" in name: SCREAMING_SNAKE_CASE__ = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: SCREAMING_SNAKE_CASE__ = name.replace("""patch_embed.norm""" , """embeddings.norm""" ) if name.startswith("""encoder""" ): if "layers" in name: SCREAMING_SNAKE_CASE__ = """encoder.""" + name if "attn.proj" in name: SCREAMING_SNAKE_CASE__ = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name and "mask" not in name: SCREAMING_SNAKE_CASE__ = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: SCREAMING_SNAKE_CASE__ = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: SCREAMING_SNAKE_CASE__ = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: SCREAMING_SNAKE_CASE__ = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: SCREAMING_SNAKE_CASE__ = name.replace("""mlp.fc2""" , """output.dense""" ) if name == "encoder.norm.weight": SCREAMING_SNAKE_CASE__ = """encoder.layernorm.weight""" if name == "encoder.norm.bias": SCREAMING_SNAKE_CASE__ = """encoder.layernorm.bias""" return name def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: Optional[int] ): for key in orig_state_dict.copy().keys(): SCREAMING_SNAKE_CASE__ = orig_state_dict.pop(UpperCamelCase__ ) if "qkv" in key: SCREAMING_SNAKE_CASE__ = key.split(""".""" ) SCREAMING_SNAKE_CASE__ = int(key_split[3] ) SCREAMING_SNAKE_CASE__ = int(key_split[5] ) SCREAMING_SNAKE_CASE__ = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: SCREAMING_SNAKE_CASE__ = val[:dim, :] SCREAMING_SNAKE_CASE__ = val[dim : dim * 2, :] SCREAMING_SNAKE_CASE__ = val[-dim:, :] else: SCREAMING_SNAKE_CASE__ = val[:dim] SCREAMING_SNAKE_CASE__ = val[dim : dim * 2] SCREAMING_SNAKE_CASE__ = val[-dim:] elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]: # HuggingFace implementation doesn't use attn_mask buffer # and model doesn't use final LayerNorms for the encoder pass else: SCREAMING_SNAKE_CASE__ = val return orig_state_dict def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] , UpperCamelCase__: int=None , UpperCamelCase__: 
str=False ): # load original model SCREAMING_SNAKE_CASE__ = DonutModel.from_pretrained(UpperCamelCase__ ).eval() # load HuggingFace model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_configs(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = DonutSwinModel(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = MBartForCausalLM(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = VisionEncoderDecoderModel(encoder=UpperCamelCase__ , decoder=UpperCamelCase__ ) model.eval() SCREAMING_SNAKE_CASE__ = original_model.state_dict() SCREAMING_SNAKE_CASE__ = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ ) model.load_state_dict(UpperCamelCase__ ) # verify results on scanned document SCREAMING_SNAKE_CASE__ = load_dataset("""hf-internal-testing/example-documents""" ) SCREAMING_SNAKE_CASE__ = dataset["""test"""][0]["""image"""].convert("""RGB""" ) SCREAMING_SNAKE_CASE__ = XLMRobertaTokenizerFast.from_pretrained(UpperCamelCase__ , from_slow=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = DonutImageProcessor( do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] ) SCREAMING_SNAKE_CASE__ = DonutProcessor(UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = processor(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values if model_name == "naver-clova-ix/donut-base-finetuned-docvqa": SCREAMING_SNAKE_CASE__ = """<s_docvqa><s_question>{user_input}</s_question><s_answer>""" SCREAMING_SNAKE_CASE__ = """When is the coffee break?""" SCREAMING_SNAKE_CASE__ = task_prompt.replace("""{user_input}""" , UpperCamelCase__ ) elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip": SCREAMING_SNAKE_CASE__ = """<s_rvlcdip>""" elif model_name in [ "naver-clova-ix/donut-base-finetuned-cord-v1", "naver-clova-ix/donut-base-finetuned-cord-v1-2560", ]: SCREAMING_SNAKE_CASE__ = """<s_cord>""" elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2": SCREAMING_SNAKE_CASE__ = """s_cord-v2>""" elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket": SCREAMING_SNAKE_CASE__ = """<s_zhtrainticket>""" elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]: # use a random prompt SCREAMING_SNAKE_CASE__ = """hello world""" else: raise ValueError("""Model name not supported""" ) SCREAMING_SNAKE_CASE__ = original_model.decoder.tokenizer(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors="""pt""" )[ """input_ids""" ] SCREAMING_SNAKE_CASE__ = original_model.encoder.model.patch_embed(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model.encoder.embeddings(UpperCamelCase__ ) assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) # verify encoder hidden states SCREAMING_SNAKE_CASE__ = original_model.encoder(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = model.encoder(UpperCamelCase__ ).last_hidden_state assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-2 ) # verify decoder hidden states SCREAMING_SNAKE_CASE__ = original_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).logits SCREAMING_SNAKE_CASE__ = model(UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ ).logits assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'''Saving model and processor to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCamelCase__ ) processor.save_pretrained(UpperCamelCase__ ) if push_to_hub: model.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update 
model""" ) processor.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" ) if __name__ == "__main__": _lowerCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='naver-clova-ix/donut-base-finetuned-docvqa', required=False, type=str, help='Name of the original model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, required=False, type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model and processor to the 🤗 hub.', ) _lowerCamelCase = parser.parse_args() convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
6
0
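The convert_state_dict logic in the Donut conversion above splits fused qkv projection weights into separate query, key and value tensors; a minimal standalone sketch of that slicing:

import torch

def split_qkv(qkv_weight: torch.Tensor, dim: int):
    # fused weight of shape (3 * dim, in_features) -> three (dim, in_features) blocks
    query = qkv_weight[:dim, :]
    key = qkv_weight[dim : dim * 2, :]
    value = qkv_weight[-dim:, :]
    return query, key, value

q, k, v = split_qkv(torch.randn(3 * 64, 64), dim=64)
print(q.shape, k.shape, v.shape)  # torch.Size([64, 64]) three times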
"""simple docstring""" import math def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> int: '''simple docstring''' lowercase_ = len(UpperCamelCase__ ) lowercase_ = int(math.floor(math.sqrt(UpperCamelCase__ ) ) ) lowercase_ = 0 while arr[min(UpperCamelCase__ , UpperCamelCase__ ) - 1] < x: lowercase_ = step step += int(math.floor(math.sqrt(UpperCamelCase__ ) ) ) if prev >= n: return -1 while arr[prev] < x: lowercase_ = prev + 1 if prev == min(UpperCamelCase__ , UpperCamelCase__ ): return -1 if arr[prev] == x: return prev return -1 if __name__ == "__main__": UpperCAmelCase : Optional[Any] = input("Enter numbers separated by a comma:\n").strip() UpperCAmelCase : str = [int(item) for item in user_input.split(",")] UpperCAmelCase : Optional[int] = int(input("Enter the number to be searched:\n")) UpperCAmelCase : Optional[int] = jump_search(arr, x) if res == -1: print("Number not found!") else: print(F"Number {x} is at index {res}")
567
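A quick usage example for the jump search above:

arr = [0, 1, 3, 5, 7, 11, 13, 17, 19, 23]
print(jump_search(arr, 11))  # 5
print(jump_search(arr, 4))   # -1, absent values fall through to -1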
import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class UpperCamelCase_ ( unittest.TestCase ): def _snake_case ( self :Union[str, Any] ) -> List[str]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self :Any ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = StableDiffusionKDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" ) SCREAMING_SNAKE_CASE__ = sd_pipe.to(__A ) sd_pipe.set_progress_bar_config(disable=__A ) sd_pipe.set_scheduler("""sample_euler""" ) SCREAMING_SNAKE_CASE__ = """A painting of a squirrel eating a burger""" SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ = sd_pipe([prompt] , generator=__A , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" ) SCREAMING_SNAKE_CASE__ = output.images SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE__ = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _snake_case ( self :str ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) SCREAMING_SNAKE_CASE__ = sd_pipe.to(__A ) sd_pipe.set_progress_bar_config(disable=__A ) sd_pipe.set_scheduler("""sample_euler""" ) SCREAMING_SNAKE_CASE__ = """A painting of a squirrel eating a burger""" SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ = sd_pipe([prompt] , generator=__A , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" ) SCREAMING_SNAKE_CASE__ = output.images SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE__ = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1 def _snake_case ( self :Tuple ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) SCREAMING_SNAKE_CASE__ = sd_pipe.to(__A ) sd_pipe.set_progress_bar_config(disable=__A ) sd_pipe.set_scheduler("""sample_dpmpp_2m""" ) SCREAMING_SNAKE_CASE__ = """A painting of a squirrel eating a burger""" SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ = sd_pipe( [prompt] , generator=__A , guidance_scale=7.5 , num_inference_steps=15 , output_type="""np""" , use_karras_sigmas=__A , ) SCREAMING_SNAKE_CASE__ = output.images SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE__ = np.array( [0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
6
0
import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig lowerCamelCase : List[str] = logging.get_logger(__name__) # General docstring lowerCamelCase : str = '''PoolFormerConfig''' # Base docstring lowerCamelCase : Any = '''sail/poolformer_s12''' lowerCamelCase : Dict = [1, 512, 7, 7] # Image classification docstring lowerCamelCase : Union[str, Any] = '''sail/poolformer_s12''' lowerCamelCase : Optional[int] = '''tabby, tabby cat''' lowerCamelCase : str = [ '''sail/poolformer_s12''', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def __lowerCAmelCase ( __snake_case , __snake_case = 0.0 , __snake_case = False ): if drop_prob == 0.0 or not training: return input __lowerCAmelCase = 1 - drop_prob __lowerCAmelCase = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets __lowerCAmelCase = keep_prob + torch.rand(UpperCamelCase__ , dtype=input.dtype , device=input.device ) random_tensor.floor_() # binarize __lowerCAmelCase = input.div(UpperCamelCase__ ) * random_tensor return output class _UpperCamelCase (nn.Module ): def __init__( self , __UpperCamelCase = None )-> None: super().__init__() __lowerCAmelCase = drop_prob def __UpperCAmelCase ( self , __UpperCamelCase )-> torch.Tensor: return drop_path(__A , self.drop_prob , self.training ) def __UpperCAmelCase ( self )-> str: return "p={}".format(self.drop_prob ) class _UpperCamelCase (nn.Module ): def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None )-> Union[str, Any]: super().__init__() __lowerCAmelCase = patch_size if isinstance(__A , collections.abc.Iterable ) else (patch_size, patch_size) __lowerCAmelCase = stride if isinstance(__A , collections.abc.Iterable ) else (stride, stride) __lowerCAmelCase = padding if isinstance(__A , collections.abc.Iterable ) else (padding, padding) __lowerCAmelCase = nn.Convad(__A , __A , kernel_size=__A , stride=__A , padding=__A ) __lowerCAmelCase = norm_layer(__A ) if norm_layer else nn.Identity() def __UpperCAmelCase ( self , __UpperCamelCase )-> Optional[Any]: __lowerCAmelCase = self.projection(__A ) __lowerCAmelCase = self.norm(__A ) return embeddings class _UpperCamelCase (nn.GroupNorm ): def __init__( self , __UpperCamelCase , **__UpperCamelCase )-> Dict: super().__init__(1 , __A , **__A ) class _UpperCamelCase (nn.Module ): def __init__( self , __UpperCamelCase )-> Any: super().__init__() __lowerCAmelCase = nn.AvgPoolad(__A , stride=1 , padding=pool_size // 2 , count_include_pad=__A ) def __UpperCAmelCase ( self , __UpperCamelCase )-> Optional[Any]: return self.pool(__A ) - hidden_states class _UpperCamelCase (nn.Module ): def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> str: super().__init__() __lowerCAmelCase = nn.Convad(__A , __A , 1 ) __lowerCAmelCase = nn.Convad(__A , __A , 1 ) __lowerCAmelCase = PoolFormerDropPath(__A ) if isinstance(config.hidden_act , __A ): __lowerCAmelCase = ACTaFN[config.hidden_act] else: 
__lowerCAmelCase = config.hidden_act def __UpperCAmelCase ( self , __UpperCamelCase )-> Dict: __lowerCAmelCase = self.conva(__A ) __lowerCAmelCase = self.act_fn(__A ) __lowerCAmelCase = self.drop(__A ) __lowerCAmelCase = self.conva(__A ) __lowerCAmelCase = self.drop(__A ) return hidden_states class _UpperCamelCase (nn.Module ): def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Optional[int]: super().__init__() __lowerCAmelCase = PoolFormerPooling(__A ) __lowerCAmelCase = PoolFormerOutput(__A , __A , __A , __A ) __lowerCAmelCase = PoolFormerGroupNorm(__A ) __lowerCAmelCase = PoolFormerGroupNorm(__A ) # Useful for training neural nets __lowerCAmelCase = PoolFormerDropPath(__A ) if drop_path > 0.0 else nn.Identity() __lowerCAmelCase = config.use_layer_scale if config.use_layer_scale: __lowerCAmelCase = nn.Parameter( config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A ) __lowerCAmelCase = nn.Parameter( config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A ) def __UpperCAmelCase ( self , __UpperCamelCase )-> str: if self.use_layer_scale: __lowerCAmelCase = self.pooling(self.before_norm(__A ) ) __lowerCAmelCase = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection __lowerCAmelCase = hidden_states + self.drop_path(__A ) __lowerCAmelCase = () __lowerCAmelCase = self.output(self.after_norm(__A ) ) __lowerCAmelCase = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection __lowerCAmelCase = hidden_states + self.drop_path(__A ) __lowerCAmelCase = (output,) + outputs return outputs else: __lowerCAmelCase = self.drop_path(self.pooling(self.before_norm(__A ) ) ) # First residual connection __lowerCAmelCase = pooling_output + hidden_states __lowerCAmelCase = () # Second residual connection inside the PoolFormerOutput block __lowerCAmelCase = self.drop_path(self.output(self.after_norm(__A ) ) ) __lowerCAmelCase = hidden_states + layer_output __lowerCAmelCase = (output,) + outputs return outputs class _UpperCamelCase (nn.Module ): def __init__( self , __UpperCamelCase )-> Union[str, Any]: super().__init__() __lowerCAmelCase = config # stochastic depth decay rule __lowerCAmelCase = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings __lowerCAmelCase = [] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) __lowerCAmelCase = nn.ModuleList(__A ) # Transformer blocks __lowerCAmelCase = [] __lowerCAmelCase = 0 for i in range(config.num_encoder_blocks ): # each block consists of layers __lowerCAmelCase = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( __A , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(__A ) ) __lowerCAmelCase = nn.ModuleList(__A ) def __UpperCAmelCase ( self , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=True )-> Optional[int]: __lowerCAmelCase = () if output_hidden_states else None __lowerCAmelCase = pixel_values for idx, layers in 
enumerate(zip(self.patch_embeddings , self.block ) ): __lowerCAmelCase , __lowerCAmelCase = layers # Get patch embeddings from hidden_states __lowerCAmelCase = embedding_layer(__A ) # Send the embeddings through the blocks for _, blk in enumerate(__A ): __lowerCAmelCase = blk(__A ) __lowerCAmelCase = layer_outputs[0] if output_hidden_states: __lowerCAmelCase = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=__A , hidden_states=__A ) class _UpperCamelCase (UpperCamelCase__ ): snake_case_ = PoolFormerConfig snake_case_ = """poolformer""" snake_case_ = """pixel_values""" snake_case_ = True def __UpperCAmelCase ( self , __UpperCamelCase )-> Dict: if isinstance(__A , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__A , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def __UpperCAmelCase ( self , __UpperCamelCase , __UpperCamelCase=False )-> Any: if isinstance(__A , __A ): __lowerCAmelCase = value lowerCamelCase : Optional[Any] = R'''\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n''' lowerCamelCase : List[Any] = R'''\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. 
See\n [`PoolFormerImageProcessor.__call__`] for details.\n''' @add_start_docstrings( """The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.""" , UpperCamelCase__ , ) class _UpperCamelCase (UpperCamelCase__ ): def __init__( self , __UpperCamelCase )-> int: super().__init__(__A ) __lowerCAmelCase = config __lowerCAmelCase = PoolFormerEncoder(__A ) # Initialize weights and apply final processing self.post_init() def __UpperCAmelCase ( self )-> Union[str, Any]: return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(__A ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__A , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def __UpperCAmelCase ( self , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , )-> Union[Tuple, BaseModelOutputWithNoAttention]: __lowerCAmelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values" ) __lowerCAmelCase = self.encoder( __A , output_hidden_states=__A , return_dict=__A , ) __lowerCAmelCase = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=__A , hidden_states=encoder_outputs.hidden_states , ) class _UpperCamelCase (nn.Module ): def __init__( self , __UpperCamelCase )-> Tuple: super().__init__() __lowerCAmelCase = nn.Linear(config.hidden_size , config.hidden_size ) def __UpperCAmelCase ( self , __UpperCamelCase )-> int: __lowerCAmelCase = self.dense(__A ) return output @add_start_docstrings( """\n PoolFormer Model transformer with an image classification head on top\n """ , UpperCamelCase__ , ) class _UpperCamelCase (UpperCamelCase__ ): def __init__( self , __UpperCamelCase )-> int: super().__init__(__A ) __lowerCAmelCase = config.num_labels __lowerCAmelCase = PoolFormerModel(__A ) # Final norm __lowerCAmelCase = PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head __lowerCAmelCase = ( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__A ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def __UpperCAmelCase ( self , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , )-> Union[Tuple, ImageClassifierOutputWithNoAttention]: __lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict __lowerCAmelCase = self.poolformer( __A , output_hidden_states=__A , return_dict=__A , ) __lowerCAmelCase = outputs[0] __lowerCAmelCase = self.classifier(self.norm(__A ).mean([-2, -1] ) ) __lowerCAmelCase = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __lowerCAmelCase = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): __lowerCAmelCase = "single_label_classification" else: __lowerCAmelCase = "multi_label_classification" if self.config.problem_type == "regression": __lowerCAmelCase = MSELoss() if self.num_labels == 1: __lowerCAmelCase = 
loss_fct(logits.squeeze() , labels.squeeze() ) else: __lowerCAmelCase = loss_fct(__A , __A ) elif self.config.problem_type == "single_label_classification": __lowerCAmelCase = CrossEntropyLoss() __lowerCAmelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": __lowerCAmelCase = BCEWithLogitsLoss() __lowerCAmelCase = loss_fct(__A , __A ) if not return_dict: __lowerCAmelCase = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=__A , logits=__A , hidden_states=outputs.hidden_states )
code_codestyle: 367
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int = 600_851_475_143 ): try: SCREAMING_SNAKE_CASE__ = int(UpperCamelCase__ ) except (TypeError, ValueError): raise TypeError("""Parameter n must be int or castable to int.""" ) if n <= 0: raise ValueError("""Parameter n must be greater than or equal to one.""" ) SCREAMING_SNAKE_CASE__ = 1 SCREAMING_SNAKE_CASE__ = 2 while i * i <= n: while n % i == 0: SCREAMING_SNAKE_CASE__ = i n //= i i += 1 if n > 1: SCREAMING_SNAKE_CASE__ = n return int(UpperCamelCase__ ) if __name__ == "__main__": print(F'''{solution() = }''')
style_context_codestyle: 6
label: 0
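The code field of the row above is the PoolFormer modeling file with its identifiers obfuscated (e.g. nn.Conv2d rendered as nn.Convad, ACT2FN as ACTaFN), paired with an unrelated style context. For reference, a minimal de-obfuscated sketch of the two building blocks that code implements, the pooling token mixer and stochastic depth; names here are illustrative and not part of the dataset row:

import torch
from torch import nn

class PoolingTokenMixer(nn.Module):
    # PoolFormer's "attention": average pooling minus the identity.
    def __init__(self, pool_size: int = 3):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Subtracting the input leaves only the "mixing" part of the branch.
        return self.pool(hidden_states) - hidden_states

def drop_path(x: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    # Stochastic depth: randomly zero whole samples in a residual branch.
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1.0 - drop_prob
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)  # broadcast over all but batch dim
    mask = torch.floor(keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device))
    return x.div(keep_prob) * mask  # rescale so the expected value is unchanged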
def snake_case (UpperCamelCase : list ): '''simple docstring''' if any(not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or x < 0 for x in sequence ): raise TypeError("""Sequence must be list of non-negative integers""" ) for _ in range(len(UpperCamelCase__ ) ): for i, (rod_upper, rod_lower) in enumerate(zip(UpperCamelCase__ , sequence[1:] ) ): if rod_upper > rod_lower: sequence[i] -= rod_upper - rod_lower sequence[i + 1] += rod_upper - rod_lower return sequence if __name__ == "__main__": assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
code_codestyle: 165
import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class UpperCamelCase_ ( unittest.TestCase ): def _snake_case ( self :Tuple ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """safety_checker/pytorch_model.bin""", """safety_checker/model.safetensors""", """vae/diffusion_pytorch_model.bin""", """vae/diffusion_pytorch_model.safetensors""", """text_encoder/pytorch_model.bin""", """text_encoder/model.safetensors""", """unet/diffusion_pytorch_model.bin""", """unet/diffusion_pytorch_model.safetensors""", ] self.assertTrue(is_safetensors_compatible(__A ) ) def _snake_case ( self :List[str] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """unet/diffusion_pytorch_model.bin""", """unet/diffusion_pytorch_model.safetensors""", ] self.assertTrue(is_safetensors_compatible(__A ) ) def _snake_case ( self :Tuple ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """safety_checker/pytorch_model.bin""", """safety_checker/model.safetensors""", """vae/diffusion_pytorch_model.bin""", """vae/diffusion_pytorch_model.safetensors""", """text_encoder/pytorch_model.bin""", """text_encoder/model.safetensors""", """unet/diffusion_pytorch_model.bin""", # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(__A ) ) def _snake_case ( self :Union[str, Any] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """text_encoder/pytorch_model.bin""", """text_encoder/model.safetensors""", ] self.assertTrue(is_safetensors_compatible(__A ) ) def _snake_case ( self :Optional[Any] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """safety_checker/pytorch_model.bin""", """safety_checker/model.safetensors""", """vae/diffusion_pytorch_model.bin""", """vae/diffusion_pytorch_model.safetensors""", """text_encoder/pytorch_model.bin""", # Removed: 'text_encoder/model.safetensors', """unet/diffusion_pytorch_model.bin""", """unet/diffusion_pytorch_model.safetensors""", ] self.assertFalse(is_safetensors_compatible(__A ) ) def _snake_case ( self :Tuple ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """safety_checker/pytorch_model.fp16.bin""", """safety_checker/model.fp16.safetensors""", """vae/diffusion_pytorch_model.fp16.bin""", """vae/diffusion_pytorch_model.fp16.safetensors""", """text_encoder/pytorch_model.fp16.bin""", """text_encoder/model.fp16.safetensors""", """unet/diffusion_pytorch_model.fp16.bin""", """unet/diffusion_pytorch_model.fp16.safetensors""", ] SCREAMING_SNAKE_CASE__ = """fp16""" self.assertTrue(is_safetensors_compatible(__A , variant=__A ) ) def _snake_case ( self :Any ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """unet/diffusion_pytorch_model.fp16.bin""", """unet/diffusion_pytorch_model.fp16.safetensors""", ] SCREAMING_SNAKE_CASE__ = """fp16""" self.assertTrue(is_safetensors_compatible(__A , variant=__A ) ) def _snake_case ( self :str ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """unet/diffusion_pytorch_model.bin""", """unet/diffusion_pytorch_model.safetensors""", ] SCREAMING_SNAKE_CASE__ = """fp16""" self.assertTrue(is_safetensors_compatible(__A , variant=__A ) ) def _snake_case ( self :List[Any] ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """safety_checker/pytorch_model.fp16.bin""", """safety_checker/model.fp16.safetensors""", """vae/diffusion_pytorch_model.fp16.bin""", """vae/diffusion_pytorch_model.fp16.safetensors""", """text_encoder/pytorch_model.fp16.bin""", 
"""text_encoder/model.fp16.safetensors""", """unet/diffusion_pytorch_model.fp16.bin""", # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] SCREAMING_SNAKE_CASE__ = """fp16""" self.assertFalse(is_safetensors_compatible(__A , variant=__A ) ) def _snake_case ( self :str ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """text_encoder/pytorch_model.fp16.bin""", """text_encoder/model.fp16.safetensors""", ] SCREAMING_SNAKE_CASE__ = """fp16""" self.assertTrue(is_safetensors_compatible(__A , variant=__A ) ) def _snake_case ( self :Optional[int] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """text_encoder/pytorch_model.bin""", """text_encoder/model.safetensors""", ] SCREAMING_SNAKE_CASE__ = """fp16""" self.assertTrue(is_safetensors_compatible(__A , variant=__A ) ) def _snake_case ( self :Union[str, Any] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """safety_checker/pytorch_model.fp16.bin""", """safety_checker/model.fp16.safetensors""", """vae/diffusion_pytorch_model.fp16.bin""", """vae/diffusion_pytorch_model.fp16.safetensors""", """text_encoder/pytorch_model.fp16.bin""", # 'text_encoder/model.fp16.safetensors', """unet/diffusion_pytorch_model.fp16.bin""", """unet/diffusion_pytorch_model.fp16.safetensors""", ] SCREAMING_SNAKE_CASE__ = """fp16""" self.assertFalse(is_safetensors_compatible(__A , variant=__A ) )
style_context_codestyle: 6
label: 0
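The code field above is bead (gravity) sort: in each left-to-right pass, any rod holding more beads than the rod after it lets the surplus fall, so after at most n passes the sequence is non-decreasing. A clean reference version of the same pass, with illustrative names:

def bead_sort(sequence: list[int]) -> list[int]:
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):  # n passes suffice for n rods
        for i, (upper, lower) in enumerate(zip(sequence, sequence[1:])):
            if upper > lower:
                surplus = upper - lower  # beads that fall one level
                sequence[i] -= surplus
                sequence[i + 1] += surplus
    return sequence

print(bead_sort([7, 9, 4, 3, 5]))  # [3, 4, 5, 7, 9]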
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : str , _snake_case : str ) -> bool: '''simple docstring''' _A = get_failure_array(_snake_case ) # 2) Step through text searching for pattern _A , _A = 0, 0 # index into text, pattern while i < len(_snake_case ): if pattern[j] == text[i]: if j == (len(_snake_case ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: _A = failure[j - 1] continue i += 1 return False def _snake_case ( _snake_case : str ) -> list[int]: '''simple docstring''' _A = [0] _A = 0 _A = 1 while j < len(_snake_case ): if pattern[i] == pattern[j]: i += 1 elif i > 0: _A = failure[i - 1] continue j += 1 failure.append(_snake_case ) return failure if __name__ == "__main__": # Test 1) a = '''abc1abc12''' a = '''alskfjaldsabc1abc1abc12k23adsfabcabc''' a = '''alskfjaldsk23adsfabcabc''' assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) a = '''ABABX''' a = '''ABABZABABYABABX''' assert kmp(pattern, text) # Test 3) a = '''AAAB''' a = '''ABAAAAAB''' assert kmp(pattern, text) # Test 4) a = '''abcdabcy''' a = '''abcxabcdabxabcdabcdabcy''' assert kmp(pattern, text) # Test 5) a = '''aabaabaaa''' assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
code_codestyle: 7
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { '''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''', } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : int = '''gpt_bigcode''' UpperCAmelCase : str = ['''past_key_values'''] UpperCAmelCase : Dict = { '''hidden_size''': '''n_embd''', '''max_position_embeddings''': '''n_positions''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Tuple , _UpperCAmelCase : Dict=50_257 , _UpperCAmelCase : List[Any]=1_024 , _UpperCAmelCase : Any=768 , _UpperCAmelCase : int=12 , _UpperCAmelCase : Any=12 , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : str="gelu_pytorch_tanh" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : List[Any]=1E-5 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : List[Any]=50_256 , _UpperCAmelCase : Dict=50_256 , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Any=True , **_UpperCAmelCase : Any , ): _A = vocab_size _A = n_positions _A = n_embd _A = n_layer _A = n_head _A = n_inner _A = activation_function _A = resid_pdrop _A = embd_pdrop _A = attn_pdrop _A = layer_norm_epsilon _A = initializer_range _A = scale_attn_weights _A = use_cache _A = attention_softmax_in_fpaa _A = scale_attention_softmax_in_fpaa _A = multi_query _A = bos_token_id _A = eos_token_id super().__init__(bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
style_context_codestyle: 7
label: 1
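The code field above is Knuth-Morris-Pratt search; its failure array stores, for each prefix of the pattern, the length of the longest proper prefix that is also a suffix, which tells the matcher how far to fall back on a mismatch. A readable sketch of the same failure-array construction (names illustrative):

def get_failure_array(pattern: str) -> list[int]:
    # failure[j]: length of the longest proper prefix of pattern[: j + 1]
    # that is also a suffix of it.
    failure = [0]
    i, j = 0, 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]  # fall back to the next-shorter border
            continue
        j += 1
        failure.append(i)
    return failure

print(get_failure_array("aabaabaaa"))  # [0, 1, 0, 1, 2, 3, 4, 5, 2]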
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available a = { '''configuration_poolformer''': [ '''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PoolFormerConfig''', '''PoolFormerOnnxConfig''', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = ['''PoolFormerFeatureExtractor'''] a = ['''PoolFormerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ '''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''PoolFormerForImageClassification''', '''PoolFormerModel''', '''PoolFormerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_poolformer import ( POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, PoolFormerConfig, PoolFormerOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_poolformer import PoolFormerFeatureExtractor from .image_processing_poolformer import PoolFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_poolformer import ( POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, PoolFormerForImageClassification, PoolFormerModel, PoolFormerPreTrainedModel, ) else: import sys a = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
code_codestyle: 7
"""simple docstring""" def _snake_case ( _snake_case : str ) -> str: '''simple docstring''' return " ".join( ''.join(word[::-1] ) if len(_snake_case ) > 4 else word for word in sentence.split() ) if __name__ == "__main__": import doctest doctest.testmod() print(reverse_long_words('''Hey wollef sroirraw'''))
style_context_codestyle: 7
label: 1
"""simple docstring""" a = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)] def _snake_case ( _snake_case : int ) -> int: '''simple docstring''' _A = 0 while number: # Increased Speed Slightly by checking every 5 digits together. sum_of_digits_squared += DIGITS_SQUARED[number % 10_00_00] number //= 10_00_00 return sum_of_digits_squared # There are 2 Chains made, # One ends with 89 with the chain member 58 being the one which when declared first, # there will be the least number of iterations for all the members to be checked. # The other one ends with 1 and has only one element 1. # So 58 and 1 are chosen to be declared at the starting. # Changed dictionary to an array to quicken the solution a = [None] * 10_000_000 a = True a = False def _snake_case ( _snake_case : int ) -> bool: '''simple docstring''' if CHAINS[number - 1] is not None: return CHAINS[number - 1] # type: ignore _A = chain(next_number(_snake_case ) ) _A = number_chain while number < 10_00_00_00: _A = number_chain number *= 10 return number_chain def _snake_case ( _snake_case : int = 10_00_00_00 ) -> int: '''simple docstring''' for i in range(1 , _snake_case ): if CHAINS[i] is None: chain(i + 1 ) return CHAINS[:number].count(_snake_case ) if __name__ == "__main__": import doctest doctest.testmod() print(F'''{solution() = }''')
code_codestyle: 7
"""simple docstring""" import torch from diffusers import KDPMaDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Optional[int] = (KDPMaDiscreteScheduler,) UpperCAmelCase : Any = 10 def lowerCAmelCase_ ( self : Dict , **_UpperCAmelCase : Optional[Any] ): _A = { 'num_train_timesteps': 1_100, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', } config.update(**_UpperCAmelCase ) return config def lowerCAmelCase_ ( self : Any ): for timesteps in [10, 50, 100, 1_000]: self.check_over_configs(num_train_timesteps=_UpperCAmelCase ) def lowerCAmelCase_ ( self : Dict ): for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase ) def lowerCAmelCase_ ( self : Tuple ): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _A = self.scheduler_classes[0] _A = self.get_scheduler_config(prediction_type='v_prediction' ) _A = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) _A = self.dummy_model() _A = self.dummy_sample_deter * scheduler.init_noise_sigma _A = sample.to(_UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): _A = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase ) _A = model(_UpperCAmelCase , _UpperCAmelCase ) _A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) _A = output.prev_sample _A = torch.sum(torch.abs(_UpperCAmelCase ) ) _A = torch.mean(torch.abs(_UpperCAmelCase ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 4.6_9_3_4E-0_7 ) < 1E-2 assert abs(result_mean.item() - 6.1_1_1_2E-1_0 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2E-0_7 ) < 1E-2 assert abs(result_mean.item() - 0.0002 ) < 1E-3 def lowerCAmelCase_ ( self : Optional[Any] ): if torch_device == "mps": return _A = self.scheduler_classes[0] _A = self.get_scheduler_config() _A = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) _A = self.dummy_model() _A = self.dummy_sample_deter * scheduler.init_noise_sigma _A = sample.to(_UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): _A = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase ) _A = model(_UpperCAmelCase , _UpperCAmelCase ) _A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) _A = output.prev_sample _A = torch.sum(torch.abs(_UpperCAmelCase ) ) _A = torch.mean(torch.abs(_UpperCAmelCase ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 20.4125 ) < 1E-2 assert abs(result_mean.item() - 0.0266 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 20.4125 ) < 1E-2 assert abs(result_mean.item() - 0.0266 ) < 1E-3 def lowerCAmelCase_ ( self : Any ): if torch_device == "mps": return _A = self.scheduler_classes[0] _A = self.get_scheduler_config() _A = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps , device=_UpperCAmelCase ) _A = self.dummy_model() _A = self.dummy_sample_deter.to(_UpperCAmelCase ) * scheduler.init_noise_sigma for t in scheduler.timesteps: _A = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase ) 
_A = model(_UpperCAmelCase , _UpperCAmelCase ) _A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) _A = output.prev_sample _A = torch.sum(torch.abs(_UpperCAmelCase ) ) _A = torch.mean(torch.abs(_UpperCAmelCase ) ) if str(_UpperCAmelCase ).startswith('cpu' ): # The following sum varies between 148 and 156 on mps. Why? assert abs(result_sum.item() - 20.4125 ) < 1E-2 assert abs(result_mean.item() - 0.0266 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 20.4125 ) < 1E-2 assert abs(result_mean.item() - 0.0266 ) < 1E-3
style_context_codestyle: 7
label: 1
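The code field above solves Project Euler 92: every square-digit chain eventually reaches 1 or 89, so memoizing chain endpoints makes counting fast. A compact, de-obfuscated version of the same idea; names are illustrative, and the original's array-based 5-digit-block speedup is dropped for clarity:

def next_number(n: int) -> int:
    # Sum of the squares of the decimal digits of n.
    total = 0
    while n:
        n, digit = divmod(n, 10)
        total += digit * digit
    return total

def solution(limit: int = 10_000_000) -> int:
    # Count starting values below `limit` whose chain reaches 89.
    ends_at_89 = {1: False, 89: True}  # memoized chain endpoints
    count = 0
    for start in range(1, limit):
        n, seen = start, []
        while n not in ends_at_89:
            seen.append(n)
            n = next_number(n)
        for m in seen:  # every link shares the chain's endpoint
            ends_at_89[m] = ends_at_89[n]
        count += ends_at_89[start]
    return count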
"""simple docstring""" from datetime import datetime import matplotlib.pyplot as plt import torch def _snake_case ( _snake_case : Dict ) -> Optional[Any]: '''simple docstring''' for param in module.parameters(): _A = False def _snake_case ( ) -> Tuple: '''simple docstring''' _A = 'cuda' if torch.cuda.is_available() else 'cpu' if torch.backends.mps.is_available() and torch.backends.mps.is_built(): _A = 'mps' if device == "mps": print( 'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch' ' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues' ' with generations.' ) return device def _snake_case ( _snake_case : Dict ) -> Optional[Any]: '''simple docstring''' _A = plt.imshow(_snake_case ) fig.axes.get_xaxis().set_visible(_snake_case ) fig.axes.get_yaxis().set_visible(_snake_case ) plt.show() def _snake_case ( ) -> Optional[Any]: '''simple docstring''' _A = datetime.now() _A = current_time.strftime('%H:%M:%S' ) return timestamp
code_codestyle: 7
"""simple docstring""" import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def _snake_case ( _snake_case : Optional[int] , _snake_case : Optional[Any]=10 ) -> Optional[int]: '''simple docstring''' _A = [] for _ in range(_snake_case ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def _snake_case ( _snake_case : Optional[Any] , _snake_case : Union[str, Any]=10 ) -> List[str]: '''simple docstring''' _A = [] for step in range(_snake_case ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: _A = os.path.join(_snake_case , 'schedule.bin' ) torch.save(scheduler.state_dict() , _snake_case ) _A = torch.load(_snake_case ) scheduler.load_state_dict(_snake_case ) return lrs @require_torch class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple ): self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) ) for a, b in zip(_UpperCAmelCase , _UpperCAmelCase ): self.assertAlmostEqual(_UpperCAmelCase , _UpperCAmelCase , delta=_UpperCAmelCase ) def lowerCAmelCase_ ( self : Any ): _A = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_UpperCAmelCase ) _A = torch.tensor([0.4, 0.2, -0.5] ) _A = nn.MSELoss() # No warmup, constant schedule, no gradient clipping _A = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 ) for _ in range(100 ): _A = criterion(_UpperCAmelCase , _UpperCAmelCase ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) def lowerCAmelCase_ ( self : int ): _A = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_UpperCAmelCase ) _A = torch.tensor([0.4, 0.2, -0.5] ) _A = nn.MSELoss() # No warmup, constant schedule, no gradient clipping _A = Adafactor( params=[w] , lr=1E-2 , eps=(1E-3_0, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=_UpperCAmelCase , weight_decay=0.0 , relative_step=_UpperCAmelCase , scale_parameter=_UpperCAmelCase , warmup_init=_UpperCAmelCase , ) for _ in range(1_000 ): _A = criterion(_UpperCAmelCase , _UpperCAmelCase ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) @require_torch class lowercase_ ( unittest.TestCase ): '''simple docstring''' UpperCAmelCase : List[str] = nn.Linear(50 , 50 ) if is_torch_available() else None UpperCAmelCase : Tuple = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None UpperCAmelCase : Dict = 10 def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any]=None ): self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) ) for a, b in zip(_UpperCAmelCase , _UpperCAmelCase ): self.assertAlmostEqual(_UpperCAmelCase , _UpperCAmelCase , delta=_UpperCAmelCase , msg=_UpperCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _A = {'num_warmup_steps': 2, 'num_training_steps': 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) _A = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'num_warmup_steps': 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, 'num_cycles': 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, 'power': 2.0, 'lr_end': 1E-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {'num_warmup_steps': 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): _A , _A = data _A = scheduler_func(self.optimizer , **_UpperCAmelCase ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) _A = unwrap_schedule(_UpperCAmelCase , self.num_steps ) self.assertListAlmostEqual( _UpperCAmelCase , _UpperCAmelCase , tol=1E-2 , msg=F'''failed for {scheduler_func} in normal scheduler''' , ) _A = scheduler_func(self.optimizer , **_UpperCAmelCase ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(_UpperCAmelCase ) # wrap to test picklability of the schedule _A = unwrap_and_save_reload_schedule(_UpperCAmelCase , self.num_steps ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase , msg=F'''failed for {scheduler_func} in save and reload''' ) class lowercase_ : '''simple docstring''' def __init__( self : Union[str, Any] , _UpperCAmelCase : Optional[int] ): _A = fn def __call__( self : Tuple , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : List[str] ): return self.fn(*_UpperCAmelCase , **_UpperCAmelCase ) @classmethod def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : Any ): _A = list(map(self , scheduler.lr_lambdas ) )
style_context_codestyle: 7
label: 1
"""simple docstring""" def _snake_case ( _snake_case : int , _snake_case : int ) -> int: '''simple docstring''' while a != 0: _A , _A = b % a, a return b def _snake_case ( _snake_case : int , _snake_case : int ) -> int: '''simple docstring''' if gcd(_snake_case , _snake_case ) != 1: _A = F'''mod inverse of {a!r} and {m!r} does not exist''' raise ValueError(_snake_case ) _A , _A , _A = 1, 0, a _A , _A , _A = 0, 1, m while va != 0: _A = ua // va _A , _A , _A , _A , _A , _A = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va return ua % m
code_codestyle: 7
"""simple docstring""" import math def _snake_case ( _snake_case : float , _snake_case : float ) -> float: '''simple docstring''' if ( not isinstance(_snake_case , (int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError('power_factor must be a valid float value between -1 and 1.' ) return apparent_power * power_factor def _snake_case ( _snake_case : float , _snake_case : float ) -> float: '''simple docstring''' if ( not isinstance(_snake_case , (int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError('power_factor must be a valid float value between -1 and 1.' ) return apparent_power * math.sqrt(1 - power_factor**2 ) if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 7
label: 1
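The code field above computes modular inverses with the extended Euclidean algorithm, but the obfuscation collapses the six working variables into a single name. An equivalent readable sketch (names illustrative):

def mod_inverse(a: int, m: int) -> int:
    # Invariants: u1*a + u2*m == u3 and v1*a + v2*m == v3.
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        u1, u2, u3, v1, v2, v3 = v1, v2, v3, u1 - q * v1, u2 - q * v2, u3 - q * v3
    if u3 != 1:  # gcd(a, m) must be 1 for an inverse to exist
        raise ValueError(f"mod inverse of {a!r} and {m!r} does not exist")
    return u1 % m

assert (7 * mod_inverse(7, 26)) % 26 == 1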
"""simple docstring""" from math import sqrt def _snake_case ( _snake_case : int ) -> int: '''simple docstring''' _A = 0 for i in range(1 , int(sqrt(_snake_case ) + 1 ) ): if n % i == 0 and i != sqrt(_snake_case ): total += i + n // i elif i == sqrt(_snake_case ): total += i return total - n def _snake_case ( _snake_case : int = 1_00_00 ) -> int: '''simple docstring''' _A = sum( i for i in range(1 , _snake_case ) if sum_of_divisors(sum_of_divisors(_snake_case ) ) == i and sum_of_divisors(_snake_case ) != i ) return total if __name__ == "__main__": print(solution(int(str(input()).strip())))
code_codestyle: 7
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a = logging.get_logger(__name__) a = { '''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''', '''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''', '''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''', '''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''', '''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''', '''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''', '''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''', '''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''', '''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''', } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Optional[Any] = '''xmod''' def __init__( self : str , _UpperCAmelCase : Optional[Any]=30_522 , _UpperCAmelCase : Any=768 , _UpperCAmelCase : int=12 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : Dict=3_072 , _UpperCAmelCase : Union[str, Any]="gelu" , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : List[str]=512 , _UpperCAmelCase : Dict=2 , _UpperCAmelCase : Optional[int]=0.02 , _UpperCAmelCase : Any=1E-1_2 , _UpperCAmelCase : Tuple=1 , _UpperCAmelCase : int=0 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : List[str]="absolute" , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : int=False , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : Dict=True , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Tuple=("en_XX",) , _UpperCAmelCase : List[str]=None , **_UpperCAmelCase : Optional[Any] , ): super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase ) _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = hidden_act _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = initializer_range _A = layer_norm_eps _A = position_embedding_type _A = use_cache _A = classifier_dropout _A = pre_norm _A = adapter_reduction_factor _A = adapter_layer_norm _A = adapter_reuse_layer_norm _A = ln_before_adapter _A = list(_UpperCAmelCase ) _A = default_language class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' @property def lowerCAmelCase_ ( self : Dict ): if self.task == "multiple-choice": _A = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _A = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
style_context_codestyle: 7
label: 1
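The code field above sums amicable numbers (Project Euler 21) using a proper-divisor sum that pairs each divisor d at or below sqrt(n) with its cofactor n // d. A clean version of that helper (names illustrative):

from math import isqrt

def sum_of_proper_divisors(n: int) -> int:
    # Pair each divisor d <= sqrt(n) with its cofactor n // d.
    total = 0
    for d in range(1, isqrt(n) + 1):
        if n % d == 0:
            total += d
            if d != n // d:  # do not count a perfect-square root twice
                total += n // d
    return total - n  # drop n itself to keep only proper divisors

assert sum_of_proper_divisors(220) == 284 and sum_of_proper_divisors(284) == 220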
"""simple docstring""" import torch from diffusers import KDPMaDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Optional[int] = (KDPMaDiscreteScheduler,) UpperCAmelCase : Any = 10 def lowerCAmelCase_ ( self : Dict , **_UpperCAmelCase : Optional[Any] ): _A = { 'num_train_timesteps': 1_100, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', } config.update(**_UpperCAmelCase ) return config def lowerCAmelCase_ ( self : Any ): for timesteps in [10, 50, 100, 1_000]: self.check_over_configs(num_train_timesteps=_UpperCAmelCase ) def lowerCAmelCase_ ( self : Dict ): for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase ) def lowerCAmelCase_ ( self : Tuple ): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _A = self.scheduler_classes[0] _A = self.get_scheduler_config(prediction_type='v_prediction' ) _A = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) _A = self.dummy_model() _A = self.dummy_sample_deter * scheduler.init_noise_sigma _A = sample.to(_UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): _A = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase ) _A = model(_UpperCAmelCase , _UpperCAmelCase ) _A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) _A = output.prev_sample _A = torch.sum(torch.abs(_UpperCAmelCase ) ) _A = torch.mean(torch.abs(_UpperCAmelCase ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 4.6_9_3_4E-0_7 ) < 1E-2 assert abs(result_mean.item() - 6.1_1_1_2E-1_0 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2E-0_7 ) < 1E-2 assert abs(result_mean.item() - 0.0002 ) < 1E-3 def lowerCAmelCase_ ( self : Optional[Any] ): if torch_device == "mps": return _A = self.scheduler_classes[0] _A = self.get_scheduler_config() _A = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) _A = self.dummy_model() _A = self.dummy_sample_deter * scheduler.init_noise_sigma _A = sample.to(_UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): _A = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase ) _A = model(_UpperCAmelCase , _UpperCAmelCase ) _A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) _A = output.prev_sample _A = torch.sum(torch.abs(_UpperCAmelCase ) ) _A = torch.mean(torch.abs(_UpperCAmelCase ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 20.4125 ) < 1E-2 assert abs(result_mean.item() - 0.0266 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 20.4125 ) < 1E-2 assert abs(result_mean.item() - 0.0266 ) < 1E-3 def lowerCAmelCase_ ( self : Any ): if torch_device == "mps": return _A = self.scheduler_classes[0] _A = self.get_scheduler_config() _A = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps , device=_UpperCAmelCase ) _A = self.dummy_model() _A = self.dummy_sample_deter.to(_UpperCAmelCase ) * scheduler.init_noise_sigma for t in scheduler.timesteps: _A = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase ) 
_A = model(_UpperCAmelCase , _UpperCAmelCase ) _A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) _A = output.prev_sample _A = torch.sum(torch.abs(_UpperCAmelCase ) ) _A = torch.mean(torch.abs(_UpperCAmelCase ) ) if str(_UpperCAmelCase ).startswith('cpu' ): # The following sum varies between 148 and 156 on mps. Why? assert abs(result_sum.item() - 20.4125 ) < 1E-2 assert abs(result_mean.item() - 0.0266 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 20.4125 ) < 1E-2 assert abs(result_mean.item() - 0.0266 ) < 1E-3
code_codestyle: 7
"""simple docstring""" import os import shutil from pathlib import Path from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging if is_onnx_available(): import onnxruntime as ort a = logging.get_logger(__name__) a = { '''tensor(bool)''': np.bool_, '''tensor(int8)''': np.inta, '''tensor(uint8)''': np.uinta, '''tensor(int16)''': np.intaa, '''tensor(uint16)''': np.uintaa, '''tensor(int32)''': np.intaa, '''tensor(uint32)''': np.uintaa, '''tensor(int64)''': np.intaa, '''tensor(uint64)''': np.uintaa, '''tensor(float16)''': np.floataa, '''tensor(float)''': np.floataa, '''tensor(double)''': np.floataa, } class lowercase_ : '''simple docstring''' def __init__( self : Optional[Any] , _UpperCAmelCase : Dict=None , **_UpperCAmelCase : Optional[Any] ): logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' ) _A = model _A = kwargs.get('model_save_dir' , _UpperCAmelCase ) _A = kwargs.get('latest_model_name' , _UpperCAmelCase ) def __call__( self : Dict , **_UpperCAmelCase : List[Any] ): _A = {k: np.array(_UpperCAmelCase ) for k, v in kwargs.items()} return self.model.run(_UpperCAmelCase , _UpperCAmelCase ) @staticmethod def lowerCAmelCase_ ( _UpperCAmelCase : Union[str, Path] , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : List[Any]=None ): if provider is None: logger.info('No onnxruntime provider specified, using CPUExecutionProvider' ) _A = 'CPUExecutionProvider' return ort.InferenceSession(_UpperCAmelCase , providers=[provider] , sess_options=_UpperCAmelCase ) def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Union[str, Path] , _UpperCAmelCase : Optional[str] = None , **_UpperCAmelCase : List[Any] ): _A = file_name if file_name is not None else ONNX_WEIGHTS_NAME _A = self.model_save_dir.joinpath(self.latest_model_name ) _A = Path(_UpperCAmelCase ).joinpath(_UpperCAmelCase ) try: shutil.copyfile(_UpperCAmelCase , _UpperCAmelCase ) except shutil.SameFileError: pass # copy external weights (for models >2GB) _A = self.model_save_dir.joinpath(_UpperCAmelCase ) if src_path.exists(): _A = Path(_UpperCAmelCase ).joinpath(_UpperCAmelCase ) try: shutil.copyfile(_UpperCAmelCase , _UpperCAmelCase ) except shutil.SameFileError: pass def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : List[str] , ): if os.path.isfile(_UpperCAmelCase ): logger.error(F'''Provided path ({save_directory}) should be a directory, not a file''' ) return os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) # saving model weights/files self._save_pretrained(_UpperCAmelCase , **_UpperCAmelCase ) @classmethod def lowerCAmelCase_ ( cls : Tuple , _UpperCAmelCase : Union[str, Path] , _UpperCAmelCase : Optional[Union[bool, str, None]] = None , _UpperCAmelCase : Optional[Union[str, None]] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Optional["ort.SessionOptions"] = None , **_UpperCAmelCase : Union[str, Any] , ): _A = file_name if file_name is not None else ONNX_WEIGHTS_NAME # load model from local directory if os.path.isdir(_UpperCAmelCase ): _A = OnnxRuntimeModel.load_model( os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , provider=_UpperCAmelCase , sess_options=_UpperCAmelCase ) _A = Path(_UpperCAmelCase ) # load model from hub else: # download model _A = hf_hub_download( 
repo_id=_UpperCAmelCase , filename=_UpperCAmelCase , use_auth_token=_UpperCAmelCase , revision=_UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , ) _A = Path(_UpperCAmelCase ).parent _A = Path(_UpperCAmelCase ).name _A = OnnxRuntimeModel.load_model(_UpperCAmelCase , provider=_UpperCAmelCase , sess_options=_UpperCAmelCase ) return cls(model=_UpperCAmelCase , **_UpperCAmelCase ) @classmethod def lowerCAmelCase_ ( cls : List[Any] , _UpperCAmelCase : Union[str, Path] , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Optional[str] = None , **_UpperCAmelCase : Tuple , ): _A = None if len(str(_UpperCAmelCase ).split('@' ) ) == 2: _A , _A = model_id.split('@' ) return cls._from_pretrained( model_id=_UpperCAmelCase , revision=_UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , use_auth_token=_UpperCAmelCase , **_UpperCAmelCase , )
style_context_codestyle: 7
label: 1
"""simple docstring""" def _snake_case ( _snake_case : int = 1_00 ) -> int: '''simple docstring''' _A = n * (n + 1) * (2 * n + 1) / 6 _A = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(F'''{solution() = }''')
code_codestyle: 7
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { '''facebook/s2t-small-librispeech-asr''': ( '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json''' ), # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : str = '''speech_to_text''' UpperCAmelCase : List[Any] = ['''past_key_values'''] UpperCAmelCase : Tuple = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self : int , _UpperCAmelCase : Union[str, Any]=10_000 , _UpperCAmelCase : Tuple=12 , _UpperCAmelCase : int=2_048 , _UpperCAmelCase : Optional[Any]=4 , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : Tuple=2_048 , _UpperCAmelCase : str=4 , _UpperCAmelCase : int=0.0 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Union[str, Any]="relu" , _UpperCAmelCase : List[Any]=256 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Any=0.0 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : str=0.02 , _UpperCAmelCase : Any=2 , _UpperCAmelCase : Dict=True , _UpperCAmelCase : List[str]=1 , _UpperCAmelCase : Tuple=0 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : List[str]=6_000 , _UpperCAmelCase : Optional[Any]=1_024 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : Any=(5, 5) , _UpperCAmelCase : int=1_024 , _UpperCAmelCase : str=80 , _UpperCAmelCase : Any=1 , **_UpperCAmelCase : Tuple , ): _A = vocab_size _A = d_model _A = encoder_ffn_dim _A = encoder_layers _A = encoder_attention_heads _A = decoder_ffn_dim _A = decoder_layers _A = decoder_attention_heads _A = dropout _A = attention_dropout _A = activation_dropout _A = activation_function _A = init_std _A = encoder_layerdrop _A = decoder_layerdrop _A = use_cache _A = encoder_layers _A = scale_embedding # scale factor will be sqrt(d_model) if True _A = max_source_positions _A = max_target_positions _A = num_conv_layers _A = list(_UpperCAmelCase ) _A = conv_channels _A = input_feat_per_channel _A = input_channels if len(self.conv_kernel_sizes ) != self.num_conv_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` ' F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, ''' F'''`config.num_conv_layers = {self.num_conv_layers}`.''' ) super().__init__( pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
style_context_codestyle: 7
label: 1
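The code field above solves Project Euler 6 in O(1) using the closed forms sum(i^2) = n(n+1)(2n+1)/6 and (sum(i))^2 = (n(n+1)/2)^2. A readable check of the same formulas (names illustrative):

def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6  # sum of i**2 for i in 1..n
    square_of_sum = (n * (n + 1) // 2) ** 2          # (sum of i)**2
    return square_of_sum - sum_of_squares

# Cross-check the closed forms against the brute-force definition.
assert solution(10) == sum(range(1, 11)) ** 2 - sum(i * i for i in range(1, 11))
print(solution())  # 25164150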
"""simple docstring""" import os import numpy import onnx def _snake_case ( _snake_case : Optional[Any] , _snake_case : Any ) -> Dict: '''simple docstring''' _A = a.name _A = b.name _A = '' _A = '' _A = a == b _A = name_a _A = name_b return res def _snake_case ( _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : List[Any] ) -> Tuple: '''simple docstring''' for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(_snake_case , _snake_case ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , _snake_case , _snake_case ) _graph_replace_input_with(node_proto.attribute[1].g , _snake_case , _snake_case ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , _snake_case , _snake_case ) def _snake_case ( _snake_case : Any , _snake_case : Optional[Any] , _snake_case : Any ) -> int: '''simple docstring''' for n in graph_proto.node: _node_replace_input_with(_snake_case , _snake_case , _snake_case ) def _snake_case ( _snake_case : List[Any] , _snake_case : List[str] , _snake_case : Any ) -> Optional[Any]: '''simple docstring''' _A = list(model.graph.initializer ) _A = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i _A = inits[i].name _A = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , _snake_case , _snake_case ) def _snake_case ( _snake_case : int ) -> int: '''simple docstring''' _A = os.path.dirname(_snake_case ) _A = os.path.basename(_snake_case ) _A = onnx.load(os.path.join(_snake_case , _snake_case ) ) _A = list(model.graph.initializer ) _A = set() _A = {} _A = [] _A = 0 for i in range(len(_snake_case ) ): if i in dup_set: continue for j in range(i + 1 , len(_snake_case ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(_snake_case ) dup_set.add(_snake_case ) _A = inits[j].data_type _A = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print('unexpected data type: ' , _snake_case ) total_reduced_size += mem_size _A = inits[i].name _A = inits[j].name if name_i in dup_map: dup_map[name_i].append(_snake_case ) else: _A = [name_j] ind_to_replace.append((j, i) ) print('total reduced size: ' , total_reduced_size / 10_24 / 10_24 / 10_24 , 'GB' ) _A = sorted(_snake_case ) _remove_dup_initializers_from_model(_snake_case , _snake_case , _snake_case ) _A = 'optimized_' + model_file_name _A = os.path.join(_snake_case , _snake_case ) onnx.save(_snake_case , _snake_case ) return new_model
code_codestyle: 7
"""simple docstring""" from manim import * class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def lowerCAmelCase_ ( self : Union[str, Any] ): _A = Rectangle(height=0.5 , width=0.5 ) _A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) _A = Rectangle(height=0.25 , width=0.25 ) _A = [mem.copy() for i in range(6 )] _A = [mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('CPU' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_UpperCAmelCase ) _A = [mem.copy() for i in range(4 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('GPU' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) gpu.move_to([-1, -1, 0] ) self.add(_UpperCAmelCase ) _A = [mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('Model' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) model.move_to([3, -1.0, 0] ) self.add(_UpperCAmelCase ) _A = [] _A = [] for i, rect in enumerate(_UpperCAmelCase ): _A = fill.copy().set_fill(_UpperCAmelCase , opacity=0.8 ) target.move_to(_UpperCAmelCase ) model_arr.append(_UpperCAmelCase ) _A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(_UpperCAmelCase ) self.add(*_UpperCAmelCase , *_UpperCAmelCase ) _A = [meta_mem.copy() for i in range(6 )] _A = [meta_mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('Disk' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) disk.move_to([-4, -1.25, 0] ) self.add(_UpperCAmelCase , _UpperCAmelCase ) _A = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) _A = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(_UpperCAmelCase , _UpperCAmelCase ) _A = MarkupText( F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , ) blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(_UpperCAmelCase ) _A = MarkupText( F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase ) ) _A = Square(0.3 ) input.set_fill(_UpperCAmelCase , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , _UpperCAmelCase , buff=0.5 ) self.play(Write(_UpperCAmelCase ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=_UpperCAmelCase , buff=0.02 ) self.play(MoveToTarget(_UpperCAmelCase ) ) self.play(FadeOut(_UpperCAmelCase ) ) _A = Arrow(start=_UpperCAmelCase , end=_UpperCAmelCase , color=_UpperCAmelCase , buff=0.5 ) a.next_to(model_arr[0].get_left() , _UpperCAmelCase , buff=0.2 ) model_cpu_arr[0].generate_target() 
model_cpu_arr[0].target.move_to(gpu_rect[0] ) _A = MarkupText( F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase , run_time=3 ) ) _A = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02} self.play( Write(_UpperCAmelCase ) , Circumscribe(model_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) _A = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.02 , _UpperCAmelCase , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.02 ) _A = AnimationGroup( FadeOut(_UpperCAmelCase , run_time=0.5 ) , MoveToTarget(_UpperCAmelCase , run_time=0.5 ) , FadeIn(_UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 ) self.play(_UpperCAmelCase ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: _A = 0.7 self.play( Circumscribe(model_arr[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) _A = a_c _A = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 ) self.play( FadeOut(_UpperCAmelCase ) , FadeOut(_UpperCAmelCase , run_time=0.5 ) , ) _A = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase , run_time=3 ) , MoveToTarget(_UpperCAmelCase ) ) self.wait()
style_context_codestyle: 7
label: 1
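The code field above shrinks an ONNX model by removing duplicate initializer tensors and rewiring node inputs to the first copy. A simplified sketch of the core rewiring idea; it assumes raw-encoded tensor data and, unlike the original, does not recurse into If/Loop subgraphs (names illustrative):

import onnx

def dedup_initializers(model: onnx.ModelProto) -> None:
    # Map each duplicate initializer name to the name of its first copy.
    replace, seen = {}, {}
    for init in list(model.graph.initializer):
        key = (init.data_type, tuple(init.dims), init.raw_data)
        if key in seen:
            replace[init.name] = seen[key]
            model.graph.initializer.remove(init)
        else:
            seen[key] = init.name
    # Rewire every node input that pointed at a removed duplicate.
    for node in model.graph.node:
        for i, name in enumerate(node.input):
            if name in replace:
                node.input[i] = replace[name]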
"""simple docstring""" import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class lowercase_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : List[str] = IFPipeline UpperCAmelCase : List[str] = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''} UpperCAmelCase : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS UpperCAmelCase : Any = PipelineTesterMixin.required_optional_params - {'''latents'''} def lowerCAmelCase_ ( self : int ): return self._get_dummy_components() def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : str=0 ): if str(_UpperCAmelCase ).startswith('mps' ): _A = torch.manual_seed(_UpperCAmelCase ) else: _A = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase ) _A = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def lowerCAmelCase_ ( self : Union[str, Any] ): self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def lowerCAmelCase_ ( self : Optional[int] ): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def lowerCAmelCase_ ( self : int ): self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def lowerCAmelCase_ ( self : int ): self._test_save_load_local() def lowerCAmelCase_ ( self : Any ): self._test_inference_batch_single_identical( expected_max_diff=1E-2 , ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def lowerCAmelCase_ ( self : Union[str, Any] ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @slow @require_torch_gpu class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : List[Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self : Optional[Any] ): # if _A = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa ) _A = IFSuperResolutionPipeline.from_pretrained( 'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to('cuda' ) _A , _A = pipe_a.encode_prompt('anime turtle' , device='cuda' ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() _A = None _A = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , 
_UpperCAmelCase ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img _A = IFImgaImgPipeline(**pipe_a.components ) _A = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting _A = IFInpaintingPipeline(**pipe_a.components ) _A = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] ): # pipeline 1 _start_torch_memory_measurement() _A = torch.Generator(device='cpu' ).manual_seed(0 ) _A = pipe_a( prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , num_inference_steps=2 , generator=_UpperCAmelCase , output_type='np' , ) _A = output.images[0] assert image.shape == (64, 64, 3) _A = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 _A = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' ) assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase ) # pipeline 2 _start_torch_memory_measurement() _A = torch.Generator(device='cpu' ).manual_seed(0 ) _A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_UpperCAmelCase ) _A = pipe_a( prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type='np' , ) _A = output.images[0] assert image.shape == (256, 256, 3) _A = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _A = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' ) assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase ) def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] ): # pipeline 1 _start_torch_memory_measurement() _A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_UpperCAmelCase ) _A = torch.Generator(device='cpu' ).manual_seed(0 ) _A = pipe_a( prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , num_inference_steps=2 , generator=_UpperCAmelCase , output_type='np' , ) _A = output.images[0] assert image.shape == (64, 64, 3) _A = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 _A = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' ) assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase ) # pipeline 2 _start_torch_memory_measurement() _A = torch.Generator(device='cpu' ).manual_seed(0 ) _A = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_UpperCAmelCase ) _A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_UpperCAmelCase ) _A = pipe_a( prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , 
original_image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type='np' , ) _A = output.images[0] assert image.shape == (256, 256, 3) _A = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _A = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' ) assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase ) def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any] ): # pipeline 1 _start_torch_memory_measurement() _A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_UpperCAmelCase ) _A = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(_UpperCAmelCase ) _A = torch.Generator(device='cpu' ).manual_seed(0 ) _A = pipe_a( prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , num_inference_steps=2 , generator=_UpperCAmelCase , output_type='np' , ) _A = output.images[0] assert image.shape == (64, 64, 3) _A = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 _A = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' ) assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase ) # pipeline 2 _start_torch_memory_measurement() _A = torch.Generator(device='cpu' ).manual_seed(0 ) _A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_UpperCAmelCase ) _A = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_UpperCAmelCase ) _A = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(_UpperCAmelCase ) _A = pipe_a( prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , original_image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type='np' , ) _A = output.images[0] assert image.shape == (256, 256, 3) _A = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _A = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' ) assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( ) -> Optional[int]: '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
7
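
Each stage in the row above is bracketed by `_start_torch_memory_measurement()` and an assertion on `torch.cuda.max_memory_allocated()`. The measurement pattern in isolation (the helper name below is my own):

import torch


def peak_vram_gib(fn) -> float:
    # zero the high-water mark, run the workload, read the peak back
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
    fn()
    return torch.cuda.max_memory_allocated() / 2**30


# e.g.: assert peak_vram_gib(lambda: pipe(prompt_embeds=embeds, num_inference_steps=2)) < 13
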
"""simple docstring""" def _snake_case ( _snake_case : int , _snake_case : int ) -> int: '''simple docstring''' return int((input_a, input_a).count(1 ) != 0 ) def _snake_case ( ) -> None: '''simple docstring''' assert or_gate(0 , 0 ) == 0 assert or_gate(0 , 1 ) == 1 assert or_gate(1 , 0 ) == 1 assert or_gate(1 , 1 ) == 1 if __name__ == "__main__": print(or_gate(0, 1)) print(or_gate(1, 0)) print(or_gate(0, 0)) print(or_gate(1, 1))
7
1
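
The same tuple-counting trick generalizes to the other basic gates; a sketch (these functions are illustrative additions, not part of the file above):

def and_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) == 0)


def xor_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(1) == 1)


assert [and_gate(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [0, 0, 0, 1]
assert [xor_gate(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [0, 1, 1, 0]
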
"""simple docstring""" import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 a = sys.version_info >= (3, 10) def _snake_case ( _snake_case : int=None , _snake_case : str=None ) -> Tuple: '''simple docstring''' return field(default_factory=lambda: default , metadata=_snake_case ) @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : float UpperCAmelCase : str UpperCAmelCase : bool @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : int = 42 UpperCAmelCase : str = field(default='''toto''' , metadata={'''help''': '''help message'''} ) @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : bool = False UpperCAmelCase : bool = True UpperCAmelCase : Optional[bool] = None class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Tuple = '''titi''' UpperCAmelCase : str = '''toto''' class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Dict = '''titi''' UpperCAmelCase : Optional[int] = '''toto''' UpperCAmelCase : Union[str, Any] = 42 @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : BasicEnum = "toto" def lowerCAmelCase_ ( self : Any ): _A = BasicEnum(self.foo ) @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : MixedTypeEnum = "toto" def lowerCAmelCase_ ( self : List[str] ): _A = MixedTypeEnum(self.foo ) @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : Optional[int] = None UpperCAmelCase : Optional[float] = field(default=__lowerCAmelCase , metadata={'''help''': '''help message'''} ) UpperCAmelCase : Optional[str] = None UpperCAmelCase : Optional[List[str]] = list_field(default=[] ) UpperCAmelCase : Optional[List[int]] = list_field(default=[] ) @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : List[int] = list_field(default=[] ) UpperCAmelCase : List[int] = list_field(default=[1, 2, 3] ) UpperCAmelCase : List[str] = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] ) UpperCAmelCase : List[float] = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : List[int] = field() UpperCAmelCase : str = field() UpperCAmelCase : BasicEnum = field() def lowerCAmelCase_ ( self : Union[str, Any] ): _A = BasicEnum(self.required_enum ) @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : "BasicEnum" = field() UpperCAmelCase : "Optional[bool]" = None UpperCAmelCase : "str" = field(default='''toto''' , metadata={'''help''': '''help message'''} ) UpperCAmelCase : "List[str]" = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] ) if is_python_no_less_than_3_10: @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : bool = False UpperCAmelCase : bool = True UpperCAmelCase : bool | None = None @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : int | None = None UpperCAmelCase : float | None = field(default=__lowerCAmelCase , metadata={'''help''': '''help message'''} ) UpperCAmelCase : str | None = None UpperCAmelCase : list[str] | 
None = list_field(default=[] ) UpperCAmelCase : list[int] | None = list_field(default=[] ) class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : argparse.ArgumentParser , _UpperCAmelCase : argparse.ArgumentParser ): self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): _A = {k: v for k, v in vars(_UpperCAmelCase ).items() if k != 'container'} _A = {k: v for k, v in vars(_UpperCAmelCase ).items() if k != 'container'} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('choices' , _UpperCAmelCase ) and yy.get('choices' , _UpperCAmelCase ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['type'](_UpperCAmelCase ) , yy['type'](_UpperCAmelCase ) ) del xx["type"], yy["type"] self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _A = HfArgumentParser(_UpperCAmelCase ) _A = argparse.ArgumentParser() expected.add_argument('--foo' , type=_UpperCAmelCase , required=_UpperCAmelCase ) expected.add_argument('--bar' , type=_UpperCAmelCase , required=_UpperCAmelCase ) expected.add_argument('--baz' , type=_UpperCAmelCase , required=_UpperCAmelCase ) expected.add_argument('--flag' , type=_UpperCAmelCase , default=_UpperCAmelCase , const=_UpperCAmelCase , nargs='?' ) self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase ) _A = ['--foo', '1', '--baz', 'quux', '--bar', '0.5'] ((_A) , ) = parser.parse_args_into_dataclasses(_UpperCAmelCase , look_for_args_file=_UpperCAmelCase ) self.assertFalse(example.flag ) def lowerCAmelCase_ ( self : Tuple ): _A = HfArgumentParser(_UpperCAmelCase ) _A = argparse.ArgumentParser() expected.add_argument('--foo' , default=42 , type=_UpperCAmelCase ) expected.add_argument('--baz' , default='toto' , type=_UpperCAmelCase , help='help message' ) self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase ) def lowerCAmelCase_ ( self : Dict ): _A = argparse.ArgumentParser() expected.add_argument('--foo' , type=_UpperCAmelCase , default=_UpperCAmelCase , const=_UpperCAmelCase , nargs='?' ) expected.add_argument('--baz' , type=_UpperCAmelCase , default=_UpperCAmelCase , const=_UpperCAmelCase , nargs='?' 
) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('--no_baz' , action='store_false' , default=_UpperCAmelCase , dest='baz' ) expected.add_argument('--opt' , type=_UpperCAmelCase , default=_UpperCAmelCase ) _A = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(_UpperCAmelCase ) for dataclass_type in dataclass_types: _A = HfArgumentParser(_UpperCAmelCase ) self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase ) _A = parser.parse_args([] ) self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase ) ) _A = parser.parse_args(['--foo', '--no_baz'] ) self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase ) ) _A = parser.parse_args(['--foo', '--baz'] ) self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase ) ) _A = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] ) self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase ) ) _A = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] ) self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Optional[int] ): _A = HfArgumentParser(_UpperCAmelCase ) _A = argparse.ArgumentParser() expected.add_argument( '--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42] ) , ) self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase ) _A = parser.parse_args([] ) self.assertEqual(args.foo , 'toto' ) _A = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) _A = parser.parse_args(['--foo', 'titi'] ) self.assertEqual(args.foo , 'titi' ) _A = parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) _A = parser.parse_args(['--foo', '42'] ) self.assertEqual(args.foo , 42 ) _A = parser.parse_args_into_dataclasses(['--foo', '42'] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def lowerCAmelCase_ ( self : int ): @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : Literal["titi", "toto", 42] = "toto" _A = HfArgumentParser(_UpperCAmelCase ) _A = argparse.ArgumentParser() expected.add_argument( '--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42] ) , ) self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase ) _A = parser.parse_args([] ) self.assertEqual(args.foo , 'toto' ) _A = parser.parse_args(['--foo', 'titi'] ) self.assertEqual(args.foo , 'titi' ) _A = parser.parse_args(['--foo', '42'] ) self.assertEqual(args.foo , 42 ) def lowerCAmelCase_ ( self : Optional[int] ): _A = HfArgumentParser(_UpperCAmelCase ) _A = argparse.ArgumentParser() expected.add_argument('--foo_int' , nargs='+' , default=[] , type=_UpperCAmelCase ) expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=_UpperCAmelCase ) expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=_UpperCAmelCase ) expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=_UpperCAmelCase ) self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase ) _A = parser.parse_args([] ) self.assertEqual( _UpperCAmelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , 
foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , ) _A = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() ) self.assertEqual(_UpperCAmelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = argparse.ArgumentParser() expected.add_argument('--foo' , default=_UpperCAmelCase , type=_UpperCAmelCase ) expected.add_argument('--bar' , default=_UpperCAmelCase , type=_UpperCAmelCase , help='help message' ) expected.add_argument('--baz' , default=_UpperCAmelCase , type=_UpperCAmelCase ) expected.add_argument('--ces' , nargs='+' , default=[] , type=_UpperCAmelCase ) expected.add_argument('--des' , nargs='+' , default=[] , type=_UpperCAmelCase ) _A = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(_UpperCAmelCase ) for dataclass_type in dataclass_types: _A = HfArgumentParser(_UpperCAmelCase ) self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase ) _A = parser.parse_args([] ) self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , bar=_UpperCAmelCase , baz=_UpperCAmelCase , ces=[] , des=[] ) ) _A = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() ) self.assertEqual(_UpperCAmelCase , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) ) def lowerCAmelCase_ ( self : int ): _A = HfArgumentParser(_UpperCAmelCase ) _A = argparse.ArgumentParser() expected.add_argument('--required_list' , nargs='+' , type=_UpperCAmelCase , required=_UpperCAmelCase ) expected.add_argument('--required_str' , type=_UpperCAmelCase , required=_UpperCAmelCase ) expected.add_argument( '--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=_UpperCAmelCase , ) self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _A = HfArgumentParser(_UpperCAmelCase ) _A = argparse.ArgumentParser() expected.add_argument('--foo' , type=_UpperCAmelCase , required=_UpperCAmelCase ) expected.add_argument( '--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=_UpperCAmelCase , ) expected.add_argument('--opt' , type=_UpperCAmelCase , default=_UpperCAmelCase ) expected.add_argument('--baz' , default='toto' , type=_UpperCAmelCase , help='help message' ) expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=_UpperCAmelCase ) self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase ) def lowerCAmelCase_ ( self : int ): _A = HfArgumentParser(_UpperCAmelCase ) _A = { 'foo': 12, 'bar': 3.14, 'baz': '42', 'flag': True, } _A = parser.parse_dict(_UpperCAmelCase )[0] _A = BasicExample(**_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): _A = HfArgumentParser(_UpperCAmelCase ) _A = { 'foo': 12, 'bar': 3.14, 'baz': '42', 'flag': True, 'extra': 42, } self.assertRaises(_UpperCAmelCase , parser.parse_dict , _UpperCAmelCase , allow_extra_keys=_UpperCAmelCase ) def lowerCAmelCase_ ( self : Any ): _A = HfArgumentParser(_UpperCAmelCase ) _A = { 'foo': 12, 'bar': 3.14, 'baz': '42', 'flag': True, } with tempfile.TemporaryDirectory() as tmp_dir: _A = os.path.join(_UpperCAmelCase , 'temp_json' ) os.mkdir(_UpperCAmelCase ) with open(temp_local_path + '.json' , 'w+' ) as f: json.dump(_UpperCAmelCase , _UpperCAmelCase ) _A = parser.parse_yaml_file(Path(temp_local_path + '.json' ) )[0] _A 
= BasicExample(**_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = HfArgumentParser(_UpperCAmelCase ) _A = { 'foo': 12, 'bar': 3.14, 'baz': '42', 'flag': True, } with tempfile.TemporaryDirectory() as tmp_dir: _A = os.path.join(_UpperCAmelCase , 'temp_yaml' ) os.mkdir(_UpperCAmelCase ) with open(temp_local_path + '.yaml' , 'w+' ) as f: yaml.dump(_UpperCAmelCase , _UpperCAmelCase ) _A = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0] _A = BasicExample(**_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = HfArgumentParser(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase )
7
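
For orientation, the API those tests exercise in its minimal form — the dataclass here is illustrative:

from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class RunConfig:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


parser = HfArgumentParser(RunConfig)
(cfg,) = parser.parse_args_into_dataclasses(["--foo", "7"])
assert cfg.foo == 7 and cfg.baz == "toto"
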
"""simple docstring""" import logging from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import arg_to_scheduler from transformers import TrainingArguments a = logging.getLogger(__name__) @dataclass class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Optional[float] = field( default=0.0 , metadata={'''help''': '''The label smoothing epsilon to apply (if not zero).'''} ) UpperCAmelCase : bool = field(default=__lowerCAmelCase , metadata={'''help''': '''Whether to SortishSamler or not.'''} ) UpperCAmelCase : bool = field( default=__lowerCAmelCase , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} ) UpperCAmelCase : bool = field(default=__lowerCAmelCase , metadata={'''help''': '''whether to use adafactor'''} ) UpperCAmelCase : Optional[float] = field( default=__lowerCAmelCase , metadata={'''help''': '''Encoder layer dropout probability. Goes into model.config.'''} ) UpperCAmelCase : Optional[float] = field( default=__lowerCAmelCase , metadata={'''help''': '''Decoder layer dropout probability. Goes into model.config.'''} ) UpperCAmelCase : Optional[float] = field(default=__lowerCAmelCase , metadata={'''help''': '''Dropout probability. Goes into model.config.'''} ) UpperCAmelCase : Optional[float] = field( default=__lowerCAmelCase , metadata={'''help''': '''Attention dropout probability. Goes into model.config.'''} ) UpperCAmelCase : Optional[str] = field( default='''linear''' , metadata={'''help''': f'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''} , )
7
1
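
Downstream, the dataclass above is consumed like any `TrainingArguments`; a sketch, assuming the field names restored above:

args = Seq2SeqTrainingArguments(
    output_dir="out",  # required by the TrainingArguments base class
    label_smoothing=0.1,
    predict_with_generate=True,
)
assert args.predict_with_generate and args.label_smoothing == 0.1
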
"""simple docstring""" import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import ( BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, ) def _snake_case ( _snake_case : Union[str, Any] , _snake_case : List[str] ) -> str: '''simple docstring''' _A = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg' _A = Image.open(requests.get(_snake_case , stream=_snake_case ).raw ).convert('RGB' ) _A = transforms.Compose( [ transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ), transforms.ToTensor(), transforms.Normalize((0.48145466, 0.4578275, 0.40821073) , (0.26862954, 0.26130258, 0.27577711) ), ] ) _A = transform(_snake_case ).unsqueeze(0 ).to(_snake_case ) return image def _snake_case ( _snake_case : List[str] ) -> Dict: '''simple docstring''' if "visual_encoder" in key: _A = re.sub('visual_encoder*' , 'vision_model.encoder' , _snake_case ) if "blocks" in key: _A = re.sub(R'blocks' , 'layers' , _snake_case ) if "attn" in key: _A = re.sub(R'attn' , 'self_attn' , _snake_case ) if "norm1" in key: _A = re.sub(R'norm1' , 'layer_norm1' , _snake_case ) if "norm2" in key: _A = re.sub(R'norm2' , 'layer_norm2' , _snake_case ) if "encoder.norm" in key: _A = re.sub(R'encoder.norm' , 'post_layernorm' , _snake_case ) if "encoder.patch_embed.proj" in key: _A = re.sub(R'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , _snake_case ) if "encoder.pos_embed" in key: _A = re.sub(R'encoder.pos_embed' , 'embeddings.position_embedding' , _snake_case ) if "encoder.cls_token" in key: _A = re.sub(R'encoder.cls_token' , 'embeddings.class_embedding' , _snake_case ) if "self_attn" in key: _A = re.sub(R'self_attn.proj' , 'self_attn.projection' , _snake_case ) return key @torch.no_grad() def _snake_case ( _snake_case : Optional[int] , _snake_case : List[str]=None ) -> Any: '''simple docstring''' if config_path is not None: _A = BlipConfig.from_pretrained(_snake_case ) else: _A = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} ) _A = BlipForConditionalGeneration(_snake_case ).eval() _A = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth' _A = blip_decoder(pretrained=_snake_case , image_size=3_84 , vit='base' ) _A = pt_model.eval() _A = pt_model.state_dict() for key in modified_state_dict.copy(): _A = modified_state_dict.pop(_snake_case ) _A = rename_key(_snake_case ) _A = value hf_model.load_state_dict(_snake_case ) _A = 3_84 _A = load_demo_image(image_size=_snake_case , device='cpu' ) _A = BertTokenizer.from_pretrained('bert-base-uncased' ) _A = tokenizer(['a picture of'] ).input_ids _A = hf_model.generate(_snake_case , _snake_case ) assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02] _A = hf_model.generate(_snake_case ) assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02] if pytorch_dump_folder_path is not None: hf_model.save_pretrained(_snake_case ) # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth' _A = ( 
'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth' ) _A = blip_vqa(pretrained=_snake_case , image_size=_snake_case , vit='base' ) vqa_model.eval() _A = vqa_model.state_dict() for key in modified_state_dict.copy(): _A = modified_state_dict.pop(_snake_case ) _A = rename_key(_snake_case ) _A = value _A = BlipForQuestionAnswering(_snake_case ) hf_vqa_model.load_state_dict(_snake_case ) _A = ['How many dogs are in this image?'] _A = tokenizer(_snake_case , return_tensors='pt' ).input_ids _A = hf_vqa_model.generate(_snake_case , _snake_case ) print(tokenizer.decode(answer[0] ) ) assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]" if pytorch_dump_folder_path is not None: hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' ) _A = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth' _A = blip_itm(pretrained=_snake_case , image_size=_snake_case , vit='base' ) itm_model.eval() _A = itm_model.state_dict() for key in modified_state_dict.copy(): _A = modified_state_dict.pop(_snake_case ) _A = rename_key(_snake_case ) _A = value _A = BlipForImageTextRetrieval(_snake_case ) _A = ['A picture of a woman with a dog sitting in a beach'] _A = tokenizer( _snake_case , return_tensors='pt' , padding='max_length' , truncation=_snake_case , max_length=35 , ).input_ids hf_itm_model.load_state_dict(_snake_case ) hf_itm_model.eval() _A = hf_itm_model(_snake_case , _snake_case , use_itm_head=_snake_case ) _A = hf_itm_model(_snake_case , _snake_case , use_itm_head=_snake_case ) assert out[0].item() == 0.2110687494277954 assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45698845386505127 if pytorch_dump_folder_path is not None: hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' ) if __name__ == "__main__": a = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') a = parser.parse_args() convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
7
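
All three conversions in the row above repeat one pattern: pop each checkpoint key, rewrite it, reinsert it, then `load_state_dict`. The pattern isolated (rename rules abbreviated to the first few):

import re


def rename_state_dict(state_dict: dict, rules: list) -> dict:
    renamed = {}
    for key, value in state_dict.items():
        for pattern, replacement in rules:  # apply every rule in order; order matters
            key = re.sub(pattern, replacement, key)
        renamed[key] = value
    return renamed


# abbreviated from rename_key() above:
blip_rules = [(r"visual_encoder", "vision_model.encoder"), (r"blocks", "layers"), (r"norm1", "layer_norm1")]
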
"""simple docstring""" from typing import TYPE_CHECKING from ..utils import _LazyModule a = { '''config''': [ '''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''', '''OnnxConfig''', '''OnnxConfigWithPast''', '''OnnxSeq2SeqConfigWithPast''', '''PatchingSpec''', ], '''convert''': ['''export''', '''validate_model_outputs'''], '''features''': ['''FeaturesManager'''], '''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''], } if TYPE_CHECKING: from .config import ( EXTERNAL_DATA_FORMAT_SIZE_LIMIT, OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast, PatchingSpec, ) from .convert import export, validate_model_outputs from .features import FeaturesManager from .utils import ParameterFormat, compute_serialized_parameters_size else: import sys a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
7
1
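
`_LazyModule` defers the heavy submodule imports until an attribute is first touched. A toy stand-in showing the shape of the mechanism (this is not transformers' actual implementation):

import importlib
import types


class ToyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # invert {submodule: [attrs]} into {attr: submodule} for O(1) lookup
        self._origin = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr: str):
        origin = self.__dict__.get("_origin", {})
        if attr not in origin:
            raise AttributeError(attr)
        submodule = importlib.import_module(f"{self.__name__}.{origin[attr]}")
        return getattr(submodule, attr)  # the real import happens only here
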
"""simple docstring""" import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowercase_ ( __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : int = RobertaTokenizer UpperCAmelCase : List[Any] = RobertaTokenizerFast UpperCAmelCase : List[str] = True UpperCAmelCase : int = {'''cls_token''': '''<s>'''} def lowerCAmelCase_ ( self : Union[str, Any] ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _A = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] _A = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) _A = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] _A = {'unk_token': '<unk>'} _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Union[str, Any] , **_UpperCAmelCase : str ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def lowerCAmelCase_ ( self : str , **_UpperCAmelCase : Optional[Any] ): kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : List[Any] ): _A = 'lower newer' _A = 'lower newer' return input_text, output_text def lowerCAmelCase_ ( self : Any ): _A = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) _A = 'lower newer' _A = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] _A = tokenizer.tokenize(_UpperCAmelCase ) # , add_prefix_space=True) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) _A = tokens + [tokenizer.unk_token] _A = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase ) def lowerCAmelCase_ ( self : Tuple ): _A = self.get_tokenizer() self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=_UpperCAmelCase ) , [0, 31_414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode('Hello world! 
cécé herlolip 418' , add_special_tokens=_UpperCAmelCase ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , ) @slow def lowerCAmelCase_ ( self : Optional[Any] ): _A = self.tokenizer_class.from_pretrained('roberta-base' ) _A = tokenizer.encode('sequence builders' , add_special_tokens=_UpperCAmelCase ) _A = tokenizer.encode('multi-sequence build' , add_special_tokens=_UpperCAmelCase ) _A = tokenizer.encode( 'sequence builders' , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ) _A = tokenizer.encode( 'sequence builders' , 'multi-sequence build' , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ) _A = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase ) _A = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def lowerCAmelCase_ ( self : List[str] ): _A = self.get_tokenizer() _A = 'Encode this sequence.' _A = tokenizer.byte_encoder[' '.encode('utf-8' )[0]] # Testing encoder arguments _A = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ) _A = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase ) _A = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ) _A = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) tokenizer.add_special_tokens({'bos_token': '<s>'} ) _A = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) _A = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase ) # Testing spaces after special tokens _A = '<mask>' tokenizer.add_special_tokens( {'mask_token': AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase )} ) # mask token has a left space _A = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) _A = 'Encode <mask> sequence' _A = 'Encode <mask>sequence' _A = tokenizer.encode(_UpperCAmelCase ) _A = encoded.index(_UpperCAmelCase ) _A = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) _A = tokenizer.encode(_UpperCAmelCase ) _A = encoded.index(_UpperCAmelCase ) _A = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase ) def lowerCAmelCase_ ( self : int ): pass def lowerCAmelCase_ ( self : List[Any] ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _A = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase ) _A = self.tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase ) _A = 'A, <mask> AllenNLP sentence.' 
_A = tokenizer_r.encode_plus(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase ) _A = tokenizer_p.encode_plus(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , ) _A = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) _A = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( _UpperCAmelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( _UpperCAmelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) def lowerCAmelCase_ ( self : Tuple ): for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): _A = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase ) _A = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) _A = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['add_prefix_space'] , _UpperCAmelCase ) self.assertEqual(post_processor_state['add_prefix_space'] , _UpperCAmelCase ) self.assertEqual(post_processor_state['trim_offsets'] , _UpperCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _A = 'hello' # `hello` is a token in the vocabulary of `pretrained_name` _A = F'''{text_of_1_token} {text_of_1_token}''' _A = self.rust_tokenizer_class.from_pretrained( _UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase ) _A = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_UpperCAmelCase ) + 1, len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , ) _A = self.rust_tokenizer_class.from_pretrained( _UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase ) _A = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_UpperCAmelCase ) + 1, len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , ) _A = self.rust_tokenizer_class.from_pretrained( _UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase ) _A = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , 
add_special_tokens=_UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_UpperCAmelCase ), len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , ) _A = self.rust_tokenizer_class.from_pretrained( _UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase ) _A = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_UpperCAmelCase ), len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , ) _A = F''' {text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) _A = self.rust_tokenizer_class.from_pretrained( _UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase ) _A = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_UpperCAmelCase ) + 1, 1 + len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , ) _A = self.rust_tokenizer_class.from_pretrained( _UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase ) _A = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_UpperCAmelCase ), 1 + len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , ) _A = self.rust_tokenizer_class.from_pretrained( _UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase ) _A = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_UpperCAmelCase ), 1 + len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
7
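
The matrix of offset checks above reduces to one question: is the space before a word attributed to that word's token span? A compact probe (downloads `roberta-base`):

from transformers import RobertaTokenizerFast

tok = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True, trim_offsets=True)
enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
# with trim_offsets=True each span excludes the leading space: [(0, 5), (6, 11)]
print(enc.offset_mapping)
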
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import KarrasVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : UNetaDModel UpperCAmelCase : KarrasVeScheduler def __init__( self : Any , _UpperCAmelCase : UNetaDModel , _UpperCAmelCase : KarrasVeScheduler ): super().__init__() self.register_modules(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase ) @torch.no_grad() def __call__( self : Optional[int] , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 50 , _UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _UpperCAmelCase : Optional[str] = "pil" , _UpperCAmelCase : bool = True , **_UpperCAmelCase : Optional[Any] , ): _A = self.unet.config.sample_size _A = (batch_size, 3, img_size, img_size) _A = self.unet # sample x_0 ~ N(0, sigma_0^2 * I) _A = randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device ) * self.scheduler.init_noise_sigma self.scheduler.set_timesteps(_UpperCAmelCase ) for t in self.progress_bar(self.scheduler.timesteps ): # here sigma_t == t_i from the paper _A = self.scheduler.schedule[t] _A = self.scheduler.schedule[t - 1] if t > 0 else 0 # 1. Select temporarily increased noise level sigma_hat # 2. Add new noise to move from sample_i to sample_hat _A , _A = self.scheduler.add_noise_to_input(_UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ) # 3. Predict the noise residual given the noise magnitude `sigma_hat` # The model inputs and output are adjusted by following eq. (213) in [1]. _A = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample # 4. Evaluate dx/dt at sigma_hat # 5. Take Euler step from sigma to sigma_prev _A = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if sigma_prev != 0: # 6. Apply 2nd order correction # The model inputs and output are adjusted by following eq. (213) in [1]. _A = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample _A = self.scheduler.step_correct( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , step_output.prev_sample , step_output['derivative'] , ) _A = step_output.prev_sample _A = (sample / 2 + 0.5).clamp(0 , 1 ) _A = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _A = self.numpy_to_pil(_UpperCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=_UpperCAmelCase )
7
1
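
End-to-end use of the pipeline above is short; the checkpoint name is illustrative — any UNet2DModel trained for the Karras VE schedule works:

from diffusers import KarrasVePipeline

pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256").to("cuda")  # illustrative checkpoint
image = pipe(batch_size=1, num_inference_steps=50).images[0]
image.save("karras_ve_sample.png")
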
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging a = logging.get_logger(__name__) a = { '''speechbrain/m-ctc-t-large''': '''https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json''', # See all M-CTC-T models at https://huggingface.co/models?filter=mctct } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Optional[Any] = '''mctct''' def __init__( self : Optional[Any] , _UpperCAmelCase : str=8_065 , _UpperCAmelCase : int=1_536 , _UpperCAmelCase : Tuple=36 , _UpperCAmelCase : int=6_144 , _UpperCAmelCase : Any=4 , _UpperCAmelCase : List[str]=384 , _UpperCAmelCase : Dict=920 , _UpperCAmelCase : Tuple=1E-5 , _UpperCAmelCase : Optional[Any]=0.3 , _UpperCAmelCase : Any="relu" , _UpperCAmelCase : str=0.02 , _UpperCAmelCase : Optional[int]=0.3 , _UpperCAmelCase : int=0.3 , _UpperCAmelCase : Dict=1 , _UpperCAmelCase : List[Any]=0 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Optional[Any]=1 , _UpperCAmelCase : Tuple=0.3 , _UpperCAmelCase : Any=1 , _UpperCAmelCase : str=(7,) , _UpperCAmelCase : Tuple=(3,) , _UpperCAmelCase : Any=80 , _UpperCAmelCase : Tuple=1 , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : List[str]="sum" , _UpperCAmelCase : List[str]=False , **_UpperCAmelCase : Union[str, Any] , ): super().__init__(**_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase ) _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = intermediate_size _A = num_attention_heads _A = attention_head_dim _A = max_position_embeddings _A = layer_norm_eps _A = layerdrop _A = hidden_act _A = initializer_range _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = pad_token_id _A = bos_token_id _A = eos_token_id _A = conv_glu_dim _A = conv_dropout _A = num_conv_layers _A = input_feat_per_channel _A = input_channels _A = conv_channels _A = ctc_loss_reduction _A = ctc_zero_infinity # prevents config testing fail with exporting to json _A = list(_UpperCAmelCase ) _A = list(_UpperCAmelCase ) if len(self.conv_kernel ) != self.num_conv_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.conv_kernel)` == `config.num_conv_layers` ' F'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, ''' F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
7
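
Instantiating the config exercises the kernel/stride validation at the end of `__init__`; a sketch, assuming the parameter names restored above:

cfg = MCTCTConfig(num_conv_layers=1, conv_kernel=(7,), conv_stride=(3,))
assert cfg.conv_kernel == [7]  # stored as a list so the config serializes to JSON

# mismatched lengths fail fast:
# MCTCTConfig(num_conv_layers=2, conv_kernel=(7,))  -> ValueError
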
"""simple docstring""" class lowercase_ : '''simple docstring''' def __init__( self : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ): _A = None _A = None _A = graph self._normalize_graph(_UpperCAmelCase , _UpperCAmelCase ) _A = len(_UpperCAmelCase ) _A = None def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict ): if sources is int: _A = [sources] if sinks is int: _A = [sinks] if len(_UpperCAmelCase ) == 0 or len(_UpperCAmelCase ) == 0: return _A = sources[0] _A = sinks[0] # make fake vertex if there are more # than one source or sink if len(_UpperCAmelCase ) > 1 or len(_UpperCAmelCase ) > 1: _A = 0 for i in sources: max_input_flow += sum(self.graph[i] ) _A = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: _A = max_input_flow _A = 0 _A = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: _A = max_input_flow _A = size - 1 def lowerCAmelCase_ ( self : Optional[Any] ): if self.maximum_flow_algorithm is None: raise Exception('You need to set maximum flow algorithm before.' ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : Union[str, Any] ): _A = algorithm(self ) class lowercase_ : '''simple docstring''' def __init__( self : List[Any] , _UpperCAmelCase : Union[str, Any] ): _A = flow_network _A = flow_network.verticesCount _A = flow_network.sourceIndex _A = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that _A = flow_network.graph _A = False def lowerCAmelCase_ ( self : Optional[Any] ): if not self.executed: self._algorithm() _A = True def lowerCAmelCase_ ( self : int ): pass class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : int , _UpperCAmelCase : Any ): super().__init__(_UpperCAmelCase ) # use this to save your result _A = -1 def lowerCAmelCase_ ( self : Optional[Any] ): if not self.executed: raise Exception('You should execute algorithm before using its result!' 
) return self.maximum_flow class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : Dict , _UpperCAmelCase : List[Any] ): super().__init__(_UpperCAmelCase ) _A = [[0] * self.verticies_count for i in range(self.verticies_count )] _A = [0] * self.verticies_count _A = [0] * self.verticies_count def lowerCAmelCase_ ( self : Dict ): _A = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule _A = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list _A = 0 while i < len(_UpperCAmelCase ): _A = vertices_list[i] _A = self.heights[vertex_index] self.process_vertex(_UpperCAmelCase ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(_UpperCAmelCase ) ) _A = 0 else: i += 1 _A = sum(self.preflow[self.source_index] ) def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Any ): while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(_UpperCAmelCase , _UpperCAmelCase ) self.relabel(_UpperCAmelCase ) def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple ): _A = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : int ): _A = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): _A = self.heights[to_index] if min_height is not None: _A = min_height + 1 if __name__ == "__main__": a = [0] a = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] a = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network a = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate a = flow_network.find_maximum_flow() print(F'''maximum flow is {maximum_flow}''')
7
1
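
A quick cross-check of the printed result above with a BFS augmenting-path (Edmonds-Karp) pass over the same adjacency matrix:

from collections import deque


def edmonds_karp(capacity, s, t):
    n = len(capacity)
    residual = [row[:] for row in capacity]
    flow_value = 0
    while True:
        # BFS for a shortest augmenting path in the residual graph
        parent = [-1] * n
        parent[s] = s
        queue = deque([s])
        while queue and parent[t] == -1:
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and residual[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[t] == -1:
            return flow_value  # no path left: flow is maximal
        # find the bottleneck along the path, then push it
        bottleneck, v = float("inf"), t
        while v != s:
            bottleneck = min(bottleneck, residual[parent[v]][v])
            v = parent[v]
        v = t
        while v != s:
            residual[parent[v]][v] -= bottleneck
            residual[v][parent[v]] += bottleneck
            v = parent[v]
        flow_value += bottleneck


graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
assert edmonds_karp(graph, 0, 3) == 6  # min(7, 6, 8) along the single 0->1->2->3 path
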
"""simple docstring""" import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin a = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece class lowercase_ ( __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : Optional[Any] = XLMProphetNetTokenizer UpperCAmelCase : Tuple = False UpperCAmelCase : List[str] = True def lowerCAmelCase_ ( self : List[Any] ): super().setUp() # We have a SentencePiece fixture for testing _A = XLMProphetNetTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase_ ( self : Any ): _A = '[PAD]' _A = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): _A = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '[PAD]' ) self.assertEqual(vocab_keys[1] , '[CLS]' ) self.assertEqual(vocab_keys[-1] , 'j' ) self.assertEqual(len(_UpperCAmelCase ) , 1_012 ) def lowerCAmelCase_ ( self : Dict ): self.assertEqual(self.get_tokenizer().vocab_size , 1_012 ) def lowerCAmelCase_ ( self : List[str] ): _A = XLMProphetNetTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase ) _A = tokenizer.tokenize('This is a test' ) self.assertListEqual(_UpperCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _A = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( _UpperCAmelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) _A = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) self.assertListEqual( _UpperCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4] ] , ) _A = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertListEqual( _UpperCAmelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '[UNK]', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '[UNK]', '.', ] , ) @cached_property def lowerCAmelCase_ ( self : Any ): return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased' ) @slow def lowerCAmelCase_ ( self : Tuple ): _A = 'Hello World!' 
_A = [35_389, 6_672, 49, 2] self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) ) @slow def lowerCAmelCase_ ( self : Optional[Any] ): # fmt: off _A = {'input_ids': [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_UpperCAmelCase , model_name='microsoft/xprophetnet-large-wiki100-cased' , revision='1acad1643ddd54a44df6a1b797ada8373685d90e' , )
7
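
The repeated `value + tokenizer.fairseq_offset` in the assertions above exists because fairseq-style vocabularies reserve the lowest ids for their own special tokens, so every raw SentencePiece id is shifted past them. In sketch form (the offset value here is illustrative):

sp_ids = [285, 46, 10, 170, 382]  # raw SentencePiece ids for "▁This ▁is ▁a ▁t est"
fairseq_offset = 10  # illustrative; the real value depends on the tokenizer
hf_ids = [i + fairseq_offset for i in sp_ids]
assert hf_ids == [295, 56, 20, 180, 392]
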
"""simple docstring""" import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin a = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''') @require_sentencepiece @require_tokenizers class lowercase_ ( __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : List[str] = SpeechTaTokenizer UpperCAmelCase : Tuple = False UpperCAmelCase : Optional[int] = True def lowerCAmelCase_ ( self : Tuple ): super().setUp() # We have a SentencePiece fixture for testing _A = SpeechTaTokenizer(_UpperCAmelCase ) _A = AddedToken('<mask>' , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) _A = mask_token tokenizer.add_special_tokens({'mask_token': mask_token} ) tokenizer.add_tokens(['<ctc_blank>'] ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Tuple ): _A = 'this is a test' _A = 'this is a test' return input_text, output_text def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : Dict=20 , _UpperCAmelCase : str=5 ): _A , _A = self.get_input_output_texts(_UpperCAmelCase ) _A = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) _A = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase ) return text, ids def lowerCAmelCase_ ( self : Optional[Any] ): _A = '<pad>' _A = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-4] , 'œ' ) self.assertEqual(vocab_keys[-2] , '<mask>' ) self.assertEqual(vocab_keys[-1] , '<ctc_blank>' ) self.assertEqual(len(_UpperCAmelCase ) , 81 ) def lowerCAmelCase_ ( self : Optional[Any] ): self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def lowerCAmelCase_ ( self : Any ): _A = self.get_tokenizers(do_lower_case=_UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): _A = tokenizer.vocab_size _A = len(_UpperCAmelCase ) self.assertNotEqual(_UpperCAmelCase , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) _A = ['aaaaa bbbbbb', 'cccccccccdddddddd'] _A = tokenizer.add_tokens(_UpperCAmelCase ) _A = tokenizer.vocab_size _A = len(_UpperCAmelCase ) self.assertNotEqual(_UpperCAmelCase , 0 ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , len(_UpperCAmelCase ) ) self.assertEqual(_UpperCAmelCase , all_size + len(_UpperCAmelCase ) ) _A = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=_UpperCAmelCase ) self.assertGreaterEqual(len(_UpperCAmelCase ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) _A = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'} _A = tokenizer.add_special_tokens(_UpperCAmelCase ) _A = 
tokenizer.vocab_size _A = len(_UpperCAmelCase ) self.assertNotEqual(_UpperCAmelCase , 0 ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , len(_UpperCAmelCase ) ) self.assertEqual(_UpperCAmelCase , all_size_a + len(_UpperCAmelCase ) ) _A = tokenizer.encode( '>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=_UpperCAmelCase ) self.assertGreaterEqual(len(_UpperCAmelCase ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def lowerCAmelCase_ ( self : str ): pass def lowerCAmelCase_ ( self : Any ): pass def lowerCAmelCase_ ( self : Dict ): _A = self.get_tokenizer() _A = tokenizer.tokenize('This is a test' ) # fmt: off self.assertListEqual(_UpperCAmelCase , [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) _A = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( _UpperCAmelCase , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] ) _A = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) # fmt: off self.assertListEqual(_UpperCAmelCase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on _A = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertListEqual( _UpperCAmelCase , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] ) @slow def lowerCAmelCase_ ( self : List[Any] ): # Use custom sequence because this tokenizer does not handle numbers. _A = [ 'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides ' 'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural ' 'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained ' 'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.', 'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly ' 'conditioning on both left and right context in all layers.', 'The quick brown fox jumps over the lazy dog.', ] # fmt: off _A = { 'input_ids': [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], 'attention_mask': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=_UpperCAmelCase , model_name='microsoft/speecht5_asr' , revision='c5ef64c71905caeccde0e4462ef3f9077224c524' , sequences=_UpperCAmelCase , )
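# A hedged round-trip with the published tokenizer this test targets
# (SpeechT5Tokenizer in transformers; requires network access and
# sentencepiece). The fixture-based tests above use a small local vocab
# instead.
from transformers import SpeechT5Tokenizer

speechta_tok = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
speechta_ids = speechta_tok.encode("this is a test")
print(speechta_ids)
print(speechta_tok.decode(speechta_ids, skip_special_tokens=True))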
7
1
"""simple docstring""" def _snake_case ( _snake_case : list ) -> list: '''simple docstring''' for i in range(len(_snake_case ) - 1 , 0 , -1 ): _A = False for j in range(_snake_case , 0 , -1 ): if unsorted[j] < unsorted[j - 1]: _A , _A = unsorted[j - 1], unsorted[j] _A = True for j in range(_snake_case ): if unsorted[j] > unsorted[j + 1]: _A , _A = unsorted[j + 1], unsorted[j] _A = True if not swapped: break return unsorted if __name__ == "__main__": import doctest doctest.testmod() a = input('''Enter numbers separated by a comma:\n''').strip() a = [int(item) for item in user_input.split(''',''')] print(F'''{cocktail_shaker_sort(unsorted) = }''')
7
"""simple docstring""" from .configuration_bert_masked import MaskedBertConfig from .modeling_bert_masked import ( MaskedBertForMultipleChoice, MaskedBertForQuestionAnswering, MaskedBertForSequenceClassification, MaskedBertForTokenClassification, MaskedBertModel, ) from .modules import *
7
1
"""simple docstring""" import json import os import torch from diffusers import UNetaDModel os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True) os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True) os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True) def _snake_case ( _snake_case : int ) -> Any: '''simple docstring''' if hor == 1_28: _A = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D') _A = (32, 1_28, 2_56) _A = ('UpResnetBlock1D', 'UpResnetBlock1D') elif hor == 32: _A = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D') _A = (32, 64, 1_28, 2_56) _A = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D') _A = torch.load(F'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' ) _A = model.state_dict() _A = { 'down_block_types': down_block_types, 'block_out_channels': block_out_channels, 'up_block_types': up_block_types, 'layers_per_block': 1, 'use_timestep_embedding': True, 'out_block_type': 'OutConv1DBlock', 'norm_num_groups': 8, 'downsample_each_block': False, 'in_channels': 14, 'out_channels': 14, 'extra_in_channels': 0, 'time_embedding_type': 'positional', 'flip_sin_to_cos': False, 'freq_shift': 1, 'sample_size': 6_55_36, 'mid_block_type': 'MidResTemporalBlock1D', 'act_fn': 'mish', } _A = UNetaDModel(**_snake_case ) print(F'''length of state dict: {len(state_dict.keys() )}''' ) print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' ) _A = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): _A = state_dict.pop(_snake_case ) hf_value_function.load_state_dict(_snake_case ) torch.save(hf_value_function.state_dict() , F'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' ) with open(F'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , 'w' ) as f: json.dump(_snake_case , _snake_case ) def _snake_case ( ) -> List[str]: '''simple docstring''' _A = { 'in_channels': 14, 'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'), 'up_block_types': (), 'out_block_type': 'ValueFunction', 'mid_block_type': 'ValueFunctionMidBlock1D', 'block_out_channels': (32, 64, 1_28, 2_56), 'layers_per_block': 1, 'downsample_each_block': True, 'sample_size': 6_55_36, 'out_channels': 14, 'extra_in_channels': 0, 'time_embedding_type': 'positional', 'use_timestep_embedding': True, 'flip_sin_to_cos': False, 'freq_shift': 1, 'norm_num_groups': 8, 'act_fn': 'mish', } _A = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' ) _A = model _A = UNetaDModel(**_snake_case ) print(F'''length of state dict: {len(state_dict.keys() )}''' ) print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' ) _A = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): _A = state_dict.pop(_snake_case ) hf_value_function.load_state_dict(_snake_case ) torch.save(hf_value_function.state_dict() , 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' ) with open('hub/hopper-medium-v2/value_function/config.json' , 'w' ) as f: json.dump(_snake_case , _snake_case ) if __name__ == "__main__": unet(32) # unet(128) value_function()
7
"""simple docstring""" import argparse a = '''docs/source/_static/js/custom.js''' def _snake_case ( _snake_case : Dict ) -> Any: '''simple docstring''' with open(_snake_case , encoding='utf-8' , newline='\n' ) as f: _A = f.readlines() _A = 0 # First let's put the right version while not lines[index].startswith('const stableVersion =' ): index += 1 _A = F'''const stableVersion = "v{version}"\n''' # Then update the dictionary while not lines[index].startswith('const versionMapping = {' ): index += 1 # We go until the end while not lines[index].startswith('}' ): index += 1 # We add the new version at the end lines[index - 1] += F''' "v{version}": "v{version}",\n''' with open(_snake_case , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(_snake_case ) if __name__ == "__main__": a = argparse.ArgumentParser() parser.add_argument('''--version''', help='''Release version.''') a = parser.parse_args() update_custom_js(args.version)
7
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices a = logging.get_logger(__name__) class lowercase_ ( __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : List[Any] = '''maskformer-swin''' UpperCAmelCase : Optional[int] = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self : List[str] , _UpperCAmelCase : Optional[int]=224 , _UpperCAmelCase : Optional[int]=4 , _UpperCAmelCase : int=3 , _UpperCAmelCase : str=96 , _UpperCAmelCase : Any=[2, 2, 6, 2] , _UpperCAmelCase : Tuple=[3, 6, 12, 24] , _UpperCAmelCase : Any=7 , _UpperCAmelCase : Tuple=4.0 , _UpperCAmelCase : int=True , _UpperCAmelCase : int=0.0 , _UpperCAmelCase : Optional[int]=0.0 , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : Any="gelu" , _UpperCAmelCase : str=False , _UpperCAmelCase : Optional[int]=0.02 , _UpperCAmelCase : str=1E-5 , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : str=None , **_UpperCAmelCase : Optional[Any] , ): super().__init__(**_UpperCAmelCase ) _A = image_size _A = patch_size _A = num_channels _A = embed_dim _A = depths _A = len(_UpperCAmelCase ) _A = num_heads _A = window_size _A = mlp_ratio _A = qkv_bias _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = drop_path_rate _A = hidden_act _A = use_absolute_embeddings _A = layer_norm_eps _A = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _A = int(embed_dim * 2 ** (len(_UpperCAmelCase ) - 1) ) _A = ['stem'] + [F'''stage{idx}''' for idx in range(1 , len(_UpperCAmelCase ) + 1 )] _A , _A = get_aligned_output_features_output_indices( out_features=_UpperCAmelCase , out_indices=_UpperCAmelCase , stage_names=self.stage_names )
7
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { '''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''', # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : int = '''vit_mae''' def __init__( self : Union[str, Any] , _UpperCAmelCase : Optional[int]=768 , _UpperCAmelCase : Tuple=12 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Optional[int]=3_072 , _UpperCAmelCase : Any="gelu" , _UpperCAmelCase : Optional[Any]=0.0 , _UpperCAmelCase : Optional[int]=0.0 , _UpperCAmelCase : Dict=0.02 , _UpperCAmelCase : List[Any]=1E-1_2 , _UpperCAmelCase : Optional[Any]=224 , _UpperCAmelCase : int=16 , _UpperCAmelCase : str=3 , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : int=16 , _UpperCAmelCase : str=512 , _UpperCAmelCase : int=8 , _UpperCAmelCase : List[Any]=2_048 , _UpperCAmelCase : Optional[Any]=0.75 , _UpperCAmelCase : List[str]=False , **_UpperCAmelCase : Union[str, Any] , ): super().__init__(**_UpperCAmelCase ) _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = initializer_range _A = layer_norm_eps _A = image_size _A = patch_size _A = num_channels _A = qkv_bias _A = decoder_num_attention_heads _A = decoder_hidden_size _A = decoder_num_hidden_layers _A = decoder_intermediate_size _A = mask_ratio _A = norm_pix_loss
7
1
"""simple docstring""" import os import zipfile import requests from get_ci_error_statistics import download_artifact, get_artifacts_links def _snake_case ( _snake_case : Any , _snake_case : str=7 ) -> Optional[Any]: '''simple docstring''' _A = None if token is not None: _A = {'Accept': 'application/vnd.github+json', 'Authorization': F'''Bearer {token}'''} # The id of a workflow (not of a workflow run) _A = '636036' _A = F'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs''' # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results url += F'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}''' _A = requests.get(_snake_case , headers=_snake_case ).json() return result["workflow_runs"] def _snake_case ( _snake_case : List[str] ) -> Optional[int]: '''simple docstring''' _A = get_daily_ci_runs(_snake_case ) _A = None for workflow_run in workflow_runs: if workflow_run["status"] == "completed": _A = workflow_run['id'] break return workflow_run_id def _snake_case ( _snake_case : Optional[int] , _snake_case : Dict , _snake_case : Any ) -> Dict: '''simple docstring''' _A = get_last_daily_ci_runs(_snake_case ) if workflow_run_id is not None: _A = get_artifacts_links(worflow_run_id=_snake_case , token=_snake_case ) for artifact_name in artifact_names: if artifact_name in artifacts_links: _A = artifacts_links[artifact_name] download_artifact( artifact_name=_snake_case , artifact_url=_snake_case , output_dir=_snake_case , token=_snake_case ) def _snake_case ( _snake_case : Optional[Any] , _snake_case : List[str] , _snake_case : Tuple ) -> str: '''simple docstring''' get_last_daily_ci_artifacts(_snake_case , _snake_case , _snake_case ) _A = {} for artifact_name in artifact_names: _A = os.path.join(_snake_case , F'''{artifact_name}.zip''' ) if os.path.isfile(_snake_case ): _A = {} with zipfile.ZipFile(_snake_case ) as z: for filename in z.namelist(): if not os.path.isdir(_snake_case ): # read the file with z.open(_snake_case ) as f: _A = f.read().decode('UTF-8' ) return results
7
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() a = logging.get_logger(__name__) a = [ ('''bert.bert''', '''visual_bert'''), ('''bert.cls''', '''cls'''), ('''bert.classifier''', '''cls'''), ('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''), ('''position_embeddings_visual''', '''visual_position_embeddings'''), ('''projection''', '''visual_projection'''), ] a = [ '''nlvr2_coco_pre_trained.th''', '''nlvr2_fine_tuned.th''', '''nlvr2_pre_trained.th''', '''vcr_coco_pre_train.th''', '''vcr_fine_tune.th''', '''vcr_pre_train.th''', '''vqa_coco_pre_trained.th''', '''vqa_fine_tuned.th''', '''vqa_pre_trained.th''', ] def _snake_case ( _snake_case : Optional[Any] ) -> str: '''simple docstring''' _A = torch.load(_snake_case , map_location='cpu' ) return sd def _snake_case ( _snake_case : Union[str, Any] , _snake_case : str , _snake_case : Tuple=rename_keys_prefix ) -> List[str]: '''simple docstring''' _A = OrderedDict() _A = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue _A = key for name_pair in rename_keys_prefix: _A = new_key.replace(name_pair[0] , name_pair[1] ) _A = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately _A = new_d['cls.predictions.bias'] return new_d @torch.no_grad() def _snake_case ( _snake_case : List[str] , _snake_case : Dict ) -> Dict: '''simple docstring''' assert ( checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS ), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.''' # Get Config if "pre" in checkpoint_path: _A = 'pretraining' if "vcr" in checkpoint_path: _A = {'visual_embedding_dim': 5_12} elif "vqa_advanced" in checkpoint_path: _A = {'visual_embedding_dim': 20_48} elif "vqa" in checkpoint_path: _A = {'visual_embedding_dim': 20_48} elif "nlvr" in checkpoint_path: _A = {'visual_embedding_dim': 10_24} else: raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' ) else: if "vcr" in checkpoint_path: _A = {'visual_embedding_dim': 5_12} _A = 'multichoice' elif "vqa_advanced" in checkpoint_path: _A = {'visual_embedding_dim': 20_48} _A = 'vqa_advanced' elif "vqa" in checkpoint_path: _A = {'visual_embedding_dim': 20_48, 'num_labels': 31_29} _A = 'vqa' elif "nlvr" in checkpoint_path: _A = { 'visual_embedding_dim': 10_24, 'num_labels': 2, } _A = 'nlvr' _A = VisualBertConfig(**_snake_case ) # Load State Dict _A = load_state_dict(_snake_case ) _A = get_new_dict(_snake_case , _snake_case ) if model_type == "pretraining": _A = VisualBertForPreTraining(_snake_case ) elif model_type == "vqa": _A = VisualBertForQuestionAnswering(_snake_case ) elif model_type == "nlvr": _A = VisualBertForVisualReasoning(_snake_case ) elif model_type == "multichoice": _A = VisualBertForMultipleChoice(_snake_case ) model.load_state_dict(_snake_case ) # Save Checkpoints Path(_snake_case ).mkdir(exist_ok=_snake_case ) model.save_pretrained(_snake_case ) if __name__ == "__main__": a = argparse.ArgumentParser() # Required parameters parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''') 
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''') a = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
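# The prefix renaming applied by the converter's key mapping above, in isolation:
rename_pairs = [("bert.bert", "visual_bert"), ("bert.cls", "cls")]
demo_key = "bert.bert.encoder.layer.0.attention.self.query.weight"
for old_prefix, new_prefix in rename_pairs:
    demo_key = demo_key.replace(old_prefix, new_prefix)
print(demo_key)  # visual_bert.encoder.layer.0.attention.self.query.weight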
7
1
"""simple docstring""" def _snake_case ( _snake_case : str , _snake_case : int ) -> str: '''simple docstring''' _A = [[] for _ in range(_snake_case )] _A = key - 1 if key <= 0: raise ValueError('Height of grid can\'t be 0 or negative' ) if key == 1 or len(_snake_case ) <= key: return input_string for position, character in enumerate(_snake_case ): _A = position % (lowest * 2) # puts it in bounds _A = min(_snake_case , lowest * 2 - num ) # creates zigzag pattern temp_grid[num].append(_snake_case ) _A = [''.join(_snake_case ) for row in temp_grid] _A = ''.join(_snake_case ) return output_string def _snake_case ( _snake_case : str , _snake_case : int ) -> str: '''simple docstring''' _A = [] _A = key - 1 if key <= 0: raise ValueError('Height of grid can\'t be 0 or negative' ) if key == 1: return input_string _A = [[] for _ in range(_snake_case )] # generates template for position in range(len(_snake_case ) ): _A = position % (lowest * 2) # puts it in bounds _A = min(_snake_case , lowest * 2 - num ) # creates zigzag pattern temp_grid[num].append('*' ) _A = 0 for row in temp_grid: # fills in the characters _A = input_string[counter : counter + len(_snake_case )] grid.append(list(_snake_case ) ) counter += len(_snake_case ) _A = '' # reads as zigzag for position in range(len(_snake_case ) ): _A = position % (lowest * 2) # puts it in bounds _A = min(_snake_case , lowest * 2 - num ) # creates zigzag pattern output_string += grid[num][0] grid[num].pop(0 ) return output_string def _snake_case ( _snake_case : str ) -> dict[int, str]: '''simple docstring''' _A = {} for key_guess in range(1 , len(_snake_case ) ): # tries every key _A = decrypt(_snake_case , _snake_case ) return results if __name__ == "__main__": import doctest doctest.testmod()
7
"""simple docstring""" from datetime import datetime import matplotlib.pyplot as plt import torch def _snake_case ( _snake_case : Dict ) -> Optional[Any]: '''simple docstring''' for param in module.parameters(): _A = False def _snake_case ( ) -> Tuple: '''simple docstring''' _A = 'cuda' if torch.cuda.is_available() else 'cpu' if torch.backends.mps.is_available() and torch.backends.mps.is_built(): _A = 'mps' if device == "mps": print( 'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch' ' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues' ' with generations.' ) return device def _snake_case ( _snake_case : Dict ) -> Optional[Any]: '''simple docstring''' _A = plt.imshow(_snake_case ) fig.axes.get_xaxis().set_visible(_snake_case ) fig.axes.get_yaxis().set_visible(_snake_case ) plt.show() def _snake_case ( ) -> Optional[Any]: '''simple docstring''' _A = datetime.now() _A = current_time.strftime('%H:%M:%S' ) return timestamp
7
1