Dataset schema (one row = five fields, shown in order below):

    code                     string   lengths 81 – 54k
    code_codestyle           int64    values 0 – 721
    style_context            string   lengths 91 – 41.9k
    style_context_codestyle  int64    values 0 – 699
    label                    int64    values 0 – 1

Each row that follows prints the `code` sample, its `code_codestyle` id, the `style_context` sample, its `style_context_codestyle` id, and the row's `label`.
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() __snake_case : List[Any] = logging.get_logger(__name__) def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : List[str]=False ) -> str: """simple docstring""" A__ : int =[] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """vit.embeddings.cls_token"""), ("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" A__ : int =[(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Optional[Any], __snake_case : Tuple=False ) -> Optional[Any]: """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: A__ : Any ="""""" else: A__ : Optional[int] ="""vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A__ : str =state_dict.pop(f"blocks.{i}.attn.qkv.weight" ) A__ : Optional[Any] =state_dict.pop(f"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict A__ : Optional[int] =in_proj_weight[ : config.hidden_size, : ] A__ : str =in_proj_bias[: config.hidden_size] A__ : Optional[Any] =in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A__ : Dict =in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A__ : List[Any] =in_proj_weight[ -config.hidden_size :, : ] A__ : Optional[Any] =in_proj_bias[-config.hidden_size :] def 
__lowerCamelCase ( __snake_case : Optional[Any] ) -> Union[str, Any]: """simple docstring""" A__ : List[Any] =["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(__snake_case, __snake_case ) def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : List[Any], __snake_case : List[str] ) -> Union[str, Any]: """simple docstring""" A__ : Dict =dct.pop(__snake_case ) A__ : Tuple =val def __lowerCamelCase ( ) -> int: """simple docstring""" A__ : Tuple ="""http://images.cocodataset.org/val2017/000000039769.jpg""" A__ : Tuple =Image.open(requests.get(__snake_case, stream=__snake_case ).raw ) return im @torch.no_grad() def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Tuple, __snake_case : List[str]=True ) -> str: """simple docstring""" A__ : Tuple =ViTConfig() # patch_size if model_name[-1] == "8": A__ : Optional[Any] =8 # set labels if required if not base_model: A__ : Optional[Any] =1_000 A__ : str ="""huggingface/label-files""" A__ : Any ="""imagenet-1k-id2label.json""" A__ : Tuple =json.load(open(hf_hub_download(__snake_case, __snake_case, repo_type="""dataset""" ), """r""" ) ) A__ : List[str] ={int(__snake_case ): v for k, v in idalabel.items()} A__ : List[Any] =idalabel A__ : List[Any] ={v: k for k, v in idalabel.items()} # size of the architecture if model_name in ["dino_vits8", "dino_vits16"]: A__ : str =384 A__ : Optional[Any] =1_536 A__ : Optional[Any] =12 A__ : Union[str, Any] =6 # load original model from torch hub A__ : List[Any] =torch.hub.load("""facebookresearch/dino:main""", __snake_case ) original_model.eval() # load state_dict of original model, remove and rename some keys A__ : List[str] =original_model.state_dict() if base_model: remove_classification_head_(__snake_case ) A__ : Union[str, Any] =create_rename_keys(__snake_case, base_model=__snake_case ) for src, dest in rename_keys: rename_key(__snake_case, __snake_case, __snake_case ) read_in_q_k_v(__snake_case, __snake_case, __snake_case ) # load HuggingFace model if base_model: A__ : List[str] =ViTModel(__snake_case, add_pooling_layer=__snake_case ).eval() else: A__ : List[str] =ViTForImageClassification(__snake_case ).eval() model.load_state_dict(__snake_case ) # Check outputs on an image, prepared by ViTImageProcessor A__ : Union[str, Any] =ViTImageProcessor() A__ : Optional[int] =image_processor(images=prepare_img(), return_tensors="""pt""" ) A__ : Union[str, Any] =encoding["""pixel_values"""] A__ : Union[str, Any] =model(__snake_case ) if base_model: A__ : List[str] =original_model(__snake_case ) assert torch.allclose(__snake_case, outputs.last_hidden_state[:, 0, :], atol=1E-1 ) else: A__ : Optional[int] =original_model(__snake_case ) assert logits.shape == outputs.logits.shape assert torch.allclose(__snake_case, outputs.logits, atol=1E-3 ) Path(__snake_case ).mkdir(exist_ok=__snake_case ) print(f"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(__snake_case ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(__snake_case ) if __name__ == "__main__": __snake_case : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='dino_vitb16', type=str, help='Name of the model trained with DINO you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' 
) parser.add_argument( '--base_model', action='store_true', help='Whether to only convert the base model (no projection head weights).', ) parser.set_defaults(base_model=True) __snake_case : Tuple = parser.parse_args() convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
code_codestyle: 687
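The `code` sample above converts DINO ViT checkpoints, and its core transformation is splitting timm's fused `qkv` attention projection into the separate query/key/value tensors the Hugging Face ViT expects: the fused matrix is sliced into thirds along its first axis. A minimal standalone sketch of that slicing (the function and variable names here are illustrative, not from the sample):

```python
import torch

def split_qkv(qkv_weight: torch.Tensor, qkv_bias: torch.Tensor, hidden_size: int):
    """Split a fused (3*H, H) projection into (query, key, value) weight/bias pairs."""
    q = (qkv_weight[:hidden_size, :], qkv_bias[:hidden_size])
    k = (qkv_weight[hidden_size : 2 * hidden_size, :], qkv_bias[hidden_size : 2 * hidden_size])
    v = (qkv_weight[-hidden_size:, :], qkv_bias[-hidden_size:])
    return q, k, v

# hidden size 4 -> the fused projection has shape (12, 4)
(qw, qb), (kw, kb), (vw, vb) = split_qkv(torch.randn(12, 4), torch.randn(12), 4)
assert qw.shape == (4, 4) and qb.shape == (4,)
```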
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __snake_case : Optional[int] = { 'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'], 'tokenization_convbert': ['ConvBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Tuple = ['ConvBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : int = [ 'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConvBertForMaskedLM', 'ConvBertForMultipleChoice', 'ConvBertForQuestionAnswering', 'ConvBertForSequenceClassification', 'ConvBertForTokenClassification', 'ConvBertLayer', 'ConvBertModel', 'ConvBertPreTrainedModel', 'load_tf_weights_in_convbert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Union[str, Any] = [ 'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFConvBertForMaskedLM', 'TFConvBertForMultipleChoice', 'TFConvBertForQuestionAnswering', 'TFConvBertForSequenceClassification', 'TFConvBertForTokenClassification', 'TFConvBertLayer', 'TFConvBertModel', 'TFConvBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig from .tokenization_convbert import ConvBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_convbert_fast import ConvBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convbert import ( CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convbert import ( TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) else: import sys __snake_case : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
style_context_codestyle: 687
label: 1
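The `style_context` sample above is a standard transformers `__init__.py` built around `_LazyModule`: heavy framework imports are deferred until an exported name is first touched, so importing the package stays cheap. A rough standalone equivalent of that idea using only the standard library (module and attribute names are illustrative):

```python
import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve exported names on first attribute access instead of at import time."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported attribute back to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(module_name), attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value

# usage: the json module is only imported when `dumps` is first accessed
lazy = LazyModule("demo", {"json": ["dumps"]})
print(lazy.dumps({"a": 1}))
```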
'''simple docstring''' from typing import Any def __lowerCamelCase ( __snake_case : list ) -> list[Any]: """simple docstring""" if not input_list: return [] A__ : int =[input_list.count(__snake_case ) for value in input_list] A__ : Tuple =max(__snake_case ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(__snake_case ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 687
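The `code` sample above returns the statistical mode(s) of a list by calling `list.count` once per element, which is quadratic. An equivalent linear-time sketch with `collections.Counter` (a rewrite for illustration, not the dataset's own code):

```python
from collections import Counter

def modes(values: list) -> list:
    """Return every value that occurs with maximum frequency, sorted."""
    if not values:
        return []
    counts = Counter(values)
    top = max(counts.values())
    return sorted(v for v, c in counts.items() if c == top)

assert modes([2, 3, 4, 5, 3, 4, 2, 5, 2, 4, 4, 9]) == [4]   # 4 occurs four times
assert modes([1, 1, 2, 2, 3]) == [1, 2]                     # ties are all returned
assert modes([]) == []
```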
'''simple docstring''' import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowercase__ ( self : Optional[Any] ) -> int: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() def lowercase__ ( self : Union[str, Any] ) -> str: '''simple docstring''' A__ : Any =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) A__ : Optional[Any] =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) A__ : Optional[int] ="""xvjiarui/stable-diffusion-2-inpainting""" A__ , A__ : List[str] =FlaxStableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase_ , safety_checker=lowerCAmelCase_ ) A__ : List[str] ="""Face of a yellow cat, high resolution, sitting on a park bench""" A__ : Optional[Any] =jax.random.PRNGKey(0 ) A__ : List[str] =50 A__ : List[str] =jax.device_count() A__ : List[str] =num_samples * [prompt] A__ : List[str] =num_samples * [init_image] A__ : Tuple =num_samples * [mask_image] A__ , A__ , A__ : List[Any] =pipeline.prepare_inputs(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # shard inputs and rng A__ : Dict =replicate(lowerCAmelCase_ ) A__ : Union[str, Any] =jax.random.split(lowerCAmelCase_ , jax.device_count() ) A__ : List[Any] =shard(lowerCAmelCase_ ) A__ : Union[str, Any] =shard(lowerCAmelCase_ ) A__ : str =shard(lowerCAmelCase_ ) A__ : List[str] =pipeline( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ) A__ : List[Any] =output.images.reshape(lowerCAmelCase_ , 5_12 , 5_12 , 3 ) A__ : str =images[0, 2_53:2_56, 2_53:2_56, -1] A__ : Tuple =jnp.asarray(jax.device_get(image_slice.flatten() ) ) A__ : Optional[int] =jnp.array( [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] ) print(f"output_slice: {output_slice}" ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
style_context_codestyle: 687
label: 1
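The `style_context` sample above fans a prompt out across every available JAX device: `flax.jax_utils.replicate` copies the pipeline parameters per device, `jax.random.split` gives each device its own RNG key, and `flax.training.common_utils.shard` reshapes batches so the leading axis becomes the device axis. A minimal sketch of the sharding step, assuming `jax` and `flax` are installed (on CPU, `jax.device_count()` is typically 1):

```python
import jax
import jax.numpy as jnp
from flax.training.common_utils import shard

num_devices = jax.device_count()

# two examples per device, flattened into one batch
batch = jnp.ones((num_devices * 2, 4))

# shard() reshapes (num_devices * n, ...) -> (num_devices, n, ...) for pmap
sharded = shard(batch)
print(sharded.shape)  # (num_devices, 2, 4)
```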
'''simple docstring''' from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class lowerCamelCase ( lowercase_ ): '''simple docstring''' def lowercase__ ( self : List[str] ) -> Optional[int]: '''simple docstring''' A__ : int =SMALL_MODEL_IDENTIFIER A__ : Optional[Any] ="""pt""" A__ : List[str] ="""tf""" def lowercase__ ( self : Any , lowerCAmelCase_ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' A__ : Optional[Any] =AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(lowerCAmelCase_ ) def lowercase__ ( self : List[Any] , lowerCAmelCase_ : Optional[Any] ) -> Optional[Any]: '''simple docstring''' A__ : Dict =TFAutoModel.from_pretrained(self.test_model , from_pt=lowerCAmelCase_ ) model_tf.save_pretrained(lowerCAmelCase_ ) def lowercase__ ( self : str ) -> List[str]: '''simple docstring''' A__ : str ="""mock_framework""" # Framework provided - return whatever the user provides A__ : int =FeaturesManager.determine_framework(self.test_model , lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(lowerCAmelCase_ ) A__ : Tuple =FeaturesManager.determine_framework(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(lowerCAmelCase_ ) A__ : Dict =FeaturesManager.determine_framework(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def lowercase__ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(lowerCAmelCase_ ) A__ : int =FeaturesManager.determine_framework(lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(lowerCAmelCase_ ) A__ : int =FeaturesManager.determine_framework(lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(lowerCAmelCase_ ): A__ : List[str] =FeaturesManager.determine_framework(lowerCAmelCase_ ) def lowercase__ ( self : Any ) -> Dict: '''simple docstring''' A__ : int =MagicMock(return_value=lowerCAmelCase_ ) with patch("""transformers.onnx.features.is_tf_available""" , lowerCAmelCase_ ): A__ : Optional[Any] =FeaturesManager.determine_framework(self.test_model ) self.assertEqual(lowerCAmelCase_ , self.framework_pt ) # PyTorch not in environment -> use TensorFlow A__ : Dict =MagicMock(return_value=lowerCAmelCase_ ) with patch("""transformers.onnx.features.is_torch_available""" , lowerCAmelCase_ ): A__ : str =FeaturesManager.determine_framework(self.test_model ) self.assertEqual(lowerCAmelCase_ , self.framework_tf ) # Both in environment -> use PyTorch A__ : str =MagicMock(return_value=lowerCAmelCase_ ) A__ : int =MagicMock(return_value=lowerCAmelCase_ ) with patch("""transformers.onnx.features.is_tf_available""" , lowerCAmelCase_ ), patch( """transformers.onnx.features.is_torch_available""" , lowerCAmelCase_ ): A__ : str 
=FeaturesManager.determine_framework(self.test_model ) self.assertEqual(lowerCAmelCase_ , self.framework_pt ) # Both not in environment -> raise error A__ : str =MagicMock(return_value=lowerCAmelCase_ ) A__ : str =MagicMock(return_value=lowerCAmelCase_ ) with patch("""transformers.onnx.features.is_tf_available""" , lowerCAmelCase_ ), patch( """transformers.onnx.features.is_torch_available""" , lowerCAmelCase_ ): with self.assertRaises(lowerCAmelCase_ ): A__ : List[str] =FeaturesManager.determine_framework(self.test_model )
code_codestyle: 687
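The `code` sample above unit-tests transformers' `FeaturesManager.determine_framework`, whose contract the mocks spell out: an explicitly passed framework wins, a local checkpoint is identified by the files it contains, and otherwise the installed backend decides, preferring PyTorch. A simplified sketch of that decision order (the file names and signature here are assumptions for illustration, not the library's internals):

```python
import os
from typing import Optional

def determine_framework(model: str, framework: Optional[str] = None,
                        torch_available: bool = True, tf_available: bool = True) -> str:
    """Resolve 'pt' vs 'tf': explicit choice > checkpoint files > installed backends."""
    if framework is not None:
        return framework                       # user-provided framework always wins
    if os.path.isdir(model):                   # local checkpoint: inspect its files
        if os.path.isfile(os.path.join(model, "pytorch_model.bin")):
            return "pt"
        if os.path.isfile(os.path.join(model, "tf_model.h5")):
            return "tf"
        raise FileNotFoundError(f"no PyTorch or TensorFlow checkpoint in {model}")
    if torch_available:                        # hub id: fall back to the environment,
        return "pt"                            # preferring PyTorch when both exist
    if tf_available:
        return "tf"
    raise EnvironmentError("neither PyTorch nor TensorFlow is available")
```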
'''simple docstring''' import copy from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING __snake_case : List[Any] = logging.get_logger(__name__) __snake_case : Dict = { 'microsoft/conditional-detr-resnet-50': ( 'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json' ), } class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = 'conditional_detr' __snake_case = ['past_key_values'] __snake_case = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : int , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : Tuple=3_00 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : str=20_48 , lowerCAmelCase_ : Union[str, Any]=8 , lowerCAmelCase_ : Any=6 , lowerCAmelCase_ : Any=20_48 , lowerCAmelCase_ : Union[str, Any]=8 , lowerCAmelCase_ : str=0.0 , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Optional[Any]="relu" , lowerCAmelCase_ : Union[str, Any]=2_56 , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : Optional[Any]=1.0 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : List[Any]="sine" , lowerCAmelCase_ : Optional[int]="resnet50" , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : Optional[Any]=5 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Any=5 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : int=0.25 , **lowerCAmelCase_ : int , ) -> Dict: '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.""" ) A__ : Optional[int] =CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] ) elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): A__ : Tuple =backbone_config.get("""model_type""" ) A__ : List[str] =CONFIG_MAPPING[backbone_model_type] A__ : Dict =config_class.from_dict(lowerCAmelCase_ ) A__ : int =use_timm_backbone A__ : List[Any] =backbone_config A__ : Optional[int] =num_channels A__ : Optional[int] =num_queries A__ : Union[str, Any] =d_model A__ : Optional[int] =encoder_ffn_dim A__ : Optional[Any] =encoder_layers A__ : int =encoder_attention_heads A__ : Optional[Any] =decoder_ffn_dim A__ : Tuple =decoder_layers A__ : Optional[Any] =decoder_attention_heads A__ : Tuple =dropout A__ : int =attention_dropout A__ : Dict =activation_dropout A__ : Union[str, Any] =activation_function A__ : List[str] =init_std A__ : str =init_xavier_std A__ : int =encoder_layerdrop A__ : List[Any] =decoder_layerdrop A__ : Tuple =encoder_layers A__ : Tuple =auxiliary_loss A__ : List[Any] =position_embedding_type A__ : int =backbone A__ : Optional[int] =use_pretrained_backbone A__ : str =dilation # Hungarian matcher A__ : Any =class_cost A__ : str =bbox_cost A__ : str =giou_cost # Loss coefficients A__ : Union[str, Any] =mask_loss_coefficient A__ : int =dice_loss_coefficient A__ : Union[str, Any] =cls_loss_coefficient A__ : List[str] =bbox_loss_coefficient A__ : str =giou_loss_coefficient A__ : Optional[Any] =focal_alpha super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ ) @property def lowercase__ ( self : str ) -> int: '''simple docstring''' return self.encoder_attention_heads @property def lowercase__ ( self : Any ) -> int: '''simple docstring''' return self.d_model def lowercase__ ( self : Dict ) -> Union[str, Any]: '''simple docstring''' A__ : int =copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: A__ : str =self.backbone_config.to_dict() A__ : int =self.__class__.model_type return output class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = version.parse('1.11' ) @property def lowercase__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""pixel_mask""", {0: """batch"""}), ] ) @property def lowercase__ ( self : Any ) -> float: '''simple docstring''' return 1e-5 @property def lowercase__ ( self : Any ) -> int: '''simple docstring''' return 12
style_context_codestyle: 687
label: 1
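The `style_context` sample above defines `ConditionalDetrConfig`. Under the usual `PretrainedConfig` contract it can be built with keyword overrides and round-tripped through a plain dict; a small usage sketch, assuming a transformers version that ships this model:

```python
from transformers import ConditionalDetrConfig

# override two defaults; everything else keeps the values defined above
config = ConditionalDetrConfig(num_queries=100, encoder_layers=4)
print(config.d_model)                        # 256, the default
print(config.hidden_size == config.d_model)  # True: attribute_map aliases the name

# configs round-trip through plain dicts
restored = ConditionalDetrConfig.from_dict(config.to_dict())
assert restored.num_queries == 100
```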
'''simple docstring''' __snake_case : Optional[int] = '\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' __snake_case : List[str] = [{'type': 'code', 'content': INSTALL_CONTENT}] __snake_case : Union[str, Any] = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
code_codestyle: 687
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __snake_case : Union[str, Any] = logging.get_logger(__name__) __snake_case : Optional[int] = { 'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json', } class lowerCamelCase ( lowercase_ , lowercase_ ): '''simple docstring''' __snake_case = 'bit' __snake_case = ['preactivation', 'bottleneck'] __snake_case = ['SAME', 'VALID'] def __init__( self : List[str] , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : int=64 , lowerCAmelCase_ : Optional[int]=[2_56, 5_12, 10_24, 20_48] , lowerCAmelCase_ : str=[3, 4, 6, 3] , lowerCAmelCase_ : Optional[Any]="preactivation" , lowerCAmelCase_ : str="relu" , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Dict=32 , lowerCAmelCase_ : Tuple=0.0 , lowerCAmelCase_ : int=False , lowerCAmelCase_ : Optional[Any]=32 , lowerCAmelCase_ : Tuple=1 , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : int , ) -> Optional[Any]: '''simple docstring''' super().__init__(**lowerCAmelCase_ ) if layer_type not in self.layer_types: raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" ) if global_padding is not None: if global_padding.upper() in self.supported_padding: A__ : List[Any] =global_padding.upper() else: raise ValueError(f"Padding strategy {global_padding} not supported" ) A__ : List[Any] =num_channels A__ : Tuple =embedding_size A__ : Union[str, Any] =hidden_sizes A__ : List[str] =depths A__ : Optional[Any] =layer_type A__ : int =hidden_act A__ : int =global_padding A__ : int =num_groups A__ : str =drop_path_rate A__ : str =embedding_dynamic_padding A__ : Dict =output_stride A__ : Optional[int] =width_factor A__ : List[str] =["""stem"""] + [f"stage{idx}" for idx in range(1 , len(lowerCAmelCase_ ) + 1 )] A__ , A__ : Union[str, Any] =get_aligned_output_features_output_indices( out_features=lowerCAmelCase_ , out_indices=lowerCAmelCase_ , stage_names=self.stage_names )
style_context_codestyle: 687
label: 1
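The `style_context` sample above ends by calling `get_aligned_output_features_output_indices`, the backbone-API helper that reconciles `out_features` (stage names) with `out_indices` (positions into `stage_names`), deriving whichever one is missing. A rough re-implementation of that alignment for illustration (not the library's code):

```python
from typing import List, Optional, Tuple

def align_output_features_indices(
    out_features: Optional[List[str]],
    out_indices: Optional[List[int]],
    stage_names: List[str],
) -> Tuple[List[str], List[int]]:
    """Derive whichever of out_features / out_indices was not supplied."""
    if out_features is None and out_indices is None:
        return [stage_names[-1]], [len(stage_names) - 1]   # default: deepest stage
    if out_features is None:
        return [stage_names[i] for i in out_indices], list(out_indices)
    if out_indices is None:
        return list(out_features), [stage_names.index(f) for f in out_features]
    return list(out_features), list(out_indices)

stages = ["stem", "stage1", "stage2", "stage3", "stage4"]
print(align_output_features_indices(None, None, stages))        # (['stage4'], [4])
print(align_output_features_indices(["stage2"], None, stages))  # (['stage2'], [2])
```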
'''simple docstring''' import os from math import logaa def __lowerCamelCase ( __snake_case : str = "base_exp.txt" ) -> int: """simple docstring""" A__ : float =0 A__ : List[Any] =0 for i, line in enumerate(open(os.path.join(os.path.dirname(__snake_case ), __snake_case ) ) ): A__ , A__ : str =list(map(__snake_case, line.split(""",""" ) ) ) if x * logaa(__snake_case ) > largest: A__ : Optional[int] =x * logaa(__snake_case ) A__ : Optional[Any] =i + 1 return result if __name__ == "__main__": print(solution())
code_codestyle: 687
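The `code` sample above is the classic Project Euler 99 trick: rather than evaluating enormous powers `base ** exp`, it compares `exp * log10(base)`, which orders the same way because the logarithm is monotonic. A tiny standalone sketch of the comparison:

```python
from math import log10

def largest_power(pairs: list) -> int:
    """Return the 1-based line number of the (base, exp) pair maximizing base**exp."""
    best, best_line = 0.0, 0
    for i, (base, exp) in enumerate(pairs, start=1):
        if exp * log10(base) > best:   # orders identically to base ** exp
            best, best_line = exp * log10(base), i
    return best_line

# 2**11 = 2048 < 3**7 = 2187, and the third pair dwarfs both
print(largest_power([(2, 11), (3, 7), (632382, 518061)]))  # 3
```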
'''simple docstring''' import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin __snake_case : int = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.plbart.modeling_plbart import shift_tokens_right __snake_case : List[str] = 5_0003 __snake_case : Dict = 5_0002 @require_sentencepiece @require_tokenizers class lowerCamelCase ( lowercase_ , unittest.TestCase ): '''simple docstring''' __snake_case = PLBartTokenizer __snake_case = None __snake_case = False def lowercase__ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing A__ : Tuple =PLBartTokenizer(lowerCAmelCase_ , language_codes="""base""" , keep_accents=lowerCAmelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase__ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' A__ : Union[str, Any] =PLBartTokenizer(lowerCAmelCase_ , language_codes="""base""" , keep_accents=lowerCAmelCase_ ) A__ : Optional[Any] =tokenizer.tokenize("""This is a test""" ) self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) A__ : Tuple =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) A__ : Any =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) A__ : str =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) A__ : Optional[Any] =tokenizer.vocab_size A__ : Dict =[tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 4 , lowerCAmelCase_ )] self.assertListEqual(lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] ) A__ : Dict ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go""" A__ : int =tokenizer(lowerCAmelCase_ ).input_ids self.assertEqual( tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , ) def lowercase__ ( self : Any ) -> str: '''simple docstring''' A__ : int =PLBartTokenizer(lowerCAmelCase_ , language_codes="""multi""" , keep_accents=lowerCAmelCase_ ) A__ : Dict 
=tokenizer.tokenize("""This is a test""" ) self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) A__ : Dict =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) A__ : str =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) A__ : Dict =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) A__ : Tuple =tokenizer.vocab_size A__ : Dict =[tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 7 , lowerCAmelCase_ )] self.assertListEqual( lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] ) A__ : Any ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go""" A__ : int =tokenizer(lowerCAmelCase_ ).input_ids self.assertEqual( tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , ) @require_torch @require_sentencepiece @require_tokenizers class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' __snake_case = 'uclanlp/plbart-python-en_XX' __snake_case = [ 'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])', 'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])', ] __snake_case = [ 'Returns the maximum value of a b c.', 'Sums the values of a b c.', ] __snake_case = [ 134, 5452, 3_3460, 3_3441, 3_3463, 3_3465, 3_3463, 3_3449, 988, 20, 3_3456, 19, 3_3456, 771, 39, 4258, 889, 3318, 3_3441, 3_3463, 3_3465, 3_3463, 3_3449, 2471, 2, PYTHON_CODE, ] @classmethod def lowercase__ ( cls : Optional[int] ) -> str: '''simple docstring''' A__ : PLBartTokenizer =PLBartTokenizer.from_pretrained( cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" ) A__ : Optional[Any] =1 return cls def lowercase__ ( self : str ) -> Optional[Any]: '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 5_00_01 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 5_00_02 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 5_00_03 ) def lowercase__ ( self : int ) -> List[str]: '''simple docstring''' A__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ ) def lowercase__ ( self : int ) -> 
Optional[int]: '''simple docstring''' self.assertIn(lowerCAmelCase_ , self.tokenizer.all_special_ids ) A__ : Tuple =[EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2] A__ : Any =self.tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ ) A__ : Optional[int] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase_ ) def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' A__ : Optional[int] =["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20] self.assertIsInstance(src_text[0] , lowerCAmelCase_ ) A__ : str =10 A__ : Optional[Any] =self.tokenizer(lowerCAmelCase_ , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , lowerCAmelCase_ ) self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ ) def lowercase__ ( self : str ) -> List[Any]: '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [5_00_04, 5_00_01] ) def lowercase__ ( self : Tuple ) -> str: '''simple docstring''' A__ : Tuple =tempfile.mkdtemp() A__ : Tuple =self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowerCAmelCase_ ) A__ : Optional[Any] =PLBartTokenizer.from_pretrained(lowerCAmelCase_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase_ ) @require_torch def lowercase__ ( self : Any ) -> Any: '''simple docstring''' A__ : List[str] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , return_tensors="""pt""" ) A__ : str =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] ) self.assertEqual(batch.decoder_input_ids[1][0] , lowerCAmelCase_ ) self.assertEqual(batch.decoder_input_ids[1][-1] , 2 ) self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] ) @require_torch def lowercase__ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' A__ : Union[str, Any] =self.tokenizer( self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , ) A__ : Any =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual((2, 26) , batch.input_ids.shape ) self.assertEqual((2, 26) , batch.attention_mask.shape ) A__ : List[Any] =batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] ) def lowercase__ ( self : Any ) -> Dict: '''simple docstring''' A__ : Any =self.tokenizer(self.src_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=3 , return_tensors="""pt""" ) A__ : Optional[int] =self.tokenizer( text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=10 , return_tensors="""pt""" ) A__ : Optional[Any] =targets["""input_ids"""] A__ : List[str] =shift_tokens_right(lowerCAmelCase_ , self.tokenizer.pad_token_id ) 
self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def lowercase__ ( self : Any ) -> str: '''simple docstring''' A__ : Any =self.tokenizer._build_translation_inputs( """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" ) self.assertEqual( nested_simplify(lowerCAmelCase_ ) , { # A, test, EOS, en_XX """input_ids""": [[1_50, 2_42, 2, 5_00_03]], """attention_mask""": [[1, 1, 1, 1]], # java """forced_bos_token_id""": 5_00_01, } , )
style_context_codestyle: 687
label: 1
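The `style_context` sample above leans on `shift_tokens_right` to turn labels into decoder inputs; for PLBart (as for mBART) the last non-padding token, the language code, is wrapped around to position 0. A re-implementation of that shift for illustration:

```python
import torch

def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    """mBART-style shift: move each row's last non-pad token to position 0."""
    shifted = input_ids.clone()
    # index of the last non-padding token in every row
    eos_idx = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    decoder_start = input_ids.gather(1, eos_idx).squeeze(-1)
    shifted[:, 1:] = input_ids[:, :-1].clone()
    shifted[:, 0] = decoder_start
    return shifted

labels = torch.tensor([[5, 6, 2, 50003, 1, 1]])  # ..., eos=2, lang code, pads
print(shift_tokens_right(labels, pad_token_id=1))
# tensor([[50003,     5,     6,     2, 50003,     1]])
```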
'''simple docstring''' import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase ( lowercase_ , unittest.TestCase ): '''simple docstring''' __snake_case = GPTaTokenizer __snake_case = GPTaTokenizerFast __snake_case = True __snake_case = {'add_prefix_space': True} __snake_case = False def lowercase__ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A__ : List[str] =[ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", """<|endoftext|>""", ] A__ : str =dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) A__ : int =["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] A__ : Union[str, Any] ={"""unk_token""": """<unk>"""} A__ : List[str] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) A__ : Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(lowerCAmelCase_ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(lowerCAmelCase_ ) ) def lowercase__ ( self : str , **lowerCAmelCase_ : Dict ) -> Dict: '''simple docstring''' kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def lowercase__ ( self : str , **lowerCAmelCase_ : List[str] ) -> Tuple: '''simple docstring''' kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def lowercase__ ( self : Dict , lowerCAmelCase_ : Optional[Any] ) -> Dict: '''simple docstring''' A__ : List[str] ="""lower newer""" A__ : Tuple ="""lower newer""" return input_text, output_text def lowercase__ ( self : List[str] ) -> Optional[int]: '''simple docstring''' A__ : Optional[Any] =GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) A__ : Optional[int] ="""lower newer""" A__ : List[str] =["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] A__ : Union[str, Any] =tokenizer.tokenize(lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) A__ : Any =tokens + [tokenizer.unk_token] A__ : Dict =[14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , lowerCAmelCase_ ) def lowercase__ ( self : int ) -> int: '''simple docstring''' if not self.test_rust_tokenizer: return A__ : Any =self.get_tokenizer() A__ : List[Any] =self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase_ ) A__ : str ="""lower newer""" # Testing tokenization A__ : Optional[Any] =tokenizer.tokenize(lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ ) A__ : Dict =rust_tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) # Testing conversion to ids without special tokens A__ : Any =tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , 
add_prefix_space=lowerCAmelCase_ ) A__ : List[Any] =rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) # Testing conversion to ids with special tokens A__ : List[Any] =self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase_ ) A__ : List[Any] =tokenizer.encode(lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ ) A__ : str =rust_tokenizer.encode(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) # Testing the unknown token A__ : Optional[Any] =tokens + [rust_tokenizer.unk_token] A__ : List[Any] =[14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , lowerCAmelCase_ ) def lowercase__ ( self : Dict , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : Union[str, Any] ) -> int: '''simple docstring''' # It's very difficult to mix/test pretokenization with byte-level # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Dict=15 ) -> List[Any]: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): A__ : Tuple =self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) # Simple input A__ : List[str] ="""This is a simple input""" A__ : List[Any] =["""This is a simple input 1""", """This is a simple input 2"""] A__ : Any =("""This is a simple input""", """This is a pair""") A__ : Optional[int] =[ ("""This is a simple input 1""", """This is a simple input 2"""), ("""This is a simple pair 1""", """This is a simple pair 2"""), ] # Simple input tests self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="""max_length""" ) # Simple input self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="""max_length""" ) # Simple input self.assertRaises( lowerCAmelCase_ , tokenizer_r.batch_encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="""max_length""" , ) # Pair input self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="""max_length""" ) # Pair input self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="""max_length""" ) # Pair input self.assertRaises( lowerCAmelCase_ , tokenizer_r.batch_encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="""max_length""" , ) def lowercase__ ( self : List[str] ) -> Dict: '''simple docstring''' A__ : Optional[Any] =GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" ) # Simple input A__ : List[Any] ="""This is a simple input""" A__ : Union[str, Any] =["""This is a simple input looooooooong""", """This is a simple input"""] A__ : Tuple =("""This is a simple input""", """This is a pair""") A__ : Optional[int] =[ ("""This is a simple input loooooong""", """This is a simple input"""), ("""This is a simple pair loooooong""", """This is a simple pair"""), ] A__ : Dict =tokenizer.pad_token_id A__ : str =tokenizer(lowerCAmelCase_ , padding="""max_length""" , max_length=30 , return_tensors="""np""" ) A__ : Optional[Any] =tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , truncate=lowerCAmelCase_ , return_tensors="""np""" ) A__ : Dict =tokenizer(*lowerCAmelCase_ , padding="""max_length""" , 
max_length=60 , return_tensors="""np""" ) A__ : List[str] =tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , truncate=lowerCAmelCase_ , return_tensors="""np""" ) # s # test single string max_length padding self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s["""input_ids"""] ) self.assertTrue(0 in out_s["""attention_mask"""] ) # s2 # test automatic padding self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] ) self.assertFalse(0 in out_sa["""attention_mask"""][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] ) self.assertTrue(0 in out_sa["""attention_mask"""][1] ) # p # test single pair max_length padding self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p["""input_ids"""] ) self.assertTrue(0 in out_p["""attention_mask"""] ) # p2 # test automatic padding pair self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] ) self.assertFalse(0 in out_pa["""attention_mask"""][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] ) self.assertTrue(0 in out_pa["""attention_mask"""][1] ) def lowercase__ ( self : Union[str, Any] ) -> Optional[int]: '''simple docstring''' A__ : List[str] ="""$$$""" A__ : List[Any] =GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=lowerCAmelCase_ , add_bos_token=lowerCAmelCase_ ) A__ : Tuple ="""This is a simple input""" A__ : Dict =["""This is a simple input 1""", """This is a simple input 2"""] A__ : Optional[int] =tokenizer.bos_token_id A__ : List[str] =tokenizer(lowerCAmelCase_ ) A__ : Dict =tokenizer(lowerCAmelCase_ ) self.assertEqual(out_s.input_ids[0] , lowerCAmelCase_ ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) A__ : Optional[Any] =tokenizer.decode(out_s.input_ids ) A__ : List[str] =tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , lowerCAmelCase_ ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def lowercase__ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' pass def lowercase__ ( self : int ) -> Optional[Any]: '''simple docstring''' # TODO: change to self.get_tokenizers() when the fast version is implemented A__ : List[str] =[self.get_tokenizer(do_lower_case=lowerCAmelCase_ , add_bos_token=lowerCAmelCase_ )] for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): A__ : Optional[int] ="""Encode this.""" A__ : Tuple ="""This one too please.""" A__ : Any =tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) encoded_sequence += tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) A__ : List[Any] =tokenizer.encode_plus( lowerCAmelCase_ , lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , ) A__ : List[str] =encoded_sequence_dict["""input_ids"""] A__ : List[Any] =encoded_sequence_dict["""special_tokens_mask"""] self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) ) A__ : List[str] =[ (x if not special_tokens_mask[i] else None) for i, x in enumerate(lowerCAmelCase_ ) ] A__ : List[str] =[x for x in filtered_sequence if x is not None] self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) @require_tokenizers class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' 
def lowercase__ ( self : Union[str, Any] ) -> str: '''simple docstring''' # More context: # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1 # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519 # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439 A__ : Optional[int] =AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=lowerCAmelCase_ ) A__ : List[Any] ="""A photo of a cat""" A__ : int =tokenizer.encode( lowerCAmelCase_ , ) self.assertEqual(lowerCAmelCase_ , [2, 2_50, 13_45, 9, 10, 47_58] ) tokenizer.save_pretrained("""test_opt""" ) A__ : Optional[int] =AutoTokenizer.from_pretrained("""./test_opt""" ) A__ : Optional[Any] =tokenizer.encode( lowerCAmelCase_ , ) self.assertEqual(lowerCAmelCase_ , [2, 2_50, 13_45, 9, 10, 47_58] ) def lowercase__ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' A__ : str =AutoTokenizer.from_pretrained("""facebook/opt-350m""" , use_slow=lowerCAmelCase_ ) A__ : int ="""A photo of a cat""" A__ : Union[str, Any] =tokenizer.encode( lowerCAmelCase_ , ) # Same as above self.assertEqual(lowerCAmelCase_ , [2, 2_50, 13_45, 9, 10, 47_58] ) @unittest.skip("""This test is failing because of a bug in the fast tokenizer""" ) def lowercase__ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' A__ : Dict =AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=lowerCAmelCase_ ) A__ : Dict ="""bos""" A__ : Union[str, Any] =tokenizer.get_vocab()["""bos"""] A__ : List[str] ="""A photo of a cat""" A__ : List[str] =tokenizer.encode( lowerCAmelCase_ , ) # We changed the bos token self.assertEqual(lowerCAmelCase_ , [3_19_57, 2_50, 13_45, 9, 10, 47_58] ) tokenizer.save_pretrained("""./tok""" ) A__ : Tuple =AutoTokenizer.from_pretrained("""./tok""" ) self.assertTrue(tokenizer.is_fast ) A__ : List[Any] =tokenizer.encode( lowerCAmelCase_ , ) self.assertEqual(lowerCAmelCase_ , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
code_codestyle: 687
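The `code` sample above exercises GPT-2's byte-level BPE, where `\u0120` (rendered `Ġ`) marks a preceding space and `add_prefix_space` controls whether the first word is tokenized as if it followed a space. A short usage sketch, assuming the public `gpt2` checkpoint is reachable:

```python
from transformers import GPT2Tokenizer

tok = GPT2Tokenizer.from_pretrained("gpt2")

# without a prefix space the first word lacks the leading 'Ġ' marker,
# so it maps to different BPE tokens than it would mid-sentence
print(tok.tokenize("lower newer"))
print(tok.tokenize("lower newer", add_prefix_space=True))
```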
'''simple docstring''' import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionTextToImagePipeline from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device __snake_case : str = False class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' pass @nightly @require_torch_gpu class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowercase__ ( self : Optional[Any] ) -> Any: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' A__ : List[str] =VersatileDiffusionTextToImagePipeline.from_pretrained("""shi-labs/versatile-diffusion""" ) # remove text_unet pipe.remove_unused_weights() pipe.to(lowerCAmelCase_ ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) A__ : int ="""A painting of a squirrel eating a burger """ A__ : Tuple =torch.manual_seed(0 ) A__ : int =pipe( prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCAmelCase_ ) A__ : str =VersatileDiffusionTextToImagePipeline.from_pretrained(lowerCAmelCase_ ) pipe.to(lowerCAmelCase_ ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) A__ : int =generator.manual_seed(0 ) A__ : Tuple =pipe( prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass" def lowercase__ ( self : Optional[int] ) -> int: '''simple docstring''' A__ : Any =VersatileDiffusionTextToImagePipeline.from_pretrained( """shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa ) pipe.to(lowerCAmelCase_ ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) A__ : Dict ="""A painting of a squirrel eating a burger """ A__ : Optional[int] =torch.manual_seed(0 ) A__ : List[str] =pipe( prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images A__ : List[str] =image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) A__ : Tuple =np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
style_context_codestyle: 687
label: 1
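The `style_context` sample above checks that a diffusers pipeline survives a `save_pretrained` / `from_pretrained` round trip by regenerating with the same seed. The same pattern in miniature; the tiny checkpoint id is an assumption (small test pipelines like it are published under `hf-internal-testing`):

```python
import tempfile
import numpy as np
import torch
from diffusers import StableDiffusionPipeline

repo = "hf-internal-testing/tiny-stable-diffusion-pipe"  # assumed tiny test checkpoint
pipe = StableDiffusionPipeline.from_pretrained(repo)
prompt = "A painting of a squirrel eating a burger"

generator = torch.manual_seed(0)
image = pipe(prompt, generator=generator, num_inference_steps=2, output_type="numpy").images

with tempfile.TemporaryDirectory() as tmpdir:
    pipe.save_pretrained(tmpdir)
    reloaded = StableDiffusionPipeline.from_pretrained(tmpdir)

generator = torch.manual_seed(0)
image2 = reloaded(prompt, generator=generator, num_inference_steps=2, output_type="numpy").images

# same weights + same seed -> bitwise-close images
assert np.abs(image - image2).sum() < 1e-5
```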
'''simple docstring''' import json import os import unittest from transformers.models.roc_bert.tokenization_roc_bert import ( VOCAB_FILES_NAMES, RoCBertBasicTokenizer, RoCBertTokenizer, RoCBertWordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class lowerCamelCase ( lowercase_ , unittest.TestCase ): '''simple docstring''' __snake_case = RoCBertTokenizer __snake_case = None __snake_case = False __snake_case = True __snake_case = filter_non_english def lowercase__ ( self : Tuple ) -> str: '''simple docstring''' super().setUp() A__ : Optional[Any] =["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""] A__ : int ={} A__ : Any ={} for i, value in enumerate(lowerCAmelCase_ ): A__ : Union[str, Any] =i A__ : List[str] =i A__ : List[Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) A__ : List[Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_shape_file"""] ) A__ : int =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_pronunciation_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) with open(self.word_shape_file , """w""" , encoding="""utf-8""" ) as word_shape_writer: json.dump(lowerCAmelCase_ , lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) with open(self.word_pronunciation_file , """w""" , encoding="""utf-8""" ) as word_pronunciation_writer: json.dump(lowerCAmelCase_ , lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) def lowercase__ ( self : Dict ) -> Dict: '''simple docstring''' A__ : int =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) A__ : str =tokenizer.tokenize("""你好[SEP]你是谁""" ) self.assertListEqual(lowerCAmelCase_ , ["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(lowerCAmelCase_ ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(lowerCAmelCase_ ) , [5, 6, 2, 5, 7, 8] ) def lowercase__ ( self : Dict ) -> Dict: '''simple docstring''' A__ : Optional[Any] =RoCBertBasicTokenizer() self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] ) def lowercase__ ( self : str ) -> List[str]: '''simple docstring''' A__ : Optional[Any] =RoCBertBasicTokenizer(do_lower_case=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def lowercase__ ( self : int ) -> Any: '''simple docstring''' A__ : int =RoCBertBasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? 
""" ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] ) def lowercase__ ( self : Dict ) -> List[str]: '''simple docstring''' A__ : Optional[Any] =RoCBertBasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def lowercase__ ( self : str ) -> Any: '''simple docstring''' A__ : int =RoCBertBasicTokenizer(do_lower_case=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def lowercase__ ( self : Tuple ) -> Tuple: '''simple docstring''' A__ : Union[str, Any] =RoCBertBasicTokenizer(do_lower_case=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def lowercase__ ( self : int ) -> str: '''simple docstring''' A__ : Dict =RoCBertBasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def lowercase__ ( self : Tuple ) -> Tuple: '''simple docstring''' A__ : Optional[Any] =RoCBertBasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def lowercase__ ( self : Tuple ) -> Optional[int]: '''simple docstring''' A__ : Any =RoCBertBasicTokenizer(do_lower_case=lowerCAmelCase_ , never_split=["""[UNK]"""] ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? 
[UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] ) def lowercase__ ( self : Union[str, Any] ) -> int: '''simple docstring''' A__ : int =["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""] A__ : List[str] ={} for i, token in enumerate(lowerCAmelCase_ ): A__ : List[Any] =i A__ : Union[str, Any] =RoCBertWordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token="""[UNK]""" ) self.assertListEqual(tokenizer.tokenize("""""" ) , [] ) self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] ) def lowercase__ ( self : Dict ) -> Any: '''simple docstring''' self.assertTrue(_is_whitespace(""" """ ) ) self.assertTrue(_is_whitespace("""\t""" ) ) self.assertTrue(_is_whitespace("""\r""" ) ) self.assertTrue(_is_whitespace("""\n""" ) ) self.assertTrue(_is_whitespace("""\u00A0""" ) ) self.assertFalse(_is_whitespace("""A""" ) ) self.assertFalse(_is_whitespace("""-""" ) ) def lowercase__ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' self.assertTrue(_is_control("""\u0005""" ) ) self.assertFalse(_is_control("""A""" ) ) self.assertFalse(_is_control(""" """ ) ) self.assertFalse(_is_control("""\t""" ) ) self.assertFalse(_is_control("""\r""" ) ) def lowercase__ ( self : Tuple ) -> List[Any]: '''simple docstring''' self.assertTrue(_is_punctuation("""-""" ) ) self.assertTrue(_is_punctuation("""$""" ) ) self.assertTrue(_is_punctuation("""`""" ) ) self.assertTrue(_is_punctuation(""".""" ) ) self.assertFalse(_is_punctuation("""A""" ) ) self.assertFalse(_is_punctuation(""" """ ) ) def lowercase__ ( self : Union[str, Any] ) -> Tuple: '''simple docstring''' A__ : Union[str, Any] =self.get_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(lowerCAmelCase_ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] ) if self.test_rust_tokenizer: A__ : List[Any] =self.get_rust_tokenizer() self.assertListEqual( [rust_tokenizer.tokenize(lowerCAmelCase_ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] ) def lowercase__ ( self : str ) -> Optional[Any]: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): A__ : List[str] =self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) A__ : Dict =f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence." 
A__ : Dict =tokenizer_r.encode_plus( lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , ) A__ : Tuple =tokenizer_r.do_lower_case if hasattr(lowerCAmelCase_ , """do_lower_case""" ) else False A__ : Optional[int] =( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """A"""), ((1, 2), ""","""), ((3, 5), """na"""), ((5, 6), """##ï"""), ((6, 8), """##ve"""), ((9, 15), tokenizer_r.mask_token), ((16, 21), """Allen"""), ((21, 23), """##NL"""), ((23, 24), """##P"""), ((25, 33), """sentence"""), ((33, 34), """."""), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """a"""), ((1, 2), ""","""), ((3, 8), """naive"""), ((9, 15), tokenizer_r.mask_token), ((16, 21), """allen"""), ((21, 23), """##nl"""), ((23, 24), """##p"""), ((25, 33), """sentence"""), ((33, 34), """."""), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] ) def lowercase__ ( self : str ) -> List[Any]: '''simple docstring''' A__ : List[Any] =["""的""", """人""", """有"""] A__ : Optional[int] ="""""".join(lowerCAmelCase_ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): A__ : Any =True A__ : int =self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) A__ : Union[str, Any] =self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) A__ : Union[str, Any] =tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) A__ : str =tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) A__ : Tuple =tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ ) A__ : Tuple =tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) A__ : List[Any] =False A__ : List[str] =self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) A__ : Dict =self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) A__ : Optional[int] =tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) A__ : Tuple =tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) A__ : List[str] =tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ ) A__ : Optional[Any] =tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ ) # it is expected that only the first Chinese character is not preceded by "##". 
A__ : Optional[int] =[ f"##{token}" if idx != 0 else token for idx, token in enumerate(lowerCAmelCase_ ) ] self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) @slow def lowercase__ ( self : int ) -> List[Any]: '''simple docstring''' A__ : List[Any] =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) A__ : str =tokenizer.encode("""你好""" , add_special_tokens=lowerCAmelCase_ ) A__ : str =tokenizer.encode("""你是谁""" , add_special_tokens=lowerCAmelCase_ ) A__ : Optional[int] =tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ ) A__ : List[Any] =tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ ) assert encoded_sentence == [1] + text + [2] assert encoded_pair == [1] + text + [2] + text_a + [2] def lowercase__ ( self : str ) -> str: '''simple docstring''' A__ : Optional[int] =self.get_tokenizers(do_lower_case=lowerCAmelCase_ ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): A__ : Optional[int] ="""你好,你是谁""" A__ : List[Any] =tokenizer.tokenize(lowerCAmelCase_ ) A__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) A__ : str =tokenizer.convert_tokens_to_shape_ids(lowerCAmelCase_ ) A__ : Any =tokenizer.convert_tokens_to_pronunciation_ids(lowerCAmelCase_ ) A__ : List[Any] =tokenizer.prepare_for_model( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) A__ : Optional[int] =tokenizer.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
687
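For reference, the wordpiece expectations in the row above ("unwanted" -> un / ##want / ##ed, with an out-of-vocabulary tail collapsing the whole token to [UNK]) come from greedy longest-match-first tokenization. A minimal sketch with a toy vocab (an illustration, not the RoCBert implementation itself):

def wordpiece(token, vocab, unk="[UNK]"):
    # Repeatedly take the longest prefix found in the vocab; continuation
    # pieces get a "##" prefix. Any unmatched remainder rejects the token.
    pieces, start = [], 0
    while start < len(token):
        end, piece = len(token), None
        while start < end:
            cand = token[start:end] if start == 0 else "##" + token[start:end]
            if cand in vocab:
                piece = cand
                break
            end -= 1
        if piece is None:
            return [unk]
        pieces.append(piece)
        start = end
    return pieces

vocab = {"un", "##want", "##ed", "runn", "##ing"}
assert wordpiece("unwanted", vocab) == ["un", "##want", "##ed"]
assert wordpiece("unwantedX", vocab) == ["[UNK]"]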
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = 42 class lowerCamelCase ( lowercase_ , lowercase_ ): '''simple docstring''' @register_to_config def __init__( self : List[str] , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : Tuple[str] = ("DownEncoderBlock2D",) , lowerCAmelCase_ : Tuple[str] = ("UpDecoderBlock2D",) , lowerCAmelCase_ : Tuple[int] = (64,) , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : str = "silu" , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 32 , lowerCAmelCase_ : int = 2_56 , lowerCAmelCase_ : int = 32 , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : float = 0.18215 , lowerCAmelCase_ : str = "group" , ) -> List[str]: '''simple docstring''' super().__init__() # pass init params to Encoder A__ : Optional[Any] =Encoder( in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , down_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , double_z=lowerCAmelCase_ , ) A__ : Dict =vq_embed_dim if vq_embed_dim is not None else latent_channels A__ : Union[str, Any] =nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , 1 ) A__ : Optional[int] =VectorQuantizer(lowerCAmelCase_ , lowerCAmelCase_ , beta=0.25 , remap=lowerCAmelCase_ , sane_index_shape=lowerCAmelCase_ ) A__ : Tuple =nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , 1 ) # pass init params to Decoder A__ : Optional[Any] =Decoder( in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , up_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , norm_type=lowerCAmelCase_ , ) @apply_forward_hook def lowercase__ ( self : List[str] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True ) -> VQEncoderOutput: '''simple docstring''' A__ : Dict =self.encoder(lowerCAmelCase_ ) A__ : Union[str, Any] =self.quant_conv(lowerCAmelCase_ ) if not return_dict: return (h,) return VQEncoderOutput(latents=lowerCAmelCase_ ) @apply_forward_hook def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]: '''simple docstring''' # also go through quantization layer if not force_not_quantize: A__ , A__ , A__ : Tuple =self.quantize(lowerCAmelCase_ ) else: A__ : List[str] =h A__ : Dict =self.post_quant_conv(lowerCAmelCase_ ) A__ : List[Any] =self.decoder(lowerCAmelCase_ , quant if self.config.norm_type == """spatial""" else None ) if not return_dict: return (dec,) return DecoderOutput(sample=lowerCAmelCase_ ) def lowercase__ ( self : str , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]: '''simple docstring''' A__ : Optional[int] =sample A__ : Union[str, Any] =self.encode(lowerCAmelCase_ ).latents A__ : Tuple =self.decode(lowerCAmelCase_ ).sample if not return_dict: return (dec,) return DecoderOutput(sample=lowerCAmelCase_ )
687
1
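For context, the class above is diffusers' VQModel with identifiers mangled. A minimal encode/quantize/decode roundtrip through the public API (default config values mirror the signature above; a real pipeline would load pretrained weights with from_pretrained):

import torch
from diffusers import VQModel

model = VQModel()  # library defaults
model.eval()
x = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    latents = model.encode(x).latents    # encoder + quant_conv
    rec = model.decode(latents).sample   # quantize -> post_quant_conv -> decoder
print(rec.shape)  # torch.Size([1, 3, 32, 32])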
'''simple docstring'''


def _print_dist(dist, v):
    """Pretty-print the shortest-path matrix, using INF for unreachable pairs."""
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """All-pairs shortest paths; dist[i][j] is the cheapest i -> j cost."""
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))
    graph = [[float("inf") for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight
    floyd_warshall(graph, v)

# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertex, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
687
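A non-interactive spot check of the relaxation step above, using the 3-vertex graph from the trailing comments (self-contained so it runs without the input() prompts):

INF = float("inf")
dist = [
    [0.0, INF, INF],
    [INF, 0.0, 2.0],
    [INF, 1.0, 0.0],
]
for k in range(3):
    for i in range(3):
        for j in range(3):
            if dist[i][k] + dist[k][j] < dist[i][j]:
                dist[i][j] = dist[i][k] + dist[k][j]
assert dist[1][2] == 2.0 and dist[2][1] == 1.0  # matches the expected-output comment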
'''simple docstring''' import os import re from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __snake_case : Optional[int] = logging.get_logger(__name__) __snake_case : Tuple = { 'vocab_file': 'vocab.txt', 'merges_file': 'bpe.codes', } __snake_case : str = { 'vocab_file': { 'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt', 'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt', }, 'merges_file': { 'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes', 'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes', }, } __snake_case : List[Any] = { 'vinai/phobert-base': 256, 'vinai/phobert-large': 256, } def __lowerCamelCase ( __snake_case : Union[str, Any] ) -> str: """simple docstring""" A__ : Optional[int] =set() A__ : Optional[int] =word[0] for char in word[1:]: pairs.add((prev_char, char) ) A__ : str =char A__ : List[Any] =set(__snake_case ) return pairs class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = VOCAB_FILES_NAMES __snake_case = PRETRAINED_VOCAB_FILES_MAP __snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any]="<s>" , lowerCAmelCase_ : List[str]="</s>" , lowerCAmelCase_ : str="</s>" , lowerCAmelCase_ : int="<s>" , lowerCAmelCase_ : List[str]="<unk>" , lowerCAmelCase_ : Any="<pad>" , lowerCAmelCase_ : Tuple="<mask>" , **lowerCAmelCase_ : Dict , ) -> Dict: '''simple docstring''' super().__init__( bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , **lowerCAmelCase_ , ) A__ : int =vocab_file A__ : Any =merges_file A__ : Union[str, Any] ={} A__ : Optional[int] =0 A__ : List[Any] =1 A__ : Tuple =2 A__ : Dict =3 self.add_from_file(lowerCAmelCase_ ) A__ : List[str] ={v: k for k, v in self.encoder.items()} with open(lowerCAmelCase_ , encoding="""utf-8""" ) as merges_handle: A__ : str =merges_handle.read().split("""\n""" )[:-1] A__ : Tuple =[tuple(merge.split()[:-1] ) for merge in merges] A__ : Optional[Any] =dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) A__ : Dict ={} def lowercase__ ( self : Tuple , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A__ : Dict =[self.cls_token_id] A__ : Union[str, Any] =[self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowercase__ ( self : str , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None , lowerCAmelCase_ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase_ )) + [1] return [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1] + ([0] * len(lowerCAmelCase_ )) + [1] def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' A__ : Tuple =[self.sep_token_id] A__ : Dict =[self.cls_token_id] if 
token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowercase__ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' return len(self.encoder ) def lowercase__ ( self : Any ) -> Tuple: '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def lowercase__ ( self : str , lowerCAmelCase_ : Any ) -> Dict: '''simple docstring''' if token in self.cache: return self.cache[token] A__ : int =tuple(lowerCAmelCase_ ) A__ : Optional[int] =tuple(list(word[:-1] ) + [word[-1] + """</w>"""] ) A__ : Tuple =get_pairs(lowerCAmelCase_ ) if not pairs: return token while True: A__ : List[Any] =min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break A__ , A__ : Tuple =bigram A__ : Optional[int] =[] A__ : Tuple =0 while i < len(lowerCAmelCase_ ): try: A__ : str =word.index(lowerCAmelCase_ , lowerCAmelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) A__ : Union[str, Any] =j if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 A__ : Dict =tuple(lowerCAmelCase_ ) A__ : Dict =new_word if len(lowerCAmelCase_ ) == 1: break else: A__ : str =get_pairs(lowerCAmelCase_ ) A__ : Dict ="""@@ """.join(lowerCAmelCase_ ) A__ : Tuple =word[:-4] A__ : Any =word return word def lowercase__ ( self : List[str] , lowerCAmelCase_ : str ) -> Any: '''simple docstring''' A__ : int =[] A__ : Optional[int] =re.findall(R"""\S+\n?""" , lowerCAmelCase_ ) for token in words: split_tokens.extend(list(self.bpe(lowerCAmelCase_ ).split(""" """ ) ) ) return split_tokens def lowercase__ ( self : str , lowerCAmelCase_ : Union[str, Any] ) -> int: '''simple docstring''' return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) ) def lowercase__ ( self : Tuple , lowerCAmelCase_ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' return self.decoder.get(lowerCAmelCase_ , self.unk_token ) def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' A__ : Optional[Any] =""" """.join(lowerCAmelCase_ ).replace("""@@ """ , """""" ).strip() return out_string def lowercase__ ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(lowerCAmelCase_ ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return A__ : Optional[Any] =os.path.join( lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) A__ : Tuple =os.path.join( lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ): copyfile(self.vocab_file , lowerCAmelCase_ ) if os.path.abspath(self.merges_file ) != os.path.abspath(lowerCAmelCase_ ): copyfile(self.merges_file , lowerCAmelCase_ ) return out_vocab_file, out_merge_file def lowercase__ ( self : List[Any] , lowerCAmelCase_ : Optional[Any] ) -> Any: '''simple docstring''' if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): try: with open(lowerCAmelCase_ , """r""" , encoding="""utf-8""" ) as fd: self.add_from_file(lowerCAmelCase_ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise 
Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset" ) return A__ : Union[str, Any] =f.readlines() for lineTmp in lines: A__ : List[Any] =lineTmp.strip() A__ : Dict =line.rfind(""" """ ) if idx == -1: raise ValueError("""Incorrect dictionary format, expected '<token> <cnt>'""" ) A__ : Tuple =line[:idx] A__ : Tuple =len(self.encoder )
687
1
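The bpe() method in the row above is standard lowest-rank-first pair merging, with "@@ " marking non-final subwords and "</w>" carried on the last piece internally, then stripped. A self-contained sketch, assuming a made-up two-rule merge table (not the real PhoBERT codes):

merges = [("l", "o"), ("lo", "w</w>")]  # toy ranks: earlier pair merges first
bpe_ranks = {pair: i for i, pair in enumerate(merges)}

def toy_bpe(token):
    word = list(token[:-1]) + [token[-1] + "</w>"]
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        best = min(pairs, key=lambda p: bpe_ranks.get(p, float("inf")))
        if best not in bpe_ranks:
            break
        first, second = best
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == best:
                merged.append(first + second)  # apply the best-ranked merge
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = merged
    out = "@@ ".join(word)
    return out[:-4]  # drop the trailing "</w>" marker

assert toy_bpe("low") == "low"
assert toy_bpe("lox") == "lo@@ x"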
'''simple docstring''' import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = (DDPMScheduler,) def lowercase__ ( self : Optional[Any] , **lowerCAmelCase_ : Any ) -> Any: '''simple docstring''' A__ : Optional[Any] ={ """num_train_timesteps""": 10_00, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", """variance_type""": """fixed_small""", """clip_sample""": True, } config.update(**lowerCAmelCase_ ) return config def lowercase__ ( self : str ) -> Dict: '''simple docstring''' for timesteps in [1, 5, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=lowerCAmelCase_ ) def lowercase__ ( self : List[str] ) -> Dict: '''simple docstring''' for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=lowerCAmelCase_ , beta_end=lowerCAmelCase_ ) def lowercase__ ( self : Tuple ) -> Tuple: '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowerCAmelCase_ ) def lowercase__ ( self : List[str] ) -> str: '''simple docstring''' for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=lowerCAmelCase_ ) def lowercase__ ( self : str ) -> Any: '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=lowerCAmelCase_ ) def lowercase__ ( self : Tuple ) -> List[str]: '''simple docstring''' self.check_over_configs(thresholding=lowerCAmelCase_ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=lowerCAmelCase_ , prediction_type=lowerCAmelCase_ , sample_max_value=lowerCAmelCase_ , ) def lowercase__ ( self : str ) -> List[str]: '''simple docstring''' for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=lowerCAmelCase_ ) def lowercase__ ( self : int ) -> Optional[Any]: '''simple docstring''' for t in [0, 5_00, 9_99]: self.check_over_forward(time_step=lowerCAmelCase_ ) def lowercase__ ( self : Tuple ) -> int: '''simple docstring''' A__ : str =self.scheduler_classes[0] A__ : int =self.get_scheduler_config() A__ : Optional[int] =scheduler_class(**lowerCAmelCase_ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.00979 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.02 ) ) < 1e-5 def lowercase__ ( self : int ) -> Dict: '''simple docstring''' A__ : List[str] =self.scheduler_classes[0] A__ : Any =self.get_scheduler_config() A__ : Any =scheduler_class(**lowerCAmelCase_ ) A__ : List[str] =len(lowerCAmelCase_ ) A__ : Optional[Any] =self.dummy_model() A__ : List[str] =self.dummy_sample_deter A__ : Optional[Any] =torch.manual_seed(0 ) for t in reversed(range(lowerCAmelCase_ ) ): # 1. predict noise residual A__ : Any =model(lowerCAmelCase_ , lowerCAmelCase_ ) # 2. 
predict previous mean of sample x_t-1 A__ : List[Any] =scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance A__ : Union[str, Any] =pred_prev_sample A__ : Union[str, Any] =torch.sum(torch.abs(lowerCAmelCase_ ) ) A__ : List[Any] =torch.mean(torch.abs(lowerCAmelCase_ ) ) assert abs(result_sum.item() - 258.9606 ) < 1e-2 assert abs(result_mean.item() - 0.3372 ) < 1e-3 def lowercase__ ( self : Optional[int] ) -> List[str]: '''simple docstring''' A__ : str =self.scheduler_classes[0] A__ : Any =self.get_scheduler_config(prediction_type="""v_prediction""" ) A__ : str =scheduler_class(**lowerCAmelCase_ ) A__ : int =len(lowerCAmelCase_ ) A__ : Dict =self.dummy_model() A__ : Optional[int] =self.dummy_sample_deter A__ : Optional[int] =torch.manual_seed(0 ) for t in reversed(range(lowerCAmelCase_ ) ): # 1. predict noise residual A__ : Any =model(lowerCAmelCase_ , lowerCAmelCase_ ) # 2. predict previous mean of sample x_t-1 A__ : List[Any] =scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance A__ : List[str] =pred_prev_sample A__ : Optional[Any] =torch.sum(torch.abs(lowerCAmelCase_ ) ) A__ : Tuple =torch.mean(torch.abs(lowerCAmelCase_ ) ) assert abs(result_sum.item() - 202.0296 ) < 1e-2 assert abs(result_mean.item() - 0.2631 ) < 1e-3 def lowercase__ ( self : List[Any] ) -> str: '''simple docstring''' A__ : List[str] =self.scheduler_classes[0] A__ : List[str] =self.get_scheduler_config() A__ : List[str] =scheduler_class(**lowerCAmelCase_ ) A__ : Dict =[1_00, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=lowerCAmelCase_ ) A__ : Dict =scheduler.timesteps for i, timestep in enumerate(lowerCAmelCase_ ): if i == len(lowerCAmelCase_ ) - 1: A__ : Dict =-1 else: A__ : int =timesteps[i + 1] A__ : List[Any] =scheduler.previous_timestep(lowerCAmelCase_ ) A__ : Dict =prev_t.item() self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def lowercase__ ( self : str ) -> List[Any]: '''simple docstring''' A__ : List[str] =self.scheduler_classes[0] A__ : Optional[int] =self.get_scheduler_config() A__ : Dict =scheduler_class(**lowerCAmelCase_ ) A__ : Optional[Any] =[1_00, 87, 50, 51, 0] with self.assertRaises(lowerCAmelCase_ , msg="""`custom_timesteps` must be in descending order.""" ): scheduler.set_timesteps(timesteps=lowerCAmelCase_ ) def lowercase__ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' A__ : Optional[int] =self.scheduler_classes[0] A__ : Optional[Any] =self.get_scheduler_config() A__ : List[str] =scheduler_class(**lowerCAmelCase_ ) A__ : List[Any] =[1_00, 87, 50, 1, 0] A__ : Tuple =len(lowerCAmelCase_ ) with self.assertRaises(lowerCAmelCase_ , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ): scheduler.set_timesteps(num_inference_steps=lowerCAmelCase_ , timesteps=lowerCAmelCase_ ) def lowercase__ ( self : List[str] ) -> Tuple: '''simple docstring''' A__ : Dict =self.scheduler_classes[0] A__ : Dict =self.get_scheduler_config() A__ : Any =scheduler_class(**lowerCAmelCase_ ) A__ : List[Any] =[scheduler.config.num_train_timesteps] with self.assertRaises( lowerCAmelCase_ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , 
): scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
687
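The full-loop tests in the row above follow the standard ancestral-sampling recipe. A compact sketch with the public scheduler API, where a zero tensor stands in for a real noise-prediction network (an illustration, not the test code):

import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)
sample = torch.randn(1, 3, 8, 8)
generator = torch.manual_seed(0)
for t in reversed(range(scheduler.config.num_train_timesteps)):
    residual = torch.zeros_like(sample)  # stand-in for model(sample, t)
    # one reverse-diffusion step: predict the previous-timestep sample
    sample = scheduler.step(residual, t, sample, generator=generator).prev_sample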
'''simple docstring''' import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging __snake_case : List[str] = logging.get_logger(__name__) def __lowerCamelCase ( __snake_case : Any, __snake_case : Any ) -> int: """simple docstring""" A__ : Union[str, Any] =nn.functional.normalize(__snake_case ) A__ : Optional[Any] =nn.functional.normalize(__snake_case ) return torch.mm(__snake_case, normalized_text_embeds.t() ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = CLIPConfig __snake_case = ['CLIPEncoderLayer'] def __init__( self : Tuple , lowerCAmelCase_ : CLIPConfig ) -> Dict: '''simple docstring''' super().__init__(lowerCAmelCase_ ) A__ : str =CLIPVisionModel(config.vision_config ) A__ : Optional[Any] =nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=lowerCAmelCase_ ) A__ : List[Any] =nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=lowerCAmelCase_ ) A__ : Any =nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=lowerCAmelCase_ ) A__ : Optional[Any] =nn.Parameter(torch.ones(17 ) , requires_grad=lowerCAmelCase_ ) A__ : int =nn.Parameter(torch.ones(3 ) , requires_grad=lowerCAmelCase_ ) @torch.no_grad() def lowercase__ ( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int ) -> Any: '''simple docstring''' A__ : Any =self.vision_model(lowerCAmelCase_ )[1] # pooled_output A__ : Any =self.visual_projection(lowerCAmelCase_ ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 A__ : Any =cosine_distance(lowerCAmelCase_ , self.special_care_embeds ).cpu().float().numpy() A__ : Optional[int] =cosine_distance(lowerCAmelCase_ , self.concept_embeds ).cpu().float().numpy() A__ : List[str] =[] A__ : Optional[int] =image_embeds.shape[0] for i in range(lowerCAmelCase_ ): A__ : List[Any] ={"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []} # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign images A__ : List[Any] =0.0 for concept_idx in range(len(special_cos_dist[0] ) ): A__ : Optional[Any] =special_cos_dist[i][concept_idx] A__ : Union[str, Any] =self.special_care_embeds_weights[concept_idx].item() A__ : Tuple =round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["special_scores"][concept_idx] > 0: result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]} ) A__ : Dict =0.01 for concept_idx in range(len(cos_dist[0] ) ): A__ : Optional[int] =cos_dist[i][concept_idx] A__ : List[str] =self.concept_embeds_weights[concept_idx].item() A__ : Optional[int] =round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["concept_scores"][concept_idx] > 0: result_img["bad_concepts"].append(lowerCAmelCase_ ) result.append(lowerCAmelCase_ ) A__ : int =[len(res["""bad_concepts"""] ) > 0 for res in result] return images, has_nsfw_concepts @torch.no_grad() def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : torch.FloatTensor ) -> Optional[int]: '''simple docstring''' A__ : Optional[Any] =self.vision_model(lowerCAmelCase_ )[1] # pooled_output A__ : List[Any] =self.visual_projection(lowerCAmelCase_ ) A__ : Union[str, Any] =cosine_distance(lowerCAmelCase_ , self.special_care_embeds ) A__ : Optional[int] =cosine_distance(lowerCAmelCase_ , self.concept_embeds ) # increase this value to 
create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images A__ : Dict =0.0 A__ : Dict =special_cos_dist - self.special_care_embeds_weights + adjustment # special_scores = special_scores.round(decimals=3) A__ : Union[str, Any] =torch.any(special_scores > 0 , dim=1 ) A__ : Tuple =special_care * 0.01 A__ : str =special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] ) A__ : List[Any] =(cos_dist - self.concept_embeds_weights) + special_adjustment # concept_scores = concept_scores.round(decimals=3) A__ : Optional[int] =torch.any(concept_scores > 0 , dim=1 ) return images, has_nsfw_concepts
687
1
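Both code paths in the safety checker above reduce to the same rule: cosine similarity of projected image embeddings against per-concept embeddings, shifted by learned thresholds. A minimal sketch with made-up shapes and threshold values:

import torch
import torch.nn as nn

def cosine_distance(a, b):
    # rows of a vs rows of b: entry (i, j) is the cosine similarity
    return nn.functional.normalize(a) @ nn.functional.normalize(b).t()

image_embeds = torch.randn(2, 768)     # projected CLIP pooled outputs
concept_embeds = torch.randn(17, 768)  # one embedding per flagged concept
thresholds = torch.full((17,), 0.5)    # stands in for concept_embeds_weights
adjustment = 0.0                       # raise to make the filter stricter
scores = cosine_distance(image_embeds, concept_embeds) - thresholds + adjustment
has_nsfw_concepts = torch.any(scores > 0, dim=1)  # one boolean per image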
'''simple docstring'''


def binomial_coefficient(n, r):
    """Compute C(n, r) by Pascal's-triangle row updates in O(n * r)."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
687
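A quick correctness spot check of the row-update version above against the stdlib (math.comb, Python 3.8+; the test values are arbitrary):

from math import comb

for n, r in [(5, 2), (10, 5), (20, 0)]:
    assert binomial_coefficient(n, r) == comb(n, r)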
'''simple docstring''' from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def __lowerCamelCase ( __snake_case : Tuple, __snake_case : List[Any] ) -> str: """simple docstring""" A__ : Optional[int] =[] for part_id in partition_order: A__ : int =df.where(f"SPARK_PARTITION_ID() = {part_id}" ).collect() for row_idx, row in enumerate(__snake_case ): expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()) ) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def __lowerCamelCase ( ) -> List[Any]: """simple docstring""" A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() A__ : str =spark.range(100 ).repartition(1 ) A__ : List[str] =Spark(__snake_case ) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=16 ) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 50 @require_not_windows @require_dill_gt_0_3_2 def __lowerCamelCase ( ) -> Tuple: """simple docstring""" A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() A__ : Tuple =spark.range(10 ).repartition(2 ) A__ : List[str] =[1, 0] A__ : Tuple =_generate_iterable_examples(__snake_case, __snake_case ) # Reverse the partitions. A__ : Dict =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, __snake_case ) for i, (row_id, row_dict) in enumerate(generate_fn() ): A__ , A__ : Union[str, Any] =expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def __lowerCamelCase ( ) -> List[Any]: """simple docstring""" A__ : Any =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() A__ : Union[str, Any] =spark.range(10 ).repartition(1 ) A__ : List[str] =SparkExamplesIterable(__snake_case ) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(__snake_case ): assert row_id == f"0_{i}" assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def __lowerCamelCase ( ) -> Any: """simple docstring""" A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() A__ : Union[str, Any] =spark.range(30 ).repartition(3 ) # Mock the generator so that shuffle reverses the partition indices. 
with patch("""numpy.random.Generator""" ) as generator_mock: A__ : Tuple =lambda __snake_case : x.reverse() A__ : List[str] =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [2, 1, 0] ) A__ : Union[str, Any] =SparkExamplesIterable(__snake_case ).shuffle_data_sources(__snake_case ) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(__snake_case ): A__ , A__ : List[Any] =expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def __lowerCamelCase ( ) -> Optional[Any]: """simple docstring""" A__ : List[Any] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() A__ : Any =spark.range(20 ).repartition(4 ) # Partitions 0 and 2 A__ : str =SparkExamplesIterable(__snake_case ).shard_data_sources(worker_id=0, num_workers=2 ) assert shard_it_a.n_shards == 2 A__ : Any =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [0, 2] ) for i, (row_id, row_dict) in enumerate(__snake_case ): A__ , A__ : Dict =expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 A__ : Union[str, Any] =SparkExamplesIterable(__snake_case ).shard_data_sources(worker_id=1, num_workers=2 ) assert shard_it_a.n_shards == 2 A__ : Union[str, Any] =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [1, 3] ) for i, (row_id, row_dict) in enumerate(__snake_case ): A__ , A__ : Optional[int] =expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def __lowerCamelCase ( ) -> Any: """simple docstring""" A__ : Optional[int] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() A__ : List[str] =spark.range(100 ).repartition(1 ) A__ : List[Any] =Spark(__snake_case ) # Choose a small max_shard_size for maximum partitioning. spark_builder._repartition_df_if_needed(max_shard_size=1 ) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 100
687
1
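The helper at the top of the Spark test module collects rows partition by partition via the SPARK_PARTITION_ID() SQL function. A standalone sketch of that trick on a local session (row counts are illustrative):

import pyspark

spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("demo").getOrCreate()
df = spark.range(10).repartition(2)
for part_id in range(df.rdd.getNumPartitions()):
    rows = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
    print(part_id, [row.asDict() for row in rows])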
'''simple docstring''' import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase : '''simple docstring''' def __init__( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict=13 , lowerCAmelCase_ : List[Any]=32 , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : Dict=16 , lowerCAmelCase_ : str=[32, 64, 1_28] , lowerCAmelCase_ : str=[1, 2, 1] , lowerCAmelCase_ : int=[2, 2, 4] , lowerCAmelCase_ : Optional[int]=2 , lowerCAmelCase_ : List[Any]=2.0 , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : int=0.0 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : str=0.1 , lowerCAmelCase_ : Union[str, Any]="gelu" , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Optional[Any]=0.02 , lowerCAmelCase_ : int=1e-5 , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Dict=10 , lowerCAmelCase_ : Dict=8 , lowerCAmelCase_ : Any=["stage1", "stage2"] , lowerCAmelCase_ : Union[str, Any]=[1, 2] , ) -> List[str]: '''simple docstring''' A__ : int =parent A__ : Optional[Any] =batch_size A__ : List[Any] =image_size A__ : List[Any] =patch_size A__ : Tuple =num_channels A__ : Union[str, Any] =embed_dim A__ : Any =hidden_sizes A__ : List[str] =depths A__ : str =num_heads A__ : Union[str, Any] =window_size A__ : Optional[Any] =mlp_ratio A__ : str =qkv_bias A__ : Optional[Any] =hidden_dropout_prob A__ : Tuple =attention_probs_dropout_prob A__ : str =drop_path_rate A__ : List[Any] =hidden_act A__ : Union[str, Any] =use_absolute_embeddings A__ : List[str] =patch_norm A__ : Dict =layer_norm_eps A__ : List[str] =initializer_range A__ : int =is_training A__ : List[str] =scope A__ : Tuple =use_labels A__ : Optional[Any] =type_sequence_label_size A__ : Any =encoder_stride A__ : Any =out_features A__ : Any =out_indices def lowercase__ ( self : Union[str, Any] ) -> Tuple: '''simple docstring''' A__ : Optional[Any] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ : Union[str, Any] =None if self.use_labels: A__ : Optional[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ : Optional[Any] =self.get_config() return config, pixel_values, labels def lowercase__ ( self : List[Any] ) -> List[str]: '''simple docstring''' return FocalNetConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict ) -> List[str]: '''simple docstring''' A__ : Dict =FocalNetModel(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : str =model(lowerCAmelCase_ ) A__ : Union[str, Any] =((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) A__ : List[str] =int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int ) -> Tuple: '''simple docstring''' A__ : Optional[Any] =FocalNetBackbone(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : Optional[Any] =model(lowerCAmelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] ) # verify backbone works with out_features=None A__ : List[str] =None A__ : Any =FocalNetBackbone(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : str =model(lowerCAmelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int ) -> Any: '''simple docstring''' A__ : Tuple =FocalNetForMaskedImageModeling(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : List[Any] =model(lowerCAmelCase_ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images A__ : Tuple =1 A__ : Any =FocalNetForMaskedImageModeling(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : Any =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A__ : List[str] =model(lowerCAmelCase_ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def lowercase__ ( self : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple ) -> Any: '''simple docstring''' A__ : Tuple =self.type_sequence_label_size A__ : Optional[Any] =FocalNetForImageClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : int =model(lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A__ : Any =1 A__ : Tuple =FocalNetForImageClassification(lowerCAmelCase_ ) 
model.to(lowerCAmelCase_ ) model.eval() A__ : List[str] =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A__ : Union[str, Any] =model(lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowercase__ ( self : Optional[int] ) -> Any: '''simple docstring''' A__ : Dict =self.prepare_config_and_inputs() A__ , A__ , A__ : Union[str, Any] =config_and_inputs A__ : Dict ={"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ): '''simple docstring''' __snake_case = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) __snake_case = ( {'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification} if is_torch_available() else {} ) __snake_case = False __snake_case = False __snake_case = False __snake_case = False __snake_case = False def lowercase__ ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' A__ : List[str] =FocalNetModelTester(self ) A__ : Union[str, Any] =ConfigTester(self , config_class=lowerCAmelCase_ , embed_dim=37 , has_text_modality=lowerCAmelCase_ ) def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowercase__ ( self : Tuple ) -> List[str]: '''simple docstring''' return def lowercase__ ( self : int ) -> List[str]: '''simple docstring''' A__ : Tuple =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase_ ) def lowercase__ ( self : Any ) -> Optional[Any]: '''simple docstring''' A__ : Optional[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*lowerCAmelCase_ ) def lowercase__ ( self : str ) -> List[Any]: '''simple docstring''' A__ : List[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase_ ) def lowercase__ ( self : Union[str, Any] ) -> str: '''simple docstring''' A__ : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ ) @unittest.skip(reason="""FocalNet does not use inputs_embeds""" ) def lowercase__ ( self : int ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip(reason="""FocalNet does not use feedforward chunking""" ) def lowercase__ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' pass def lowercase__ ( self : int ) -> Dict: '''simple docstring''' A__ , A__ : Any =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: A__ : Optional[Any] =model_class(lowerCAmelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A__ : Optional[Any] =model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCAmelCase_ , nn.Linear ) ) def lowercase__ ( self : Optional[Any] ) -> Tuple: '''simple docstring''' A__ , A__ : int =self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes[:-1]: A__ : Dict =model_class(lowerCAmelCase_ ) A__ : int =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ : Union[str, Any] =[*signature.parameters.keys()] A__ : List[Any] =["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCAmelCase_ ) def lowercase__ ( self : int , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] ) -> Tuple: '''simple docstring''' A__ : Dict =model_class(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() with torch.no_grad(): A__ : List[Any] =model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) ) A__ : Optional[int] =outputs.hidden_states A__ : Any =getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ ) # FocalNet has a different seq_length A__ : Optional[Any] =( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) A__ : str =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) A__ : str =outputs.reshaped_hidden_states self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ ) A__ , A__ , A__ , A__ : Optional[Any] =reshaped_hidden_states[0].shape A__ : Optional[int] =( reshaped_hidden_states[0].view(lowerCAmelCase_ , lowerCAmelCase_ , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def lowercase__ ( self : Dict ) -> Optional[Any]: '''simple docstring''' A__ , A__ : Any =self.model_tester.prepare_config_and_inputs_for_common() A__ : List[str] =( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: A__ : Any =True self.check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ : Tuple =True self.check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' A__ , A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common() A__ : Optional[Any] =3 A__ : Optional[Any] =( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) A__ : int =( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) A__ : Tuple =image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) A__ : Union[str, Any] =image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: A__ : Any =True self.check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ : Any =True self.check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , (padded_height, padded_width) ) @slow def lowercase__ 
( self : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ : Optional[int] =FocalNetModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) def lowercase__ ( self : List[str] ) -> str: '''simple docstring''' A__ , A__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common() A__ : str =_config_zero_init(lowerCAmelCase_ ) for model_class in self.all_model_classes: A__ : List[Any] =model_class(config=lowerCAmelCase_ ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , ) @require_vision @require_torch class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def lowercase__ ( self : Any ) -> Optional[Any]: '''simple docstring''' # TODO update organization return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None @slow def lowercase__ ( self : Any ) -> int: '''simple docstring''' A__ : str =FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(lowerCAmelCase_ ) A__ : Any =self.default_image_processor A__ : Union[str, Any] =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) A__ : int =image_processor(images=lowerCAmelCase_ , return_tensors="""pt""" ).to(lowerCAmelCase_ ) # forward pass with torch.no_grad(): A__ : Dict =model(**lowerCAmelCase_ ) # verify the logits A__ : Dict =torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) A__ : List[str] =torch.tensor([0.2166, -0.4368, 0.2191] ).to(lowerCAmelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) ) self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 2_81 ) @require_torch class lowerCamelCase ( lowercase_ , unittest.TestCase ): '''simple docstring''' __snake_case = (FocalNetBackbone,) if is_torch_available() else () __snake_case = FocalNetConfig __snake_case = False def lowercase__ ( self : str ) -> Dict: '''simple docstring''' A__ : Union[str, Any] =FocalNetModelTester(self )
687
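The shape assertions in create_and_check_model above encode the usual hierarchical-backbone arithmetic: each stage after the first quarters the token count and doubles the channel width. Spelled out with the tester defaults (a sanity sketch, not library code):

def expected_seq_len(image_size, patch_size, num_stages):
    # (H/p) * (W/p) patches at stage 0, then 4x fewer tokens per later stage
    return ((image_size // patch_size) ** 2) // (4 ** (num_stages - 1))

def expected_dim(embed_dim, num_stages):
    # channel width doubles at each downsampling stage
    return embed_dim * 2 ** (num_stages - 1)

assert expected_seq_len(32, 2, 3) == 16  # (32/2)^2 / 4^2
assert expected_dim(16, 3) == 64         # 16 * 2^2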
'''simple docstring'''
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]

if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
687
1
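The _LazyModule pattern above defers heavy imports until an attribute is first touched. The same idea in plain stdlib form via PEP 562's module-level __getattr__ (names here are illustrative, not part of the transformers API, and the relative import assumes this file lives inside a package):

import importlib

_LAZY = {"TrajectoryTransformerConfig": ".configuration_trajectory_transformer"}

def __getattr__(name):
    # Only invoked for names not already defined in this module (PEP 562).
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")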
'''simple docstring''' import itertools import string from collections.abc import Generator, Iterable def __lowerCamelCase ( __snake_case : Iterable[str], __snake_case : int ) -> Generator[tuple[str, ...], None, None]: """simple docstring""" A__ : List[Any] =iter(__snake_case ) while True: A__ : str =tuple(itertools.islice(__snake_case, __snake_case ) ) if not chunk: return yield chunk def __lowerCamelCase ( __snake_case : str ) -> str: """simple docstring""" A__ : Optional[int] ="""""".join([c.upper() for c in dirty if c in string.ascii_letters] ) A__ : Union[str, Any] ="""""" if len(__snake_case ) < 2: return dirty for i in range(len(__snake_case ) - 1 ): clean += dirty[i] if dirty[i] == dirty[i + 1]: clean += "X" clean += dirty[-1] if len(__snake_case ) & 1: clean += "X" return clean def __lowerCamelCase ( __snake_case : str ) -> list[str]: """simple docstring""" A__ : List[str] ="""ABCDEFGHIKLMNOPQRSTUVWXYZ""" # we're using a list instead of a '2d' array because it makes the math # for setting up the table and doing the actual encoding/decoding simpler A__ : Optional[Any] =[] # copy key chars into the table if they are in `alphabet` ignoring duplicates for char in key.upper(): if char not in table and char in alphabet: table.append(__snake_case ) # fill the rest of the table in with the remaining alphabet chars for char in alphabet: if char not in table: table.append(__snake_case ) return table def __lowerCamelCase ( __snake_case : str, __snake_case : str ) -> str: """simple docstring""" A__ : Optional[int] =generate_table(__snake_case ) A__ : str =prepare_input(__snake_case ) A__ : str ="""""" # https://en.wikipedia.org/wiki/Playfair_cipher#Description for chara, chara in chunker(__snake_case, 2 ): A__ , A__ : Dict =divmod(table.index(__snake_case ), 5 ) A__ , A__ : Dict =divmod(table.index(__snake_case ), 5 ) if rowa == rowa: ciphertext += table[rowa * 5 + (cola + 1) % 5] ciphertext += table[rowa * 5 + (cola + 1) % 5] elif cola == cola: ciphertext += table[((rowa + 1) % 5) * 5 + cola] ciphertext += table[((rowa + 1) % 5) * 5 + cola] else: # rectangle ciphertext += table[rowa * 5 + cola] ciphertext += table[rowa * 5 + cola] return ciphertext def __lowerCamelCase ( __snake_case : str, __snake_case : str ) -> str: """simple docstring""" A__ : Optional[int] =generate_table(__snake_case ) A__ : Tuple ="""""" # https://en.wikipedia.org/wiki/Playfair_cipher#Description for chara, chara in chunker(__snake_case, 2 ): A__ , A__ : Optional[Any] =divmod(table.index(__snake_case ), 5 ) A__ , A__ : List[str] =divmod(table.index(__snake_case ), 5 ) if rowa == rowa: plaintext += table[rowa * 5 + (cola - 1) % 5] plaintext += table[rowa * 5 + (cola - 1) % 5] elif cola == cola: plaintext += table[((rowa - 1) % 5) * 5 + cola] plaintext += table[((rowa - 1) % 5) * 5 + cola] else: # rectangle plaintext += table[rowa * 5 + cola] plaintext += table[rowa * 5 + cola] return plaintext
687
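One worked digraph with the table logic above, using the classic "monarchy" key (J merges into I, matching the 25-letter alphabet in generate_table):

alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
table = []
for char in "MONARCHY" + alphabet:
    if char not in table:
        table.append(char)
# rows: MONAR / CHYBD / EFGIK / LPQST / UVWXZ
row_h, col_h = divmod(table.index("H"), 5)  # H -> (1, 1)
row_i, col_i = divmod(table.index("I"), 5)  # I -> (2, 3)
# different row and column -> rectangle rule: keep rows, swap columns
assert table[row_h * 5 + col_i] + table[row_i * 5 + col_h] == "BF"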
'''simple docstring''' import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def __lowerCamelCase ( __snake_case : Dict ) -> List[str]: """simple docstring""" if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class lowerCamelCase ( nn.Module ): '''simple docstring''' def __init__( self : Union[str, Any] , lowerCAmelCase_ : nn.Module , lowerCAmelCase_ : int ) -> str: '''simple docstring''' super().__init__() A__ : Union[str, Any] =module A__ : Union[str, Any] =nn.Sequential( nn.Linear(module.in_features , lowerCAmelCase_ , bias=lowerCAmelCase_ ) , nn.Linear(lowerCAmelCase_ , module.out_features , bias=lowerCAmelCase_ ) , ) A__ : Tuple =(2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=lowerCAmelCase_ ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def lowercase__ ( self : List[str] , lowerCAmelCase_ : Optional[int] , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : int ) -> Dict: '''simple docstring''' return self.module(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ) + self.adapter(lowerCAmelCase_ ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' __snake_case = 'bigscience/bloom-1b7' # Constant values __snake_case = 2.109659552692574 __snake_case = 'Hello my name is' __snake_case = set() EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. 
I' ) EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' ) EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' ) __snake_case = 10 def lowercase__ ( self : Optional[int] ) -> Tuple: '''simple docstring''' # Models and tokenizer A__ : List[Any] =AutoTokenizer.from_pretrained(self.model_name ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' def lowercase__ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' super().setUp() # Models and tokenizer A__ : Optional[int] =AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map="""auto""" ) A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) def lowercase__ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' A__ : str =self.model_abit.config self.assertTrue(hasattr(lowerCAmelCase_ , """quantization_config""" ) ) A__ : Union[str, Any] =config.to_dict() A__ : Any =config.to_diff_dict() A__ : Optional[Any] =config.to_json_string() def lowercase__ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' from bitsandbytes.nn import Paramsabit A__ : int =self.model_fpaa.get_memory_footprint() A__ : Optional[Any] =self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) A__ : Tuple =get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def lowercase__ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(lowerCAmelCase_ , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def lowercase__ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' A__ : int =self.tokenizer(self.input_text , return_tensors="""pt""" ) A__ : Union[str, Any] =self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS ) def lowercase__ ( self : Optional[Any] ) -> Tuple: '''simple docstring''' A__ : Tuple =BitsAndBytesConfig() A__ : Tuple =True A__ : Optional[int] =AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=lowerCAmelCase_ , device_map="""auto""" ) A__ : Union[str, Any] =self.tokenizer(self.input_text , return_tensors="""pt""" ) A__ : Optional[Any] =model_abit_from_config.generate( input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS ) def lowercase__ ( self : str ) -> List[str]: '''simple docstring''' with self.assertRaises(lowerCAmelCase_ ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(lowerCAmelCase_ ) def lowercase__ ( self : List[str] ) -> Any: '''simple docstring''' A__ : Tuple =BitsAndBytesConfig() with self.assertRaises(lowerCAmelCase_ ): A__ : Dict =AutoModelForCausalLM.from_pretrained( self.model_name , 
quantization_config=lowerCAmelCase_ , load_in_abit=lowerCAmelCase_ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , ) def lowercase__ ( self : List[Any] ) -> Optional[int]: '''simple docstring''' with self.assertRaises(lowerCAmelCase_ ): # Tries with `str` self.model_abit.to("""cpu""" ) with self.assertRaises(lowerCAmelCase_ ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(lowerCAmelCase_ ): # Tries with a `device` self.model_abit.to(torch.device("""cuda:0""" ) ) with self.assertRaises(lowerCAmelCase_ ): # Tries with a `device` self.model_abit.float() with self.assertRaises(lowerCAmelCase_ ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" ) A__ : Optional[Any] =self.model_fpaa.to(torch.floataa ) A__ : Dict =self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error A__ : List[str] =self.model_fpaa.to("""cpu""" ) # Check this does not throw an error A__ : List[str] =self.model_fpaa.half() # Check this does not throw an error A__ : int =self.model_fpaa.float() def lowercase__ ( self : int ) -> Dict: '''simple docstring''' A__ : Dict =AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @classmethod def lowercase__ ( cls : List[str] ) -> Union[str, Any]: '''simple docstring''' A__ : Tuple ="""t5-small""" A__ : Optional[Any] ="""google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense A__ : Optional[int] =AutoTokenizer.from_pretrained(cls.model_name ) A__ : Optional[int] ="""Translate in German: Hello, my dog is cute""" def lowercase__ ( self : Optional[int] ) -> Dict: '''simple docstring''' gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : Dict ) -> Optional[Any]: '''simple docstring''' from transformers import TaForConditionalGeneration A__ : Optional[int] =TaForConditionalGeneration._keep_in_fpaa_modules A__ : Optional[Any] =None # test with `t5-small` A__ : str =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) A__ : List[str] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) A__ : Optional[Any] =model.generate(**lowerCAmelCase_ ) # test with `flan-t5-small` A__ : List[str] =TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) A__ : Tuple =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) A__ : Union[str, Any] =model.generate(**lowerCAmelCase_ ) A__ : Dict =modules def lowercase__ ( self : str ) -> Optional[int]: '''simple docstring''' import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` A__ : Optional[int] =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) A__ : Any =model.generate(**lowerCAmelCase_ ) # test with 
`flan-t5-small` A__ : Union[str, Any] =TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) A__ : Optional[int] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) A__ : Dict =model.generate(**lowerCAmelCase_ ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' def lowercase__ ( self : List[Any] ) -> int: '''simple docstring''' super().setUp() # model_name A__ : Any ="""bigscience/bloom-560m""" A__ : List[Any] ="""t5-small""" # Different types of model A__ : Dict =AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) # Sequence classification model A__ : List[Any] =AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) # CausalLM model A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) # Seq2seq model A__ : List[str] =AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) def lowercase__ ( self : Dict ) -> int: '''simple docstring''' del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : str ) -> List[Any]: '''simple docstring''' from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' def lowercase__ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' super().setUp() def lowercase__ ( self : Optional[Any] ) -> int: '''simple docstring''' del self.pipe gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : Any ) -> Union[str, Any]: '''simple docstring''' A__ : Dict =pipeline( """text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass A__ : Optional[int] =self.pipe(self.input_text ) self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class lowerCamelCase ( lowercase_ ): '''simple docstring''' def lowercase__ ( self : str ) -> int: '''simple docstring''' super().setUp() def lowercase__ ( self : Tuple ) -> Tuple: '''simple docstring''' A__ : int =AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""balanced""" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model A__ : str =self.tokenizer(self.input_text , return_tensors="""pt""" ) # Second real batch A__ : Any =model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' def lowercase__ ( self : int ) -> Optional[Any]: '''simple docstring''' A__ : Union[str, Any] ="""facebook/opt-350m""" super().setUp() def 
lowercase__ ( self : List[str] ) -> Dict: '''simple docstring''' if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ): return # Step 1: freeze all parameters A__ : Optional[Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): A__ : int =False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability A__ : Dict =param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(lowerCAmelCase_ ) ): A__ : int =LoRALayer(module.q_proj , rank=16 ) A__ : Any =LoRALayer(module.k_proj , rank=16 ) A__ : Union[str, Any] =LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch A__ : List[Any] =self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): A__ : Any =model.forward(**lowerCAmelCase_ ) out.logits.norm().backward() for module in model.modules(): if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(lowerCAmelCase_ , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = 'gpt2-xl' __snake_case = 3.3191854854152187
687
1
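The 4-bit tests above all funnel through `BitsAndBytesConfig` and `from_pretrained`. A minimal sketch of that loading path, assuming a CUDA machine with `bitsandbytes` and `accelerate` installed; the model id and prompt are just examples:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# NF4 4-bit weights, mirroring the quantization config the tests construct.
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)
model = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-560m", quantization_config=quant_config, device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
inputs = tokenizer("Hello my name is", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=10)[0], skip_special_tokens=True))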
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __snake_case : Tuple = logging.get_logger(__name__) __snake_case : int = { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json', 'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json', 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json', 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json', 'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json', 'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json', 'cl-tohoku/bert-base-japanese-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json' ), 'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json', # See all BERT models at https://huggingface.co/models?filter=bert } class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = 'bert' def __init__( self : Optional[int] , lowerCAmelCase_ : Any=3_05_22 , lowerCAmelCase_ : List[Any]=7_68 , lowerCAmelCase_ : Tuple=12 , lowerCAmelCase_ : List[Any]=12 , lowerCAmelCase_ : Any=30_72 , lowerCAmelCase_ : str="gelu" , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : List[str]=5_12 , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : Optional[int]=0.02 , lowerCAmelCase_ : str=1e-12 , lowerCAmelCase_ : Dict=0 , lowerCAmelCase_ : 
Optional[int]="absolute" , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Tuple=None , **lowerCAmelCase_ : Dict , ) -> Optional[int]: '''simple docstring''' super().__init__(pad_token_id=lowerCAmelCase_ , **lowerCAmelCase_ ) A__ : Any =vocab_size A__ : Any =hidden_size A__ : Dict =num_hidden_layers A__ : Optional[int] =num_attention_heads A__ : Union[str, Any] =hidden_act A__ : Any =intermediate_size A__ : Union[str, Any] =hidden_dropout_prob A__ : str =attention_probs_dropout_prob A__ : Optional[int] =max_position_embeddings A__ : Union[str, Any] =type_vocab_size A__ : List[Any] =initializer_range A__ : Union[str, Any] =layer_norm_eps A__ : Tuple =position_embedding_type A__ : Tuple =use_cache A__ : Any =classifier_dropout class lowerCamelCase ( lowercase_ ): '''simple docstring''' @property def lowercase__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": A__ : Union[str, Any] ={0: """batch""", 1: """choice""", 2: """sequence"""} else: A__ : List[str] ={0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
687
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


__snake_case : Optional[int] = logging.get_logger(__name__)


class lowerCamelCase ( YolosImageProcessor ):
    '''simple docstring'''

    def __init__( self : Tuple , *args , **kwargs ) -> None:
        '''simple docstring'''
        warnings.warn(
            """The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use YolosImageProcessor instead.""" ,
            FutureWarning ,
        )
        super().__init__(*args , **kwargs )
687
1
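The BERT configuration sample above serializes through the shared `PretrainedConfig` machinery (`to_dict`, `to_diff_dict`, `to_json_string`). A small round-trip sketch with toy sizes, assuming only `transformers` is installed:

from transformers import BertConfig

config = BertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2, intermediate_size=256)
restored = BertConfig.from_dict(config.to_dict())  # dict round-trip keeps every field
assert restored.hidden_size == config.hidden_size
print(config.to_diff_dict())  # only the values that differ from the BERT defaults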
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case : Dict = logging.get_logger(__name__) __snake_case : int = { 'google/vivit-b-16x2-kinetics400': ( 'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json' ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = 'vivit' def __init__( self : str , lowerCAmelCase_ : List[str]=2_24 , lowerCAmelCase_ : str=32 , lowerCAmelCase_ : int=[2, 16, 16] , lowerCAmelCase_ : List[Any]=3 , lowerCAmelCase_ : int=7_68 , lowerCAmelCase_ : List[str]=12 , lowerCAmelCase_ : Optional[Any]=12 , lowerCAmelCase_ : List[Any]=30_72 , lowerCAmelCase_ : Tuple="gelu_fast" , lowerCAmelCase_ : Optional[Any]=0.0 , lowerCAmelCase_ : List[Any]=0.0 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : Any=1e-06 , lowerCAmelCase_ : List[Any]=True , **lowerCAmelCase_ : List[str] , ) -> List[Any]: '''simple docstring''' A__ : Union[str, Any] =hidden_size A__ : Optional[int] =num_hidden_layers A__ : List[Any] =num_attention_heads A__ : int =intermediate_size A__ : Optional[int] =hidden_act A__ : Optional[int] =hidden_dropout_prob A__ : Dict =attention_probs_dropout_prob A__ : Optional[int] =initializer_range A__ : List[str] =layer_norm_eps A__ : str =image_size A__ : Dict =num_frames A__ : str =tubelet_size A__ : Union[str, Any] =num_channels A__ : List[Any] =qkv_bias super().__init__(**lowerCAmelCase_ )
687
'''simple docstring''' import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCamelCase : '''simple docstring''' def __init__( self : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple=13 , lowerCAmelCase_ : Any=7 , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : str=99 , lowerCAmelCase_ : int=0 , lowerCAmelCase_ : str=32 , lowerCAmelCase_ : List[str]=5 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : List[Any]=5_12 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : int=2 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : List[str]="last" , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[str]=0 , ) -> Tuple: '''simple docstring''' A__ : Tuple =parent A__ : Any =batch_size A__ : List[str] =seq_length A__ : Optional[Any] =is_training A__ : Dict =use_input_lengths A__ : int =use_token_type_ids A__ : Union[str, Any] =use_labels A__ : Optional[Any] =gelu_activation A__ : List[Any] =sinusoidal_embeddings A__ : List[Any] =causal A__ : str =asm A__ : Tuple =n_langs A__ : Dict =vocab_size A__ : Optional[Any] =n_special A__ : Tuple =hidden_size A__ : Dict =num_hidden_layers A__ : int =num_attention_heads A__ : Optional[Any] =hidden_dropout_prob A__ : Optional[Any] =attention_probs_dropout_prob A__ : Optional[int] =max_position_embeddings A__ : Optional[int] =type_sequence_label_size A__ : Tuple =initializer_range A__ : Any =num_labels A__ : str =num_choices A__ : Optional[int] =summary_type A__ : int =use_proj A__ : Tuple =scope A__ : Union[str, Any] =bos_token_id def lowercase__ ( self : Any ) -> Optional[Any]: '''simple docstring''' A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ : Dict =random_attention_mask([self.batch_size, self.seq_length] ) A__ : Tuple =None if self.use_input_lengths: A__ : Tuple =( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length A__ : Optional[Any] =None if self.use_token_type_ids: A__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) A__ : Any =None A__ : Tuple =None A__ : Optional[Any] =None if self.use_labels: A__ : List[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A__ : Union[str, Any] =ids_tensor([self.batch_size] , 2 ).float() A__ : str =ids_tensor([self.batch_size] , self.num_choices ) A__ : Union[str, Any] =self.get_config() return ( config, 
input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , ) -> Optional[Any]: '''simple docstring''' A__ : List[str] =XLMModel(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : Dict =model(lowerCAmelCase_ , lengths=lowerCAmelCase_ , langs=lowerCAmelCase_ ) A__ : Any =model(lowerCAmelCase_ , langs=lowerCAmelCase_ ) A__ : Tuple =model(lowerCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , ) -> Union[str, Any]: '''simple docstring''' A__ : List[Any] =XLMWithLMHeadModel(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : Tuple =model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase__ ( self : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] , ) -> str: '''simple docstring''' A__ : Union[str, Any] =XLMForQuestionAnsweringSimple(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : List[str] =model(lowerCAmelCase_ ) A__ : Optional[int] =model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ ) A__ : List[Any] =outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase__ ( self : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int , ) -> Any: '''simple docstring''' A__ : str =XLMForQuestionAnswering(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : List[str] =model(lowerCAmelCase_ ) A__ : Tuple =model( lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ 
, cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , p_mask=lowerCAmelCase_ , ) A__ : Optional[Any] =model( lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , ) ((A__) , ) : List[Any] =result_with_labels.to_tuple() A__ : Tuple =model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ ) ((A__) , ) : Tuple =result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def lowercase__ ( self : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int , ) -> Any: '''simple docstring''' A__ : Union[str, Any] =XLMForSequenceClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : str =model(lowerCAmelCase_ ) A__ : List[Any] =model(lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowercase__ ( self : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , ) -> Dict: '''simple docstring''' A__ : int =self.num_labels A__ : Tuple =XLMForTokenClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : Any =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , ) -> List[str]: '''simple docstring''' A__ : Optional[Any] =self.num_choices A__ : Optional[int] =XLMForMultipleChoice(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : Optional[int] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A__ : str =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A__ : Union[str, Any] =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A__ : Union[str, Any] =model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowercase__ ( self : str ) -> List[str]: '''simple docstring''' A__ : Dict =self.prepare_config_and_inputs() ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , 
( A__ ) , ( A__ ) , ) : Optional[int] =config_and_inputs A__ : Any ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths} return config, inputs_dict @require_torch class lowerCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ): '''simple docstring''' __snake_case = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) __snake_case = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable __snake_case = ( { 'feature-extraction': XLMModel, 'fill-mask': XLMWithLMHeadModel, 'question-answering': XLMForQuestionAnsweringSimple, 'text-classification': XLMForSequenceClassification, 'text-generation': XLMWithLMHeadModel, 'token-classification': XLMForTokenClassification, 'zero-shot': XLMForSequenceClassification, } if is_torch_available() else {} ) def lowercase__ ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] ) -> Union[str, Any]: '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("""Fast""" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str]=False ) -> int: '''simple docstring''' A__ : Tuple =super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": A__ : List[str] =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ ) A__ : Any =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ ) return inputs_dict def lowercase__ ( self : Union[str, Any] ) -> str: '''simple docstring''' A__ : Dict =XLMModelTester(self ) A__ : List[str] =ConfigTester(self , config_class=lowerCAmelCase_ , emb_dim=37 ) def lowercase__ ( self : Tuple ) -> List[str]: '''simple docstring''' self.config_tester.run_common_tests() def lowercase__ ( self : str ) -> Optional[int]: '''simple docstring''' A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*lowerCAmelCase_ ) def lowercase__ ( self : Dict ) -> Optional[int]: '''simple docstring''' A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase_ ) def lowercase__ ( self : List[str] ) -> Dict: '''simple docstring''' A__ : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase_ ) def lowercase__ ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase_ ) def lowercase__ ( self : List[Any] ) -> str: '''simple docstring''' A__ : List[str] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase_ ) def 
lowercase__ ( self : Any ) -> Tuple: '''simple docstring''' A__ : Optional[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase_ ) def lowercase__ ( self : Optional[int] ) -> Any: '''simple docstring''' A__ : Optional[int] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase_ ) def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Tuple=1 ) -> Tuple: '''simple docstring''' self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual( [isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase_ ) ) self.assertEqual(len(lowerCAmelCase_ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(lowerCAmelCase_ ): # adds PAD dummy token A__ : Tuple =min_length + idx + 1 A__ : Tuple =min_length + idx + 1 A__ : Dict =( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase_ ) ) def lowercase__ ( self : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Union[str, Any]=1 ) -> Any: '''simple docstring''' self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual( [isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase_ ) , ) self.assertEqual(len(lowerCAmelCase_ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(lowerCAmelCase_ ): # adds PAD dummy token A__ : str =min_length + idx + 1 A__ : List[Any] =(batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase_ ) , ) pass @slow def lowercase__ ( self : int ) -> List[Any]: '''simple docstring''' for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ : Tuple =XLMModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) @require_torch class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowercase__ ( self : Optional[Any] ) -> int: '''simple docstring''' A__ : Any =XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" ) model.to(lowerCAmelCase_ ) A__ : List[Any] =torch.tensor([[14, 4_47]] , dtype=torch.long , device=lowerCAmelCase_ ) # the president A__ : Optional[Any] =[ 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference A__ : Tuple =model.generate(lowerCAmelCase_ , do_sample=lowerCAmelCase_ ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase_ )
687
1
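The XLM tester above builds a tiny `XLMConfig` (via `emb_dim`, `n_layers`, `n_heads`) and asserts on output shapes. The same check as a stand-alone forward pass, with made-up toy sizes:

import torch
from transformers import XLMConfig, XLMModel

config = XLMConfig(vocab_size=99, emb_dim=32, n_layers=2, n_heads=4, max_position_embeddings=512)
model = XLMModel(config).eval()
input_ids = torch.randint(0, config.vocab_size, (2, 7))  # (batch_size, seq_length)
with torch.no_grad():
    hidden = model(input_ids).last_hidden_state
print(hidden.shape)  # torch.Size([2, 7, 32])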
'''simple docstring''' import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, UNetaDConditionModel, VideoToVideoSDPipeline, ) from diffusers.utils import floats_tensor, is_xformers_available, skip_mps from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class lowerCamelCase ( lowercase_ , unittest.TestCase ): '''simple docstring''' __snake_case = VideoToVideoSDPipeline __snake_case = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'video'} ) - {'image', 'width', 'height'} __snake_case = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'video'} ) - {'image'} __snake_case = PipelineTesterMixin.required_optional_params - {'latents'} __snake_case = False # No `output_type`. __snake_case = frozenset( [ 'num_inference_steps', 'generator', 'latents', 'return_dict', 'callback', 'callback_steps', ] ) def lowercase__ ( self : Tuple ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) A__ : int =UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , ) A__ : List[Any] =DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=lowerCAmelCase_ , set_alpha_to_one=lowerCAmelCase_ , ) torch.manual_seed(0 ) A__ : Optional[int] =AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) A__ : List[Any] =CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , ) A__ : List[Any] =CLIPTextModel(lowerCAmelCase_ ) A__ : List[Any] =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) A__ : str ={ """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, } return components def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : str=0 ) -> Union[str, Any]: '''simple docstring''' # 3 frames A__ : List[Any] =floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ ) if str(lowerCAmelCase_ ).startswith("""mps""" ): A__ : Union[str, Any] =torch.manual_seed(lowerCAmelCase_ ) else: A__ : Union[str, Any] =torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ ) A__ : Optional[int] ={ """prompt""": """A painting of a squirrel eating a burger""", """video""": video, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """pt""", } return inputs def lowercase__ ( self : Optional[Any] ) -> str: '''simple docstring''' A__ : str ="""cpu""" # ensure determinism 
for the device-dependent torch.Generator A__ : Optional[int] =self.get_dummy_components() A__ : Optional[int] =VideoToVideoSDPipeline(**lowerCAmelCase_ ) A__ : List[Any] =sd_pipe.to(lowerCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) A__ : Dict =self.get_dummy_inputs(lowerCAmelCase_ ) A__ : Any ="""np""" A__ : str =sd_pipe(**lowerCAmelCase_ ).frames A__ : List[str] =frames[0][-3:, -3:, -1] assert frames[0].shape == (32, 32, 3) A__ : int =np.array([1_06, 1_17, 1_13, 1_74, 1_37, 1_12, 1_48, 1_51, 1_31] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def lowercase__ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCAmelCase_ , expected_max_diff=5e-3 ) @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def lowercase__ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' pass @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def lowercase__ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" ) def lowercase__ ( self : str ) -> Any: '''simple docstring''' pass def lowercase__ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' return super().test_progress_bar() @slow @skip_mps class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowercase__ ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' A__ : Dict =VideoToVideoSDPipeline.from_pretrained("""cerspense/zeroscope_v2_XL""" , torch_dtype=torch.floataa ) pipe.enable_model_cpu_offload() # 10 frames A__ : Tuple =torch.Generator(device="""cpu""" ).manual_seed(0 ) A__ : Optional[int] =torch.randn((1, 10, 3, 10_24, 5_76) , generator=lowerCAmelCase_ ) A__ : int =video.to("""cuda""" ) A__ : Optional[int] ="""Spiderman is surfing""" A__ : Dict =pipe(lowerCAmelCase_ , video=lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=3 , output_type="""pt""" ).frames A__ : Dict =np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656] ) assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
687
'''simple docstring''' import contextlib import copy import random from typing import Any, Dict, Iterable, Optional, Union import numpy as np import torch from .utils import deprecate, is_transformers_available if is_transformers_available(): import transformers def __lowerCamelCase ( __snake_case : int ) -> Optional[int]: """simple docstring""" random.seed(__snake_case ) np.random.seed(__snake_case ) torch.manual_seed(__snake_case ) torch.cuda.manual_seed_all(__snake_case ) # ^^ safe to call this function even if cuda is not available class lowerCamelCase : '''simple docstring''' def __init__( self : Optional[Any] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] , lowerCAmelCase_ : float = 0.9999 , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Union[float, int] = 1.0 , lowerCAmelCase_ : Union[float, int] = 2 / 3 , lowerCAmelCase_ : Optional[Any] = None , lowerCAmelCase_ : Dict[str, Any] = None , **lowerCAmelCase_ : Optional[Any] , ) -> List[str]: '''simple docstring''' if isinstance(lowerCAmelCase_ , torch.nn.Module ): A__ : Optional[Any] =( """Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. """ """Please pass the parameters of the module instead.""" ) deprecate( """passing a `torch.nn.Module` to `ExponentialMovingAverage`""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ , ) A__ : List[str] =parameters.parameters() # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility A__ : int =True if kwargs.get("""max_value""" , lowerCAmelCase_ ) is not None: A__ : Tuple ="""The `max_value` argument is deprecated. Please use `decay` instead.""" deprecate("""max_value""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ ) A__ : Union[str, Any] =kwargs["""max_value"""] if kwargs.get("""min_value""" , lowerCAmelCase_ ) is not None: A__ : List[str] ="""The `min_value` argument is deprecated. Please use `min_decay` instead.""" deprecate("""min_value""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ ) A__ : Optional[Any] =kwargs["""min_value"""] A__ : Any =list(lowerCAmelCase_ ) A__ : int =[p.clone().detach() for p in parameters] if kwargs.get("""device""" , lowerCAmelCase_ ) is not None: A__ : List[str] ="""The `device` argument is deprecated. 
Please use `to` instead.""" deprecate("""device""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ ) self.to(device=kwargs["""device"""] ) A__ : Optional[int] =None A__ : Any =decay A__ : List[Any] =min_decay A__ : Optional[int] =update_after_step A__ : List[str] =use_ema_warmup A__ : str =inv_gamma A__ : Union[str, Any] =power A__ : str =0 A__ : str =None # set in `step()` A__ : List[str] =model_cls A__ : Optional[int] =model_config @classmethod def lowercase__ ( cls : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict ) -> "EMAModel": '''simple docstring''' A__ , A__ : Tuple =model_cls.load_config(lowerCAmelCase_ , return_unused_kwargs=lowerCAmelCase_ ) A__ : Optional[Any] =model_cls.from_pretrained(lowerCAmelCase_ ) A__ : Optional[Any] =cls(model.parameters() , model_cls=lowerCAmelCase_ , model_config=model.config ) ema_model.load_state_dict(lowerCAmelCase_ ) return ema_model def lowercase__ ( self : List[str] , lowerCAmelCase_ : Tuple ) -> List[Any]: '''simple docstring''' if self.model_cls is None: raise ValueError("""`save_pretrained` can only be used if `model_cls` was defined at __init__.""" ) if self.model_config is None: raise ValueError("""`save_pretrained` can only be used if `model_config` was defined at __init__.""" ) A__ : Optional[int] =self.model_cls.from_config(self.model_config ) A__ : Optional[Any] =self.state_dict() state_dict.pop("""shadow_params""" , lowerCAmelCase_ ) model.register_to_config(**lowerCAmelCase_ ) self.copy_to(model.parameters() ) model.save_pretrained(lowerCAmelCase_ ) def lowercase__ ( self : Dict , lowerCAmelCase_ : int ) -> float: '''simple docstring''' A__ : Optional[int] =max(0 , optimization_step - self.update_after_step - 1 ) if step <= 0: return 0.0 if self.use_ema_warmup: A__ : List[Any] =1 - (1 + step / self.inv_gamma) ** -self.power else: A__ : Union[str, Any] =(1 + step) / (10 + step) A__ : str =min(lowerCAmelCase_ , self.decay ) # make sure decay is not smaller than min_decay A__ : int =max(lowerCAmelCase_ , self.min_decay ) return cur_decay_value @torch.no_grad() def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> Optional[Any]: '''simple docstring''' if isinstance(lowerCAmelCase_ , torch.nn.Module ): A__ : Any =( """Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. """ """Please pass the parameters of the module instead.""" ) deprecate( """passing a `torch.nn.Module` to `ExponentialMovingAverage.step`""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ , ) A__ : Optional[int] =parameters.parameters() A__ : Dict =list(lowerCAmelCase_ ) self.optimization_step += 1 # Compute the decay factor for the exponential moving average. 
A__ : Any =self.get_decay(self.optimization_step ) A__ : Optional[int] =decay A__ : List[str] =1 - decay A__ : str =contextlib.nullcontext if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): import deepspeed for s_param, param in zip(self.shadow_params , lowerCAmelCase_ ): if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): A__ : List[Any] =deepspeed.zero.GatheredParameters(lowerCAmelCase_ , modifier_rank=lowerCAmelCase_ ) with context_manager(): if param.requires_grad: s_param.sub_(one_minus_decay * (s_param - param) ) else: s_param.copy_(lowerCAmelCase_ ) def lowercase__ ( self : Tuple , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None: '''simple docstring''' A__ : Optional[Any] =list(lowerCAmelCase_ ) for s_param, param in zip(self.shadow_params , lowerCAmelCase_ ): param.data.copy_(s_param.to(param.device ).data ) def lowercase__ ( self : int , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : List[Any]=None ) -> None: '''simple docstring''' A__ : str =[ p.to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ) if p.is_floating_point() else p.to(device=lowerCAmelCase_ ) for p in self.shadow_params ] def lowercase__ ( self : Optional[Any] ) -> dict: '''simple docstring''' return { "decay": self.decay, "min_decay": self.min_decay, "optimization_step": self.optimization_step, "update_after_step": self.update_after_step, "use_ema_warmup": self.use_ema_warmup, "inv_gamma": self.inv_gamma, "power": self.power, "shadow_params": self.shadow_params, } def lowercase__ ( self : Tuple , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None: '''simple docstring''' A__ : List[str] =[param.detach().cpu().clone() for param in parameters] def lowercase__ ( self : List[str] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None: '''simple docstring''' if self.temp_stored_params is None: raise RuntimeError("""This ExponentialMovingAverage has no `store()`ed weights """ """to `restore()`""" ) for c_param, param in zip(self.temp_stored_params , lowerCAmelCase_ ): param.data.copy_(c_param.data ) # Better memory-wise. 
A__ : List[str] =None def lowercase__ ( self : List[str] , lowerCAmelCase_ : dict ) -> None: '''simple docstring''' A__ : List[Any] =copy.deepcopy(lowerCAmelCase_ ) A__ : List[Any] =state_dict.get("""decay""" , self.decay ) if self.decay < 0.0 or self.decay > 1.0: raise ValueError("""Decay must be between 0 and 1""" ) A__ : List[Any] =state_dict.get("""min_decay""" , self.min_decay ) if not isinstance(self.min_decay , lowerCAmelCase_ ): raise ValueError("""Invalid min_decay""" ) A__ : Tuple =state_dict.get("""optimization_step""" , self.optimization_step ) if not isinstance(self.optimization_step , lowerCAmelCase_ ): raise ValueError("""Invalid optimization_step""" ) A__ : Any =state_dict.get("""update_after_step""" , self.update_after_step ) if not isinstance(self.update_after_step , lowerCAmelCase_ ): raise ValueError("""Invalid update_after_step""" ) A__ : str =state_dict.get("""use_ema_warmup""" , self.use_ema_warmup ) if not isinstance(self.use_ema_warmup , lowerCAmelCase_ ): raise ValueError("""Invalid use_ema_warmup""" ) A__ : str =state_dict.get("""inv_gamma""" , self.inv_gamma ) if not isinstance(self.inv_gamma , (float, int) ): raise ValueError("""Invalid inv_gamma""" ) A__ : Tuple =state_dict.get("""power""" , self.power ) if not isinstance(self.power , (float, int) ): raise ValueError("""Invalid power""" ) A__ : Tuple =state_dict.get("""shadow_params""" , lowerCAmelCase_ ) if shadow_params is not None: A__ : List[str] =shadow_params if not isinstance(self.shadow_params , lowerCAmelCase_ ): raise ValueError("""shadow_params must be a list""" ) if not all(isinstance(lowerCAmelCase_ , torch.Tensor ) for p in self.shadow_params ): raise ValueError("""shadow_params must all be Tensors""" )
687
1
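The core of the EMA class above is the shadow-parameter update `s_param.sub_(one_minus_decay * (s_param - param))`. A self-contained sketch of that bookkeeping on a toy module; the random perturbation stands in for a real optimizer step:

import torch

model = torch.nn.Linear(4, 4)
shadow = [p.clone().detach() for p in model.parameters()]  # EMA copies
decay = 0.999
for _ in range(3):  # stand-in for training steps
    for p in model.parameters():
        p.data.add_(0.01 * torch.randn_like(p))  # fake optimizer update
    for s, p in zip(shadow, model.parameters()):
        s.sub_((1.0 - decay) * (s - p))  # s = decay * s + (1 - decay) * p
for s, p in zip(shadow, model.parameters()):  # evaluate with the averaged weights
    p.data.copy_(s)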
'''simple docstring'''
import os


def __lowerCamelCase ( ) -> str:
    """simple docstring"""
    A__ : str = os.path.join(os.path.dirname(__file__ ) , """num.txt""" )
    with open(A__ ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:10]


if __name__ == "__main__":
    print(__lowerCamelCase())
687
'''simple docstring''' from __future__ import annotations import requests __snake_case : Union[str, Any] = set( 'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split() ) def __lowerCamelCase ( __snake_case : str, __snake_case : int = 1, __snake_case : str = "new", __snake_case : list | None = None ) -> dict: """simple docstring""" A__ : Union[str, Any] =wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(__snake_case ) - valid_terms ) ): A__ : Optional[int] =f"Invalid search term: {invalid_search_terms}" raise ValueError(__snake_case ) A__ : Tuple =requests.get( f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}", headers={"""User-agent""": """A random string"""}, ) if response.status_code == 429: raise requests.HTTPError A__ : Tuple =response.json() if not wanted_data: return {id_: data["data"]["children"][id_] for id_ in range(__snake_case )} A__ : Tuple ={} for id_ in range(__snake_case ): A__ : List[Any] ={ item: data["""data"""]["""children"""][id_]["""data"""][item] for item in wanted_data } return data_dict if __name__ == "__main__": # If you get Error 429, that means you are rate limited.Try after some time print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
687
1
'''simple docstring'''
def __lowerCamelCase ( __snake_case : int | float | str ) -> tuple[int, int]:
    """simple docstring"""
    try:
        decimal = float(__snake_case )
    except ValueError:
        raise ValueError("""Please enter a valid number""" )
    fractional_part = decimal - int(decimal )
    if fractional_part == 0:
        return int(decimal ), 1
    else:
        number_of_frac_digits = len(str(decimal ).split(""".""" )[1] )
        numerator = int(decimal * (10**number_of_frac_digits) )
        denominator = 10**number_of_frac_digits
        dividend , divisor = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend , divisor = divisor, remainder
        numerator , denominator = numerator / divisor, denominator / divisor
        return int(numerator ), int(denominator )


if __name__ == "__main__":
    print(F"""{__lowerCamelCase(2) = }""")
    print(F"""{__lowerCamelCase(89.0) = }""")
    print(F"""{__lowerCamelCase('67') = }""")
    print(F"""{__lowerCamelCase('45.0') = }""")
    print(F"""{__lowerCamelCase(1.5) = }""")
    print(F"""{__lowerCamelCase('6.25') = }""")
    print(F"""{__lowerCamelCase('78td') = }""")
687
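The reduction loop in the fraction sample above is Euclid's GCD algorithm written out by hand. A shorter equivalent built on `math.gcd`, given only as a sketch of the same idea:

from math import gcd

def decimal_to_fraction_gcd(value: float) -> tuple[int, int]:
    # Scale by a power of ten, then divide out the common factor.
    digits = len(str(value).split(".")[1]) if "." in str(value) else 0
    numerator, denominator = int(value * 10**digits), 10**digits
    g = gcd(numerator, denominator)
    return numerator // g, denominator // g

print(decimal_to_fraction_gcd(6.25))  # (25, 4)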
'''simple docstring''' import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) __snake_case : Union[str, Any] = logging.getLogger(__name__) __snake_case : int = tf.data.AUTOTUNE def __lowerCamelCase ( ) -> List[Any]: """simple docstring""" A__ : str =argparse.ArgumentParser(description="""Train a masked language model on TPU.""" ) parser.add_argument( """--pretrained_model_config""", type=__snake_case, default="""roberta-base""", help="""The model config to use. Note that we don't copy the model's weights, only the config!""", ) parser.add_argument( """--tokenizer""", type=__snake_case, default="""unigram-tokenizer-wikitext""", help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""", ) parser.add_argument( """--per_replica_batch_size""", type=__snake_case, default=8, help="""Batch size per TPU core.""", ) parser.add_argument( """--no_tpu""", action="""store_true""", help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""", ) parser.add_argument( """--tpu_name""", type=__snake_case, help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""", default="""local""", ) parser.add_argument( """--tpu_zone""", type=__snake_case, help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""", ) parser.add_argument( """--gcp_project""", type=__snake_case, help="""Google cloud project name. Only used for non-Colab TPU nodes.""" ) parser.add_argument( """--bfloat16""", action="""store_true""", help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""", ) parser.add_argument( """--train_dataset""", type=__snake_case, help="""Path to training dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""", ) parser.add_argument( """--shuffle_buffer_size""", type=__snake_case, default=2**18, help="""Size of the shuffle buffer (in samples)""", ) parser.add_argument( """--eval_dataset""", type=__snake_case, help="""Path to evaluation dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""", ) parser.add_argument( """--num_epochs""", type=__snake_case, default=1, help="""Number of epochs to train for.""", ) parser.add_argument( """--learning_rate""", type=__snake_case, default=1E-4, help="""Learning rate to use for training.""", ) parser.add_argument( """--weight_decay_rate""", type=__snake_case, default=1E-3, help="""Weight decay rate to use for training.""", ) parser.add_argument( """--max_length""", type=__snake_case, default=512, help="""Maximum length of tokenized sequences. 
Should match the setting used in prepare_tfrecord_shards.py""", ) parser.add_argument( """--mlm_probability""", type=__snake_case, default=0.15, help="""Fraction of tokens to mask during training.""", ) parser.add_argument("""--output_dir""", type=__snake_case, required=__snake_case, help="""Path to save model checkpoints to.""" ) parser.add_argument("""--hub_model_id""", type=__snake_case, help="""Model ID to upload to on the Hugging Face Hub.""" ) A__ : Optional[Any] =parser.parse_args() return args def __lowerCamelCase ( __snake_case : Optional[Any] ) -> Union[str, Any]: """simple docstring""" try: if args.tpu_name: A__ : List[Any] =tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name, zone=args.tpu_zone, project=args.gcp_project ) else: A__ : Optional[int] =tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( """Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """ """--gcp_project. When running on a TPU VM, use --tpu_name local.""" ) tf.config.experimental_connect_to_cluster(__snake_case ) tf.tpu.experimental.initialize_tpu_system(__snake_case ) return tpu def __lowerCamelCase ( __snake_case : Optional[int] ) -> Dict: """simple docstring""" A__ : Any =0 for file in file_list: A__ : Optional[int] =file.split("""/""" )[-1] A__ : Union[str, Any] =re.search(r"""-\d+-(\d+)\.tfrecord""", __snake_case ).group(1 ) A__ : str =int(__snake_case ) num_samples += sample_count return num_samples def __lowerCamelCase ( __snake_case : List[str], __snake_case : int, __snake_case : Any, __snake_case : List[Any], __snake_case : int, __snake_case : List[Any]=None ) -> Optional[int]: """simple docstring""" A__ : List[str] =count_samples(__snake_case ) A__ : Union[str, Any] =tf.data.Dataset.from_tensor_slices(__snake_case ) if shuffle: A__ : Optional[int] =dataset.shuffle(len(__snake_case ) ) A__ : List[str] =tf.data.TFRecordDataset(__snake_case, num_parallel_reads=__snake_case ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here A__ : int =dataset.apply(tf.data.experimental.assert_cardinality(__snake_case ) ) A__ : Any =dataset.map(__snake_case, num_parallel_calls=__snake_case ) if shuffle: assert shuffle_buffer_size is not None A__ : List[Any] =dataset.shuffle(args.shuffle_buffer_size ) A__ : int =dataset.batch(__snake_case, drop_remainder=__snake_case ) A__ : Optional[int] =dataset.map(__snake_case, num_parallel_calls=__snake_case ) A__ : Tuple =dataset.prefetch(__snake_case ) return dataset def __lowerCamelCase ( __snake_case : List[Any] ) -> Tuple: """simple docstring""" if not args.no_tpu: A__ : Dict =initialize_tpu(__snake_case ) A__ : int =tf.distribute.TPUStrategy(__snake_case ) else: A__ : List[str] =tf.distribute.OneDeviceStrategy(device="""/gpu:0""" ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" ) A__ : Tuple =AutoTokenizer.from_pretrained(args.tokenizer ) A__ : List[str] =AutoConfig.from_pretrained(args.pretrained_model_config ) A__ : Optional[Any] =tokenizer.vocab_size A__ : Tuple =tf.io.gfile.glob(os.path.join(args.train_dataset, """*.tfrecord""" ) ) if not training_records: raise ValueError(f"No .tfrecord files found in {args.train_dataset}." ) A__ : Optional[Any] =tf.io.gfile.glob(os.path.join(args.eval_dataset, """*.tfrecord""" ) ) if not eval_records: raise ValueError(f"No .tfrecord files found in {args.eval_dataset}." 
) A__ : Optional[Any] =count_samples(__snake_case ) A__ : str =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) A__ : str =steps_per_epoch * args.num_epochs with strategy.scope(): A__ : List[str] =TFAutoModelForMaskedLM.from_config(__snake_case ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built A__ , A__ : Optional[Any] =create_optimizer( num_train_steps=__snake_case, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=__snake_case, metrics=["""accuracy"""] ) def decode_fn(__snake_case : Tuple ): A__ : Dict ={ """input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa, shape=(args.max_length,) ), """attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa, shape=(args.max_length,) ), } return tf.io.parse_single_example(__snake_case, __snake_case ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. A__ : List[Any] =DataCollatorForLanguageModeling( tokenizer=__snake_case, mlm_probability=args.mlm_probability, mlm=__snake_case, return_tensors="""tf""" ) def mask_with_collator(__snake_case : Optional[int] ): # TF really needs an isin() function A__ : Union[str, Any] =( ~tf.cast(batch["""attention_mask"""], tf.bool ) | (batch["""input_ids"""] == tokenizer.cls_token_id) | (batch["""input_ids"""] == tokenizer.sep_token_id) ) A__ , A__ : List[str] =data_collator.tf_mask_tokens( batch["""input_ids"""], vocab_size=len(__snake_case ), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=__snake_case, ) return batch A__ : List[Any] =args.per_replica_batch_size * strategy.num_replicas_in_sync A__ : List[str] =prepare_dataset( __snake_case, decode_fn=__snake_case, mask_fn=__snake_case, batch_size=__snake_case, shuffle=__snake_case, shuffle_buffer_size=args.shuffle_buffer_size, ) A__ : List[str] =prepare_dataset( __snake_case, decode_fn=__snake_case, mask_fn=__snake_case, batch_size=__snake_case, shuffle=__snake_case, ) A__ : Tuple =[] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=__snake_case ) ) model.fit( __snake_case, validation_data=__snake_case, epochs=args.num_epochs, callbacks=__snake_case, ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": __snake_case : str = parse_args() main(args)
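# A minimal sketch of the shard-name convention that count_samples() above relies
# on; the shard name below is illustrative, but the "-<shard_idx>-<num_samples>.tfrecord"
# suffix is exactly what the regex parses.
import re

_example_shard = "wikitext-00003-52217.tfrecord"
assert re.search(r"-\d+-(\d+)\.tfrecord", _example_shard).group(1) == "52217"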
'''simple docstring'''


def __lowerCamelCase(__snake_case: int) -> int:
    """Return the largest value obtainable by removing exactly one digit."""
    if not isinstance(__snake_case, int):
        raise TypeError("only integers accepted as input")
    num_string = str(abs(__snake_case))
    # one digit-list copy per position, each with that position's digit removed
    num_transpositions = [list(num_string) for _ in range(len(num_string))]
    for index in range(len(num_string)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)


if __name__ == "__main__":
    __import__("doctest").testmod()
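# Usage sketch for the helper above; the sample values are illustrative.
# Deleting one digit of 152 yields 52, 12 or 15, so the maximum is 52.
print(__lowerCamelCase(152))  # 52
print(__lowerCamelCase(-152))  # 52, the sign is dropped via abs()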
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __snake_case : Union[str, Any] = { 'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Any = [ 'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST', 'FalconForCausalLM', 'FalconModel', 'FalconPreTrainedModel', 'FalconForSequenceClassification', 'FalconForTokenClassification', 'FalconForQuestionAnswering', ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys __snake_case : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''


def __lowerCamelCase(__snake_case: str) -> str:
    """Return the longest palindromic substring of the input (Manacher's algorithm)."""
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""
    # append each character + "|" for range(0, length - 1)
    for i in __snake_case[: len(__snake_case) - 1]:
        new_input_string += i + "|"
    # append the last character
    new_input_string += __snake_case[-1]
    # start and end of the previous furthest-ending palindromic substring
    l, r = 0, 0  # noqa: E741
    # length[i] is the length of the palindromic substring with center i
    length = [1 for _ in range(len(new_input_string))]
    start = 0
    # for each character in new_input_string find the corresponding palindrome
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this one
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
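# Usage sketch for the routine above; the sample strings are illustrative.
print(__lowerCamelCase("abababa"))  # abababa (the whole string is a palindrome)
print(__lowerCamelCase("cbbd"))  # bb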
'''simple docstring'''
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_tf_available, is_torch_available, ) __snake_case : Dict = { 'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'], 'processing_speech_to_text': ['Speech2TextProcessor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : int = ['Speech2TextTokenizer'] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Tuple = ['Speech2TextFeatureExtractor'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : int = [ 'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFSpeech2TextForConditionalGeneration', 'TFSpeech2TextModel', 'TFSpeech2TextPreTrainedModel', ] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : List[Any] = [ 'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Speech2TextForConditionalGeneration', 'Speech2TextModel', 'Speech2TextPreTrainedModel', ] if TYPE_CHECKING: from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig from .processing_speech_to_text import SpeechaTextProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speech_to_text import SpeechaTextTokenizer try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_speech_to_text import ( TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeechaTextForConditionalGeneration, TFSpeechaTextModel, TFSpeechaTextPreTrainedModel, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechaTextForConditionalGeneration, SpeechaTextModel, SpeechaTextPreTrainedModel, ) else: import sys __snake_case : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() __snake_case : List[Any] = logging.get_logger(__name__) def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : List[str]=False ) -> str: """simple docstring""" A__ : int =[] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """vit.embeddings.cls_token"""), ("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" A__ : int =[(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Optional[Any], __snake_case : Tuple=False ) -> Optional[Any]: """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: A__ : Any ="""""" else: A__ : Optional[int] ="""vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A__ : str =state_dict.pop(f"blocks.{i}.attn.qkv.weight" ) A__ : Optional[Any] =state_dict.pop(f"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict A__ : Optional[int] =in_proj_weight[ : config.hidden_size, : ] A__ : str =in_proj_bias[: config.hidden_size] A__ : Optional[Any] =in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A__ : Dict =in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A__ : List[Any] =in_proj_weight[ -config.hidden_size :, : ] A__ : Optional[Any] =in_proj_bias[-config.hidden_size :] def 
__lowerCamelCase ( __snake_case : Optional[Any] ) -> Union[str, Any]: """simple docstring""" A__ : List[Any] =["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(__snake_case, __snake_case ) def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : List[Any], __snake_case : List[str] ) -> Union[str, Any]: """simple docstring""" A__ : Dict =dct.pop(__snake_case ) A__ : Tuple =val def __lowerCamelCase ( ) -> int: """simple docstring""" A__ : Tuple ="""http://images.cocodataset.org/val2017/000000039769.jpg""" A__ : Tuple =Image.open(requests.get(__snake_case, stream=__snake_case ).raw ) return im @torch.no_grad() def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Tuple, __snake_case : List[str]=True ) -> str: """simple docstring""" A__ : Tuple =ViTConfig() # patch_size if model_name[-1] == "8": A__ : Optional[Any] =8 # set labels if required if not base_model: A__ : Optional[Any] =1_000 A__ : str ="""huggingface/label-files""" A__ : Any ="""imagenet-1k-id2label.json""" A__ : Tuple =json.load(open(hf_hub_download(__snake_case, __snake_case, repo_type="""dataset""" ), """r""" ) ) A__ : List[str] ={int(__snake_case ): v for k, v in idalabel.items()} A__ : List[Any] =idalabel A__ : List[Any] ={v: k for k, v in idalabel.items()} # size of the architecture if model_name in ["dino_vits8", "dino_vits16"]: A__ : str =384 A__ : Optional[Any] =1_536 A__ : Optional[Any] =12 A__ : Union[str, Any] =6 # load original model from torch hub A__ : List[Any] =torch.hub.load("""facebookresearch/dino:main""", __snake_case ) original_model.eval() # load state_dict of original model, remove and rename some keys A__ : List[str] =original_model.state_dict() if base_model: remove_classification_head_(__snake_case ) A__ : Union[str, Any] =create_rename_keys(__snake_case, base_model=__snake_case ) for src, dest in rename_keys: rename_key(__snake_case, __snake_case, __snake_case ) read_in_q_k_v(__snake_case, __snake_case, __snake_case ) # load HuggingFace model if base_model: A__ : List[str] =ViTModel(__snake_case, add_pooling_layer=__snake_case ).eval() else: A__ : List[str] =ViTForImageClassification(__snake_case ).eval() model.load_state_dict(__snake_case ) # Check outputs on an image, prepared by ViTImageProcessor A__ : Union[str, Any] =ViTImageProcessor() A__ : Optional[int] =image_processor(images=prepare_img(), return_tensors="""pt""" ) A__ : Union[str, Any] =encoding["""pixel_values"""] A__ : Union[str, Any] =model(__snake_case ) if base_model: A__ : List[str] =original_model(__snake_case ) assert torch.allclose(__snake_case, outputs.last_hidden_state[:, 0, :], atol=1E-1 ) else: A__ : Optional[int] =original_model(__snake_case ) assert logits.shape == outputs.logits.shape assert torch.allclose(__snake_case, outputs.logits, atol=1E-3 ) Path(__snake_case ).mkdir(exist_ok=__snake_case ) print(f"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(__snake_case ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(__snake_case ) if __name__ == "__main__": __snake_case : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='dino_vitb16', type=str, help='Name of the model trained with DINO you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' 
) parser.add_argument( '--base_model', action='store_true', help='Whether to only convert the base model (no projection head weights).', ) parser.set_defaults(base_model=True) __snake_case : Tuple = parser.parse_args() convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
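# A hypothetical direct call mirroring the __main__ block above; the model name
# and output folder are illustrative, and the original DINO weights are fetched
# from torch hub, so network access is required.
convert_vit_checkpoint("dino_vits8", "./dino_vits8", base_model=True)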
'''simple docstring'''
import argparse
import json

import numpy
import torch

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """Convert an original XLM checkpoint into the Hugging Face format."""
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
'''simple docstring''' import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging __snake_case : List[Any] = logging.get_logger(__name__) class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = 'linear' __snake_case = 'cosine' __snake_case = 'cosine_with_restarts' __snake_case = 'polynomial' __snake_case = 'constant' __snake_case = 'constant_with_warmup' __snake_case = 'piecewise_constant' def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int = -1 ) -> List[str]: """simple docstring""" return LambdaLR(__snake_case, lambda __snake_case : 1, last_epoch=__snake_case ) def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int = -1 ) -> Dict: """simple docstring""" def lr_lambda(__snake_case : int ): if current_step < num_warmup_steps: return float(__snake_case ) / float(max(1.0, __snake_case ) ) return 1.0 return LambdaLR(__snake_case, __snake_case, last_epoch=__snake_case ) def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : str, __snake_case : int = -1 ) -> Optional[Any]: """simple docstring""" A__ : str ={} A__ : Tuple =step_rules.split(""",""" ) for rule_str in rule_list[:-1]: A__ , A__ : int =rule_str.split(""":""" ) A__ : Optional[int] =int(__snake_case ) A__ : List[Any] =float(__snake_case ) A__ : Union[str, Any] =value A__ : int =float(rule_list[-1] ) def create_rules_function(__snake_case : int, __snake_case : Dict ): def rule_func(__snake_case : int ) -> float: A__ : Any =sorted(rules_dict.keys() ) for i, sorted_step in enumerate(__snake_case ): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func A__ : Any =create_rules_function(__snake_case, __snake_case ) return LambdaLR(__snake_case, __snake_case, last_epoch=__snake_case ) def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Dict, __snake_case : List[Any], __snake_case : Any=-1 ) -> int: """simple docstring""" def lr_lambda(__snake_case : int ): if current_step < num_warmup_steps: return float(__snake_case ) / float(max(1, __snake_case ) ) return max( 0.0, float(num_training_steps - current_step ) / float(max(1, num_training_steps - num_warmup_steps ) ) ) return LambdaLR(__snake_case, __snake_case, __snake_case ) def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int, __snake_case : float = 0.5, __snake_case : int = -1 ) -> Dict: """simple docstring""" def lr_lambda(__snake_case : Dict ): if current_step < num_warmup_steps: return float(__snake_case ) / float(max(1, __snake_case ) ) A__ : List[str] =float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) ) return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(__snake_case ) * 2.0 * progress )) ) return LambdaLR(__snake_case, __snake_case, __snake_case ) def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int, __snake_case : int = 1, __snake_case : int = -1 ) -> Dict: """simple docstring""" def lr_lambda(__snake_case : int ): if current_step < num_warmup_steps: return float(__snake_case ) / float(max(1, __snake_case ) ) A__ : Union[str, Any] =float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) ) if progress >= 1.0: return 0.0 return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(__snake_case ) * progress) % 1.0) )) ) return LambdaLR(__snake_case, __snake_case, __snake_case ) def 
__lowerCamelCase ( __snake_case : int, __snake_case : int, __snake_case : Optional[int], __snake_case : Optional[int]=1E-7, __snake_case : List[Any]=1.0, __snake_case : Any=-1 ) -> List[Any]: """simple docstring""" A__ : Optional[int] =optimizer.defaults["""lr"""] if not (lr_init > lr_end): raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})" ) def lr_lambda(__snake_case : int ): if current_step < num_warmup_steps: return float(__snake_case ) / float(max(1, __snake_case ) ) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: A__ : List[Any] =lr_init - lr_end A__ : Any =num_training_steps - num_warmup_steps A__ : Tuple =1 - (current_step - num_warmup_steps) / decay_steps A__ : List[str] =lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(__snake_case, __snake_case, __snake_case ) __snake_case : int = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def __lowerCamelCase ( __snake_case : Union[str, SchedulerType], __snake_case : Optimizer, __snake_case : Optional[str] = None, __snake_case : Optional[int] = None, __snake_case : Optional[int] = None, __snake_case : int = 1, __snake_case : float = 1.0, __snake_case : int = -1, ) -> Tuple: """simple docstring""" A__ : Tuple =SchedulerType(__snake_case ) A__ : List[Any] =TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(__snake_case, last_epoch=__snake_case ) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(__snake_case, step_rules=__snake_case, last_epoch=__snake_case ) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument." ) if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(__snake_case, num_warmup_steps=__snake_case, last_epoch=__snake_case ) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(f"{name} requires `num_training_steps`, please provide that argument." ) if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( __snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, num_cycles=__snake_case, last_epoch=__snake_case, ) if name == SchedulerType.POLYNOMIAL: return schedule_func( __snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, power=__snake_case, last_epoch=__snake_case, ) return schedule_func( __snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, last_epoch=__snake_case )
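# A minimal sketch of the warmup-then-linear-decay pattern the schedulers above
# implement, driven directly through LambdaLR with a toy optimizer; the single
# parameter and the step counts are illustrative.
import torch
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR as _LambdaLR

_param = torch.nn.Parameter(torch.zeros(1))
_optimizer = AdamW([_param], lr=1e-3)
_warmup, _total = 10, 100


def _linear_with_warmup(step: int) -> float:
    # Ramp up linearly during warmup, then decay linearly to zero.
    if step < _warmup:
        return float(step) / float(max(1, _warmup))
    return max(0.0, float(_total - step) / float(max(1, _total - _warmup)))


_scheduler = _LambdaLR(_optimizer, _linear_with_warmup)
for _ in range(_total):
    _optimizer.step()
    _scheduler.step()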
'''simple docstring''' from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class lowerCamelCase : '''simple docstring''' __snake_case = BlenderbotConfig __snake_case = {} __snake_case = 'gelu' def __init__( self : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str=13 , lowerCAmelCase_ : Dict=7 , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : List[Any]=99 , lowerCAmelCase_ : Optional[Any]=32 , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : Optional[Any]=37 , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : Tuple=20 , lowerCAmelCase_ : Optional[int]=2 , lowerCAmelCase_ : List[Any]=1 , lowerCAmelCase_ : int=0 , ) -> Optional[Any]: '''simple docstring''' A__ : List[str] =parent A__ : Optional[Any] =batch_size A__ : str =seq_length A__ : int =is_training A__ : int =use_labels A__ : str =vocab_size A__ : List[Any] =hidden_size A__ : str =num_hidden_layers A__ : Any =num_attention_heads A__ : Any =intermediate_size A__ : Optional[int] =hidden_dropout_prob A__ : List[Any] =attention_probs_dropout_prob A__ : Tuple =max_position_embeddings A__ : Tuple =eos_token_id A__ : Optional[Any] =pad_token_id A__ : List[Any] =bos_token_id def lowercase__ ( self : Optional[Any] ) -> int: '''simple docstring''' A__ : Any =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) A__ : Tuple =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) A__ : List[Any] =tf.concat([input_ids, eos_tensor] , axis=1 ) A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ : str =self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) A__ : Any =prepare_blenderbot_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) return config, inputs_dict def lowercase__ ( self : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[Any] ) -> List[Any]: '''simple docstring''' A__ : Dict =TFBlenderbotModel(config=lowerCAmelCase_ ).get_decoder() A__ : str =inputs_dict["""input_ids"""] A__ : str =input_ids[:1, :] A__ : Optional[Any] =inputs_dict["""attention_mask"""][:1, :] A__ : Optional[Any] =inputs_dict["""head_mask"""] A__ : Union[str, Any] =1 # first forward pass A__ : List[str] =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ ) A__ , A__ : Dict 
=outputs.to_tuple() # create hypothetical next token and extent to next_input_ids A__ : int =ids_tensor((self.batch_size, 3) , config.vocab_size ) A__ : Tuple =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and A__ : str =tf.concat([input_ids, next_tokens] , axis=-1 ) A__ : Optional[int] =tf.concat([attention_mask, next_attn_mask] , axis=-1 ) A__ : int =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0] A__ : List[str] =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice A__ : List[Any] =int(ids_tensor((1,) , output_from_past.shape[-1] ) ) A__ : Tuple =output_from_no_past[:, -3:, random_slice_idx] A__ : List[str] =output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowerCAmelCase_ , lowerCAmelCase_ , rtol=1e-3 ) def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Union[str, Any], __snake_case : str, __snake_case : Optional[Any]=None, __snake_case : Dict=None, __snake_case : Optional[Any]=None, __snake_case : List[str]=None, __snake_case : Dict=None, ) -> str: """simple docstring""" if attention_mask is None: A__ : Tuple =tf.cast(tf.math.not_equal(__snake_case, config.pad_token_id ), tf.inta ) if decoder_attention_mask is None: A__ : Dict =tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.inta ), ], axis=-1, ) if head_mask is None: A__ : int =tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: A__ : List[str] =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: A__ : Optional[int] =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class lowerCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ): '''simple docstring''' __snake_case = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () __snake_case = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else () __snake_case = ( { 'conversational': TFBlenderbotForConditionalGeneration, 'feature-extraction': TFBlenderbotModel, 'summarization': TFBlenderbotForConditionalGeneration, 'text2text-generation': TFBlenderbotForConditionalGeneration, 'translation': TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) __snake_case = True __snake_case = False __snake_case = False def lowercase__ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' A__ : List[str] =TFBlenderbotModelTester(self ) A__ : List[str] =ConfigTester(self , config_class=lowerCAmelCase_ ) def lowercase__ ( self : Union[str, Any] ) -> Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def lowercase__ ( self : Optional[Any] ) -> str: '''simple docstring''' A__ : Dict =self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase_ ) @require_tokenizers @require_tf class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' __snake_case = ['My friends are cool but they eat too many 
carbs.'] __snake_case = 'facebook/blenderbot-400M-distill' @cached_property def lowercase__ ( self : Any ) -> Any: '''simple docstring''' return BlenderbotTokenizer.from_pretrained(self.model_name ) @cached_property def lowercase__ ( self : List[Any] ) -> Dict: '''simple docstring''' A__ : str =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def lowercase__ ( self : Union[str, Any] ) -> Any: '''simple docstring''' A__ : Optional[int] =self.tokenizer(self.src_text , return_tensors="""tf""" ) A__ : Optional[int] =self.model.generate( model_inputs.input_ids , ) A__ : str =self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowerCAmelCase_ )[0] assert ( generated_words == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?" )
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __snake_case : List[str] = { 'configuration_squeezebert': [ 'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SqueezeBertConfig', 'SqueezeBertOnnxConfig', ], 'tokenization_squeezebert': ['SqueezeBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Optional[Any] = ['SqueezeBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : int = [ 'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'SqueezeBertForMaskedLM', 'SqueezeBertForMultipleChoice', 'SqueezeBertForQuestionAnswering', 'SqueezeBertForSequenceClassification', 'SqueezeBertForTokenClassification', 'SqueezeBertModel', 'SqueezeBertModule', 'SqueezeBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_squeezebert import ( SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertOnnxConfig, ) from .tokenization_squeezebert import SqueezeBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_squeezebert import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, SqueezeBertModule, SqueezeBertPreTrainedModel, ) else: import sys __snake_case : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring''' import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging __snake_case : Any = logging.get_logger(__name__) # pylint: disable=invalid-name class lowerCamelCase ( lowercase_ ): '''simple docstring''' def __init__( self : Dict , lowerCAmelCase_ : AutoencoderKL , lowerCAmelCase_ : CLIPTextModel , lowerCAmelCase_ : CLIPTokenizer , lowerCAmelCase_ : UNetaDConditionModel , lowerCAmelCase_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCAmelCase_ : StableDiffusionSafetyChecker , lowerCAmelCase_ : CLIPImageProcessor , ) -> str: '''simple docstring''' super().__init__() self.register_modules( vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , ) def lowercase__ ( self : List[Any] , lowerCAmelCase_ : Optional[Union[str, int]] = "auto" ) -> Optional[int]: '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory A__ : Dict =self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(lowerCAmelCase_ ) def lowercase__ ( self : Union[str, Any] ) -> str: '''simple docstring''' self.enable_attention_slicing(lowerCAmelCase_ ) @torch.no_grad() def __call__( self : Optional[Any] , lowerCAmelCase_ : Union[str, List[str]] , lowerCAmelCase_ : int = 5_12 , lowerCAmelCase_ : int = 5_12 , lowerCAmelCase_ : int = 50 , lowerCAmelCase_ : float = 7.5 , lowerCAmelCase_ : Optional[Union[str, List[str]]] = None , lowerCAmelCase_ : Optional[int] = 1 , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : Optional[torch.Generator] = None , lowerCAmelCase_ : Optional[torch.FloatTensor] = None , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : Optional[torch.FloatTensor] = None , **lowerCAmelCase_ : Any , ) -> Tuple: '''simple docstring''' if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): A__ : List[str] =1 elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): A__ : List[Any] =len(lowerCAmelCase_ ) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(lowerCAmelCase_ )}" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(lowerCAmelCase_ )}." 
) # get prompt text embeddings A__ : Optional[Any] =self.tokenizer( lowerCAmelCase_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) A__ : str =text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: A__ : List[Any] =self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) A__ : Optional[int] =text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: A__ : Optional[int] =self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method A__ , A__ , A__ : Tuple =text_embeddings.shape A__ : Optional[int] =text_embeddings.repeat(1 , lowerCAmelCase_ , 1 ) A__ : Dict =text_embeddings.view(bs_embed * num_images_per_prompt , lowerCAmelCase_ , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. A__ : int =guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: A__ : List[str] if negative_prompt is None: A__ : Union[str, Any] =[""""""] elif type(lowerCAmelCase_ ) is not type(lowerCAmelCase_ ): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(lowerCAmelCase_ )} !=" f" {type(lowerCAmelCase_ )}." ) elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): A__ : List[Any] =[negative_prompt] elif batch_size != len(lowerCAmelCase_ ): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(lowerCAmelCase_ )}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" """ the batch size of `prompt`.""" ) else: A__ : Dict =negative_prompt A__ : int =text_input_ids.shape[-1] A__ : Union[str, Any] =self.tokenizer( lowerCAmelCase_ , padding="""max_length""" , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors="""pt""" , ) A__ : Dict =self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method A__ : Optional[int] =uncond_embeddings.shape[1] A__ : Optional[Any] =uncond_embeddings.repeat(lowerCAmelCase_ , lowerCAmelCase_ , 1 ) A__ : int =uncond_embeddings.view(batch_size * num_images_per_prompt , lowerCAmelCase_ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes A__ : Any =torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
A__ : str =(batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) A__ : Optional[Any] =(batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) A__ : Any =text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps A__ : int =torch.randn( lowerCAmelCase_ , generator=lowerCAmelCase_ , device="""cpu""" , dtype=lowerCAmelCase_ ).to(self.device ) A__ : Tuple =torch.randn(lowerCAmelCase_ , generator=lowerCAmelCase_ , device="""cpu""" , dtype=lowerCAmelCase_ ).to( self.device ) else: A__ : str =torch.randn( lowerCAmelCase_ , generator=lowerCAmelCase_ , device=self.device , dtype=lowerCAmelCase_ ) A__ : Union[str, Any] =torch.randn(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=self.device , dtype=lowerCAmelCase_ ) else: if latents_reference.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" ) A__ : List[str] =latents_reference.to(self.device ) A__ : Union[str, Any] =latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images A__ : Union[str, Any] =(latents_shape[3] - latents_shape_reference[3]) // 2 A__ : int =(latents_shape[2] - latents_shape_reference[2]) // 2 A__ : Dict =latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx A__ : Any =latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy A__ : Optional[Any] =0 if dx < 0 else dx A__ : Optional[Any] =0 if dy < 0 else dy A__ : Tuple =max(-dx , 0 ) A__ : List[str] =max(-dy , 0 ) # import pdb # pdb.set_trace() A__ : Union[str, Any] =latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(lowerCAmelCase_ ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand A__ : List[Any] =self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler A__ : Optional[Any] =latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] A__ : int ="""eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) A__ : Dict ={} if accepts_eta: A__ : List[Any] =eta for i, t in enumerate(self.progress_bar(lowerCAmelCase_ ) ): # expand the latents if we are doing classifier free guidance A__ : Union[str, Any] =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents A__ : Dict =self.scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ ) # predict the noise residual A__ : Optional[int] =self.unet(lowerCAmelCase_ , lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ ).sample # perform guidance if do_classifier_free_guidance: A__ , A__ : List[Any] =noise_pred.chunk(2 ) A__ : Union[str, Any] =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 A__ : List[Any] =self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) A__ : Dict =1 / 0.18215 * latents A__ : Union[str, Any] =self.vae.decode(lowerCAmelCase_ ).sample A__ : Union[str, Any] =(image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 A__ : Tuple =image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: A__ : int =self.feature_extractor(self.numpy_to_pil(lowerCAmelCase_ ) , return_tensors="""pt""" ).to( self.device ) A__ , A__ : int =self.safety_checker( images=lowerCAmelCase_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: A__ : Optional[int] =None if output_type == "pil": A__ : Union[str, Any] =self.numpy_to_pil(lowerCAmelCase_ ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=lowerCAmelCase_ , nsfw_content_detected=lowerCAmelCase_ )
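# A hypothetical way to load and run the pipeline class above; the checkpoint id
# and the community-pipeline name are assumptions, the prompt is illustrative,
# and the weights are pulled from the Hub.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", custom_pipeline="seed_resize_stable_diffusion"
)
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
generator = torch.Generator(device=pipe.device).manual_seed(0)
image = pipe(
    "a photo of an astronaut riding a horse", height=512, width=512, generator=generator
).images[0]
image.save("astronaut.png")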
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __snake_case : Optional[int] = { 'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'], 'tokenization_convbert': ['ConvBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Tuple = ['ConvBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : int = [ 'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConvBertForMaskedLM', 'ConvBertForMultipleChoice', 'ConvBertForQuestionAnswering', 'ConvBertForSequenceClassification', 'ConvBertForTokenClassification', 'ConvBertLayer', 'ConvBertModel', 'ConvBertPreTrainedModel', 'load_tf_weights_in_convbert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Union[str, Any] = [ 'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFConvBertForMaskedLM', 'TFConvBertForMultipleChoice', 'TFConvBertForQuestionAnswering', 'TFConvBertForSequenceClassification', 'TFConvBertForTokenClassification', 'TFConvBertLayer', 'TFConvBertModel', 'TFConvBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig from .tokenization_convbert import ConvBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_convbert_fast import ConvBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convbert import ( CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convbert import ( TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) else: import sys __snake_case : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import gc
import threading
import time

import psutil
import torch


class PeakCPUMemory:
    '''simple docstring'''

    def __init__(self) -> None:
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self) -> None:
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self) -> None:
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self) -> int:
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()
    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem (deltas reported in MiB)
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
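# A short sketch of bracketing a workload with the helpers above; the matrix
# multiplication is a stand-in for any model call, and like the helpers it
# assumes a CUDA-capable environment for the GPU counters.
_start = start_measure()
_x = torch.randn(1024, 1024)
_y = _x @ _x
_measures = end_measure(_start)
log_measures(_measures, "toy matmul")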
'''simple docstring''' import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowercase__ ( self : Optional[Any] ) -> int: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() def lowercase__ ( self : Union[str, Any] ) -> str: '''simple docstring''' A__ : Any =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) A__ : Optional[Any] =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) A__ : Optional[int] ="""xvjiarui/stable-diffusion-2-inpainting""" A__ , A__ : List[str] =FlaxStableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase_ , safety_checker=lowerCAmelCase_ ) A__ : List[str] ="""Face of a yellow cat, high resolution, sitting on a park bench""" A__ : Optional[Any] =jax.random.PRNGKey(0 ) A__ : List[str] =50 A__ : List[str] =jax.device_count() A__ : List[str] =num_samples * [prompt] A__ : List[str] =num_samples * [init_image] A__ : Tuple =num_samples * [mask_image] A__ , A__ , A__ : List[Any] =pipeline.prepare_inputs(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # shard inputs and rng A__ : Dict =replicate(lowerCAmelCase_ ) A__ : Union[str, Any] =jax.random.split(lowerCAmelCase_ , jax.device_count() ) A__ : List[Any] =shard(lowerCAmelCase_ ) A__ : Union[str, Any] =shard(lowerCAmelCase_ ) A__ : str =shard(lowerCAmelCase_ ) A__ : List[str] =pipeline( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ) A__ : List[Any] =output.images.reshape(lowerCAmelCase_ , 5_12 , 5_12 , 3 ) A__ : str =images[0, 2_53:2_56, 2_53:2_56, -1] A__ : Tuple =jnp.asarray(jax.device_get(image_slice.flatten() ) ) A__ : Optional[int] =jnp.array( [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] ) print(f"output_slice: {output_slice}" ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
'''simple docstring''' import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters __snake_case : List[str] = logging.get_logger(__name__) def __lowerCamelCase ( __snake_case : List[Any], __snake_case : int, __snake_case : List[str], __snake_case : List[Any]=None, __snake_case : Dict=None ) -> int: """simple docstring""" if "." in tensor_name: A__ : str =tensor_name.split(""".""" ) for split in splits[:-1]: A__ : Optional[int] =getattr(__snake_case, __snake_case ) if new_module is None: raise ValueError(f"{module} has no attribute {split}." ) A__ : Optional[Any] =new_module A__ : Dict =splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}." ) A__ : str =tensor_name in module._buffers A__ : Tuple =getattr(__snake_case, __snake_case ) if old_value.device == torch.device("""meta""" ) and device not in ["meta", torch.device("""meta""" )] and value is None: raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}." ) A__ : int =False A__ : Tuple =False if is_buffer or not is_bitsandbytes_available(): A__ : int =False A__ : Optional[Any] =False else: A__ : Optional[int] =hasattr(bnb.nn, """Params4bit""" ) and isinstance(module._parameters[tensor_name], bnb.nn.Paramsabit ) A__ : Tuple =isinstance(module._parameters[tensor_name], bnb.nn.IntaParams ) if is_abit or is_abit: A__ : str =module._parameters[tensor_name] if param.device.type != "cuda": if value is None: A__ : Optional[Any] =old_value.to(__snake_case ) elif isinstance(__snake_case, torch.Tensor ): A__ : Union[str, Any] =value.to("""cpu""" ) if value.dtype == torch.inta: A__ : Optional[int] =version.parse(importlib.metadata.version("""bitsandbytes""" ) ) > version.parse( """0.37.2""" ) if not is_abit_serializable: raise ValueError( """Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. """ """Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.""" ) else: A__ : int =torch.tensor(__snake_case, device="""cpu""" ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. 
if issubclass(module.source_cls, __snake_case ) and fp16_statistics is None: A__ : List[Any] =new_value.T A__ : Optional[Any] =old_value.__dict__ if is_8bit: A__ : Tuple =bnb.nn.Int8Params(__snake_case, requires_grad=__snake_case, **__snake_case ).to(__snake_case ) elif is_4bit: A__ : List[Any] =bnb.nn.Params4bit(__snake_case, requires_grad=__snake_case, **__snake_case ).to(__snake_case ) A__ : Union[str, Any] =new_value if fp16_statistics is not None: setattr(module.weight, """SCB""", fp16_statistics.to(__snake_case ) ) else: if value is None: A__ : List[Any] =old_value.to(__snake_case ) elif isinstance(__snake_case, torch.Tensor ): A__ : List[str] =value.to(__snake_case ) else: A__ : Union[str, Any] =torch.tensor(__snake_case, device=__snake_case ) if is_buffer: A__ : Optional[int] =new_value else: A__ : List[Any] =nn.Parameter(__snake_case, requires_grad=old_value.requires_grad ) A__ : Tuple =new_value def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Union[str, Any]=None, __snake_case : Dict=None, __snake_case : Union[str, Any]=None, __snake_case : List[Any]=False ) -> int: """simple docstring""" for name, module in model.named_children(): if current_key_name is None: A__ : List[str] =[] current_key_name.append(__snake_case ) if (isinstance(__snake_case, nn.Linear ) or isinstance(__snake_case, __snake_case )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in """.""".join(__snake_case ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(__snake_case, __snake_case ): A__ , A__ : Tuple =module.weight.shape else: A__ : Union[str, Any] =module.in_features A__ : Optional[int] =module.out_features if quantization_config.quantization_method() == "llm_int8": A__ : Union[str, Any] =bnb.nn.Linear8bitLt( __snake_case, __snake_case, module.bias is not None, has_fp16_weights=quantization_config.llm_int8_has_fp16_weight, threshold=quantization_config.llm_int8_threshold, ) A__ : Any =True else: if ( quantization_config.llm_int8_skip_modules is not None and name in quantization_config.llm_int8_skip_modules ): pass else: A__ : Union[str, Any] =bnb.nn.Linear4bit( __snake_case, __snake_case, module.bias is not None, quantization_config.bnb_4bit_compute_dtype, compress_statistics=quantization_config.bnb_4bit_use_double_quant, quant_type=quantization_config.bnb_4bit_quant_type, ) A__ : Union[str, Any] =True # Store the module class in case we need to transpose the weight later A__ : Tuple =type(__snake_case ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(__snake_case ) if len(list(module.children() ) ) > 0: A__ , A__ : int =_replace_with_bnb_linear( __snake_case, __snake_case, __snake_case, __snake_case, has_been_replaced=__snake_case, ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def __lowerCamelCase ( __snake_case : int, __snake_case : Any=None, __snake_case : int=None, __snake_case : Optional[Any]=None ) -> List[str]: """simple docstring""" A__ : Any =["""lm_head"""] if modules_to_not_convert is None else modules_to_not_convert A__ , A__ : Optional[int] =_replace_with_bnb_linear( __snake_case, __snake_case, __snake_case, __snake_case ) if not has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return
model def __lowerCamelCase ( *__snake_case : Union[str, Any], **__snake_case : List[str] ) -> List[str]: """simple docstring""" warnings.warn( """`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead""", FutureWarning, ) return replace_with_bnb_linear(*__snake_case, **__snake_case ) def __lowerCamelCase ( *__snake_case : Union[str, Any], **__snake_case : Dict ) -> Tuple: """simple docstring""" warnings.warn( """`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead""", FutureWarning, ) return set_module_quantized_tensor_to_device(*__snake_case, **__snake_case ) def __lowerCamelCase ( __snake_case : Optional[int] ) -> List[str]: """simple docstring""" A__ : Tuple =deepcopy(__snake_case ) # this has 0 cost since it is done inside `init_empty_weights` context manager tied_model.tie_weights() A__ : Optional[Any] =find_tied_parameters(__snake_case ) # For compatibility with Accelerate < 0.18 if isinstance(__snake_case, __snake_case ): A__ : Union[str, Any] =sum(list(tied_params.values() ), [] ) + list(tied_params.keys() ) else: A__ : Optional[Any] =sum(__snake_case, [] ) A__ : List[str] =len(__snake_case ) > 0 # Check if it is a base model A__ : Optional[Any] =not hasattr(__snake_case, model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head A__ : List[Any] =list(model.named_children() ) A__ : Union[str, Any] =[list_modules[-1][0]] # add last module together with tied weights A__ : Optional[int] =set(__snake_case ) - set(__snake_case ) A__ : Union[str, Any] =list(set(__snake_case ) ) + list(__snake_case ) # remove ".weight" from the keys A__ : Dict =[""".weight""", """.bias"""] A__ : Union[str, Any] =[] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: A__ : List[Any] =name.replace(__snake_case, """""" ) filtered_module_names.append(__snake_case ) return filtered_module_names
687
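A minimal sketch of the dotted-name traversal the first helper above (the one the `set_module_quantized_tensor_to_device` shim points to) performs before swapping a tensor in place; the toy model and names below are illustrative, not part of the original:

import torch.nn as nn

def get_owner_and_leaf(model: nn.Module, tensor_name: str):
    # Walk "0.weight" down to the Linear that owns it, mirroring the
    # splits[:-1] loop in the helper above.
    module = model
    if "." in tensor_name:
        *parents, tensor_name = tensor_name.split(".")
        for part in parents:
            module = getattr(module, part)
    return module, tensor_name

model = nn.Sequential(nn.Linear(4, 4))
owner, leaf = get_owner_and_leaf(model, "0.weight")
assert leaf in owner._parameters  # the leaf name indexes the owner's parameter dict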
'''simple docstring''' import copy from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING __snake_case : List[Any] = logging.get_logger(__name__) __snake_case : Dict = { 'microsoft/conditional-detr-resnet-50': ( 'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json' ), } class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = 'conditional_detr' __snake_case = ['past_key_values'] __snake_case = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : int , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : Tuple=3_00 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : str=20_48 , lowerCAmelCase_ : Union[str, Any]=8 , lowerCAmelCase_ : Any=6 , lowerCAmelCase_ : Any=20_48 , lowerCAmelCase_ : Union[str, Any]=8 , lowerCAmelCase_ : str=0.0 , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Optional[Any]="relu" , lowerCAmelCase_ : Union[str, Any]=2_56 , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : Optional[Any]=1.0 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : List[Any]="sine" , lowerCAmelCase_ : Optional[int]="resnet50" , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : Optional[Any]=5 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Any=5 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : int=0.25 , **lowerCAmelCase_ : int , ) -> Dict: '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.""" ) A__ : Optional[int] =CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] ) elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): A__ : Tuple =backbone_config.get("""model_type""" ) A__ : List[str] =CONFIG_MAPPING[backbone_model_type] A__ : Dict =config_class.from_dict(lowerCAmelCase_ ) A__ : int =use_timm_backbone A__ : List[Any] =backbone_config A__ : Optional[int] =num_channels A__ : Optional[int] =num_queries A__ : Union[str, Any] =d_model A__ : Optional[int] =encoder_ffn_dim A__ : Optional[Any] =encoder_layers A__ : int =encoder_attention_heads A__ : Optional[Any] =decoder_ffn_dim A__ : Tuple =decoder_layers A__ : Optional[Any] =decoder_attention_heads A__ : Tuple =dropout A__ : int =attention_dropout A__ : Dict =activation_dropout A__ : Union[str, Any] =activation_function A__ : List[str] =init_std A__ : str =init_xavier_std A__ : int =encoder_layerdrop A__ : List[Any] =decoder_layerdrop A__ : Tuple =encoder_layers A__ : Tuple =auxiliary_loss A__ : List[Any] =position_embedding_type A__ : int =backbone A__ : Optional[int] =use_pretrained_backbone A__ : str =dilation # Hungarian matcher A__ : Any =class_cost A__ : str =bbox_cost A__ : str =giou_cost # Loss coefficients A__ : Union[str, Any] =mask_loss_coefficient A__ : int =dice_loss_coefficient A__ : Union[str, Any] =cls_loss_coefficient A__ : List[str] =bbox_loss_coefficient A__ : str =giou_loss_coefficient A__ : Optional[Any] =focal_alpha super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ ) @property def lowercase__ ( self : str ) -> int: '''simple docstring''' return self.encoder_attention_heads @property def lowercase__ ( self : Any ) -> int: '''simple docstring''' return self.d_model def lowercase__ ( self : Dict ) -> Union[str, Any]: '''simple docstring''' A__ : int =copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: A__ : str =self.backbone_config.to_dict() A__ : int =self.__class__.model_type return output class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = version.parse('1.11' ) @property def lowercase__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""pixel_mask""", {0: """batch"""}), ] ) @property def lowercase__ ( self : Any ) -> float: '''simple docstring''' return 1e-5 @property def lowercase__ ( self : Any ) -> int: '''simple docstring''' return 12
687
1
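One practical effect of the attribute_map declared above: common names alias onto the DETR-style fields, so hidden_size transparently reads d_model. A quick check, assuming the class is exposed under its public transformers name:

from transformers import ConditionalDetrConfig

config = ConditionalDetrConfig(d_model=256)
assert config.hidden_size == 256  # resolved through attribute_map, not a stored attribute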
'''simple docstring''' def __lowerCamelCase ( __snake_case : list ) -> list: """simple docstring""" if len(__snake_case ) <= 1: return [tuple(__snake_case )] A__ : List[Any] =[] def generate(__snake_case : int, __snake_case : list ): A__ : Optional[int] =[0] * n res.append(tuple(__snake_case ) ) A__ : List[str] =0 while i < n: if c[i] < i: if i % 2 == 0: A__ , A__ : Union[str, Any] =arr[i], arr[0] else: A__ , A__ : int =arr[i], arr[c[i]] res.append(tuple(__snake_case ) ) c[i] += 1 A__ : Union[str, Any] =0 else: A__ : Tuple =0 i += 1 generate(len(__snake_case ), __snake_case ) return res if __name__ == "__main__": __snake_case : Optional[Any] = input('Enter numbers separated by a comma:\n').strip() __snake_case : Tuple = [int(item) for item in user_input.split(',')] print(heaps(arr))
687
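A quick usage check of the iterative Heap's algorithm above, assuming the function is bound to the name heaps used in its __main__ block: n distinct elements should yield n! distinct permutations.

perms = heaps([1, 2, 3])
assert len(perms) == 6 and len(set(perms)) == 6  # 3! unique orderings
print(perms[0])  # (1, 2, 3) -- the untouched input order comes first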
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __snake_case : Union[str, Any] = logging.get_logger(__name__) __snake_case : Optional[int] = { 'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json', } class lowerCamelCase ( lowercase_ , lowercase_ ): '''simple docstring''' __snake_case = 'bit' __snake_case = ['preactivation', 'bottleneck'] __snake_case = ['SAME', 'VALID'] def __init__( self : List[str] , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : int=64 , lowerCAmelCase_ : Optional[int]=[2_56, 5_12, 10_24, 20_48] , lowerCAmelCase_ : str=[3, 4, 6, 3] , lowerCAmelCase_ : Optional[Any]="preactivation" , lowerCAmelCase_ : str="relu" , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Dict=32 , lowerCAmelCase_ : Tuple=0.0 , lowerCAmelCase_ : int=False , lowerCAmelCase_ : Optional[Any]=32 , lowerCAmelCase_ : Tuple=1 , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : int , ) -> Optional[Any]: '''simple docstring''' super().__init__(**lowerCAmelCase_ ) if layer_type not in self.layer_types: raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" ) if global_padding is not None: if global_padding.upper() in self.supported_padding: A__ : List[Any] =global_padding.upper() else: raise ValueError(f"Padding strategy {global_padding} not supported" ) A__ : List[Any] =num_channels A__ : Tuple =embedding_size A__ : Union[str, Any] =hidden_sizes A__ : List[str] =depths A__ : Optional[Any] =layer_type A__ : int =hidden_act A__ : int =global_padding A__ : int =num_groups A__ : str =drop_path_rate A__ : str =embedding_dynamic_padding A__ : Dict =output_stride A__ : Optional[int] =width_factor A__ : List[str] =["""stem"""] + [f"stage{idx}" for idx in range(1 , len(lowerCAmelCase_ ) + 1 )] A__ , A__ : Union[str, Any] =get_aligned_output_features_output_indices( out_features=lowerCAmelCase_ , out_indices=lowerCAmelCase_ , stage_names=self.stage_names )
687
1
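The stage bookkeeping at the end of the config above pairs human-readable stage names with indices before aligning out_features; in isolation (a pure-Python sketch, not the transformers helper it calls):

depths = [3, 4, 6, 3]
stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
out_features = ["stage4"]  # hypothetical request for the last stage only
out_indices = [stage_names.index(name) for name in out_features]
print(stage_names, out_indices)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4'] [4]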
'''simple docstring''' from typing import Any class lowerCamelCase : '''simple docstring''' def __init__( self : Union[str, Any] , lowerCAmelCase_ : Any ) -> Tuple: '''simple docstring''' A__ : int =data A__ : int =None class lowerCamelCase : '''simple docstring''' def __init__( self : Dict ) -> Union[str, Any]: '''simple docstring''' A__ : str =None def lowercase__ ( self : Union[str, Any] ) -> Tuple: '''simple docstring''' A__ : str =self.head while temp is not None: print(temp.data , end=""" """ ) A__ : Dict =temp.next print() def lowercase__ ( self : Dict , lowerCAmelCase_ : Any ) -> Optional[Any]: '''simple docstring''' A__ : Union[str, Any] =Node(lowerCAmelCase_ ) A__ : Tuple =self.head A__ : int =new_node def lowercase__ ( self : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] ) -> Optional[Any]: '''simple docstring''' if node_data_a == node_data_a: return else: A__ : Tuple =self.head while node_a is not None and node_a.data != node_data_a: A__ : Dict =node_a.next A__ : Optional[Any] =self.head while node_a is not None and node_a.data != node_data_a: A__ : Any =node_a.next if node_a is None or node_a is None: return A__ , A__ : str =node_a.data, node_a.data if __name__ == "__main__": __snake_case : Optional[int] = LinkedList() for i in range(5, 0, -1): ll.push(i) ll.print_list() ll.swap_nodes(1, 4) print('After swapping') ll.print_list()
687
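Note that swap_nodes above exchanges node payloads rather than relinking nodes; a two-element check using the same public names as the __main__ block:

ll = LinkedList()
ll.push(2)
ll.push(1)       # push prepends, so the list reads 1 2
ll.swap_nodes(1, 2)
ll.print_list()  # 2 1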
'''simple docstring''' import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin __snake_case : int = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.plbart.modeling_plbart import shift_tokens_right __snake_case : List[str] = 5_0003 __snake_case : Dict = 5_0002 @require_sentencepiece @require_tokenizers class lowerCamelCase ( lowercase_ , unittest.TestCase ): '''simple docstring''' __snake_case = PLBartTokenizer __snake_case = None __snake_case = False def lowercase__ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing A__ : Tuple =PLBartTokenizer(lowerCAmelCase_ , language_codes="""base""" , keep_accents=lowerCAmelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase__ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' A__ : Union[str, Any] =PLBartTokenizer(lowerCAmelCase_ , language_codes="""base""" , keep_accents=lowerCAmelCase_ ) A__ : Optional[Any] =tokenizer.tokenize("""This is a test""" ) self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) A__ : Tuple =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) A__ : Any =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) A__ : str =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) A__ : Optional[Any] =tokenizer.vocab_size A__ : Dict =[tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 4 , lowerCAmelCase_ )] self.assertListEqual(lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] ) A__ : Dict ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go""" A__ : int =tokenizer(lowerCAmelCase_ ).input_ids self.assertEqual( tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , ) def lowercase__ ( self : Any ) -> str: '''simple docstring''' A__ : int =PLBartTokenizer(lowerCAmelCase_ , language_codes="""multi""" , keep_accents=lowerCAmelCase_ ) A__ : Dict 
=tokenizer.tokenize("""This is a test""" ) self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) A__ : Dict =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) A__ : str =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) A__ : Dict =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) A__ : Tuple =tokenizer.vocab_size A__ : Dict =[tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 7 , lowerCAmelCase_ )] self.assertListEqual( lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] ) A__ : Any ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go""" A__ : int =tokenizer(lowerCAmelCase_ ).input_ids self.assertEqual( tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , ) @require_torch @require_sentencepiece @require_tokenizers class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' __snake_case = 'uclanlp/plbart-python-en_XX' __snake_case = [ 'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])', 'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])', ] __snake_case = [ 'Returns the maximum value of a b c.', 'Sums the values of a b c.', ] __snake_case = [ 134, 5452, 3_3460, 3_3441, 3_3463, 3_3465, 3_3463, 3_3449, 988, 20, 3_3456, 19, 3_3456, 771, 39, 4258, 889, 3318, 3_3441, 3_3463, 3_3465, 3_3463, 3_3449, 2471, 2, PYTHON_CODE, ] @classmethod def lowercase__ ( cls : Optional[int] ) -> str: '''simple docstring''' A__ : PLBartTokenizer =PLBartTokenizer.from_pretrained( cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" ) A__ : Optional[Any] =1 return cls def lowercase__ ( self : str ) -> Optional[Any]: '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 5_00_01 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 5_00_02 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 5_00_03 ) def lowercase__ ( self : int ) -> List[str]: '''simple docstring''' A__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ ) def lowercase__ ( self : int ) -> 
Optional[int]: '''simple docstring''' self.assertIn(lowerCAmelCase_ , self.tokenizer.all_special_ids ) A__ : Tuple =[EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2] A__ : Any =self.tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ ) A__ : Optional[int] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase_ ) def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' A__ : Optional[int] =["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20] self.assertIsInstance(src_text[0] , lowerCAmelCase_ ) A__ : str =10 A__ : Optional[Any] =self.tokenizer(lowerCAmelCase_ , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , lowerCAmelCase_ ) self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ ) def lowercase__ ( self : str ) -> List[Any]: '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [5_00_04, 5_00_01] ) def lowercase__ ( self : Tuple ) -> str: '''simple docstring''' A__ : Tuple =tempfile.mkdtemp() A__ : Tuple =self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowerCAmelCase_ ) A__ : Optional[Any] =PLBartTokenizer.from_pretrained(lowerCAmelCase_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase_ ) @require_torch def lowercase__ ( self : Any ) -> Any: '''simple docstring''' A__ : List[str] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , return_tensors="""pt""" ) A__ : str =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] ) self.assertEqual(batch.decoder_input_ids[1][0] , lowerCAmelCase_ ) self.assertEqual(batch.decoder_input_ids[1][-1] , 2 ) self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] ) @require_torch def lowercase__ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' A__ : Union[str, Any] =self.tokenizer( self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , ) A__ : Any =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual((2, 26) , batch.input_ids.shape ) self.assertEqual((2, 26) , batch.attention_mask.shape ) A__ : List[Any] =batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] ) def lowercase__ ( self : Any ) -> Dict: '''simple docstring''' A__ : Any =self.tokenizer(self.src_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=3 , return_tensors="""pt""" ) A__ : Optional[int] =self.tokenizer( text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=10 , return_tensors="""pt""" ) A__ : Optional[Any] =targets["""input_ids"""] A__ : List[str] =shift_tokens_right(lowerCAmelCase_ , self.tokenizer.pad_token_id ) 
self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def lowercase__ ( self : Any ) -> str: '''simple docstring''' A__ : Any =self.tokenizer._build_translation_inputs( """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" ) self.assertEqual( nested_simplify(lowerCAmelCase_ ) , { # A, test, EOS, en_XX """input_ids""": [[1_50, 2_42, 2, 5_00_03]], """attention_mask""": [[1, 1, 1, 1]], # java """forced_bos_token_id""": 5_00_01, } , )
687
1
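The shift_tokens_right helper exercised by the batch tests above rotates the language code from the end of the labels to the front to build decoder inputs. A rough stand-alone sketch of that MBart/PLBart-style behavior (token ids illustrative, not the library's implementation):

import torch

def shift_tokens_right_sketch(input_ids: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    prev_tokens = input_ids.clone()
    # Position of the last non-pad token (the language code) in each row.
    index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    decoder_start = input_ids.gather(1, index_of_eos).squeeze(1)
    prev_tokens[:, 1:] = input_ids[:, :-1].clone()
    prev_tokens[:, 0] = decoder_start
    return prev_tokens

labels = torch.tensor([[11, 12, 2, 50003]])  # tokens, EOS, language code
print(shift_tokens_right_sketch(labels, pad_token_id=1))  # tensor([[50003, 11, 12, 2]])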
'''simple docstring''' import operator as op def __lowerCamelCase ( __snake_case : Optional[Any] ) -> Dict: """simple docstring""" A__ : List[str] =[] A__ : str =lambda __snake_case, __snake_case : int(x / y ) # noqa: E731 integer division operation A__ : Optional[int] ={ """^""": op.pow, """*""": op.mul, """/""": div, """+""": op.add, """-""": op.sub, } # operators & their respective operation # print table header print("""Symbol""".center(8 ), """Action""".center(12 ), """Stack""", sep=""" | """ ) print("""-""" * (30 + len(__snake_case )) ) for x in post_fix: if x.isdigit(): # if x in digit stack.append(__snake_case ) # append x to stack # output in tabular format print(x.rjust(8 ), ("""push(""" + x + """)""").ljust(12 ), """,""".join(__snake_case ), sep=""" | """ ) else: A__ : int =stack.pop() # pop stack # output in tabular format print("""""".rjust(8 ), ("""pop(""" + b + """)""").ljust(12 ), """,""".join(__snake_case ), sep=""" | """ ) A__ : Tuple =stack.pop() # pop stack # output in tabular format print("""""".rjust(8 ), ("""pop(""" + a + """)""").ljust(12 ), """,""".join(__snake_case ), sep=""" | """ ) stack.append( str(opr[x](int(__snake_case ), int(__snake_case ) ) ) ) # evaluate the 2 values popped from stack & push result to stack # output in tabular format print( x.rjust(8 ), ("""push(""" + a + x + b + """)""").ljust(12 ), """,""".join(__snake_case ), sep=""" | """, ) return int(stack[0] ) if __name__ == "__main__": __snake_case : Any = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ') print('\n\tResult = ', solve(Postfix))
687
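A quick run of the table-printing evaluator above, assuming it is exposed as solve per its __main__ block: operands push onto the stack and each operator pops two, so "5 6 9 * +" reduces as 6 * 9 = 54, then 5 + 54 = 59.

result = solve("5 6 9 * +".split(" "))
print(result)  # 59 (the intermediate table rows print along the way)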
'''simple docstring''' import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionTextToImagePipeline from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device __snake_case : str = False class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' pass @nightly @require_torch_gpu class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowercase__ ( self : Optional[Any] ) -> Any: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' A__ : List[str] =VersatileDiffusionTextToImagePipeline.from_pretrained("""shi-labs/versatile-diffusion""" ) # remove text_unet pipe.remove_unused_weights() pipe.to(lowerCAmelCase_ ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) A__ : int ="""A painting of a squirrel eating a burger """ A__ : Tuple =torch.manual_seed(0 ) A__ : int =pipe( prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCAmelCase_ ) A__ : str =VersatileDiffusionTextToImagePipeline.from_pretrained(lowerCAmelCase_ ) pipe.to(lowerCAmelCase_ ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) A__ : int =generator.manual_seed(0 ) A__ : Tuple =pipe( prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass" def lowercase__ ( self : Optional[int] ) -> int: '''simple docstring''' A__ : Any =VersatileDiffusionTextToImagePipeline.from_pretrained( """shi-labs/versatile-diffusion""" , torch_dtype=torch.float16 ) pipe.to(lowerCAmelCase_ ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) A__ : Dict ="""A painting of a squirrel eating a burger """ A__ : Optional[int] =torch.manual_seed(0 ) A__ : List[str] =pipe( prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images A__ : List[str] =image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) A__ : Tuple =np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
687
1
'''simple docstring''' import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml __snake_case : Tuple = NewType('DataClass', Any) __snake_case : List[Any] = NewType('DataClassType', Any) def __lowerCamelCase ( __snake_case : Tuple ) -> str: """simple docstring""" if isinstance(__snake_case, __snake_case ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)." ) def __lowerCamelCase ( __snake_case : list ) -> Callable[[str], Any]: """simple docstring""" A__ : Any ={str(__snake_case ): choice for choice in choices} return lambda __snake_case : str_to_choice.get(__snake_case, __snake_case ) def __lowerCamelCase ( *, __snake_case : Union[str, List[str]] = None, __snake_case : str = None, __snake_case : Any = dataclasses.MISSING, __snake_case : Callable[[], Any] = dataclasses.MISSING, __snake_case : dict = None, **__snake_case : Dict, ) -> dataclasses.Field: """simple docstring""" if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls A__ : Optional[int] ={} if aliases is not None: A__ : str =aliases if help is not None: A__ : Any =help return dataclasses.field(metadata=__snake_case, default=__snake_case, default_factory=__snake_case, **__snake_case ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = 42 def __init__( self : Optional[Any] , lowerCAmelCase_ : Union[DataClassType, Iterable[DataClassType]] , **lowerCAmelCase_ : Tuple ) -> Dict: '''simple docstring''' # To make the default appear when using --help if "formatter_class" not in kwargs: A__ : List[Any] =ArgumentDefaultsHelpFormatter super().__init__(**lowerCAmelCase_ ) if dataclasses.is_dataclass(lowerCAmelCase_ ): A__ : Any =[dataclass_types] A__ : Dict =list(lowerCAmelCase_ ) for dtype in self.dataclass_types: self._add_dataclass_arguments(lowerCAmelCase_ ) @staticmethod def lowercase__ ( lowerCAmelCase_ : ArgumentParser , lowerCAmelCase_ : dataclasses.Field ) -> Union[str, Any]: '''simple docstring''' A__ : Optional[Any] =f"--{field.name}" A__ : str =field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. 
if isinstance(field.type , lowerCAmelCase_ ): raise RuntimeError( """Unresolved type detected, which should have been done with the help of """ """`typing.get_type_hints` method by default""" ) A__ : Union[str, Any] =kwargs.pop("""aliases""" , [] ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): A__ : Optional[int] =[aliases] A__ : Any =getattr(field.type , """__origin__""" , field.type ) if origin_type is Union or (hasattr(lowerCAmelCase_ , """UnionType""" ) and isinstance(lowerCAmelCase_ , types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(lowerCAmelCase_ ) not in field.type.__args__ ): raise ValueError( """Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because""" """ the argument parser only supports one type per argument.""" f" Problem encountered in field '{field.name}'." ) if type(lowerCAmelCase_ ) not in field.type.__args__: # filter `str` in Union A__ : str =field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] A__ : Tuple =getattr(field.type , """__origin__""" , field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) A__ : List[Any] =( field.type.__args__[0] if isinstance(lowerCAmelCase_ , field.type.__args__[1] ) else field.type.__args__[1] ) A__ : List[Any] =getattr(field.type , """__origin__""" , field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) A__ : List[str] ={} if origin_type is Literal or (isinstance(field.type , lowerCAmelCase_ ) and issubclass(field.type , lowerCAmelCase_ )): if origin_type is Literal: A__ : Dict =field.type.__args__ else: A__ : List[str] =[x.value for x in field.type] A__ : Union[str, Any] =make_choice_type_function(kwargs["""choices"""] ) if field.default is not dataclasses.MISSING: A__ : Any =field.default else: A__ : Optional[Any] =True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument A__ : Union[str, Any] =copy(lowerCAmelCase_ ) # Hack because type=bool in argparse does not behave as we want. A__ : str =string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. 
A__ : Union[str, Any] =False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way A__ : List[Any] =default # This tells argparse we accept 0 or 1 value after --field_name A__ : Optional[Any] ="""?""" # This is the value that will get picked if we do --field_name (without value) A__ : int =True elif isclass(lowerCAmelCase_ ) and issubclass(lowerCAmelCase_ , lowerCAmelCase_ ): A__ : Optional[int] =field.type.__args__[0] A__ : Dict ="""+""" if field.default_factory is not dataclasses.MISSING: A__ : Any =field.default_factory() elif field.default is dataclasses.MISSING: A__ : Optional[int] =True else: A__ : Union[str, Any] =field.type if field.default is not dataclasses.MISSING: A__ : Optional[int] =field.default elif field.default_factory is not dataclasses.MISSING: A__ : Union[str, Any] =field.default_factory() else: A__ : int =True parser.add_argument(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. if field.default is True and (field.type is bool or field.type == Optional[bool]): A__ : Union[str, Any] =False parser.add_argument(f"--no_{field.name}" , action="""store_false""" , dest=field.name , **lowerCAmelCase_ ) def lowercase__ ( self : List[str] , lowerCAmelCase_ : DataClassType ) -> Union[str, Any]: '''simple docstring''' if hasattr(lowerCAmelCase_ , """_argument_group_name""" ): A__ : str =self.add_argument_group(dtype._argument_group_name ) else: A__ : str =self try: A__ : Dict[str, type] =get_type_hints(lowerCAmelCase_ ) except NameError: raise RuntimeError( f"Type resolution failed for {dtype}. Try declaring the class in global scope or " """removing line of `from __future__ import annotations` which opts in Postponed """ """Evaluation of Annotations (PEP 563)""" ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(lowerCAmelCase_ ): A__ : List[str] =""".""".join(map(lowerCAmelCase_ , sys.version_info[:3] ) ) raise RuntimeError( f"Type resolution failed for {dtype} on Python {python_version}. Try removing " """line of `from __future__ import annotations` which opts in union types as """ """`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). 
To """ """support Python versions that lower than 3.10, you need to use """ """`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of """ """`X | None`.""" ) from ex raise for field in dataclasses.fields(lowerCAmelCase_ ): if not field.init: continue A__ : List[Any] =type_hints[field.name] self._parse_dataclass_field(lowerCAmelCase_ , lowerCAmelCase_ ) def lowercase__ ( self : Dict , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Optional[int]=False , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Union[str, Any]=None , ) -> Tuple[DataClass, ...]: '''simple docstring''' if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): A__ : Any =[] if args_filename: args_files.append(Path(lowerCAmelCase_ ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix(""".args""" ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values A__ : Optional[Any] =ArgumentParser() args_file_parser.add_argument(lowerCAmelCase_ , type=lowerCAmelCase_ , action="""append""" ) # Use only remaining args for further parsing (remove the args_file_flag) A__ , A__ : List[Any] =args_file_parser.parse_known_args(args=lowerCAmelCase_ ) A__ : Tuple =vars(lowerCAmelCase_ ).get(args_file_flag.lstrip("""-""" ) , lowerCAmelCase_ ) if cmd_args_file_paths: args_files.extend([Path(lowerCAmelCase_ ) for p in cmd_args_file_paths] ) A__ : str =[] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last A__ : str =file_args + args if args is not None else file_args + sys.argv[1:] A__ , A__ : Optional[int] =self.parse_known_args(args=lowerCAmelCase_ ) A__ : List[str] =[] for dtype in self.dataclass_types: A__ : Dict ={f.name for f in dataclasses.fields(lowerCAmelCase_ ) if f.init} A__ : Tuple ={k: v for k, v in vars(lowerCAmelCase_ ).items() if k in keys} for k in keys: delattr(lowerCAmelCase_ , lowerCAmelCase_ ) A__ : str =dtype(**lowerCAmelCase_ ) outputs.append(lowerCAmelCase_ ) if len(namespace.__dict__ ) > 0: # additional namespace. 
outputs.append(lowerCAmelCase_ ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}" ) return (*outputs,) def lowercase__ ( self : int , lowerCAmelCase_ : Dict[str, Any] , lowerCAmelCase_ : bool = False ) -> Tuple[DataClass, ...]: '''simple docstring''' A__ : List[str] =set(args.keys() ) A__ : Union[str, Any] =[] for dtype in self.dataclass_types: A__ : List[str] ={f.name for f in dataclasses.fields(lowerCAmelCase_ ) if f.init} A__ : Any ={k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) A__ : List[Any] =dtype(**lowerCAmelCase_ ) outputs.append(lowerCAmelCase_ ) if not allow_extra_keys and unused_keys: raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(lowerCAmelCase_ )}" ) return tuple(lowerCAmelCase_ ) def lowercase__ ( self : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : bool = False ) -> Tuple[DataClass, ...]: '''simple docstring''' with open(Path(lowerCAmelCase_ ) , encoding="""utf-8""" ) as open_json_file: A__ : int =json.loads(open_json_file.read() ) A__ : Dict =self.parse_dict(lowerCAmelCase_ , allow_extra_keys=lowerCAmelCase_ ) return tuple(lowerCAmelCase_ ) def lowercase__ ( self : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : bool = False ) -> Tuple[DataClass, ...]: '''simple docstring''' A__ : Dict =self.parse_dict(yaml.safe_load(Path(lowerCAmelCase_ ).read_text() ) , allow_extra_keys=lowerCAmelCase_ ) return tuple(lowerCAmelCase_ )
687
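The parser above corresponds to transformers' HfArgumentParser; assuming that public name, a minimal usage sketch with a hypothetical dataclass (the option names are invented for illustration):

from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class TrainingOptions:
    learning_rate: float = field(default=3e-4, metadata={"help": "Peak learning rate."})
    use_fp16: bool = False  # bool fields defaulting to True also get a --no_* complement

parser = HfArgumentParser(TrainingOptions)
(opts,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-4", "--use_fp16"])
print(opts.learning_rate, opts.use_fp16)  # 0.0001 True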
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = 42 class lowerCamelCase ( lowercase_ , lowercase_ ): '''simple docstring''' @register_to_config def __init__( self : List[str] , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : Tuple[str] = ("DownEncoderBlock2D",) , lowerCAmelCase_ : Tuple[str] = ("UpDecoderBlock2D",) , lowerCAmelCase_ : Tuple[int] = (64,) , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : str = "silu" , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 32 , lowerCAmelCase_ : int = 2_56 , lowerCAmelCase_ : int = 32 , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : float = 0.18215 , lowerCAmelCase_ : str = "group" , ) -> List[str]: '''simple docstring''' super().__init__() # pass init params to Encoder A__ : Optional[Any] =Encoder( in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , down_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , double_z=lowerCAmelCase_ , ) A__ : Dict =vq_embed_dim if vq_embed_dim is not None else latent_channels A__ : Union[str, Any] =nn.Conv2d(lowerCAmelCase_ , lowerCAmelCase_ , 1 ) A__ : Optional[int] =VectorQuantizer(lowerCAmelCase_ , lowerCAmelCase_ , beta=0.25 , remap=lowerCAmelCase_ , sane_index_shape=lowerCAmelCase_ ) A__ : Tuple =nn.Conv2d(lowerCAmelCase_ , lowerCAmelCase_ , 1 ) # pass init params to Decoder A__ : Optional[Any] =Decoder( in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , up_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , norm_type=lowerCAmelCase_ , ) @apply_forward_hook def lowercase__ ( self : List[str] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True ) -> VQEncoderOutput: '''simple docstring''' A__ : Dict =self.encoder(lowerCAmelCase_ ) A__ : Union[str, Any] =self.quant_conv(lowerCAmelCase_ ) if not return_dict: return (h,) return VQEncoderOutput(latents=lowerCAmelCase_ ) @apply_forward_hook def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]: '''simple docstring''' # also go through quantization layer if not force_not_quantize: A__ , A__ , A__ : Tuple =self.quantize(lowerCAmelCase_ ) else: A__ : List[str] =h A__ : Dict =self.post_quant_conv(lowerCAmelCase_ ) A__ : List[Any] =self.decoder(lowerCAmelCase_ , quant if self.config.norm_type == """spatial""" else None ) if not return_dict: return (dec,) return DecoderOutput(sample=lowerCAmelCase_ ) def lowercase__ ( self : str , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]: '''simple docstring''' A__ : Optional[int] =sample A__ : Union[str, Any] =self.encode(lowerCAmelCase_ ).latents A__ : Tuple =self.decode(lowerCAmelCase_ ).sample if not return_dict: return (dec,) return DecoderOutput(sample=lowerCAmelCase_ )
687
1
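At the core of the VectorQuantizer wired in above is a nearest-codebook lookup with a straight-through gradient; a self-contained sketch with illustrative sizes (not the diffusers implementation itself):

import torch

codebook = torch.randn(256, 32)   # (n_embeddings, vq_embed_dim)
latents = torch.randn(8, 32)      # flattened encoder output
distances = torch.cdist(latents, codebook)   # (8, 256) pairwise L2 distances
indices = distances.argmin(dim=1)            # nearest code per latent
quantized = codebook[indices]                # (8, 32) snapped latents
# Straight-through estimator: gradients flow to the encoder as if unquantized.
quantized = latents + (quantized - latents).detach()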
'''simple docstring''' from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class lowerCamelCase : '''simple docstring''' __snake_case = LEDConfig __snake_case = {} __snake_case = 'gelu' def __init__( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple=13 , lowerCAmelCase_ : int=7 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : str=False , lowerCAmelCase_ : Union[str, Any]=99 , lowerCAmelCase_ : Optional[int]=32 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Dict=4 , lowerCAmelCase_ : Dict=37 , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : Optional[Any]=20 , lowerCAmelCase_ : List[Any]=2 , lowerCAmelCase_ : List[Any]=1 , lowerCAmelCase_ : List[Any]=0 , lowerCAmelCase_ : Optional[int]=4 , ) -> Tuple: '''simple docstring''' A__ : Optional[int] =parent A__ : int =batch_size A__ : Any =seq_length A__ : Tuple =is_training A__ : List[Any] =use_labels A__ : List[str] =vocab_size A__ : List[Any] =hidden_size A__ : List[Any] =num_hidden_layers A__ : List[Any] =num_attention_heads A__ : Dict =intermediate_size A__ : List[str] =hidden_dropout_prob A__ : Optional[Any] =attention_probs_dropout_prob A__ : Union[str, Any] =max_position_embeddings A__ : str =eos_token_id A__ : Optional[int] =pad_token_id A__ : List[str] =bos_token_id A__ : List[str] =attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after A__ : Dict =self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. 
Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests A__ : Any =( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def lowercase__ ( self : Dict ) -> Optional[Any]: '''simple docstring''' A__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) A__ : Dict =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) A__ : Optional[int] =tf.concat([input_ids, eos_tensor] , axis=1 ) A__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ : str =self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) A__ : str =prepare_led_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) A__ : Union[str, Any] =tf.concat( [tf.zeros_like(lowerCAmelCase_ )[:, :-1], tf.ones_like(lowerCAmelCase_ )[:, -1:]] , axis=-1 , ) A__ : Dict =global_attention_mask return config, inputs_dict def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int ) -> Optional[Any]: '''simple docstring''' A__ : Optional[Any] =TFLEDModel(config=lowerCAmelCase_ ).get_decoder() A__ : List[str] =inputs_dict["""input_ids"""] A__ : List[str] =input_ids[:1, :] A__ : int =inputs_dict["""attention_mask"""][:1, :] A__ : Tuple =1 # first forward pass A__ : Dict =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ ) A__ , A__ : Union[str, Any] =outputs.to_tuple() # create hypothetical next token and extend to next_input_ids A__ : Optional[Any] =ids_tensor((self.batch_size, 3) , config.vocab_size ) A__ : Tuple =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 ) # append to next input_ids and A__ : List[str] =tf.concat([input_ids, next_tokens] , axis=-1 ) A__ : Optional[Any] =tf.concat([attention_mask, next_attn_mask] , axis=-1 ) A__ : List[str] =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0] A__ : Tuple =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice A__ : str =int(ids_tensor((1,) , output_from_past.shape[-1] ) ) A__ : Any =output_from_no_past[:, -3:, random_slice_idx] A__ : Union[str, Any] =output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowerCAmelCase_ , lowerCAmelCase_ , rtol=1e-3 ) def __lowerCamelCase ( __snake_case : Optional[int], __snake_case : Union[str, Any], __snake_case : Dict, __snake_case : int=None, __snake_case : Optional[Any]=None, __snake_case : List[Any]=None, __snake_case : Dict=None, ) -> int: """simple docstring""" if attention_mask is None: A__ : Optional[int] =tf.cast(tf.math.not_equal(__snake_case, config.pad_token_id ), tf.int8 ) if decoder_attention_mask is None: A__ : str =tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8 ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.int8 ), ], axis=-1, ) if head_mask is None: A__ : Optional[Any] =tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: A__ : Optional[int] =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class lowerCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ): '''simple docstring''' __snake_case = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () __snake_case = (TFLEDForConditionalGeneration,) if is_tf_available() else () __snake_case = ( { 'conversational': TFLEDForConditionalGeneration, 'feature-extraction': TFLEDModel, 'summarization': TFLEDForConditionalGeneration, 'text2text-generation': TFLEDForConditionalGeneration, 'translation': TFLEDForConditionalGeneration, } if is_tf_available() else {} ) __snake_case = True __snake_case = False __snake_case = False __snake_case = False def lowercase__ ( self : Tuple ) -> List[Any]: '''simple docstring''' A__ : Any =TFLEDModelTester(self ) A__ : Any =ConfigTester(self , config_class=lowerCAmelCase_ ) def lowercase__ ( self : Dict ) -> str: '''simple docstring''' self.config_tester.run_common_tests() def lowercase__ ( self : List[Any] ) -> Any: '''simple docstring''' A__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase_ ) def lowercase__ ( self : Any ) -> str: '''simple docstring''' A__ , A__ : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common() A__ : Optional[int] =tf.zeros_like(inputs_dict["""attention_mask"""] ) A__ : int =2 A__ : str =tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["""global_attention_mask"""] , ) A__ : str =True A__ : Union[str, Any] =self.model_tester.seq_length A__ : Any =self.model_tester.encoder_seq_length def check_decoder_attentions_output(lowerCAmelCase_ : Any ): A__ : List[str] =outputs.decoder_attentions self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(lowerCAmelCase_ : List[Any] ): A__ : int =[t.numpy() for t in outputs.encoder_attentions] A__ : List[str] =[t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: A__ : Optional[int] =True A__ : Union[str, Any] =False A__ : Any =False A__ : Union[str, Any] =model_class(lowerCAmelCase_ ) A__ : Tuple =model(self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) ) A__ : str =len(lowerCAmelCase_ ) self.assertEqual(config.output_hidden_states , lowerCAmelCase_ ) check_encoder_attentions_output(lowerCAmelCase_ ) if self.is_encoder_decoder: A__ : Any
=model_class(lowerCAmelCase_ ) A__ : Optional[int] =model(self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) ) self.assertEqual(config.output_hidden_states , lowerCAmelCase_ ) check_decoder_attentions_output(lowerCAmelCase_ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] A__ : Optional[Any] =True A__ : int =model_class(lowerCAmelCase_ ) A__ : Optional[Any] =model(self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) ) self.assertEqual(config.output_hidden_states , lowerCAmelCase_ ) check_encoder_attentions_output(lowerCAmelCase_ ) # Check attention is always last and order is fine A__ : Optional[int] =True A__ : List[Any] =True A__ : Optional[Any] =model_class(lowerCAmelCase_ ) A__ : str =model(self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowerCAmelCase_ ) ) self.assertEqual(model.config.output_hidden_states , lowerCAmelCase_ ) check_encoder_attentions_output(lowerCAmelCase_ ) @unittest.skip("""LED keeps using potentially symbolic tensors in conditionals and breaks tracing.""" ) def lowercase__ ( self : str ) -> Optional[int]: '''simple docstring''' pass def lowercase__ ( self : Any ) -> Tuple: '''simple docstring''' # TODO: Head-masking not yet implemented pass def __lowerCamelCase ( __snake_case : List[str] ) -> str: """simple docstring""" return tf.constant(__snake_case, dtype=tf.int32 ) __snake_case : Optional[int] = 1E-4 @slow @require_tf class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowercase__ ( self : List[Any] ) -> Dict: '''simple docstring''' A__ : int =TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" ).led # change to intended input here A__ : Optional[int] =_long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] ) A__ : int =_long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] ) A__ : str =prepare_led_inputs_dict(model.config , lowerCAmelCase_ , lowerCAmelCase_ ) A__ : str =model(**lowerCAmelCase_ )[0] A__ : Tuple =(1, 10_24, 7_68) self.assertEqual(output.shape , lowerCAmelCase_ ) # change to expected output here A__ : List[Any] =tf.convert_to_tensor( [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , ) tf.debugging.assert_near(output[:, :3, :3] , lowerCAmelCase_ , atol=1e-3 ) def lowercase__ ( self : str ) -> Tuple: '''simple docstring''' A__ : Dict =TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" ) # change to intended input here A__ : str =_long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] ) A__ : Optional[int] =_long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] ) A__ : Optional[Any] =prepare_led_inputs_dict(model.config , lowerCAmelCase_ , lowerCAmelCase_ ) A__ : Any =model(**lowerCAmelCase_ )[0] A__ : List[Any] =(1, 10_24, model.config.vocab_size) self.assertEqual(output.shape , lowerCAmelCase_ ) # change to expected output here A__ : Optional[Any] =tf.convert_to_tensor( [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , ) tf.debugging.assert_near(output[:, :3, :3] , lowerCAmelCase_ , atol=1e-3 , rtol=1e-3 )
687
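A minimal standalone sketch of the global-attention-mask trick the LED test above uses: broadcast tf.range against a threshold with tf.where. This assumes only that TensorFlow is installed; the sizes are toy values, not taken from the test.

import tensorflow as tf

batch_size, seq_length, num_global = 2, 8, 2
base_mask = tf.zeros((batch_size, seq_length), dtype=tf.int32)
# Mark the first `num_global` positions of every sequence as global tokens,
# mirroring the tf.where-over-tf.range broadcast performed in the test.
global_mask = tf.where(
    tf.range(seq_length)[None, :] < num_global,
    tf.ones_like(base_mask),
    base_mask,
)
print(global_mask.numpy())  # columns 0 and 1 are 1, the rest 0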
'''simple docstring''' import os import re from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __snake_case : Optional[int] = logging.get_logger(__name__) __snake_case : Tuple = { 'vocab_file': 'vocab.txt', 'merges_file': 'bpe.codes', } __snake_case : str = { 'vocab_file': { 'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt', 'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt', }, 'merges_file': { 'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes', 'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes', }, } __snake_case : List[Any] = { 'vinai/phobert-base': 256, 'vinai/phobert-large': 256, } def __lowerCamelCase ( __snake_case : Union[str, Any] ) -> str: """simple docstring""" A__ : Optional[int] =set() A__ : Optional[int] =word[0] for char in word[1:]: pairs.add((prev_char, char) ) A__ : str =char A__ : List[Any] =set(__snake_case ) return pairs class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = VOCAB_FILES_NAMES __snake_case = PRETRAINED_VOCAB_FILES_MAP __snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any]="<s>" , lowerCAmelCase_ : List[str]="</s>" , lowerCAmelCase_ : str="</s>" , lowerCAmelCase_ : int="<s>" , lowerCAmelCase_ : List[str]="<unk>" , lowerCAmelCase_ : Any="<pad>" , lowerCAmelCase_ : Tuple="<mask>" , **lowerCAmelCase_ : Dict , ) -> Dict: '''simple docstring''' super().__init__( bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , **lowerCAmelCase_ , ) A__ : int =vocab_file A__ : Any =merges_file A__ : Union[str, Any] ={} A__ : Optional[int] =0 A__ : List[Any] =1 A__ : Tuple =2 A__ : Dict =3 self.add_from_file(lowerCAmelCase_ ) A__ : List[str] ={v: k for k, v in self.encoder.items()} with open(lowerCAmelCase_ , encoding="""utf-8""" ) as merges_handle: A__ : str =merges_handle.read().split("""\n""" )[:-1] A__ : Tuple =[tuple(merge.split()[:-1] ) for merge in merges] A__ : Optional[Any] =dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) A__ : Dict ={} def lowercase__ ( self : Tuple , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A__ : Dict =[self.cls_token_id] A__ : Union[str, Any] =[self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowercase__ ( self : str , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None , lowerCAmelCase_ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase_ )) + [1] return [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1] + ([0] * len(lowerCAmelCase_ )) + [1] def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' A__ : Tuple =[self.sep_token_id] A__ : Dict =[self.cls_token_id] if 
token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowercase__ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' return len(self.encoder ) def lowercase__ ( self : Any ) -> Tuple: '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def lowercase__ ( self : str , lowerCAmelCase_ : Any ) -> Dict: '''simple docstring''' if token in self.cache: return self.cache[token] A__ : int =tuple(lowerCAmelCase_ ) A__ : Optional[int] =tuple(list(word[:-1] ) + [word[-1] + """</w>"""] ) A__ : Tuple =get_pairs(lowerCAmelCase_ ) if not pairs: return token while True: A__ : List[Any] =min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break A__ , A__ : Tuple =bigram A__ : Optional[int] =[] A__ : Tuple =0 while i < len(lowerCAmelCase_ ): try: A__ : str =word.index(lowerCAmelCase_ , lowerCAmelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) A__ : Union[str, Any] =j if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 A__ : Dict =tuple(lowerCAmelCase_ ) A__ : Dict =new_word if len(lowerCAmelCase_ ) == 1: break else: A__ : str =get_pairs(lowerCAmelCase_ ) A__ : Dict ="""@@ """.join(lowerCAmelCase_ ) A__ : Tuple =word[:-4] A__ : Any =word return word def lowercase__ ( self : List[str] , lowerCAmelCase_ : str ) -> Any: '''simple docstring''' A__ : int =[] A__ : Optional[int] =re.findall(R"""\S+\n?""" , lowerCAmelCase_ ) for token in words: split_tokens.extend(list(self.bpe(lowerCAmelCase_ ).split(""" """ ) ) ) return split_tokens def lowercase__ ( self : str , lowerCAmelCase_ : Union[str, Any] ) -> int: '''simple docstring''' return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) ) def lowercase__ ( self : Tuple , lowerCAmelCase_ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' return self.decoder.get(lowerCAmelCase_ , self.unk_token ) def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' A__ : Optional[Any] =""" """.join(lowerCAmelCase_ ).replace("""@@ """ , """""" ).strip() return out_string def lowercase__ ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(lowerCAmelCase_ ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return A__ : Optional[Any] =os.path.join( lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) A__ : Tuple =os.path.join( lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ): copyfile(self.vocab_file , lowerCAmelCase_ ) if os.path.abspath(self.merges_file ) != os.path.abspath(lowerCAmelCase_ ): copyfile(self.merges_file , lowerCAmelCase_ ) return out_vocab_file, out_merge_file def lowercase__ ( self : List[Any] , lowerCAmelCase_ : Optional[Any] ) -> Any: '''simple docstring''' if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): try: with open(lowerCAmelCase_ , """r""" , encoding="""utf-8""" ) as fd: self.add_from_file(lowerCAmelCase_ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise 
Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset" ) return A__ : Union[str, Any] =f.readlines() for lineTmp in lines: A__ : List[Any] =lineTmp.strip() A__ : Dict =line.rfind(""" """ ) if idx == -1: raise ValueError("""Incorrect dictionary format, expected '<token> <cnt>'""" ) A__ : Tuple =line[:idx] A__ : Tuple =len(self.encoder )
687
1
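For reference, a readable sketch of the symbol-pair step the tokenizer's bpe() loop above is built on. It is pure Python; the word tuple below is a made-up example, not a real PhoBERT sub-word.

def get_pairs(word):
    # Collect every pair of adjacent symbols in the current sub-word tuple.
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

word = ("p", "h", "o", "bert</w>")
print(get_pairs(word))  # {('p', 'h'), ('h', 'o'), ('o', 'bert</w>')} (set order varies)
# bpe() then repeatedly merges the adjacent pair with the lowest rank in
# bpe_ranks until no ranked pair remains, and joins the pieces with "@@ ".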
'''simple docstring''' import warnings from ...utils import logging from .image_processing_owlvit import OwlViTImageProcessor __snake_case : List[Any] = logging.get_logger(__name__) class lowerCamelCase ( lowercase_ ): '''simple docstring''' def __init__( self : List[str] , *lowerCAmelCase_ : str , **lowerCAmelCase_ : Tuple ) -> None: '''simple docstring''' warnings.warn( """The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use OwlViTImageProcessor instead.""" , lowerCAmelCase_ , ) super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
687
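The deprecated feature extractor above follows a simple shim pattern: warn at construction time, then defer entirely to the replacement class. A minimal sketch of that pattern; the class names and message here are hypothetical, not from transformers.

import warnings

class NewProcessor:
    def __init__(self, *args, **kwargs):
        self.args, self.kwargs = args, kwargs

class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs):
        # Emit the deprecation notice once per construction, then behave
        # exactly like the replacement class.
        warnings.warn(
            "OldFeatureExtractor is deprecated, use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

OldFeatureExtractor()  # warns, otherwise identical to NewProcessor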
'''simple docstring''' import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging __snake_case : List[str] = logging.get_logger(__name__) def __lowerCamelCase ( __snake_case : Any, __snake_case : Any ) -> int: """simple docstring""" A__ : Union[str, Any] =nn.functional.normalize(__snake_case ) A__ : Optional[Any] =nn.functional.normalize(__snake_case ) return torch.mm(__snake_case, normalized_text_embeds.t() ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = CLIPConfig __snake_case = ['CLIPEncoderLayer'] def __init__( self : Tuple , lowerCAmelCase_ : CLIPConfig ) -> Dict: '''simple docstring''' super().__init__(lowerCAmelCase_ ) A__ : str =CLIPVisionModel(config.vision_config ) A__ : Optional[Any] =nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=lowerCAmelCase_ ) A__ : List[Any] =nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=lowerCAmelCase_ ) A__ : Any =nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=lowerCAmelCase_ ) A__ : Optional[Any] =nn.Parameter(torch.ones(17 ) , requires_grad=lowerCAmelCase_ ) A__ : int =nn.Parameter(torch.ones(3 ) , requires_grad=lowerCAmelCase_ ) @torch.no_grad() def lowercase__ ( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int ) -> Any: '''simple docstring''' A__ : Any =self.vision_model(lowerCAmelCase_ )[1] # pooled_output A__ : Any =self.visual_projection(lowerCAmelCase_ ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 A__ : Any =cosine_distance(lowerCAmelCase_ , self.special_care_embeds ).cpu().float().numpy() A__ : Optional[int] =cosine_distance(lowerCAmelCase_ , self.concept_embeds ).cpu().float().numpy() A__ : List[str] =[] A__ : Optional[int] =image_embeds.shape[0] for i in range(lowerCAmelCase_ ): A__ : List[Any] ={"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []} # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign images A__ : List[Any] =0.0 for concept_idx in range(len(special_cos_dist[0] ) ): A__ : Optional[Any] =special_cos_dist[i][concept_idx] A__ : Union[str, Any] =self.special_care_embeds_weights[concept_idx].item() A__ : Tuple =round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["special_scores"][concept_idx] > 0: result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]} ) A__ : Dict =0.01 for concept_idx in range(len(cos_dist[0] ) ): A__ : Optional[int] =cos_dist[i][concept_idx] A__ : List[str] =self.concept_embeds_weights[concept_idx].item() A__ : Optional[int] =round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["concept_scores"][concept_idx] > 0: result_img["bad_concepts"].append(lowerCAmelCase_ ) result.append(lowerCAmelCase_ ) A__ : int =[len(res["""bad_concepts"""] ) > 0 for res in result] return images, has_nsfw_concepts @torch.no_grad() def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : torch.FloatTensor ) -> Optional[int]: '''simple docstring''' A__ : Optional[Any] =self.vision_model(lowerCAmelCase_ )[1] # pooled_output A__ : List[Any] =self.visual_projection(lowerCAmelCase_ ) A__ : Union[str, Any] =cosine_distance(lowerCAmelCase_ , self.special_care_embeds ) A__ : Optional[int] =cosine_distance(lowerCAmelCase_ , self.concept_embeds ) # increase this value to 
create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images A__ : Dict =0.0 A__ : Dict =special_cos_dist - self.special_care_embeds_weights + adjustment # special_scores = special_scores.round(decimals=3) A__ : Union[str, Any] =torch.any(special_scores > 0 , dim=1 ) A__ : Tuple =special_care * 0.01 A__ : str =special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] ) A__ : List[Any] =(cos_dist - self.concept_embeds_weights) + special_adjustment # concept_scores = concept_scores.round(decimals=3) A__ : Optional[int] =torch.any(concept_scores > 0 , dim=1 ) return images, has_nsfw_concepts
687
1
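A minimal sketch of the cosine_distance helper the safety checker above relies on: L2-normalize both embedding matrices, then a single matmul yields all pairwise cosine similarities. Assumes torch; shapes are toy values.

import torch
import torch.nn.functional as F

image_embeds = torch.randn(4, 16)    # 4 images, 16-dim embeddings (toy sizes)
concept_embeds = torch.randn(3, 16)  # 3 concept embeddings
# Normalizing first makes the dot product equal to the cosine similarity.
scores = F.normalize(image_embeds) @ F.normalize(concept_embeds).t()
assert scores.shape == (4, 3)
assert float(scores.abs().max()) <= 1.0 + 1e-6  # cosine values lie in [-1, 1]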
'''simple docstring''' def __lowerCamelCase ( __snake_case : int, __snake_case : int ) -> str: """simple docstring""" if number < 0 or shift_amount < 0: raise ValueError("""both inputs must be positive integers""" ) A__ : List[str] =str(bin(__snake_case ) ) binary_number += "0" * shift_amount return binary_number def __lowerCamelCase ( __snake_case : int, __snake_case : int ) -> str: """simple docstring""" if number < 0 or shift_amount < 0: raise ValueError("""both inputs must be positive integers""" ) A__ : List[Any] =str(bin(__snake_case ) )[2:] if shift_amount >= len(__snake_case ): return "0b0" A__ : Dict =binary_number[: len(__snake_case ) - shift_amount] return "0b" + shifted_binary_number def __lowerCamelCase ( __snake_case : int, __snake_case : int ) -> str: """simple docstring""" if number >= 0: # Get binary representation of positive number A__ : Optional[int] ="""0""" + str(bin(__snake_case ) ).strip("""-""" )[2:] else: # Get binary (2's complement) representation of negative number A__ : Tuple =len(bin(__snake_case )[3:] ) # Find 2's complement of number A__ : Union[str, Any] =bin(abs(__snake_case ) - (1 << binary_number_length) )[3:] A__ : Dict =( """1""" + """0""" * (binary_number_length - len(__snake_case )) + binary_number ) if shift_amount >= len(__snake_case ): return "0b" + binary_number[0] * len(__snake_case ) return ( "0b" + binary_number[0] * shift_amount + binary_number[: len(__snake_case ) - shift_amount] ) if __name__ == "__main__": import doctest doctest.testmod()
687
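A quick self-contained check of the semantics the three shift helpers above implement, using Python's native shift operators. The names below are ours, since the sample's functions all share one obfuscated name.

n = 0b1101  # 13
print(bin(n << 2))  # 0b110100 -> logical left shift appends two zero bits
print(bin(n >> 2))  # 0b11     -> logical right shift drops the two low bits
# Python's >> on ints is an arithmetic shift: the sign is preserved, which is
# what the third helper encodes explicitly via two's complement strings.
print(-8 >> 1)      # -4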
'''simple docstring''' from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def __lowerCamelCase ( __snake_case : Tuple, __snake_case : List[Any] ) -> str: """simple docstring""" A__ : Optional[int] =[] for part_id in partition_order: A__ : int =df.where(f"SPARK_PARTITION_ID() = {part_id}" ).collect() for row_idx, row in enumerate(__snake_case ): expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()) ) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def __lowerCamelCase ( ) -> List[Any]: """simple docstring""" A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() A__ : str =spark.range(100 ).repartition(1 ) A__ : List[str] =Spark(__snake_case ) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=16 ) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 50 @require_not_windows @require_dill_gt_0_3_2 def __lowerCamelCase ( ) -> Tuple: """simple docstring""" A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() A__ : Tuple =spark.range(10 ).repartition(2 ) A__ : List[str] =[1, 0] A__ : Tuple =_generate_iterable_examples(__snake_case, __snake_case ) # Reverse the partitions. A__ : Dict =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, __snake_case ) for i, (row_id, row_dict) in enumerate(generate_fn() ): A__ , A__ : Union[str, Any] =expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def __lowerCamelCase ( ) -> List[Any]: """simple docstring""" A__ : Any =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() A__ : Union[str, Any] =spark.range(10 ).repartition(1 ) A__ : List[str] =SparkExamplesIterable(__snake_case ) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(__snake_case ): assert row_id == f"0_{i}" assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def __lowerCamelCase ( ) -> Any: """simple docstring""" A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() A__ : Union[str, Any] =spark.range(30 ).repartition(3 ) # Mock the generator so that shuffle reverses the partition indices. 
with patch("""numpy.random.Generator""" ) as generator_mock: A__ : Tuple =lambda __snake_case : x.reverse() A__ : List[str] =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [2, 1, 0] ) A__ : Union[str, Any] =SparkExamplesIterable(__snake_case ).shuffle_data_sources(__snake_case ) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(__snake_case ): A__ , A__ : List[Any] =expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def __lowerCamelCase ( ) -> Optional[Any]: """simple docstring""" A__ : List[Any] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() A__ : Any =spark.range(20 ).repartition(4 ) # Partitions 0 and 2 A__ : str =SparkExamplesIterable(__snake_case ).shard_data_sources(worker_id=0, num_workers=2 ) assert shard_it_a.n_shards == 2 A__ : Any =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [0, 2] ) for i, (row_id, row_dict) in enumerate(__snake_case ): A__ , A__ : Dict =expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 A__ : Union[str, Any] =SparkExamplesIterable(__snake_case ).shard_data_sources(worker_id=1, num_workers=2 ) assert shard_it_a.n_shards == 2 A__ : Union[str, Any] =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [1, 3] ) for i, (row_id, row_dict) in enumerate(__snake_case ): A__ , A__ : Optional[int] =expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def __lowerCamelCase ( ) -> Any: """simple docstring""" A__ : Optional[int] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() A__ : List[str] =spark.range(100 ).repartition(1 ) A__ : List[Any] =Spark(__snake_case ) # Choose a small max_shard_size for maximum partitioning. spark_builder._repartition_df_if_needed(max_shard_size=1 ) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 100
687
1
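A minimal local sketch of the partition-by-partition collection pattern used throughout the Spark tests above. It assumes a local pyspark installation; only calls that appear in the tests themselves are used.

import pyspark

spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("demo").getOrCreate()
df = spark.range(10).repartition(2)
for part_id in range(df.rdd.getNumPartitions()):
    # SPARK_PARTITION_ID() lets us pull rows one physical partition at a time.
    rows = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
    print(part_id, [row.asDict() for row in rows])
spark.stop()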
'''simple docstring''' def __lowerCamelCase ( __snake_case : list ) -> list: """simple docstring""" if len(__snake_case ) <= 1: return [tuple(__snake_case )] A__ : int =[] def generate(__snake_case : int, __snake_case : list ): if k == 1: res.append(tuple(arr[:] ) ) return generate(k - 1, __snake_case ) for i in range(k - 1 ): if k % 2 == 0: # k is even A__ , A__ : Any =arr[k - 1], arr[i] else: # k is odd A__ , A__ : List[str] =arr[k - 1], arr[0] generate(k - 1, __snake_case ) generate(len(__snake_case ), __snake_case ) return res if __name__ == "__main__": __snake_case : int = input('Enter numbers separated by a comma:\n').strip() __snake_case : Optional[Any] = [int(item) for item in user_input.split(',')] print(heaps(arr))
687
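For reference, a compact readable version of Heap's algorithm as implemented above, with the same recursion and swap scheme but descriptive names.

def heaps(arr: list) -> list:
    # Heap's algorithm: generate all permutations of arr in place,
    # collecting a tuple snapshot each time the recursion bottoms out.
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            # Even k swaps arr[i] with the last element, odd k swaps arr[0].
            if k % 2 == 0:
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res

print(heaps([1, 2, 3]))  # 3! = 6 permutations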
'''simple docstring''' from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __snake_case : int = { 'configuration_trajectory_transformer': [ 'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrajectoryTransformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : str = [ 'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TrajectoryTransformerModel', 'TrajectoryTransformerPreTrainedModel', 'load_tf_weights_in_trajectory_transformer', ] if TYPE_CHECKING: from .configuration_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TrajectoryTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TrajectoryTransformerModel, TrajectoryTransformerPreTrainedModel, load_tf_weights_in_trajectory_transformer, ) else: import sys __snake_case : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
687
1
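A minimal sketch of the lazy-import idea the module above implements with transformers' _LazyModule: attributes resolve on first access, so heavy submodules are only imported when used. This sketch uses module-level __getattr__ (PEP 562) instead of the _LazyModule class, and the module name is hypothetical.

# lazy_pkg.py -- a hypothetical module demonstrating the idea.
import importlib

_import_structure = {"json": ["dumps", "loads"]}  # submodule -> exported names

def __getattr__(name):
    # Called only when `name` is not found normally; import on demand.
    for module_name, exported in _import_structure.items():
        if name in exported:
            module = importlib.import_module(module_name)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

# Usage from another file: `import lazy_pkg; lazy_pkg.dumps({"a": 1})`
# imports the json module only at that point.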
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import List from unittest.mock import Mock import torch from torch.utils.data import DataLoader, IterableDataset, TensorDataset from accelerate.accelerator import Accelerator from accelerate.utils.dataclasses import DistributedType class lowerCamelCase ( lowercase_ ): '''simple docstring''' def __init__( self : List[str] , lowerCAmelCase_ : int ) -> int: '''simple docstring''' A__ : List[str] =data def __iter__( self : List[Any] ) -> int: '''simple docstring''' for element in self.data: yield element def __lowerCamelCase ( __snake_case : Union[str, Any]=True ) -> Optional[int]: """simple docstring""" A__ : List[str] =Accelerator(even_batches=__snake_case ) assert accelerator.num_processes == 2, "this script expects that two GPUs are available" return accelerator def __lowerCamelCase ( __snake_case : Accelerator, __snake_case : int, __snake_case : int, __snake_case : bool = False ) -> str: """simple docstring""" if iterable: A__ : Optional[Any] =DummyIterableDataset(torch.as_tensor(range(__snake_case ) ) ) else: A__ : List[str] =TensorDataset(torch.as_tensor(range(__snake_case ) ) ) A__ : Union[str, Any] =DataLoader(__snake_case, batch_size=__snake_case ) A__ : Any =accelerator.prepare(__snake_case ) return dl def __lowerCamelCase ( __snake_case : Accelerator, __snake_case : int, __snake_case : int, __snake_case : List[int], __snake_case : List[int], ) -> Any: """simple docstring""" A__ : str =create_dataloader(accelerator=__snake_case, dataset_size=__snake_case, batch_size=__snake_case ) A__ : Optional[Any] =[len(batch[0] ) for batch in dl] if accelerator.process_index == 0: assert batch_sizes == process_0_expected_batch_sizes elif accelerator.process_index == 1: assert batch_sizes == process_1_expected_batch_sizes def __lowerCamelCase ( ) -> int: """simple docstring""" A__ : Any =create_accelerator() # without padding, we would expect a different number of batches verify_dataloader_batch_sizes( __snake_case, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1, 1], ) # without padding, we would expect the same number of batches, but different sizes verify_dataloader_batch_sizes( __snake_case, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 2], ) def __lowerCamelCase ( ) -> Tuple: """simple docstring""" A__ : List[Any] =create_accelerator(even_batches=__snake_case ) verify_dataloader_batch_sizes( __snake_case, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1], ) verify_dataloader_batch_sizes( __snake_case, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 1], ) def __lowerCamelCase ( ) -> List[Any]: """simple docstring""" A__ : Dict =create_accelerator(even_batches=__snake_case ) A__ : List[Any] =torch.nn.Linear(1, 1 ) A__ : Optional[int] 
=accelerator.prepare(__snake_case ) A__ : Dict =create_dataloader(__snake_case, dataset_size=3, batch_size=1 ) A__ : List[str] =[] with accelerator.join_uneven_inputs([ddp_model] ): for batch_idx, batch in enumerate(__snake_case ): A__ : Optional[Any] =ddp_model(batch[0].float() ) A__ : Optional[int] =output.sum() loss.backward() batch_idxs.append(__snake_case ) accelerator.wait_for_everyone() if accelerator.process_index == 0: assert batch_idxs == [0, 1] elif accelerator.process_index == 1: assert batch_idxs == [0] def __lowerCamelCase ( __snake_case : str ) -> Any: """simple docstring""" with warnings.catch_warnings(record=__snake_case ) as w: with accelerator.join_uneven_inputs([Mock()] ): pass assert issubclass(w[-1].category, __snake_case ) assert "only supported for multi-GPU" in str(w[-1].message ) def __lowerCamelCase ( ) -> int: """simple docstring""" A__ : Any =True A__ : int =False A__ : List[Any] =create_accelerator(even_batches=__snake_case ) A__ : int =torch.nn.Linear(1, 1 ) A__ : Optional[Any] =accelerator.prepare(__snake_case ) A__ : str =create_dataloader(__snake_case, dataset_size=3, batch_size=1 ) A__ : Optional[int] =create_dataloader(__snake_case, dataset_size=3, batch_size=1 ) with accelerator.join_uneven_inputs([ddp_model], even_batches=__snake_case ): A__ : str =train_dl.batch_sampler.even_batches A__ : str =valid_dl.batch_sampler.even_batches assert train_dl_overridden_value == overridden_even_batches assert valid_dl_overridden_value == overridden_even_batches assert train_dl.batch_sampler.even_batches == default_even_batches assert valid_dl.batch_sampler.even_batches == default_even_batches def __lowerCamelCase ( ) -> Union[str, Any]: """simple docstring""" A__ : Any =True A__ : Optional[int] =False A__ : List[Any] =create_accelerator(even_batches=__snake_case ) A__ : int =torch.nn.Linear(1, 1 ) A__ : Union[str, Any] =accelerator.prepare(__snake_case ) create_dataloader(__snake_case, dataset_size=3, batch_size=1, iterable=__snake_case ) A__ : Optional[Any] =create_dataloader(__snake_case, dataset_size=3, batch_size=1 ) with warnings.catch_warnings(): warnings.filterwarnings("""ignore""" ) try: with accelerator.join_uneven_inputs([ddp_model], even_batches=__snake_case ): A__ : Tuple =batch_dl.batch_sampler.even_batches except AttributeError: # ensure attribute error is not raised when processing iterable dl raise AssertionError assert batch_dl_overridden_value == overridden_even_batches assert batch_dl.batch_sampler.even_batches == default_even_batches def __lowerCamelCase ( ) -> int: """simple docstring""" A__ : Union[str, Any] =create_accelerator() A__ : List[Any] =torch.nn.Linear(1, 1 ) A__ : Union[str, Any] =accelerator.prepare(__snake_case ) create_dataloader(__snake_case, dataset_size=3, batch_size=1, iterable=__snake_case ) with warnings.catch_warnings(record=__snake_case ) as w: with accelerator.join_uneven_inputs([ddp_model], even_batches=__snake_case ): pass assert issubclass(w[-1].category, __snake_case ) assert "only supported for map-style datasets" in str(w[-1].message ) def __lowerCamelCase ( ) -> str: """simple docstring""" A__ : List[Any] =create_accelerator() accelerator.print("""Test that even_batches variable ensures uniform batches across processes""" ) test_default_ensures_even_batch_sizes() accelerator.print("""Run tests with even_batches disabled""" ) test_can_disable_even_batches() accelerator.print("""Test joining uneven inputs""" ) test_can_join_uneven_inputs() accelerator.print("""Test overriding even_batches when joining uneven 
inputs""" ) test_join_can_override_even_batches() accelerator.print("""Test overriding even_batches for mixed dataloader types""" ) test_join_can_override_for_mixed_type_dataloaders() accelerator.print("""Test overriding even_batches raises a warning for iterable dataloaders""" ) test_join_raises_warning_for_iterable_when_overriding_even_batches() accelerator.print("""Test join with non DDP distributed raises warning""" ) A__ : Dict =accelerator.state.distributed_type A__ : Union[str, Any] =DistributedType.FSDP test_join_raises_warning_for_non_ddp_distributed(__snake_case ) A__ : Any =original_state if __name__ == "__main__": main()
687
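A single-process sketch of the even_batches machinery exercised above. It assumes accelerate is installed and runs on CPU; with one process there is nothing to pad across ranks, so the last short batch simply comes through as-is.

import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(even_batches=False)  # don't pad the final batch across ranks
dataset = TensorDataset(torch.arange(7).float())
dl = accelerator.prepare(DataLoader(dataset, batch_size=2))
print([len(batch[0]) for batch in dl])  # [2, 2, 2, 1] with a single process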
'''simple docstring''' import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def __lowerCamelCase ( __snake_case : Dict ) -> List[str]: """simple docstring""" if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class lowerCamelCase ( nn.Module ): '''simple docstring''' def __init__( self : Union[str, Any] , lowerCAmelCase_ : nn.Module , lowerCAmelCase_ : int ) -> str: '''simple docstring''' super().__init__() A__ : Union[str, Any] =module A__ : Union[str, Any] =nn.Sequential( nn.Linear(module.in_features , lowerCAmelCase_ , bias=lowerCAmelCase_ ) , nn.Linear(lowerCAmelCase_ , module.out_features , bias=lowerCAmelCase_ ) , ) A__ : Tuple =(2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=lowerCAmelCase_ ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def lowercase__ ( self : List[str] , lowerCAmelCase_ : Optional[int] , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : int ) -> Dict: '''simple docstring''' return self.module(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ) + self.adapter(lowerCAmelCase_ ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' __snake_case = 'bigscience/bloom-1b7' # Constant values __snake_case = 2.109659552692574 __snake_case = 'Hello my name is' __snake_case = set() EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. 
I' ) EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' ) EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' ) __snake_case = 10 def lowercase__ ( self : Optional[int] ) -> Tuple: '''simple docstring''' # Models and tokenizer A__ : List[Any] =AutoTokenizer.from_pretrained(self.model_name ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' def lowercase__ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' super().setUp() # Models and tokenizer A__ : Optional[int] =AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map="""auto""" ) A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) def lowercase__ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' A__ : str =self.model_abit.config self.assertTrue(hasattr(lowerCAmelCase_ , """quantization_config""" ) ) A__ : Union[str, Any] =config.to_dict() A__ : Any =config.to_diff_dict() A__ : Optional[Any] =config.to_json_string() def lowercase__ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' from bitsandbytes.nn import Paramsabit A__ : int =self.model_fpaa.get_memory_footprint() A__ : Optional[Any] =self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) A__ : Tuple =get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def lowercase__ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(lowerCAmelCase_ , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def lowercase__ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' A__ : int =self.tokenizer(self.input_text , return_tensors="""pt""" ) A__ : Union[str, Any] =self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS ) def lowercase__ ( self : Optional[Any] ) -> Tuple: '''simple docstring''' A__ : Tuple =BitsAndBytesConfig() A__ : Tuple =True A__ : Optional[int] =AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=lowerCAmelCase_ , device_map="""auto""" ) A__ : Union[str, Any] =self.tokenizer(self.input_text , return_tensors="""pt""" ) A__ : Optional[Any] =model_abit_from_config.generate( input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS ) def lowercase__ ( self : str ) -> List[str]: '''simple docstring''' with self.assertRaises(lowerCAmelCase_ ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(lowerCAmelCase_ ) def lowercase__ ( self : List[str] ) -> Any: '''simple docstring''' A__ : Tuple =BitsAndBytesConfig() with self.assertRaises(lowerCAmelCase_ ): A__ : Dict =AutoModelForCausalLM.from_pretrained( self.model_name , 
quantization_config=lowerCAmelCase_ , load_in_abit=lowerCAmelCase_ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , ) def lowercase__ ( self : List[Any] ) -> Optional[int]: '''simple docstring''' with self.assertRaises(lowerCAmelCase_ ): # Tries with `str` self.model_abit.to("""cpu""" ) with self.assertRaises(lowerCAmelCase_ ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(lowerCAmelCase_ ): # Tries with a `device` self.model_abit.to(torch.device("""cuda:0""" ) ) with self.assertRaises(lowerCAmelCase_ ): # Tries with a `device` self.model_abit.float() with self.assertRaises(lowerCAmelCase_ ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" ) A__ : Optional[Any] =self.model_fpaa.to(torch.floataa ) A__ : Dict =self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error A__ : List[str] =self.model_fpaa.to("""cpu""" ) # Check this does not throw an error A__ : List[str] =self.model_fpaa.half() # Check this does not throw an error A__ : int =self.model_fpaa.float() def lowercase__ ( self : int ) -> Dict: '''simple docstring''' A__ : Dict =AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @classmethod def lowercase__ ( cls : List[str] ) -> Union[str, Any]: '''simple docstring''' A__ : Tuple ="""t5-small""" A__ : Optional[Any] ="""google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense A__ : Optional[int] =AutoTokenizer.from_pretrained(cls.model_name ) A__ : Optional[int] ="""Translate in German: Hello, my dog is cute""" def lowercase__ ( self : Optional[int] ) -> Dict: '''simple docstring''' gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : Dict ) -> Optional[Any]: '''simple docstring''' from transformers import TaForConditionalGeneration A__ : Optional[int] =TaForConditionalGeneration._keep_in_fpaa_modules A__ : Optional[Any] =None # test with `t5-small` A__ : str =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) A__ : List[str] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) A__ : Optional[Any] =model.generate(**lowerCAmelCase_ ) # test with `flan-t5-small` A__ : List[str] =TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) A__ : Tuple =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) A__ : Union[str, Any] =model.generate(**lowerCAmelCase_ ) A__ : Dict =modules def lowercase__ ( self : str ) -> Optional[int]: '''simple docstring''' import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` A__ : Optional[int] =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) A__ : Any =model.generate(**lowerCAmelCase_ ) # test with 
`flan-t5-small` A__ : Union[str, Any] =TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) A__ : Optional[int] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) A__ : Dict =model.generate(**lowerCAmelCase_ ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' def lowercase__ ( self : List[Any] ) -> int: '''simple docstring''' super().setUp() # model_name A__ : Any ="""bigscience/bloom-560m""" A__ : List[Any] ="""t5-small""" # Different types of model A__ : Dict =AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) # Sequence classification model A__ : List[Any] =AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) # CausalLM model A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) # Seq2seq model A__ : List[str] =AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) def lowercase__ ( self : Dict ) -> int: '''simple docstring''' del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : str ) -> List[Any]: '''simple docstring''' from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' def lowercase__ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' super().setUp() def lowercase__ ( self : Optional[Any] ) -> int: '''simple docstring''' del self.pipe gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : Any ) -> Union[str, Any]: '''simple docstring''' A__ : Dict =pipeline( """text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass A__ : Optional[int] =self.pipe(self.input_text ) self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class lowerCamelCase ( lowercase_ ): '''simple docstring''' def lowercase__ ( self : str ) -> int: '''simple docstring''' super().setUp() def lowercase__ ( self : Tuple ) -> Tuple: '''simple docstring''' A__ : int =AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""balanced""" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model A__ : str =self.tokenizer(self.input_text , return_tensors="""pt""" ) # Second real batch A__ : Any =model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' def lowercase__ ( self : int ) -> Optional[Any]: '''simple docstring''' A__ : Union[str, Any] ="""facebook/opt-350m""" super().setUp() def 
lowercase__ ( self : List[str] ) -> Dict: '''simple docstring''' if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ): return # Step 1: freeze all parameters A__ : Optional[Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): A__ : int =False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability A__ : Dict =param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(lowerCAmelCase_ ) ): A__ : int =LoRALayer(module.q_proj , rank=16 ) A__ : Any =LoRALayer(module.k_proj , rank=16 ) A__ : Union[str, Any] =LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch A__ : List[Any] =self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): A__ : Any =model.forward(**lowerCAmelCase_ ) out.logits.norm().backward() for module in model.modules(): if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(lowerCAmelCase_ , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = 'gpt2-xl' __snake_case = 3.3191854854152187
687
1
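A CPU-runnable sketch of the LoRA-style adapter the 8-bit training test above wraps around frozen linear layers: a zero-initialized low-rank bottleneck added to the frozen base projection, so only the adapter receives useful gradients. Plain torch; rank and sizes are toy values.

import torch
import torch.nn as nn

class LoRALayer(nn.Module):
    def __init__(self, module: nn.Linear, rank: int):
        super().__init__()
        self.module = module  # frozen base projection
        # Low-rank bottleneck: in_features -> rank -> out_features, no biases.
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        nn.init.normal_(self.adapter[0].weight, std=0.02)
        nn.init.zeros_(self.adapter[1].weight)  # adapter starts by adding zero

    def forward(self, x, *args, **kwargs):
        return self.module(x, *args, **kwargs) + self.adapter(x)

base = nn.Linear(32, 32)
base.weight.requires_grad_(False)
wrapped = LoRALayer(base, rank=4)
out = wrapped(torch.randn(2, 32))
out.sum().backward()
assert wrapped.adapter[1].weight.grad.abs().sum() > 0  # the adapter trains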
'''simple docstring''' from __future__ import annotations from decimal import Decimal from math import * # noqa: F403 from sympy import diff def __lowerCamelCase ( __snake_case : str, __snake_case : float | Decimal, __snake_case : float = 10**-10 ) -> float: """simple docstring""" A__ : str =a while True: A__ : List[Any] =Decimal(__snake_case ) - ( Decimal(eval(__snake_case ) ) / Decimal(eval(str(diff(__snake_case ) ) ) ) # noqa: S307 ) # This number dictates the accuracy of the answer if abs(eval(__snake_case ) ) < precision: # noqa: S307 return float(__snake_case ) # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""") # Find root of polynomial print(F"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}""") # Find value of e (the root of log(x) - 1 = 0) print(F"""The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}""") # Exponential Roots print(F"""The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}""")
687
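A sketch of the same Newton-Raphson iteration without eval(), using sympy to lambdify the function and its symbolic derivative. Assumes sympy is installed; the function and starting guess mirror the sin(x) example above.

from sympy import diff, lambdify, symbols, sin

x = symbols("x")
expr = sin(x)
f = lambdify(x, expr)
f_prime = lambdify(x, diff(expr, x))

guess = 2.0
for _ in range(50):
    # Newton step: x_{n+1} = x_n - f(x_n) / f'(x_n)
    guess -= f(guess) / f_prime(guess)
    if abs(f(guess)) < 1e-10:
        break
print(guess)  # ~3.141592653589793, the root of sin(x) reached from 2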
'''simple docstring''' import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor __snake_case : Optional[int] = logging.get_logger(__name__) class lowerCamelCase ( lowercase_ ): '''simple docstring''' def __init__( self : Tuple , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : int ) -> None: '''simple docstring''' warnings.warn( """The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use YolosImageProcessor instead.""" , lowerCAmelCase_ , ) super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
687
1
'''simple docstring''' def __lowerCamelCase ( __snake_case : int = 4_000_000 ) -> int: """simple docstring""" A__ : Any =[0, 1] A__ : Dict =0 while fib[i] <= n: fib.append(fib[i] + fib[i + 1] ) if fib[i + 2] > n: break i += 1 A__ : int =0 for j in range(len(__snake_case ) - 1 ): if fib[j] % 2 == 0: total += fib[j] return total if __name__ == "__main__": print(F"""{solution() = }""")
687
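The Project Euler sum above can also skip the parity check entirely: every third Fibonacci number is even (F3 = 2, F6 = 8, ...), and the even terms satisfy E(k) = 4*E(k-1) + E(k-2). A quick sketch of that variant:

def even_fib_sum(limit: int = 4_000_000) -> int:
    total, a, b = 0, 2, 8  # the first two even Fibonacci numbers
    while a <= limit:
        total += a
        a, b = b, 4 * b + a  # next even term: E(k) = 4*E(k-1) + E(k-2)
    return total

print(even_fib_sum())  # 4613732, matching the brute-force version above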
'''simple docstring''' import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCamelCase : '''simple docstring''' def __init__( self : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple=13 , lowerCAmelCase_ : Any=7 , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : str=99 , lowerCAmelCase_ : int=0 , lowerCAmelCase_ : str=32 , lowerCAmelCase_ : List[str]=5 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : List[Any]=5_12 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : int=2 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : List[str]="last" , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[str]=0 , ) -> Tuple: '''simple docstring''' A__ : Tuple =parent A__ : Any =batch_size A__ : List[str] =seq_length A__ : Optional[Any] =is_training A__ : Dict =use_input_lengths A__ : int =use_token_type_ids A__ : Union[str, Any] =use_labels A__ : Optional[Any] =gelu_activation A__ : List[Any] =sinusoidal_embeddings A__ : List[Any] =causal A__ : str =asm A__ : Tuple =n_langs A__ : Dict =vocab_size A__ : Optional[Any] =n_special A__ : Tuple =hidden_size A__ : Dict =num_hidden_layers A__ : int =num_attention_heads A__ : Optional[Any] =hidden_dropout_prob A__ : Optional[Any] =attention_probs_dropout_prob A__ : Optional[int] =max_position_embeddings A__ : Optional[int] =type_sequence_label_size A__ : Tuple =initializer_range A__ : Any =num_labels A__ : str =num_choices A__ : Optional[int] =summary_type A__ : int =use_proj A__ : Tuple =scope A__ : Union[str, Any] =bos_token_id def lowercase__ ( self : Any ) -> Optional[Any]: '''simple docstring''' A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ : Dict =random_attention_mask([self.batch_size, self.seq_length] ) A__ : Tuple =None if self.use_input_lengths: A__ : Tuple =( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length A__ : Optional[Any] =None if self.use_token_type_ids: A__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) A__ : Any =None A__ : Tuple =None A__ : Optional[Any] =None if self.use_labels: A__ : List[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A__ : Union[str, Any] =ids_tensor([self.batch_size] , 2 ).float() A__ : str =ids_tensor([self.batch_size] , self.num_choices ) A__ : Union[str, Any] =self.get_config() return ( config, 
input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , ) -> Optional[Any]: '''simple docstring''' A__ : List[str] =XLMModel(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : Dict =model(lowerCAmelCase_ , lengths=lowerCAmelCase_ , langs=lowerCAmelCase_ ) A__ : Any =model(lowerCAmelCase_ , langs=lowerCAmelCase_ ) A__ : Tuple =model(lowerCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , ) -> Union[str, Any]: '''simple docstring''' A__ : List[Any] =XLMWithLMHeadModel(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : Tuple =model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase__ ( self : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] , ) -> str: '''simple docstring''' A__ : Union[str, Any] =XLMForQuestionAnsweringSimple(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : List[str] =model(lowerCAmelCase_ ) A__ : Optional[int] =model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ ) A__ : List[Any] =outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase__ ( self : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int , ) -> Any: '''simple docstring''' A__ : str =XLMForQuestionAnswering(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : List[str] =model(lowerCAmelCase_ ) A__ : Tuple =model( lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ 
, cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , p_mask=lowerCAmelCase_ , ) A__ : Optional[Any] =model( lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , ) ((A__) , ) : List[Any] =result_with_labels.to_tuple() A__ : Tuple =model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ ) ((A__) , ) : Tuple =result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def lowercase__ ( self : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int , ) -> Any: '''simple docstring''' A__ : Union[str, Any] =XLMForSequenceClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : str =model(lowerCAmelCase_ ) A__ : List[Any] =model(lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowercase__ ( self : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , ) -> Dict: '''simple docstring''' A__ : int =self.num_labels A__ : Tuple =XLMForTokenClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : Any =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , ) -> List[str]: '''simple docstring''' A__ : Optional[Any] =self.num_choices A__ : Optional[int] =XLMForMultipleChoice(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : Optional[int] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A__ : str =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A__ : Union[str, Any] =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A__ : Union[str, Any] =model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowercase__ ( self : str ) -> List[str]: '''simple docstring''' A__ : Dict =self.prepare_config_and_inputs() ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , 
( A__ ) , ( A__ ) , ) : Optional[int] =config_and_inputs A__ : Any ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths} return config, inputs_dict @require_torch class lowerCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ): '''simple docstring''' __snake_case = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) __snake_case = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable __snake_case = ( { 'feature-extraction': XLMModel, 'fill-mask': XLMWithLMHeadModel, 'question-answering': XLMForQuestionAnsweringSimple, 'text-classification': XLMForSequenceClassification, 'text-generation': XLMWithLMHeadModel, 'token-classification': XLMForTokenClassification, 'zero-shot': XLMForSequenceClassification, } if is_torch_available() else {} ) def lowercase__ ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] ) -> Union[str, Any]: '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("""Fast""" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str]=False ) -> int: '''simple docstring''' A__ : Tuple =super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": A__ : List[str] =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ ) A__ : Any =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ ) return inputs_dict def lowercase__ ( self : Union[str, Any] ) -> str: '''simple docstring''' A__ : Dict =XLMModelTester(self ) A__ : List[str] =ConfigTester(self , config_class=lowerCAmelCase_ , emb_dim=37 ) def lowercase__ ( self : Tuple ) -> List[str]: '''simple docstring''' self.config_tester.run_common_tests() def lowercase__ ( self : str ) -> Optional[int]: '''simple docstring''' A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*lowerCAmelCase_ ) def lowercase__ ( self : Dict ) -> Optional[int]: '''simple docstring''' A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase_ ) def lowercase__ ( self : List[str] ) -> Dict: '''simple docstring''' A__ : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase_ ) def lowercase__ ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase_ ) def lowercase__ ( self : List[Any] ) -> str: '''simple docstring''' A__ : List[str] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase_ ) def 
lowercase__ ( self : Any ) -> Tuple: '''simple docstring''' A__ : Optional[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase_ ) def lowercase__ ( self : Optional[int] ) -> Any: '''simple docstring''' A__ : Optional[int] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase_ ) def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Tuple=1 ) -> Tuple: '''simple docstring''' self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual( [isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase_ ) ) self.assertEqual(len(lowerCAmelCase_ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(lowerCAmelCase_ ): # adds PAD dummy token A__ : Tuple =min_length + idx + 1 A__ : Tuple =min_length + idx + 1 A__ : Dict =( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase_ ) ) def lowercase__ ( self : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Union[str, Any]=1 ) -> Any: '''simple docstring''' self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual( [isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase_ ) , ) self.assertEqual(len(lowerCAmelCase_ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(lowerCAmelCase_ ): # adds PAD dummy token A__ : str =min_length + idx + 1 A__ : List[Any] =(batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase_ ) , ) pass @slow def lowercase__ ( self : int ) -> List[Any]: '''simple docstring''' for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ : Tuple =XLMModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) @require_torch class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowercase__ ( self : Optional[Any] ) -> int: '''simple docstring''' A__ : Any =XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" ) model.to(lowerCAmelCase_ ) A__ : List[Any] =torch.tensor([[14, 4_47]] , dtype=torch.long , device=lowerCAmelCase_ ) # the president A__ : Optional[Any] =[ 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference A__ : Tuple =model.generate(lowerCAmelCase_ , do_sample=lowerCAmelCase_ ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase_ )
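# --- Illustrative usage sketch (not part of the test file above) ---
# A minimal reproduction of the greedy-generation behaviour exercised by the slow
# integration test above, assuming `transformers` and the `xlm-mlm-en-2048`
# checkpoint are available. Nothing here is asserted by the original tests.
import torch
from transformers import XLMWithLMHeadModel

model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
input_ids = torch.tensor([[14, 447]], dtype=torch.long)  # "the president"
output_ids = model.generate(input_ids, do_sample=False)  # greedy decoding
print(output_ids[0].tolist())  # expected to repeat "the president", per the test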
import inspect
import re

from transformers.utils import direct_transformers_import

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
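# --- Illustrative sketch: what the checkpoint regex matches ---
# A self-contained demonstration of the `_re_checkpoint` pattern above, using the
# example already quoted in the file's comments:
import re

_demo_pattern = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
_demo_docstring = "See [bert-base-uncased](https://huggingface.co/bert-base-uncased)."
assert _demo_pattern.findall(_demo_docstring) == [
    ("bert-base-uncased", "https://huggingface.co/bert-base-uncased")
]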
'''simple docstring''' import contextlib import copy import random from typing import Any, Dict, Iterable, Optional, Union import numpy as np import torch from .utils import deprecate, is_transformers_available if is_transformers_available(): import transformers def __lowerCamelCase ( __snake_case : int ) -> Optional[int]: """simple docstring""" random.seed(__snake_case ) np.random.seed(__snake_case ) torch.manual_seed(__snake_case ) torch.cuda.manual_seed_all(__snake_case ) # ^^ safe to call this function even if cuda is not available class lowerCamelCase : '''simple docstring''' def __init__( self : Optional[Any] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] , lowerCAmelCase_ : float = 0.9999 , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Union[float, int] = 1.0 , lowerCAmelCase_ : Union[float, int] = 2 / 3 , lowerCAmelCase_ : Optional[Any] = None , lowerCAmelCase_ : Dict[str, Any] = None , **lowerCAmelCase_ : Optional[Any] , ) -> List[str]: '''simple docstring''' if isinstance(lowerCAmelCase_ , torch.nn.Module ): A__ : Optional[Any] =( """Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. """ """Please pass the parameters of the module instead.""" ) deprecate( """passing a `torch.nn.Module` to `ExponentialMovingAverage`""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ , ) A__ : List[str] =parameters.parameters() # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility A__ : int =True if kwargs.get("""max_value""" , lowerCAmelCase_ ) is not None: A__ : Tuple ="""The `max_value` argument is deprecated. Please use `decay` instead.""" deprecate("""max_value""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ ) A__ : Union[str, Any] =kwargs["""max_value"""] if kwargs.get("""min_value""" , lowerCAmelCase_ ) is not None: A__ : List[str] ="""The `min_value` argument is deprecated. Please use `min_decay` instead.""" deprecate("""min_value""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ ) A__ : Optional[Any] =kwargs["""min_value"""] A__ : Any =list(lowerCAmelCase_ ) A__ : int =[p.clone().detach() for p in parameters] if kwargs.get("""device""" , lowerCAmelCase_ ) is not None: A__ : List[str] ="""The `device` argument is deprecated. 
Please use `to` instead.""" deprecate("""device""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ ) self.to(device=kwargs["""device"""] ) A__ : Optional[int] =None A__ : Any =decay A__ : List[Any] =min_decay A__ : Optional[int] =update_after_step A__ : List[str] =use_ema_warmup A__ : str =inv_gamma A__ : Union[str, Any] =power A__ : str =0 A__ : str =None # set in `step()` A__ : List[str] =model_cls A__ : Optional[int] =model_config @classmethod def lowercase__ ( cls : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict ) -> "EMAModel": '''simple docstring''' A__ , A__ : Tuple =model_cls.load_config(lowerCAmelCase_ , return_unused_kwargs=lowerCAmelCase_ ) A__ : Optional[Any] =model_cls.from_pretrained(lowerCAmelCase_ ) A__ : Optional[Any] =cls(model.parameters() , model_cls=lowerCAmelCase_ , model_config=model.config ) ema_model.load_state_dict(lowerCAmelCase_ ) return ema_model def lowercase__ ( self : List[str] , lowerCAmelCase_ : Tuple ) -> List[Any]: '''simple docstring''' if self.model_cls is None: raise ValueError("""`save_pretrained` can only be used if `model_cls` was defined at __init__.""" ) if self.model_config is None: raise ValueError("""`save_pretrained` can only be used if `model_config` was defined at __init__.""" ) A__ : Optional[int] =self.model_cls.from_config(self.model_config ) A__ : Optional[Any] =self.state_dict() state_dict.pop("""shadow_params""" , lowerCAmelCase_ ) model.register_to_config(**lowerCAmelCase_ ) self.copy_to(model.parameters() ) model.save_pretrained(lowerCAmelCase_ ) def lowercase__ ( self : Dict , lowerCAmelCase_ : int ) -> float: '''simple docstring''' A__ : Optional[int] =max(0 , optimization_step - self.update_after_step - 1 ) if step <= 0: return 0.0 if self.use_ema_warmup: A__ : List[Any] =1 - (1 + step / self.inv_gamma) ** -self.power else: A__ : Union[str, Any] =(1 + step) / (10 + step) A__ : str =min(lowerCAmelCase_ , self.decay ) # make sure decay is not smaller than min_decay A__ : int =max(lowerCAmelCase_ , self.min_decay ) return cur_decay_value @torch.no_grad() def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> Optional[Any]: '''simple docstring''' if isinstance(lowerCAmelCase_ , torch.nn.Module ): A__ : Any =( """Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. """ """Please pass the parameters of the module instead.""" ) deprecate( """passing a `torch.nn.Module` to `ExponentialMovingAverage.step`""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ , ) A__ : Optional[int] =parameters.parameters() A__ : Dict =list(lowerCAmelCase_ ) self.optimization_step += 1 # Compute the decay factor for the exponential moving average. 
A__ : Any =self.get_decay(self.optimization_step ) A__ : Optional[int] =decay A__ : List[str] =1 - decay A__ : str =contextlib.nullcontext if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): import deepspeed for s_param, param in zip(self.shadow_params , lowerCAmelCase_ ): if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): A__ : List[Any] =deepspeed.zero.GatheredParameters(lowerCAmelCase_ , modifier_rank=lowerCAmelCase_ ) with context_manager(): if param.requires_grad: s_param.sub_(one_minus_decay * (s_param - param) ) else: s_param.copy_(lowerCAmelCase_ ) def lowercase__ ( self : Tuple , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None: '''simple docstring''' A__ : Optional[Any] =list(lowerCAmelCase_ ) for s_param, param in zip(self.shadow_params , lowerCAmelCase_ ): param.data.copy_(s_param.to(param.device ).data ) def lowercase__ ( self : int , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : List[Any]=None ) -> None: '''simple docstring''' A__ : str =[ p.to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ) if p.is_floating_point() else p.to(device=lowerCAmelCase_ ) for p in self.shadow_params ] def lowercase__ ( self : Optional[Any] ) -> dict: '''simple docstring''' return { "decay": self.decay, "min_decay": self.min_decay, "optimization_step": self.optimization_step, "update_after_step": self.update_after_step, "use_ema_warmup": self.use_ema_warmup, "inv_gamma": self.inv_gamma, "power": self.power, "shadow_params": self.shadow_params, } def lowercase__ ( self : Tuple , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None: '''simple docstring''' A__ : List[str] =[param.detach().cpu().clone() for param in parameters] def lowercase__ ( self : List[str] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None: '''simple docstring''' if self.temp_stored_params is None: raise RuntimeError("""This ExponentialMovingAverage has no `store()`ed weights """ """to `restore()`""" ) for c_param, param in zip(self.temp_stored_params , lowerCAmelCase_ ): param.data.copy_(c_param.data ) # Better memory-wise. 
A__ : List[str] =None def lowercase__ ( self : List[str] , lowerCAmelCase_ : dict ) -> None: '''simple docstring''' A__ : List[Any] =copy.deepcopy(lowerCAmelCase_ ) A__ : List[Any] =state_dict.get("""decay""" , self.decay ) if self.decay < 0.0 or self.decay > 1.0: raise ValueError("""Decay must be between 0 and 1""" ) A__ : List[Any] =state_dict.get("""min_decay""" , self.min_decay ) if not isinstance(self.min_decay , lowerCAmelCase_ ): raise ValueError("""Invalid min_decay""" ) A__ : Tuple =state_dict.get("""optimization_step""" , self.optimization_step ) if not isinstance(self.optimization_step , lowerCAmelCase_ ): raise ValueError("""Invalid optimization_step""" ) A__ : Any =state_dict.get("""update_after_step""" , self.update_after_step ) if not isinstance(self.update_after_step , lowerCAmelCase_ ): raise ValueError("""Invalid update_after_step""" ) A__ : str =state_dict.get("""use_ema_warmup""" , self.use_ema_warmup ) if not isinstance(self.use_ema_warmup , lowerCAmelCase_ ): raise ValueError("""Invalid use_ema_warmup""" ) A__ : str =state_dict.get("""inv_gamma""" , self.inv_gamma ) if not isinstance(self.inv_gamma , (float, int) ): raise ValueError("""Invalid inv_gamma""" ) A__ : Tuple =state_dict.get("""power""" , self.power ) if not isinstance(self.power , (float, int) ): raise ValueError("""Invalid power""" ) A__ : Tuple =state_dict.get("""shadow_params""" , lowerCAmelCase_ ) if shadow_params is not None: A__ : List[str] =shadow_params if not isinstance(self.shadow_params , lowerCAmelCase_ ): raise ValueError("""shadow_params must be a list""" ) if not all(isinstance(lowerCAmelCase_ , torch.Tensor ) for p in self.shadow_params ): raise ValueError("""shadow_params must all be Tensors""" )
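# --- Illustrative usage sketch ---
# A minimal EMA training-loop pattern for the class above (named `EMAModel` in the
# diffusers source; the class name is mangled in this dump). The tiny model and
# optimizer below are stand-ins invented for illustration.
import torch

net = torch.nn.Linear(4, 4)
ema = EMAModel(net.parameters(), decay=0.9999, use_ema_warmup=True)
opt = torch.optim.SGD(net.parameters(), lr=1e-3)
for _ in range(10):
    opt.zero_grad()
    net(torch.randn(2, 4)).sum().backward()
    opt.step()
    ema.step(net.parameters())  # update the shadow parameters after each optimizer step
ema.copy_to(net.parameters())  # bake the averaged weights back into the live model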
"""Tarjan's algorithm for finding the strongly connected components of a directed graph."""
from collections import deque


def tarjan(g):
    """Return the strongly connected components of the adjacency list ``g`` in O(V + E)."""
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n, edges):
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
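# --- Second worked example (added for illustration) ---
# A 3-cycle {0, 1, 2} with an extra edge into a sink vertex 3: the sink forms its
# own component and is emitted first, since Tarjan pops components bottom-up.
extra_graph = create_graph(4, [(0, 1), (1, 2), (2, 0), (2, 3)])
assert tarjan(extra_graph) == [[3], [2, 1, 0]]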
from __future__ import annotations

import requests

valid_terms = set(
    """approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)


def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Fetch posts from a subreddit's JSON feed, optionally filtering to ``wanted_data`` fields."""
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }

    return data_dict


if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try after some time
    print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
'''simple docstring''' import unittest from dataclasses import dataclass import pytest from accelerate.commands.config.config_args import SageMakerConfig from accelerate.utils import ComputeEnvironment from accelerate.utils.launch import _convert_nargs_to_dict @dataclass class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = ComputeEnvironment.AMAZON_SAGEMAKER __snake_case = True __snake_case = 'ml.p3.2xlarge' __snake_case = 'accelerate_sagemaker_execution_role' __snake_case = 'hf-sm' __snake_case = 'us-east-1' __snake_case = 1 __snake_case = 'accelerate-sagemaker-1' __snake_case = '1.6' __snake_case = '4.4' __snake_case = 'train.py' __snake_case = [ '--model_name_or_path', 'bert', '--do_train', 'False', '--epochs', '3', '--learning_rate', '5e-5', '--max_steps', '50.5', ] __snake_case = [ '--model_name_or_path', 'bert', '--do_train', '--do_test', 'False', '--do_predict', '--epochs', '3', '--learning_rate', '5e-5', '--max_steps', '50.5', ] class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowercase__ ( self : Any ) -> Optional[Any]: '''simple docstring''' # If no defaults are changed, `to_kwargs` returns an empty dict. A__ : Optional[Any] =_convert_nargs_to_dict(MockLaunchConfig.success_training_script_args ) assert isinstance(converted_args["""model_name_or_path"""] , lowerCAmelCase_ ) assert isinstance(converted_args["""do_train"""] , lowerCAmelCase_ ) assert isinstance(converted_args["""epochs"""] , lowerCAmelCase_ ) assert isinstance(converted_args["""learning_rate"""] , lowerCAmelCase_ ) assert isinstance(converted_args["""max_steps"""] , lowerCAmelCase_ ) with pytest.raises(lowerCAmelCase_ ): _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
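# --- Illustrative sketch (NOT the real accelerate implementation) ---
# Judging only from the assertions above, `_convert_nargs_to_dict` turns an
# argv-style flag/value list into a typed dict. A rough standalone imitation with
# an invented name, reproducing the asserted type coercions:
def convert_nargs_to_dict_sketch(raw_args):
    def coerce(value):
        for cast in (int, float):
            try:
                return cast(value)
            except ValueError:
                pass
        if value in ("True", "False"):
            return value == "True"
        return value

    converted = {}
    for flag, value in zip(raw_args[::2], raw_args[1::2]):
        if not flag.startswith("--"):
            raise ValueError(f"Expected a --flag, got {flag!r} instead")
        converted[flag[2:]] = coerce(value)
    return converted


assert convert_nargs_to_dict_sketch(["--epochs", "3", "--learning_rate", "5e-5"]) == {
    "epochs": 3,
    "learning_rate": 5e-5,
}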
'''simple docstring''' import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) __snake_case : Union[str, Any] = logging.getLogger(__name__) __snake_case : int = tf.data.AUTOTUNE def __lowerCamelCase ( ) -> List[Any]: """simple docstring""" A__ : str =argparse.ArgumentParser(description="""Train a masked language model on TPU.""" ) parser.add_argument( """--pretrained_model_config""", type=__snake_case, default="""roberta-base""", help="""The model config to use. Note that we don't copy the model's weights, only the config!""", ) parser.add_argument( """--tokenizer""", type=__snake_case, default="""unigram-tokenizer-wikitext""", help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""", ) parser.add_argument( """--per_replica_batch_size""", type=__snake_case, default=8, help="""Batch size per TPU core.""", ) parser.add_argument( """--no_tpu""", action="""store_true""", help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""", ) parser.add_argument( """--tpu_name""", type=__snake_case, help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""", default="""local""", ) parser.add_argument( """--tpu_zone""", type=__snake_case, help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""", ) parser.add_argument( """--gcp_project""", type=__snake_case, help="""Google cloud project name. Only used for non-Colab TPU nodes.""" ) parser.add_argument( """--bfloat16""", action="""store_true""", help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""", ) parser.add_argument( """--train_dataset""", type=__snake_case, help="""Path to training dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""", ) parser.add_argument( """--shuffle_buffer_size""", type=__snake_case, default=2**18, help="""Size of the shuffle buffer (in samples)""", ) parser.add_argument( """--eval_dataset""", type=__snake_case, help="""Path to evaluation dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""", ) parser.add_argument( """--num_epochs""", type=__snake_case, default=1, help="""Number of epochs to train for.""", ) parser.add_argument( """--learning_rate""", type=__snake_case, default=1E-4, help="""Learning rate to use for training.""", ) parser.add_argument( """--weight_decay_rate""", type=__snake_case, default=1E-3, help="""Weight decay rate to use for training.""", ) parser.add_argument( """--max_length""", type=__snake_case, default=512, help="""Maximum length of tokenized sequences. 
Should match the setting used in prepare_tfrecord_shards.py""", ) parser.add_argument( """--mlm_probability""", type=__snake_case, default=0.15, help="""Fraction of tokens to mask during training.""", ) parser.add_argument("""--output_dir""", type=__snake_case, required=__snake_case, help="""Path to save model checkpoints to.""" ) parser.add_argument("""--hub_model_id""", type=__snake_case, help="""Model ID to upload to on the Hugging Face Hub.""" ) A__ : Optional[Any] =parser.parse_args() return args def __lowerCamelCase ( __snake_case : Optional[Any] ) -> Union[str, Any]: """simple docstring""" try: if args.tpu_name: A__ : List[Any] =tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name, zone=args.tpu_zone, project=args.gcp_project ) else: A__ : Optional[int] =tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( """Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """ """--gcp_project. When running on a TPU VM, use --tpu_name local.""" ) tf.config.experimental_connect_to_cluster(__snake_case ) tf.tpu.experimental.initialize_tpu_system(__snake_case ) return tpu def __lowerCamelCase ( __snake_case : Optional[int] ) -> Dict: """simple docstring""" A__ : Any =0 for file in file_list: A__ : Optional[int] =file.split("""/""" )[-1] A__ : Union[str, Any] =re.search(r"""-\d+-(\d+)\.tfrecord""", __snake_case ).group(1 ) A__ : str =int(__snake_case ) num_samples += sample_count return num_samples def __lowerCamelCase ( __snake_case : List[str], __snake_case : int, __snake_case : Any, __snake_case : List[Any], __snake_case : int, __snake_case : List[Any]=None ) -> Optional[int]: """simple docstring""" A__ : List[str] =count_samples(__snake_case ) A__ : Union[str, Any] =tf.data.Dataset.from_tensor_slices(__snake_case ) if shuffle: A__ : Optional[int] =dataset.shuffle(len(__snake_case ) ) A__ : List[str] =tf.data.TFRecordDataset(__snake_case, num_parallel_reads=__snake_case ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here A__ : int =dataset.apply(tf.data.experimental.assert_cardinality(__snake_case ) ) A__ : Any =dataset.map(__snake_case, num_parallel_calls=__snake_case ) if shuffle: assert shuffle_buffer_size is not None A__ : List[Any] =dataset.shuffle(args.shuffle_buffer_size ) A__ : int =dataset.batch(__snake_case, drop_remainder=__snake_case ) A__ : Optional[int] =dataset.map(__snake_case, num_parallel_calls=__snake_case ) A__ : Tuple =dataset.prefetch(__snake_case ) return dataset def __lowerCamelCase ( __snake_case : List[Any] ) -> Tuple: """simple docstring""" if not args.no_tpu: A__ : Dict =initialize_tpu(__snake_case ) A__ : int =tf.distribute.TPUStrategy(__snake_case ) else: A__ : List[str] =tf.distribute.OneDeviceStrategy(device="""/gpu:0""" ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" ) A__ : Tuple =AutoTokenizer.from_pretrained(args.tokenizer ) A__ : List[str] =AutoConfig.from_pretrained(args.pretrained_model_config ) A__ : Optional[Any] =tokenizer.vocab_size A__ : Tuple =tf.io.gfile.glob(os.path.join(args.train_dataset, """*.tfrecord""" ) ) if not training_records: raise ValueError(f"No .tfrecord files found in {args.train_dataset}." ) A__ : Optional[Any] =tf.io.gfile.glob(os.path.join(args.eval_dataset, """*.tfrecord""" ) ) if not eval_records: raise ValueError(f"No .tfrecord files found in {args.eval_dataset}." 
) A__ : Optional[Any] =count_samples(__snake_case ) A__ : str =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) A__ : str =steps_per_epoch * args.num_epochs with strategy.scope(): A__ : List[str] =TFAutoModelForMaskedLM.from_config(__snake_case ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built A__ , A__ : Optional[Any] =create_optimizer( num_train_steps=__snake_case, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=__snake_case, metrics=["""accuracy"""] ) def decode_fn(__snake_case : Tuple ): A__ : Dict ={ """input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa, shape=(args.max_length,) ), """attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa, shape=(args.max_length,) ), } return tf.io.parse_single_example(__snake_case, __snake_case ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. A__ : List[Any] =DataCollatorForLanguageModeling( tokenizer=__snake_case, mlm_probability=args.mlm_probability, mlm=__snake_case, return_tensors="""tf""" ) def mask_with_collator(__snake_case : Optional[int] ): # TF really needs an isin() function A__ : Union[str, Any] =( ~tf.cast(batch["""attention_mask"""], tf.bool ) | (batch["""input_ids"""] == tokenizer.cls_token_id) | (batch["""input_ids"""] == tokenizer.sep_token_id) ) A__ , A__ : List[str] =data_collator.tf_mask_tokens( batch["""input_ids"""], vocab_size=len(__snake_case ), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=__snake_case, ) return batch A__ : List[Any] =args.per_replica_batch_size * strategy.num_replicas_in_sync A__ : List[str] =prepare_dataset( __snake_case, decode_fn=__snake_case, mask_fn=__snake_case, batch_size=__snake_case, shuffle=__snake_case, shuffle_buffer_size=args.shuffle_buffer_size, ) A__ : List[str] =prepare_dataset( __snake_case, decode_fn=__snake_case, mask_fn=__snake_case, batch_size=__snake_case, shuffle=__snake_case, ) A__ : Tuple =[] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=__snake_case ) ) model.fit( __snake_case, validation_data=__snake_case, epochs=args.num_epochs, callbacks=__snake_case, ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": __snake_case : str = parse_args() main(args)
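# --- Illustrative invocation (flags taken from the argparse definition above) ---
# A hypothetical launch on a TPU VM; the script filename, bucket paths, and output
# directory are placeholders, not real resources:
#
#   python train_mlm_tpu.py \
#       --pretrained_model_config roberta-base \
#       --tokenizer unigram-tokenizer-wikitext \
#       --train_dataset gs://my-bucket/train \
#       --eval_dataset gs://my-bucket/eval \
#       --tpu_name local \
#       --bfloat16 \
#       --output_dir ./mlm-checkpoints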
"""
Dijkstra's two-stack algorithm: evaluate a fully parenthesised arithmetic
expression using one operand stack and one operator stack.
  RULE 1: push operands onto the operand stack.
  RULE 2: push operators onto the operator stack.
  RULE 3: ignore left parentheses.
  RULE 4: on a right parenthesis, pop an operator and two operands,
          apply the operator, and push the result back.
  RULE 5: when the input is exhausted, the operand stack holds the answer.
"""
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
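# --- Worked trace (added for illustration) ---
# For the sub-expression "(2 + 3)" the two stacks evolve as follows:
#   read '(' -> RULE 3: ignored       operands: []      operators: []
#   read '2' -> RULE 1                operands: [2]     operators: []
#   read '+' -> RULE 2                operands: [2]     operators: [+]
#   read '3' -> RULE 1                operands: [2, 3]  operators: [+]
#   read ')' -> RULE 4: pop '+', pop 3, pop 2, push 2 + 3 = 5
#   end      -> RULE 5: answer is operand_stack.peek() == 5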
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]


if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
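# --- Illustrative usage sketch ---
# With the lazy module above in place, the torch-backed classes are only imported on
# first attribute access. The tiny config values below are invented so the example
# does not allocate a full-size model:
from transformers import FalconConfig, FalconForCausalLM  # resolved lazily

config = FalconConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=4)
model = FalconForCausalLM(config)  # this access triggers the real torch-backed import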
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = (DEISMultistepScheduler,) __snake_case = (('num_inference_steps', 25),) def lowercase__ ( self : Union[str, Any] , **lowerCAmelCase_ : Tuple ) -> Dict: '''simple docstring''' A__ : int ={ """num_train_timesteps""": 10_00, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", """solver_order""": 2, } config.update(**lowerCAmelCase_ ) return config def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : Optional[int]=0 , **lowerCAmelCase_ : Optional[Any] ) -> Optional[Any]: '''simple docstring''' A__ : Tuple =dict(self.forward_default_kwargs ) A__ : List[Any] =kwargs.pop("""num_inference_steps""" , lowerCAmelCase_ ) A__ : int =self.dummy_sample A__ : Optional[int] =0.1 * sample A__ : Any =[residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A__ : Optional[Any] =self.get_scheduler_config(**lowerCAmelCase_ ) A__ : Optional[int] =scheduler_class(**lowerCAmelCase_ ) scheduler.set_timesteps(lowerCAmelCase_ ) # copy over dummy past residuals A__ : str =dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowerCAmelCase_ ) A__ : Any =scheduler_class.from_pretrained(lowerCAmelCase_ ) new_scheduler.set_timesteps(lowerCAmelCase_ ) # copy over dummy past residuals A__ : Union[str, Any] =dummy_past_residuals[: new_scheduler.config.solver_order] A__ , A__ : Optional[int] =sample, sample for t in range(lowerCAmelCase_ , time_step + scheduler.config.solver_order + 1 ): A__ : List[Any] =scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ).prev_sample A__ : Optional[Any] =new_scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowercase__ ( self : Tuple ) -> int: '''simple docstring''' pass def lowercase__ ( self : Dict , lowerCAmelCase_ : Optional[int]=0 , **lowerCAmelCase_ : Optional[Any] ) -> Optional[Any]: '''simple docstring''' A__ : int =dict(self.forward_default_kwargs ) A__ : List[Any] =kwargs.pop("""num_inference_steps""" , lowerCAmelCase_ ) A__ : Tuple =self.dummy_sample A__ : int =0.1 * sample A__ : Optional[Any] =[residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A__ : int =self.get_scheduler_config() A__ : Tuple =scheduler_class(**lowerCAmelCase_ ) scheduler.set_timesteps(lowerCAmelCase_ ) # copy over dummy past residuals (must be after setting timesteps) A__ : str =dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowerCAmelCase_ ) A__ : Any =scheduler_class.from_pretrained(lowerCAmelCase_ ) # copy over dummy past residuals new_scheduler.set_timesteps(lowerCAmelCase_ ) # copy over dummy past residual (must be after setting timesteps) A__ : List[str] =dummy_past_residuals[: new_scheduler.config.solver_order] A__ : Any =scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ).prev_sample A__ : Dict =new_scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ).prev_sample assert 
torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowercase__ ( self : Tuple , lowerCAmelCase_ : Dict=None , **lowerCAmelCase_ : Optional[int] ) -> Optional[Any]: '''simple docstring''' if scheduler is None: A__ : Tuple =self.scheduler_classes[0] A__ : List[str] =self.get_scheduler_config(**lowerCAmelCase_ ) A__ : Tuple =scheduler_class(**lowerCAmelCase_ ) A__ : str =self.scheduler_classes[0] A__ : Optional[Any] =self.get_scheduler_config(**lowerCAmelCase_ ) A__ : Tuple =scheduler_class(**lowerCAmelCase_ ) A__ : Union[str, Any] =10 A__ : List[str] =self.dummy_model() A__ : str =self.dummy_sample_deter scheduler.set_timesteps(lowerCAmelCase_ ) for i, t in enumerate(scheduler.timesteps ): A__ : Dict =model(lowerCAmelCase_ , lowerCAmelCase_ ) A__ : List[str] =scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).prev_sample return sample def lowercase__ ( self : int ) -> str: '''simple docstring''' A__ : int =dict(self.forward_default_kwargs ) A__ : str =kwargs.pop("""num_inference_steps""" , lowerCAmelCase_ ) for scheduler_class in self.scheduler_classes: A__ : List[Any] =self.get_scheduler_config() A__ : List[Any] =scheduler_class(**lowerCAmelCase_ ) A__ : Optional[int] =self.dummy_sample A__ : Dict =0.1 * sample if num_inference_steps is not None and hasattr(lowerCAmelCase_ , """set_timesteps""" ): scheduler.set_timesteps(lowerCAmelCase_ ) elif num_inference_steps is not None and not hasattr(lowerCAmelCase_ , """set_timesteps""" ): A__ : List[str] =num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) A__ : int =[residual + 0.2, residual + 0.15, residual + 0.10] A__ : Union[str, Any] =dummy_past_residuals[: scheduler.config.solver_order] A__ : Dict =scheduler.timesteps[5] A__ : Union[str, Any] =scheduler.timesteps[6] A__ : List[Any] =scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ).prev_sample A__ : Dict =scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowercase__ ( self : Optional[int] ) -> str: '''simple docstring''' # make sure that iterating over schedulers with same config names gives same results # for defaults A__ : Tuple =DEISMultistepScheduler(**self.get_scheduler_config() ) A__ : Optional[int] =self.full_loop(scheduler=lowerCAmelCase_ ) A__ : List[str] =torch.mean(torch.abs(lowerCAmelCase_ ) ) assert abs(result_mean.item() - 0.23916 ) < 1e-3 A__ : List[str] =DPMSolverSinglestepScheduler.from_config(scheduler.config ) A__ : Tuple =DPMSolverMultistepScheduler.from_config(scheduler.config ) A__ : Any =UniPCMultistepScheduler.from_config(scheduler.config ) A__ : Dict =DEISMultistepScheduler.from_config(scheduler.config ) A__ : Optional[int] =self.full_loop(scheduler=lowerCAmelCase_ ) A__ : Optional[int] =torch.mean(torch.abs(lowerCAmelCase_ ) ) assert abs(result_mean.item() - 0.23916 ) < 1e-3 def lowercase__ ( self : Any ) -> Dict: '''simple docstring''' for timesteps in [25, 50, 1_00, 9_99, 10_00]: self.check_over_configs(num_train_timesteps=lowerCAmelCase_ ) def lowercase__ ( self : int ) -> List[Any]: '''simple docstring''' self.check_over_configs(thresholding=lowerCAmelCase_ ) for order in [1, 2, 3]: for solver_type in ["logrho"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=lowerCAmelCase_ , prediction_type=lowerCAmelCase_ 
, sample_max_value=lowerCAmelCase_ , algorithm_type="""deis""" , solver_order=lowerCAmelCase_ , solver_type=lowerCAmelCase_ , ) def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCAmelCase_ ) def lowercase__ ( self : Tuple ) -> Optional[Any]: '''simple docstring''' for algorithm_type in ["deis"]: for solver_type in ["logrho"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=lowerCAmelCase_ , solver_type=lowerCAmelCase_ , prediction_type=lowerCAmelCase_ , algorithm_type=lowerCAmelCase_ , ) A__ : Tuple =self.full_loop( solver_order=lowerCAmelCase_ , solver_type=lowerCAmelCase_ , prediction_type=lowerCAmelCase_ , algorithm_type=lowerCAmelCase_ , ) assert not torch.isnan(lowerCAmelCase_ ).any(), "Samples have nan numbers" def lowercase__ ( self : Dict ) -> int: '''simple docstring''' self.check_over_configs(lower_order_final=lowerCAmelCase_ ) self.check_over_configs(lower_order_final=lowerCAmelCase_ ) def lowercase__ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]: self.check_over_forward(num_inference_steps=lowerCAmelCase_ , time_step=0 ) def lowercase__ ( self : Dict ) -> Any: '''simple docstring''' A__ : Union[str, Any] =self.full_loop() A__ : List[Any] =torch.mean(torch.abs(lowerCAmelCase_ ) ) assert abs(result_mean.item() - 0.23916 ) < 1e-3 def lowercase__ ( self : Tuple ) -> Dict: '''simple docstring''' A__ : List[Any] =self.full_loop(prediction_type="""v_prediction""" ) A__ : Dict =torch.mean(torch.abs(lowerCAmelCase_ ) ) assert abs(result_mean.item() - 0.091 ) < 1e-3 def lowercase__ ( self : Any ) -> str: '''simple docstring''' A__ : Dict =self.scheduler_classes[0] A__ : Any =self.get_scheduler_config(thresholding=lowerCAmelCase_ , dynamic_thresholding_ratio=0 ) A__ : Union[str, Any] =scheduler_class(**lowerCAmelCase_ ) A__ : List[Any] =10 A__ : str =self.dummy_model() A__ : List[Any] =self.dummy_sample_deter.half() scheduler.set_timesteps(lowerCAmelCase_ ) for i, t in enumerate(scheduler.timesteps ): A__ : Optional[int] =model(lowerCAmelCase_ , lowerCAmelCase_ ) A__ : Any =scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).prev_sample assert sample.dtype == torch.floataa
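# --- Illustrative usage sketch ---
# Swapping DEIS into a diffusers pipeline via the same `from_config` round-trip the
# tests above exercise. The checkpoint id is a well-known public one, used here only
# as an example:
from diffusers import DEISMultistepScheduler, DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe.scheduler = DEISMultistepScheduler.from_config(pipe.scheduler.config)
image = pipe("an astronaut riding a horse", num_inference_steps=25).images[0]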
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() __snake_case : List[Any] = logging.get_logger(__name__) def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : List[str]=False ) -> str: """simple docstring""" A__ : int =[] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """vit.embeddings.cls_token"""), ("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" A__ : int =[(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Optional[Any], __snake_case : Tuple=False ) -> Optional[Any]: """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: A__ : Any ="""""" else: A__ : Optional[int] ="""vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A__ : str =state_dict.pop(f"blocks.{i}.attn.qkv.weight" ) A__ : Optional[Any] =state_dict.pop(f"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict A__ : Optional[int] =in_proj_weight[ : config.hidden_size, : ] A__ : str =in_proj_bias[: config.hidden_size] A__ : Optional[Any] =in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A__ : Dict =in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A__ : List[Any] =in_proj_weight[ -config.hidden_size :, : ] A__ : Optional[Any] =in_proj_bias[-config.hidden_size :] def 
__lowerCamelCase ( __snake_case : Optional[Any] ) -> Union[str, Any]: """simple docstring""" A__ : List[Any] =["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(__snake_case, __snake_case ) def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : List[Any], __snake_case : List[str] ) -> Union[str, Any]: """simple docstring""" A__ : Dict =dct.pop(__snake_case ) A__ : Tuple =val def __lowerCamelCase ( ) -> int: """simple docstring""" A__ : Tuple ="""http://images.cocodataset.org/val2017/000000039769.jpg""" A__ : Tuple =Image.open(requests.get(__snake_case, stream=__snake_case ).raw ) return im @torch.no_grad() def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Tuple, __snake_case : List[str]=True ) -> str: """simple docstring""" A__ : Tuple =ViTConfig() # patch_size if model_name[-1] == "8": A__ : Optional[Any] =8 # set labels if required if not base_model: A__ : Optional[Any] =1_000 A__ : str ="""huggingface/label-files""" A__ : Any ="""imagenet-1k-id2label.json""" A__ : Tuple =json.load(open(hf_hub_download(__snake_case, __snake_case, repo_type="""dataset""" ), """r""" ) ) A__ : List[str] ={int(__snake_case ): v for k, v in idalabel.items()} A__ : List[Any] =idalabel A__ : List[Any] ={v: k for k, v in idalabel.items()} # size of the architecture if model_name in ["dino_vits8", "dino_vits16"]: A__ : str =384 A__ : Optional[Any] =1_536 A__ : Optional[Any] =12 A__ : Union[str, Any] =6 # load original model from torch hub A__ : List[Any] =torch.hub.load("""facebookresearch/dino:main""", __snake_case ) original_model.eval() # load state_dict of original model, remove and rename some keys A__ : List[str] =original_model.state_dict() if base_model: remove_classification_head_(__snake_case ) A__ : Union[str, Any] =create_rename_keys(__snake_case, base_model=__snake_case ) for src, dest in rename_keys: rename_key(__snake_case, __snake_case, __snake_case ) read_in_q_k_v(__snake_case, __snake_case, __snake_case ) # load HuggingFace model if base_model: A__ : List[str] =ViTModel(__snake_case, add_pooling_layer=__snake_case ).eval() else: A__ : List[str] =ViTForImageClassification(__snake_case ).eval() model.load_state_dict(__snake_case ) # Check outputs on an image, prepared by ViTImageProcessor A__ : Union[str, Any] =ViTImageProcessor() A__ : Optional[int] =image_processor(images=prepare_img(), return_tensors="""pt""" ) A__ : Union[str, Any] =encoding["""pixel_values"""] A__ : Union[str, Any] =model(__snake_case ) if base_model: A__ : List[str] =original_model(__snake_case ) assert torch.allclose(__snake_case, outputs.last_hidden_state[:, 0, :], atol=1E-1 ) else: A__ : Optional[int] =original_model(__snake_case ) assert logits.shape == outputs.logits.shape assert torch.allclose(__snake_case, outputs.logits, atol=1E-3 ) Path(__snake_case ).mkdir(exist_ok=__snake_case ) print(f"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(__snake_case ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(__snake_case ) if __name__ == "__main__": __snake_case : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='dino_vitb16', type=str, help='Name of the model trained with DINO you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' 
) parser.add_argument( '--base_model', action='store_true', help='Whether to only convert the base model (no projection head weights).', ) parser.set_defaults(base_model=True) __snake_case : Tuple = parser.parse_args() convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
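# --- Illustrative invocation (flags taken from the argparse definition above) ---
# The script filename and output path are placeholders:
#
#   python convert_dino_to_vit.py \
#       --model_name dino_vitb16 \
#       --pytorch_dump_folder_path ./dino-vitb16-hf \
#       --base_model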
'''simple docstring''' import os import shutil from pathlib import Path from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging if is_onnx_available(): import onnxruntime as ort __snake_case : Dict = logging.get_logger(__name__) __snake_case : Union[str, Any] = { 'tensor(bool)': np.bool_, 'tensor(int8)': np.inta, 'tensor(uint8)': np.uinta, 'tensor(int16)': np.intaa, 'tensor(uint16)': np.uintaa, 'tensor(int32)': np.intaa, 'tensor(uint32)': np.uintaa, 'tensor(int64)': np.intaa, 'tensor(uint64)': np.uintaa, 'tensor(float16)': np.floataa, 'tensor(float)': np.floataa, 'tensor(double)': np.floataa, } class lowerCamelCase : '''simple docstring''' def __init__( self : Any , lowerCAmelCase_ : int=None , **lowerCAmelCase_ : int ) -> Optional[Any]: '''simple docstring''' logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" ) A__ : str =model A__ : Optional[Any] =kwargs.get("""model_save_dir""" , lowerCAmelCase_ ) A__ : List[str] =kwargs.get("""latest_model_name""" , lowerCAmelCase_ ) def __call__( self : Union[str, Any] , **lowerCAmelCase_ : Optional[int] ) -> Tuple: '''simple docstring''' A__ : Optional[int] ={k: np.array(lowerCAmelCase_ ) for k, v in kwargs.items()} return self.model.run(lowerCAmelCase_ , lowerCAmelCase_ ) @staticmethod def lowercase__ ( lowerCAmelCase_ : Union[str, Path] , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : Optional[int]=None ) -> Optional[int]: '''simple docstring''' if provider is None: logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" ) A__ : Union[str, Any] ="""CPUExecutionProvider""" return ort.InferenceSession(lowerCAmelCase_ , providers=[provider] , sess_options=lowerCAmelCase_ ) def lowercase__ ( self : Any , lowerCAmelCase_ : Union[str, Path] , lowerCAmelCase_ : Optional[str] = None , **lowerCAmelCase_ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' A__ : Optional[Any] =file_name if file_name is not None else ONNX_WEIGHTS_NAME A__ : Any =self.model_save_dir.joinpath(self.latest_model_name ) A__ : Optional[Any] =Path(lowerCAmelCase_ ).joinpath(lowerCAmelCase_ ) try: shutil.copyfile(lowerCAmelCase_ , lowerCAmelCase_ ) except shutil.SameFileError: pass # copy external weights (for models >2GB) A__ : Optional[int] =self.model_save_dir.joinpath(lowerCAmelCase_ ) if src_path.exists(): A__ : Optional[int] =Path(lowerCAmelCase_ ).joinpath(lowerCAmelCase_ ) try: shutil.copyfile(lowerCAmelCase_ , lowerCAmelCase_ ) except shutil.SameFileError: pass def lowercase__ ( self : Tuple , lowerCAmelCase_ : Union[str, os.PathLike] , **lowerCAmelCase_ : Dict , ) -> str: '''simple docstring''' if os.path.isfile(lowerCAmelCase_ ): logger.error(f"Provided path ({save_directory}) should be a directory, not a file" ) return os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) # saving model weights/files self._save_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) @classmethod def lowercase__ ( cls : int , lowerCAmelCase_ : Union[str, Path] , lowerCAmelCase_ : Optional[Union[bool, str, None]] = None , lowerCAmelCase_ : Optional[Union[str, None]] = None , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional[str] = None , lowerCAmelCase_ : Optional[str] = None , lowerCAmelCase_ : Optional[str] = None , lowerCAmelCase_ : Optional["ort.SessionOptions"] = None , **lowerCAmelCase_ : Any , ) -> Optional[int]: '''simple docstring''' A__ : Dict =file_name if file_name 
is not None else ONNX_WEIGHTS_NAME # load model from local directory if os.path.isdir(lowerCAmelCase_ ): A__ : Tuple =OnnxRuntimeModel.load_model( os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , provider=lowerCAmelCase_ , sess_options=lowerCAmelCase_ ) A__ : List[str] =Path(lowerCAmelCase_ ) # load model from hub else: # download model A__ : List[str] =hf_hub_download( repo_id=lowerCAmelCase_ , filename=lowerCAmelCase_ , use_auth_token=lowerCAmelCase_ , revision=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , force_download=lowerCAmelCase_ , ) A__ : Dict =Path(lowerCAmelCase_ ).parent A__ : Optional[Any] =Path(lowerCAmelCase_ ).name A__ : Union[str, Any] =OnnxRuntimeModel.load_model(lowerCAmelCase_ , provider=lowerCAmelCase_ , sess_options=lowerCAmelCase_ ) return cls(model=lowerCAmelCase_ , **lowerCAmelCase_ ) @classmethod def lowercase__ ( cls : List[Any] , lowerCAmelCase_ : Union[str, Path] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[str] = None , lowerCAmelCase_ : Optional[str] = None , **lowerCAmelCase_ : str , ) -> Optional[Any]: '''simple docstring''' A__ : List[Any] =None if len(str(lowerCAmelCase_ ).split("""@""" ) ) == 2: A__ , A__ : List[Any] =model_id.split("""@""" ) return cls._from_pretrained( model_id=lowerCAmelCase_ , revision=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , force_download=lowerCAmelCase_ , use_auth_token=lowerCAmelCase_ , **lowerCAmelCase_ , )
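# --- Illustrative usage sketch ---
# Loading and running an exported ONNX module with the wrapper above (`OnnxRuntimeModel`,
# per the classmethod calls in its own body; the class name is mangled in this dump).
# The directory, input names, and shapes are placeholders -- the real input names
# depend on the exported graph:
import numpy as np

unet = OnnxRuntimeModel.from_pretrained("./onnx-pipeline/unet", provider="CPUExecutionProvider")
outputs = unet(
    sample=np.zeros((1, 4, 64, 64), dtype=np.float32),
    timestep=np.array([1], dtype=np.int64),
    encoder_hidden_states=np.zeros((1, 77, 768), dtype=np.float32),
)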
'''simple docstring''' import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging __snake_case : List[Any] = logging.get_logger(__name__) class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = 'linear' __snake_case = 'cosine' __snake_case = 'cosine_with_restarts' __snake_case = 'polynomial' __snake_case = 'constant' __snake_case = 'constant_with_warmup' __snake_case = 'piecewise_constant' def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int = -1 ) -> List[str]: """simple docstring""" return LambdaLR(__snake_case, lambda __snake_case : 1, last_epoch=__snake_case ) def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int = -1 ) -> Dict: """simple docstring""" def lr_lambda(__snake_case : int ): if current_step < num_warmup_steps: return float(__snake_case ) / float(max(1.0, __snake_case ) ) return 1.0 return LambdaLR(__snake_case, __snake_case, last_epoch=__snake_case ) def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : str, __snake_case : int = -1 ) -> Optional[Any]: """simple docstring""" A__ : str ={} A__ : Tuple =step_rules.split(""",""" ) for rule_str in rule_list[:-1]: A__ , A__ : int =rule_str.split(""":""" ) A__ : Optional[int] =int(__snake_case ) A__ : List[Any] =float(__snake_case ) A__ : Union[str, Any] =value A__ : int =float(rule_list[-1] ) def create_rules_function(__snake_case : int, __snake_case : Dict ): def rule_func(__snake_case : int ) -> float: A__ : Any =sorted(rules_dict.keys() ) for i, sorted_step in enumerate(__snake_case ): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func A__ : Any =create_rules_function(__snake_case, __snake_case ) return LambdaLR(__snake_case, __snake_case, last_epoch=__snake_case ) def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Dict, __snake_case : List[Any], __snake_case : Any=-1 ) -> int: """simple docstring""" def lr_lambda(__snake_case : int ): if current_step < num_warmup_steps: return float(__snake_case ) / float(max(1, __snake_case ) ) return max( 0.0, float(num_training_steps - current_step ) / float(max(1, num_training_steps - num_warmup_steps ) ) ) return LambdaLR(__snake_case, __snake_case, __snake_case ) def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int, __snake_case : float = 0.5, __snake_case : int = -1 ) -> Dict: """simple docstring""" def lr_lambda(__snake_case : Dict ): if current_step < num_warmup_steps: return float(__snake_case ) / float(max(1, __snake_case ) ) A__ : List[str] =float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) ) return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(__snake_case ) * 2.0 * progress )) ) return LambdaLR(__snake_case, __snake_case, __snake_case ) def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int, __snake_case : int = 1, __snake_case : int = -1 ) -> Dict: """simple docstring""" def lr_lambda(__snake_case : int ): if current_step < num_warmup_steps: return float(__snake_case ) / float(max(1, __snake_case ) ) A__ : Union[str, Any] =float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) ) if progress >= 1.0: return 0.0 return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(__snake_case ) * progress) % 1.0) )) ) return LambdaLR(__snake_case, __snake_case, __snake_case ) def 
__lowerCamelCase ( __snake_case : int, __snake_case : int, __snake_case : Optional[int], __snake_case : Optional[int]=1E-7, __snake_case : List[Any]=1.0, __snake_case : Any=-1 ) -> List[Any]: """simple docstring""" A__ : Optional[int] =optimizer.defaults["""lr"""] if not (lr_init > lr_end): raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})" ) def lr_lambda(__snake_case : int ): if current_step < num_warmup_steps: return float(__snake_case ) / float(max(1, __snake_case ) ) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: A__ : List[Any] =lr_init - lr_end A__ : Any =num_training_steps - num_warmup_steps A__ : Tuple =1 - (current_step - num_warmup_steps) / decay_steps A__ : List[str] =lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(__snake_case, __snake_case, __snake_case ) __snake_case : int = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def __lowerCamelCase ( __snake_case : Union[str, SchedulerType], __snake_case : Optimizer, __snake_case : Optional[str] = None, __snake_case : Optional[int] = None, __snake_case : Optional[int] = None, __snake_case : int = 1, __snake_case : float = 1.0, __snake_case : int = -1, ) -> Tuple: """simple docstring""" A__ : Tuple =SchedulerType(__snake_case ) A__ : List[Any] =TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(__snake_case, last_epoch=__snake_case ) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(__snake_case, step_rules=__snake_case, last_epoch=__snake_case ) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument." ) if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(__snake_case, num_warmup_steps=__snake_case, last_epoch=__snake_case ) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(f"{name} requires `num_training_steps`, please provide that argument." ) if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( __snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, num_cycles=__snake_case, last_epoch=__snake_case, ) if name == SchedulerType.POLYNOMIAL: return schedule_func( __snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, power=__snake_case, last_epoch=__snake_case, ) return schedule_func( __snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, last_epoch=__snake_case )
687
1
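Every schedule in the module above reduces to the same recipe: a per-step multiplier wrapped in `torch.optim.lr_scheduler.LambdaLR`, which scales the optimizer's initial lr by that factor. A self-contained sketch of the linear warmup-then-decay variant:

```python
import torch
from torch.optim.lr_scheduler import LambdaLR


def linear_warmup_decay(optimizer, num_warmup_steps: int, num_training_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int) -> float:
        if current_step < num_warmup_steps:
            # Ramp linearly from 0 to 1 during warmup.
            return current_step / max(1, num_warmup_steps)
        # Then decay linearly from 1 down to 0 over the remaining steps.
        return max(0.0, (num_training_steps - current_step) / max(1, num_training_steps - num_warmup_steps))

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


model = torch.nn.Linear(4, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
sched = linear_warmup_decay(opt, num_warmup_steps=10, num_training_steps=100)
opt.step()
sched.step()
print(sched.get_last_lr())  # [0.01] -- one tenth of the way through warmup
```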
'''simple docstring''' from ...processing_utils import ProcessorMixin class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = ['image_processor', 'feature_extractor'] __snake_case = 'TvltImageProcessor' __snake_case = 'TvltFeatureExtractor' def __init__( self : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any ) -> List[str]: '''simple docstring''' super().__init__(image_processor=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ ) A__ : List[Any] =image_processor A__ : Any =feature_extractor def __call__( self : Any , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Optional[int]=False , lowerCAmelCase_ : str=False , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : List[Any] , ) -> List[Any]: '''simple docstring''' if images is None and audio is None: raise ValueError("""You need to specify either an `images` or `audio` input to process.""" ) A__ : Dict =None if images is not None: A__ : Any =self.image_processor(lowerCAmelCase_ , mask_pixel=lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ) if images_mixed is not None: A__ : Any =self.image_processor(lowerCAmelCase_ , is_mixed=lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ) if audio is not None: A__ : Optional[int] =self.feature_extractor( lowerCAmelCase_ , *lowerCAmelCase_ , sampling_rate=lowerCAmelCase_ , mask_audio=lowerCAmelCase_ , **lowerCAmelCase_ ) A__ : List[str] ={} if audio is not None: output_dict.update(lowerCAmelCase_ ) if images is not None: output_dict.update(lowerCAmelCase_ ) if images_mixed_dict is not None: output_dict.update(lowerCAmelCase_ ) return output_dict @property def lowercase__ ( self : int ) -> Dict: '''simple docstring''' A__ : List[Any] =self.image_processor.model_input_names A__ : Union[str, Any] =self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
687
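The processor above is deliberately thin glue: each modality goes through its own sub-processor and the resulting feature dicts are merged into one batch. A stripped-down sketch of the pattern (the two callables are stand-ins, not the real TVLT classes):

```python
from typing import Any, Callable, Dict, Optional


def process_multimodal(
    image_processor: Callable[..., Dict[str, Any]],
    feature_extractor: Callable[..., Dict[str, Any]],
    images: Any = None,
    audio: Any = None,
    sampling_rate: Optional[int] = None,
) -> Dict[str, Any]:
    if images is None and audio is None:
        raise ValueError("You need to specify either an `images` or `audio` input to process.")
    output: Dict[str, Any] = {}
    if audio is not None:
        output.update(feature_extractor(audio, sampling_rate=sampling_rate))
    if images is not None:
        output.update(image_processor(images))  # later updates win on key clashes
    return output
```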
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __snake_case : List[str] = { 'configuration_squeezebert': [ 'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SqueezeBertConfig', 'SqueezeBertOnnxConfig', ], 'tokenization_squeezebert': ['SqueezeBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Optional[Any] = ['SqueezeBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : int = [ 'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'SqueezeBertForMaskedLM', 'SqueezeBertForMultipleChoice', 'SqueezeBertForQuestionAnswering', 'SqueezeBertForSequenceClassification', 'SqueezeBertForTokenClassification', 'SqueezeBertModel', 'SqueezeBertModule', 'SqueezeBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_squeezebert import ( SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertOnnxConfig, ) from .tokenization_squeezebert import SqueezeBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_squeezebert import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, SqueezeBertModule, SqueezeBertPreTrainedModel, ) else: import sys __snake_case : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
687
1
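The `_LazyModule` at the bottom of the row above defers the heavy torch/tokenizers imports until an attribute is actually touched. The same effect can be built from a module-level `__getattr__` (PEP 562); a minimal sketch, with a hypothetical package layout:

```python
# pkg/__init__.py -- hypothetical layout, for illustration only
import importlib

_import_structure = {
    "configuration": ["MyConfig"],
    "modeling": ["MyModel"],
}
# Invert the mapping: exported attribute name -> defining submodule.
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}


def __getattr__(name: str):
    # Only called when `name` is not found normally, so imports happen on demand.
    if name in _attr_to_module:
        submodule = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```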
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: __snake_case : Tuple = None __snake_case : str = logging.get_logger(__name__) __snake_case : List[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} __snake_case : Optional[Any] = { 'vocab_file': { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model', 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model' ), }, 'tokenizer_file': { 'google/bigbird-roberta-base': ( 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json' ), 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json' ), }, } __snake_case : Union[str, Any] = { 'google/bigbird-roberta-base': 4096, 'google/bigbird-roberta-large': 4096, 'google/bigbird-base-trivia-itc': 4096, } __snake_case : List[Any] = '▁' class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = VOCAB_FILES_NAMES __snake_case = PRETRAINED_VOCAB_FILES_MAP __snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __snake_case = BigBirdTokenizer __snake_case = ['input_ids', 'attention_mask'] __snake_case = [] def __init__( self : str , lowerCAmelCase_ : int=None , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : Optional[int]="<unk>" , lowerCAmelCase_ : Optional[int]="<s>" , lowerCAmelCase_ : List[str]="</s>" , lowerCAmelCase_ : Dict="<pad>" , lowerCAmelCase_ : Tuple="[SEP]" , lowerCAmelCase_ : Optional[int]="[MASK]" , lowerCAmelCase_ : Optional[int]="[CLS]" , **lowerCAmelCase_ : Optional[int] , ) -> Any: '''simple docstring''' A__ : int =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else bos_token A__ : Optional[int] =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else eos_token A__ : str =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else unk_token A__ : List[str] =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else pad_token A__ : int =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else cls_token A__ : List[str] =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it A__ : Optional[int] =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token super().__init__( lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , **lowerCAmelCase_ , ) A__ : Tuple =vocab_file A__ : Dict =False if not self.vocab_file else True def lowercase__ ( self : List[str] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' A__ : str =[self.sep_token_id] A__ : Optional[int] =[self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowercase__ ( self : int , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None , lowerCAmelCase_ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( """You should not supply a second sequence if the provided sequence of """ """ids is already formatted with special tokens for the model.""" ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase_ )) + [1] return [1] + ([0] * len(lowerCAmelCase_ )) + [1] + ([0] * len(lowerCAmelCase_ )) + [1] def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' A__ : Tuple =[self.sep_token_id] A__ : str =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowercase__ ( self : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(lowerCAmelCase_ ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return A__ : List[Any] =os.path.join( lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ): copyfile(self.vocab_file , lowerCAmelCase_ ) return (out_vocab_file,)
687
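The special-token helpers in the tokenizer above are plain list arithmetic around BERT-style framing: `[CLS] A [SEP]` for a single sequence, `[CLS] A [SEP] B [SEP]` with segment ids 0/1 for a pair. A sketch with illustrative ids:

```python
from typing import List, Optional, Tuple

CLS_ID, SEP_ID = 65, 66  # illustrative ids, not BigBird's actual vocabulary


def build_inputs(ids_a: List[int], ids_b: Optional[List[int]] = None) -> Tuple[List[int], List[int]]:
    tokens = [CLS_ID] + ids_a + [SEP_ID]
    token_type_ids = [0] * len(tokens)
    if ids_b is not None:
        tokens += ids_b + [SEP_ID]
        token_type_ids += [1] * (len(ids_b) + 1)
    return tokens, token_type_ids


print(build_inputs([1, 2], [3]))  # ([65, 1, 2, 66, 3, 66], [0, 0, 0, 0, 1, 1])
```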
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __snake_case : Optional[int] = { 'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'], 'tokenization_convbert': ['ConvBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Tuple = ['ConvBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : int = [ 'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConvBertForMaskedLM', 'ConvBertForMultipleChoice', 'ConvBertForQuestionAnswering', 'ConvBertForSequenceClassification', 'ConvBertForTokenClassification', 'ConvBertLayer', 'ConvBertModel', 'ConvBertPreTrainedModel', 'load_tf_weights_in_convbert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Union[str, Any] = [ 'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFConvBertForMaskedLM', 'TFConvBertForMultipleChoice', 'TFConvBertForQuestionAnswering', 'TFConvBertForSequenceClassification', 'TFConvBertForTokenClassification', 'TFConvBertLayer', 'TFConvBertModel', 'TFConvBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig from .tokenization_convbert import ConvBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_convbert_fast import ConvBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convbert import ( CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convbert import ( TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) else: import sys __snake_case : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
687
1
'''simple docstring''' import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def __lowerCamelCase ( __snake_case : List[str] ) -> Tuple: """simple docstring""" A__ : int =filter(lambda __snake_case : p.requires_grad, model.parameters() ) A__ : Dict =sum([np.prod(p.size() ) for p in model_parameters] ) return params __snake_case : int = logging.getLogger(__name__) def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : List[Any] ) -> int: """simple docstring""" if metric == "rouge2": A__ : str ="""{val_avg_rouge2:.4f}-{step_count}""" elif metric == "bleu": A__ : Union[str, Any] ="""{val_avg_bleu:.4f}-{step_count}""" elif metric == "em": A__ : Union[str, Any] ="""{val_avg_em:.4f}-{step_count}""" elif metric == "loss": A__ : Union[str, Any] ="""{val_avg_loss:.4f}-{step_count}""" else: raise NotImplementedError( f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this" """ function.""" ) A__ : List[str] =ModelCheckpoint( dirpath=__snake_case, filename=__snake_case, monitor=f"val_{metric}", mode="""max""", save_top_k=1, every_n_epochs=1, ) return checkpoint_callback def __lowerCamelCase ( __snake_case : List[Any], __snake_case : str ) -> int: """simple docstring""" return EarlyStopping( monitor=f"val_{metric}", mode="""min""" if """loss""" in metric else """max""", patience=__snake_case, verbose=__snake_case, ) class lowerCamelCase ( pl.Callback ): '''simple docstring''' def lowercase__ ( self : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' A__ : Optional[Any] ={f"lr_group_{i}": param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(lowerCAmelCase_ ) @rank_zero_only def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : pl.Trainer , lowerCAmelCase_ : pl.LightningModule , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int]=True ) -> None: '''simple docstring''' logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****" ) A__ : Optional[Any] =trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} ) # Log results A__ : Any =Path(pl_module.hparams.output_dir ) if type_path == "test": A__ : Union[str, Any] =od / """test_results.txt""" A__ : str =od / """test_generations.txt""" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
A__ : List[str] =od / f"{type_path}_results/{trainer.global_step:05d}.txt" A__ : Any =od / f"{type_path}_generations/{trainer.global_step:05d}.txt" results_file.parent.mkdir(exist_ok=lowerCAmelCase_ ) generations_file.parent.mkdir(exist_ok=lowerCAmelCase_ ) with open(lowerCAmelCase_ , """a+""" ) as writer: for key in sorted(lowerCAmelCase_ ): if key in ["log", "progress_bar", "preds"]: continue A__ : str =metrics[key] if isinstance(lowerCAmelCase_ , torch.Tensor ): A__ : Optional[Any] =val.item() A__ : List[str] =f"{key}: {val:.6f}\n" writer.write(lowerCAmelCase_ ) if not save_generations: return if "preds" in metrics: A__ : Optional[int] ="""\n""".join(metrics["""preds"""] ) generations_file.open("""w+""" ).write(lowerCAmelCase_ ) @rank_zero_only def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] ) -> Optional[int]: '''simple docstring''' try: A__ : List[str] =pl_module.model.model.num_parameters() except AttributeError: A__ : int =pl_module.model.num_parameters() A__ : int =count_trainable_parameters(lowerCAmelCase_ ) # mp stands for million parameters trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1e6, """grad_mp""": n_trainable_pars / 1e6} ) @rank_zero_only def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : pl.Trainer , lowerCAmelCase_ : pl.LightningModule ) -> Optional[int]: '''simple docstring''' save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(lowerCAmelCase_ , lowerCAmelCase_ , """test""" ) @rank_zero_only def lowercase__ ( self : str , lowerCAmelCase_ : pl.Trainer , lowerCAmelCase_ : List[str] ) -> List[str]: '''simple docstring''' save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
687
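Two details in the callback above are broadly reusable: count only parameters with `requires_grad`, and report totals in millions. A self-contained check:

```python
import torch


def count_trainable_parameters(model: torch.nn.Module) -> int:
    return sum(p.numel() for p in model.parameters() if p.requires_grad)


model = torch.nn.Sequential(torch.nn.Linear(10, 10), torch.nn.Linear(10, 2))
model[0].requires_grad_(False)  # freeze the first layer (110 params)
n_trainable = count_trainable_parameters(model)
print(f"{n_trainable} trainable params ({n_trainable / 1e6:.6f} M)")  # 22 trainable params
```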
'''simple docstring''' import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowercase__ ( self : Optional[Any] ) -> int: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() def lowercase__ ( self : Union[str, Any] ) -> str: '''simple docstring''' A__ : Any =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) A__ : Optional[Any] =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) A__ : Optional[int] ="""xvjiarui/stable-diffusion-2-inpainting""" A__ , A__ : List[str] =FlaxStableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase_ , safety_checker=lowerCAmelCase_ ) A__ : List[str] ="""Face of a yellow cat, high resolution, sitting on a park bench""" A__ : Optional[Any] =jax.random.PRNGKey(0 ) A__ : List[str] =50 A__ : List[str] =jax.device_count() A__ : List[str] =num_samples * [prompt] A__ : List[str] =num_samples * [init_image] A__ : Tuple =num_samples * [mask_image] A__ , A__ , A__ : List[Any] =pipeline.prepare_inputs(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # shard inputs and rng A__ : Dict =replicate(lowerCAmelCase_ ) A__ : Union[str, Any] =jax.random.split(lowerCAmelCase_ , jax.device_count() ) A__ : List[Any] =shard(lowerCAmelCase_ ) A__ : Union[str, Any] =shard(lowerCAmelCase_ ) A__ : str =shard(lowerCAmelCase_ ) A__ : List[str] =pipeline( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ) A__ : List[Any] =output.images.reshape(lowerCAmelCase_ , 5_12 , 5_12 , 3 ) A__ : str =images[0, 2_53:2_56, 2_53:2_56, -1] A__ : Tuple =jnp.asarray(jax.device_get(image_slice.flatten() ) ) A__ : Optional[int] =jnp.array( [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] ) print(f"output_slice: {output_slice}" ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
687
1
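The Flax test fans one prompt batch out over every local device: `replicate` copies the pipeline params, `jax.random.split` hands each device its own RNG, and `shard` reshapes arrays so axis 0 equals the device count. A sketch of that reshape (runs on CPU, where the device count is 1):

```python
import jax
import jax.numpy as jnp


def shard(x: jnp.ndarray) -> jnp.ndarray:
    """Reshape (batch, ...) -> (num_devices, batch_per_device, ...) for pmap."""
    n = jax.local_device_count()
    return x.reshape(n, x.shape[0] // n, *x.shape[1:])


rngs = jax.random.split(jax.random.PRNGKey(0), jax.local_device_count())
batch = jnp.zeros((jax.local_device_count() * 2, 4))
print(shard(batch).shape, rngs.shape)  # e.g. (1, 2, 4) (1, 2) on a single-device host
```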
'''simple docstring''' import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel __snake_case : Union[str, Any] = HfApi() __snake_case : int = {} # fmt: off __snake_case : List[Any] = torch.tensor([ -0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467, 1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189, -1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839, 0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557 ]) __snake_case : Dict = torch.tensor([ -2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436, 1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208, -2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948, 2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365 ]) __snake_case : str = torch.tensor([ -0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869, -0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304, -0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925, 0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943 ]) __snake_case : Union[str, Any] = torch.tensor([ 0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172, -0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309, 0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805, -0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505 ]) __snake_case : Union[str, Any] = torch.tensor([ 0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133, -0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395, 0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559, -0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386 ]) __snake_case : Dict = torch.tensor([ 0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078, -0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330, 0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683, -0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431 ]) __snake_case : str = torch.tensor([ 0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042, -0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398, 0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574, -0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390 ]) __snake_case : List[str] = torch.tensor([ 0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042, -0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290, 0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746, -0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473 ]) __snake_case : str = torch.tensor([ -1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330, 1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243, -2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810, 1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251]) __snake_case : Optional[Any] = torch.tensor([ -1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324, 0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181, -2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259, 1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266 ]) __snake_case : Optional[Any] = torch.tensor([ -1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212, 0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027, -2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131, 1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355 ]) __snake_case : Dict = torch.tensor([ -2.0585, -2.7897, -0.2850, -0.8940, 
1.9052, 0.5702, 0.6345, -3.8959, 1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351, -3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341, 3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066 ]) __snake_case : Dict = torch.tensor([ -2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740, 1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398, -2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395, 2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243 ]) __snake_case : Tuple = torch.tensor([ -2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336, 1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908, -3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560, 3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343 ]) __snake_case : Optional[Any] = torch.tensor([ -1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344, 1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391, -2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439, 1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219 ]) # fmt: on __snake_case : Optional[Any] = api.list_models(filter='diffusers') for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": __snake_case : Tuple = '/home/patrick/google_checkpoints/' + mod.modelId.split('/')[-1] print(F"""Started running {mod.modelId}!!!""") if mod.modelId.startswith('CompVis'): __snake_case : str = UNetaDModel.from_pretrained(local_checkpoint, subfolder='unet') else: __snake_case : int = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) __snake_case : Union[str, Any] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) __snake_case : Tuple = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): __snake_case : List[str] = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results['_'.join('_'.join(mod.modelId.split('/')).split('-'))], atol=1E-3 ) print(F"""{mod.modelId} has passed successfully!!!""")
687
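The script above is a numeric regression test: fix the seeds, run each checkpoint once, and compare a short slice of the output against hard-coded reference values with `torch.allclose`. The core assertion pattern, with placeholder numbers:

```python
import torch

torch.manual_seed(0)
logits = torch.randn(1, 3, 32, 32)  # stands in for model(noise, timestep).sample
expected_slice = logits[0, 0, 0, :5].clone()  # in practice: numbers pasted from a known-good run

# Passes iff the first few values match the stored reference within tolerance.
assert torch.allclose(logits[0, 0, 0, :5], expected_slice, atol=1e-3)
print("regression check passed")
```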
'''simple docstring''' import copy from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING __snake_case : List[Any] = logging.get_logger(__name__) __snake_case : Dict = { 'microsoft/conditional-detr-resnet-50': ( 'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json' ), } class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = 'conditional_detr' __snake_case = ['past_key_values'] __snake_case = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : int , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : Tuple=3_00 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : str=20_48 , lowerCAmelCase_ : Union[str, Any]=8 , lowerCAmelCase_ : Any=6 , lowerCAmelCase_ : Any=20_48 , lowerCAmelCase_ : Union[str, Any]=8 , lowerCAmelCase_ : str=0.0 , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Optional[Any]="relu" , lowerCAmelCase_ : Union[str, Any]=2_56 , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : Optional[Any]=1.0 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : List[Any]="sine" , lowerCAmelCase_ : Optional[int]="resnet50" , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : Optional[Any]=5 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Any=5 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : int=0.25 , **lowerCAmelCase_ : int , ) -> Dict: '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.""" ) A__ : Optional[int] =CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] ) elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): A__ : Tuple =backbone_config.get("""model_type""" ) A__ : List[str] =CONFIG_MAPPING[backbone_model_type] A__ : Dict =config_class.from_dict(lowerCAmelCase_ ) A__ : int =use_timm_backbone A__ : List[Any] =backbone_config A__ : Optional[int] =num_channels A__ : Optional[int] =num_queries A__ : Union[str, Any] =d_model A__ : Optional[int] =encoder_ffn_dim A__ : Optional[Any] =encoder_layers A__ : int =encoder_attention_heads A__ : Optional[Any] =decoder_ffn_dim A__ : Tuple =decoder_layers A__ : Optional[Any] =decoder_attention_heads A__ : Tuple =dropout A__ : int =attention_dropout A__ : Dict =activation_dropout A__ : Union[str, Any] =activation_function A__ : List[str] =init_std A__ : str =init_xavier_std A__ : int =encoder_layerdrop A__ : List[Any] =decoder_layerdrop A__ : Tuple =encoder_layers A__ : Tuple =auxiliary_loss A__ : List[Any] =position_embedding_type A__ : int =backbone A__ : Optional[int] =use_pretrained_backbone A__ : str =dilation # Hungarian matcher A__ : Any =class_cost A__ : str =bbox_cost A__ : str =giou_cost # Loss coefficients A__ : Union[str, Any] =mask_loss_coefficient A__ : int =dice_loss_coefficient A__ : Union[str, Any] =cls_loss_coefficient A__ : List[str] =bbox_loss_coefficient A__ : str =giou_loss_coefficient A__ : Optional[Any] =focal_alpha super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ ) @property def lowercase__ ( self : str ) -> int: '''simple docstring''' return self.encoder_attention_heads @property def lowercase__ ( self : Any ) -> int: '''simple docstring''' return self.d_model def lowercase__ ( self : Dict ) -> Union[str, Any]: '''simple docstring''' A__ : int =copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: A__ : str =self.backbone_config.to_dict() A__ : int =self.__class__.model_type return output class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = version.parse('1.11' ) @property def lowercase__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""pixel_mask""", {0: """batch"""}), ] ) @property def lowercase__ ( self : Any ) -> float: '''simple docstring''' return 1e-5 @property def lowercase__ ( self : Any ) -> int: '''simple docstring''' return 12
687
1
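One detail of the config above worth isolating: `to_dict` deep-copies `__dict__` and then recursively serializes the nested backbone config, so the result is plain, JSON-ready data. A minimal sketch with hypothetical config classes:

```python
import copy


class BackboneConfig:
    def __init__(self, depth: int = 50):
        self.depth = depth

    def to_dict(self) -> dict:
        return copy.deepcopy(self.__dict__)


class DetectorConfig:
    def __init__(self, backbone_config: BackboneConfig, num_queries: int = 300):
        self.backbone_config = backbone_config
        self.num_queries = num_queries

    def to_dict(self) -> dict:
        output = copy.deepcopy(self.__dict__)
        # Replace the nested config object with its own serialized form.
        output["backbone_config"] = self.backbone_config.to_dict()
        return output


print(DetectorConfig(BackboneConfig()).to_dict())
# {'backbone_config': {'depth': 50}, 'num_queries': 300}
```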
'''simple docstring''' from PIL import Image def __lowerCamelCase ( __snake_case : Image, __snake_case : float ) -> Image: """simple docstring""" def brightness(__snake_case : int ) -> float: return 128 + level + (c - 128) if not -2_55.0 <= level <= 2_55.0: raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" ) return img.point(__snake_case ) if __name__ == "__main__": # Load image with Image.open('image_data/lena.jpg') as img: # Change brightness to 100 __snake_case : List[Any] = change_brightness(img, 100) bright_img.save('image_data/lena_brightness.png', format='png')
687
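The transform above is `new = 128 + level + (old - 128)`, i.e. a constant shift of every pixel by `level`, written relative to mid-gray 128; `Image.point` applies it through a per-value lookup table. A tiny runnable check on a synthetic image instead of the lena file:

```python
from PIL import Image

level = 100
img = Image.new("L", (2, 2), color=120)  # uniform gray test image
bright = img.point(lambda c: 128 + level + (c - 128))
print(list(bright.getdata()))  # [220, 220, 220, 220] -- every pixel shifted by +100
```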
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __snake_case : Union[str, Any] = logging.get_logger(__name__) __snake_case : Optional[int] = { 'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json', } class lowerCamelCase ( lowercase_ , lowercase_ ): '''simple docstring''' __snake_case = 'bit' __snake_case = ['preactivation', 'bottleneck'] __snake_case = ['SAME', 'VALID'] def __init__( self : List[str] , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : int=64 , lowerCAmelCase_ : Optional[int]=[2_56, 5_12, 10_24, 20_48] , lowerCAmelCase_ : str=[3, 4, 6, 3] , lowerCAmelCase_ : Optional[Any]="preactivation" , lowerCAmelCase_ : str="relu" , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Dict=32 , lowerCAmelCase_ : Tuple=0.0 , lowerCAmelCase_ : int=False , lowerCAmelCase_ : Optional[Any]=32 , lowerCAmelCase_ : Tuple=1 , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : int , ) -> Optional[Any]: '''simple docstring''' super().__init__(**lowerCAmelCase_ ) if layer_type not in self.layer_types: raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" ) if global_padding is not None: if global_padding.upper() in self.supported_padding: A__ : List[Any] =global_padding.upper() else: raise ValueError(f"Padding strategy {global_padding} not supported" ) A__ : List[Any] =num_channels A__ : Tuple =embedding_size A__ : Union[str, Any] =hidden_sizes A__ : List[str] =depths A__ : Optional[Any] =layer_type A__ : int =hidden_act A__ : int =global_padding A__ : int =num_groups A__ : str =drop_path_rate A__ : str =embedding_dynamic_padding A__ : Dict =output_stride A__ : Optional[int] =width_factor A__ : List[str] =["""stem"""] + [f"stage{idx}" for idx in range(1 , len(lowerCAmelCase_ ) + 1 )] A__ , A__ : Union[str, Any] =get_aligned_output_features_output_indices( out_features=lowerCAmelCase_ , out_indices=lowerCAmelCase_ , stage_names=self.stage_names )
687
1
'''simple docstring''' import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def __lowerCamelCase ( __snake_case : Tuple, __snake_case : str ) -> Optional[int]: """simple docstring""" A__ : Any =checkpoint A__ : Optional[int] ={} A__ : Union[str, Any] =vae_state_dict["""encoder.conv_in.weight"""] A__ : Optional[int] =vae_state_dict["""encoder.conv_in.bias"""] A__ : Union[str, Any] =vae_state_dict["""encoder.conv_out.weight"""] A__ : Optional[int] =vae_state_dict["""encoder.conv_out.bias"""] A__ : List[str] =vae_state_dict["""encoder.norm_out.weight"""] A__ : Dict =vae_state_dict["""encoder.norm_out.bias"""] A__ : int =vae_state_dict["""decoder.conv_in.weight"""] A__ : List[str] =vae_state_dict["""decoder.conv_in.bias"""] A__ : Tuple =vae_state_dict["""decoder.conv_out.weight"""] A__ : List[Any] =vae_state_dict["""decoder.conv_out.bias"""] A__ : Union[str, Any] =vae_state_dict["""decoder.norm_out.weight"""] A__ : int =vae_state_dict["""decoder.norm_out.bias"""] A__ : Dict =vae_state_dict["""quant_conv.weight"""] A__ : Dict =vae_state_dict["""quant_conv.bias"""] A__ : Dict =vae_state_dict["""post_quant_conv.weight"""] A__ : List[Any] =vae_state_dict["""post_quant_conv.bias"""] # Retrieves the keys for the encoder down blocks only A__ : Dict =len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """encoder.down""" in layer} ) A__ : List[Any] ={ layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(__snake_case ) } # Retrieves the keys for the decoder up blocks only A__ : int =len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """decoder.up""" in layer} ) A__ : Union[str, Any] ={ layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(__snake_case ) } for i in range(__snake_case ): A__ : Optional[int] =[key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key] if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict: A__ : Optional[Any] =vae_state_dict.pop( f"encoder.down.{i}.downsample.conv.weight" ) A__ : Union[str, Any] =vae_state_dict.pop( f"encoder.down.{i}.downsample.conv.bias" ) A__ : Union[str, Any] =renew_vae_resnet_paths(__snake_case ) A__ : Dict ={"""old""": f"down.{i}.block", """new""": f"down_blocks.{i}.resnets"} assign_to_checkpoint(__snake_case, __snake_case, __snake_case, additional_replacements=[meta_path], config=__snake_case ) A__ : Union[str, Any] =[key for key in vae_state_dict if """encoder.mid.block""" in key] A__ : str =2 for i in range(1, num_mid_res_blocks + 1 ): A__ : List[Any] =[key for key in mid_resnets if f"encoder.mid.block_{i}" in key] A__ : Dict =renew_vae_resnet_paths(__snake_case ) A__ : List[Any] ={"""old""": f"mid.block_{i}", """new""": f"mid_block.resnets.{i - 1}"} assign_to_checkpoint(__snake_case, __snake_case, __snake_case, additional_replacements=[meta_path], config=__snake_case ) A__ : Optional[int] =[key for key in vae_state_dict if """encoder.mid.attn""" in key] A__ : Union[str, Any] =renew_vae_attention_paths(__snake_case ) A__ : Dict ={"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""} assign_to_checkpoint(__snake_case, __snake_case, __snake_case, additional_replacements=[meta_path], config=__snake_case ) 
conv_attn_to_linear(__snake_case ) for i in range(__snake_case ): A__ : Any =num_up_blocks - 1 - i A__ : Union[str, Any] =[ key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key ] if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict: A__ : List[Any] =vae_state_dict[ f"decoder.up.{block_id}.upsample.conv.weight" ] A__ : Union[str, Any] =vae_state_dict[ f"decoder.up.{block_id}.upsample.conv.bias" ] A__ : Dict =renew_vae_resnet_paths(__snake_case ) A__ : Union[str, Any] ={"""old""": f"up.{block_id}.block", """new""": f"up_blocks.{i}.resnets"} assign_to_checkpoint(__snake_case, __snake_case, __snake_case, additional_replacements=[meta_path], config=__snake_case ) A__ : Optional[Any] =[key for key in vae_state_dict if """decoder.mid.block""" in key] A__ : int =2 for i in range(1, num_mid_res_blocks + 1 ): A__ : Union[str, Any] =[key for key in mid_resnets if f"decoder.mid.block_{i}" in key] A__ : Optional[Any] =renew_vae_resnet_paths(__snake_case ) A__ : Optional[int] ={"""old""": f"mid.block_{i}", """new""": f"mid_block.resnets.{i - 1}"} assign_to_checkpoint(__snake_case, __snake_case, __snake_case, additional_replacements=[meta_path], config=__snake_case ) A__ : Union[str, Any] =[key for key in vae_state_dict if """decoder.mid.attn""" in key] A__ : Union[str, Any] =renew_vae_attention_paths(__snake_case ) A__ : List[Any] ={"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""} assign_to_checkpoint(__snake_case, __snake_case, __snake_case, additional_replacements=[meta_path], config=__snake_case ) conv_attn_to_linear(__snake_case ) return new_checkpoint def __lowerCamelCase ( __snake_case : str, __snake_case : str, ) -> Dict: """simple docstring""" A__ : int =requests.get( """ https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml""" ) A__ : Optional[int] =io.BytesIO(r.content ) A__ : Dict =OmegaConf.load(__snake_case ) A__ : int =512 A__ : Any ="""cuda""" if torch.cuda.is_available() else """cpu""" if checkpoint_path.endswith("""safetensors""" ): from safetensors import safe_open A__ : Union[str, Any] ={} with safe_open(__snake_case, framework="""pt""", device="""cpu""" ) as f: for key in f.keys(): A__ : Optional[Any] =f.get_tensor(__snake_case ) else: A__ : Optional[Any] =torch.load(__snake_case, map_location=__snake_case )["""state_dict"""] # Convert the VAE model. A__ : Any =create_vae_diffusers_config(__snake_case, image_size=__snake_case ) A__ : Optional[int] =custom_convert_ldm_vae_checkpoint(__snake_case, __snake_case ) A__ : str =AutoencoderKL(**__snake_case ) vae.load_state_dict(__snake_case ) vae.save_pretrained(__snake_case ) if __name__ == "__main__": __snake_case : str = argparse.ArgumentParser() parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.') parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.') __snake_case : Optional[int] = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
687
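The conversion above is, at heart, systematic key renaming: gather the checkpoint keys per block, rewrite an `old` prefix into its diffusers `new` counterpart, and copy the tensors under the new names (that is what `assign_to_checkpoint` does with its `additional_replacements`). The essence, sketched with plain string replacement and illustrative key names:

```python
from typing import Dict, List

import torch


def rename_keys(state_dict: Dict[str, torch.Tensor], replacements: List[dict]) -> Dict[str, torch.Tensor]:
    """Copy every tensor under a key with each {'old': ..., 'new': ...} substring rewritten."""
    renamed = {}
    for key, tensor in state_dict.items():
        new_key = key
        for rep in replacements:
            new_key = new_key.replace(rep["old"], rep["new"])
        renamed[new_key] = tensor
    return renamed


sd = {"down.0.block.0.conv.weight": torch.zeros(1)}
print(rename_keys(sd, [{"old": "down.0.block", "new": "down_blocks.0.resnets"}]))
# {'down_blocks.0.resnets.0.conv.weight': tensor([0.])}
```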
'''simple docstring''' import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin __snake_case : int = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.plbart.modeling_plbart import shift_tokens_right __snake_case : List[str] = 5_0003 __snake_case : Dict = 5_0002 @require_sentencepiece @require_tokenizers class lowerCamelCase ( lowercase_ , unittest.TestCase ): '''simple docstring''' __snake_case = PLBartTokenizer __snake_case = None __snake_case = False def lowercase__ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing A__ : Tuple =PLBartTokenizer(lowerCAmelCase_ , language_codes="""base""" , keep_accents=lowerCAmelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase__ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' A__ : Union[str, Any] =PLBartTokenizer(lowerCAmelCase_ , language_codes="""base""" , keep_accents=lowerCAmelCase_ ) A__ : Optional[Any] =tokenizer.tokenize("""This is a test""" ) self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) A__ : Tuple =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) A__ : Any =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) A__ : str =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) A__ : Optional[Any] =tokenizer.vocab_size A__ : Dict =[tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 4 , lowerCAmelCase_ )] self.assertListEqual(lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] ) A__ : Dict ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go""" A__ : int =tokenizer(lowerCAmelCase_ ).input_ids self.assertEqual( tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , ) def lowercase__ ( self : Any ) -> str: '''simple docstring''' A__ : int =PLBartTokenizer(lowerCAmelCase_ , language_codes="""multi""" , keep_accents=lowerCAmelCase_ ) A__ : Dict 
=tokenizer.tokenize("""This is a test""" ) self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) A__ : Dict =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) A__ : str =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) A__ : Dict =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) A__ : Tuple =tokenizer.vocab_size A__ : Dict =[tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 7 , lowerCAmelCase_ )] self.assertListEqual( lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] ) A__ : Any ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go""" A__ : int =tokenizer(lowerCAmelCase_ ).input_ids self.assertEqual( tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , ) @require_torch @require_sentencepiece @require_tokenizers class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' __snake_case = 'uclanlp/plbart-python-en_XX' __snake_case = [ 'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])', 'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])', ] __snake_case = [ 'Returns the maximum value of a b c.', 'Sums the values of a b c.', ] __snake_case = [ 134, 5452, 3_3460, 3_3441, 3_3463, 3_3465, 3_3463, 3_3449, 988, 20, 3_3456, 19, 3_3456, 771, 39, 4258, 889, 3318, 3_3441, 3_3463, 3_3465, 3_3463, 3_3449, 2471, 2, PYTHON_CODE, ] @classmethod def lowercase__ ( cls : Optional[int] ) -> str: '''simple docstring''' A__ : PLBartTokenizer =PLBartTokenizer.from_pretrained( cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" ) A__ : Optional[Any] =1 return cls def lowercase__ ( self : str ) -> Optional[Any]: '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 5_00_01 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 5_00_02 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 5_00_03 ) def lowercase__ ( self : int ) -> List[str]: '''simple docstring''' A__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ ) def lowercase__ ( self : int ) -> 
Optional[int]: '''simple docstring''' self.assertIn(lowerCAmelCase_ , self.tokenizer.all_special_ids ) A__ : Tuple =[EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2] A__ : Any =self.tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ ) A__ : Optional[int] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase_ ) def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' A__ : Optional[int] =["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20] self.assertIsInstance(src_text[0] , lowerCAmelCase_ ) A__ : str =10 A__ : Optional[Any] =self.tokenizer(lowerCAmelCase_ , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , lowerCAmelCase_ ) self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ ) def lowercase__ ( self : str ) -> List[Any]: '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [5_00_04, 5_00_01] ) def lowercase__ ( self : Tuple ) -> str: '''simple docstring''' A__ : Tuple =tempfile.mkdtemp() A__ : Tuple =self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowerCAmelCase_ ) A__ : Optional[Any] =PLBartTokenizer.from_pretrained(lowerCAmelCase_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase_ ) @require_torch def lowercase__ ( self : Any ) -> Any: '''simple docstring''' A__ : List[str] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , return_tensors="""pt""" ) A__ : str =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] ) self.assertEqual(batch.decoder_input_ids[1][0] , lowerCAmelCase_ ) self.assertEqual(batch.decoder_input_ids[1][-1] , 2 ) self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] ) @require_torch def lowercase__ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' A__ : Union[str, Any] =self.tokenizer( self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , ) A__ : Any =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual((2, 26) , batch.input_ids.shape ) self.assertEqual((2, 26) , batch.attention_mask.shape ) A__ : List[Any] =batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] ) def lowercase__ ( self : Any ) -> Dict: '''simple docstring''' A__ : Any =self.tokenizer(self.src_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=3 , return_tensors="""pt""" ) A__ : Optional[int] =self.tokenizer( text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=10 , return_tensors="""pt""" ) A__ : Optional[Any] =targets["""input_ids"""] A__ : List[str] =shift_tokens_right(lowerCAmelCase_ , self.tokenizer.pad_token_id ) 
self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def lowercase__ ( self : Any ) -> str: '''simple docstring''' A__ : Any =self.tokenizer._build_translation_inputs( """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" ) self.assertEqual( nested_simplify(lowerCAmelCase_ ) , { # A, test, EOS, en_XX """input_ids""": [[1_50, 2_42, 2, 5_00_03]], """attention_mask""": [[1, 1, 1, 1]], # java """forced_bos_token_id""": 5_00_01, } , )
687
1
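`shift_tokens_right` in the test above prepares decoder inputs in the mBART/PLBart convention: the last non-pad token of each row (the language code) moves to position 0 and everything else shifts right by one. A sketch under that assumption:

```python
import torch


def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    shifted = input_ids.clone()
    # Index of the last non-pad token in each row: the language code.
    last_nonpad = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    lang_codes = input_ids.gather(1, last_nonpad).squeeze(-1)
    shifted[:, 1:] = input_ids[:, :-1].clone()
    shifted[:, 0] = lang_codes
    return shifted


ids = torch.tensor([[5, 6, 2, 50003, 1]])  # tokens, eos=2, language code, pad=1
print(shift_tokens_right(ids, pad_token_id=1))  # tensor([[50003, 5, 6, 2, 50003]])
```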
'''simple docstring''' import contextlib import csv import json import os import sqlitea import tarfile import textwrap import zipfile import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( ) -> str: """simple docstring""" A__ : Union[str, Any] =10 A__ : Dict =datasets.Features( { """tokens""": datasets.Sequence(datasets.Value("""string""" ) ), """labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ), """answers""": datasets.Sequence( { """text""": datasets.Value("""string""" ), """answer_start""": datasets.Value("""int32""" ), } ), """id""": datasets.Value("""int64""" ), } ) A__ : Dict =datasets.Dataset.from_dict( { """tokens""": [["""foo"""] * 5] * n, """labels""": [[1] * 5] * n, """answers""": [{"""answer_start""": [97], """text""": ["""1976"""]}] * 10, """id""": list(range(__snake_case ) ), }, features=__snake_case, ) return dataset @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : Dict, __snake_case : Optional[int] ) -> int: """simple docstring""" A__ : str =str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" ) dataset.map(cache_file_name=__snake_case ) return filename # FILE_CONTENT + files __snake_case : Any = '\\n Text data.\n Second line of data.' @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : Optional[Any] ) -> int: """simple docstring""" A__ : List[str] =tmp_path_factory.mktemp("""data""" ) / """file.txt""" A__ : List[Any] =FILE_CONTENT with open(__snake_case, """w""" ) as f: f.write(__snake_case ) return filename @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : List[Any] ) -> str: """simple docstring""" import bza A__ : Union[str, Any] =tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2""" A__ : str =bytes(__snake_case, """utf-8""" ) with bza.open(__snake_case, """wb""" ) as f: f.write(__snake_case ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : Optional[Any] ) -> int: """simple docstring""" import gzip A__ : Dict =str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" ) A__ : List[str] =bytes(__snake_case, """utf-8""" ) with gzip.open(__snake_case, """wb""" ) as f: f.write(__snake_case ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : Optional[Any] ) -> Union[str, Any]: """simple docstring""" if datasets.config.LZ4_AVAILABLE: import lza.frame A__ : List[str] =tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4""" A__ : List[Any] =bytes(__snake_case, """utf-8""" ) with lza.frame.open(__snake_case, """wb""" ) as f: f.write(__snake_case ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : List[Any], __snake_case : List[Any] ) -> List[str]: """simple docstring""" if datasets.config.PY7ZR_AVAILABLE: import pyazr A__ : Union[str, Any] =tmp_path_factory.mktemp("""data""" ) / """file.txt.7z""" with pyazr.SevenZipFile(__snake_case, """w""" ) as archive: archive.write(__snake_case, arcname=os.path.basename(__snake_case ) ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : List[Any], __snake_case : List[Any] ) -> int: """simple docstring""" import tarfile A__ : List[str] =tmp_path_factory.mktemp("""data""" ) / """file.txt.tar""" with tarfile.TarFile(__snake_case, """w""" ) as f: f.add(__snake_case, arcname=os.path.basename(__snake_case ) ) return path @pytest.fixture(scope="""session""" ) 
def __lowerCamelCase ( __snake_case : List[str] ) -> str: """simple docstring""" import lzma A__ : Union[str, Any] =tmp_path_factory.mktemp("""data""" ) / """file.txt.xz""" A__ : str =bytes(__snake_case, """utf-8""" ) with lzma.open(__snake_case, """wb""" ) as f: f.write(__snake_case ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : List[str], __snake_case : str ) -> Union[str, Any]: """simple docstring""" import zipfile A__ : Any =tmp_path_factory.mktemp("""data""" ) / """file.txt.zip""" with zipfile.ZipFile(__snake_case, """w""" ) as f: f.write(__snake_case, arcname=os.path.basename(__snake_case ) ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : Dict ) -> List[str]: """simple docstring""" if datasets.config.ZSTANDARD_AVAILABLE: import zstandard as zstd A__ : Optional[int] =tmp_path_factory.mktemp("""data""" ) / """file.txt.zst""" A__ : Any =bytes(__snake_case, """utf-8""" ) with zstd.open(__snake_case, """wb""" ) as f: f.write(__snake_case ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : List[Any] ) -> Optional[Any]: """simple docstring""" A__ : Dict =tmp_path_factory.mktemp("""data""" ) / """file.xml""" A__ : Optional[Any] =textwrap.dedent( """\ <?xml version=\"1.0\" encoding=\"UTF-8\" ?> <tmx version=\"1.4\"> <header segtype=\"sentence\" srclang=\"ca\" /> <body> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv> </tu> </body> </tmx>""" ) with open(__snake_case, """w""" ) as f: f.write(__snake_case ) return filename __snake_case : Dict = [ {'col_1': '0', 'col_2': 0, 'col_3': 0.0}, {'col_1': '1', 'col_2': 1, 'col_3': 1.0}, {'col_1': '2', 'col_2': 2, 'col_3': 2.0}, {'col_1': '3', 'col_2': 3, 'col_3': 3.0}, ] __snake_case : List[Any] = [ {'col_1': '4', 'col_2': 4, 'col_3': 4.0}, {'col_1': '5', 'col_2': 5, 'col_3': 5.0}, ] __snake_case : Any = { 'col_1': ['0', '1', '2', '3'], 'col_2': [0, 1, 2, 3], 'col_3': [0.0, 1.0, 2.0, 3.0], } __snake_case : List[Any] = [ {'col_3': 0.0, 'col_1': '0', 'col_2': 0}, {'col_3': 1.0, 'col_1': '1', 'col_2': 1}, ] __snake_case : List[Any] = [ {'col_1': 's0', 'col_2': 0, 'col_3': 0.0}, {'col_1': 's1', 'col_2': 1, 'col_3': 1.0}, {'col_1': 's2', 'col_2': 2, 'col_3': 2.0}, {'col_1': 's3', 'col_2': 3, 'col_3': 3.0}, ] @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( ) -> Tuple: """simple docstring""" return DATA_DICT_OF_LISTS @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : Dict ) -> Tuple: """simple docstring""" A__ : Dict =datasets.Dataset.from_dict(__snake_case ) A__ : Dict =str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" ) dataset.map(cache_file_name=__snake_case ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : Optional[int] ) -> Dict: """simple docstring""" A__ : List[str] =str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" ) with contextlib.closing(sqlitea.connect(__snake_case ) ) as con: A__ : Any =con.cursor() cur.execute("""CREATE TABLE dataset(col_1 
text, col_2 int, col_3 real)""" ) for item in DATA: cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""", tuple(item.values() ) ) con.commit() return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : List[Any] ) -> Optional[int]: """simple docstring""" A__ : Optional[Any] =str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" ) with open(__snake_case, """w""", newline="""""" ) as f: A__ : Tuple =csv.DictWriter(__snake_case, fieldnames=["""col_1""", """col_2""", """col_3"""] ) writer.writeheader() for item in DATA: writer.writerow(__snake_case ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : Optional[Any] ) -> Union[str, Any]: """simple docstring""" A__ : str =str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" ) with open(__snake_case, """w""", newline="""""" ) as f: A__ : str =csv.DictWriter(__snake_case, fieldnames=["""col_1""", """col_2""", """col_3"""] ) writer.writeheader() for item in DATA: writer.writerow(__snake_case ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : str, __snake_case : int ) -> Tuple: """simple docstring""" import bza A__ : List[str] =tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2""" with open(__snake_case, """rb""" ) as f: A__ : Optional[Any] =f.read() # data = bytes(FILE_CONTENT, "utf-8") with bza.open(__snake_case, """wb""" ) as f: f.write(__snake_case ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Optional[int], __snake_case : Dict ) -> List[str]: """simple docstring""" A__ : List[Any] =tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip""" with zipfile.ZipFile(__snake_case, """w""" ) as f: f.write(__snake_case, arcname=os.path.basename(__snake_case ) ) f.write(__snake_case, arcname=os.path.basename(__snake_case ) ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : Any, __snake_case : List[Any], __snake_case : str ) -> Optional[int]: """simple docstring""" A__ : Tuple =tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip""" with zipfile.ZipFile(__snake_case, """w""" ) as f: f.write(__snake_case, arcname=os.path.basename(csv_path.replace(""".csv""", """.CSV""" ) ) ) f.write(__snake_case, arcname=os.path.basename(csva_path.replace(""".csv""", """.CSV""" ) ) ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Tuple, __snake_case : Any ) -> str: """simple docstring""" A__ : str =tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip""" with zipfile.ZipFile(__snake_case, """w""" ) as f: f.write(__snake_case, arcname=os.path.join("""main_dir""", os.path.basename(__snake_case ) ) ) f.write(__snake_case, arcname=os.path.join("""main_dir""", os.path.basename(__snake_case ) ) ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : Optional[Any] ) -> Dict: """simple docstring""" A__ : Optional[Any] =str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" ) A__ : Optional[int] =pa.schema( { """col_1""": pa.string(), """col_2""": pa.intaa(), """col_3""": pa.floataa(), } ) with open(__snake_case, """wb""" ) as f: A__ : List[str] =pq.ParquetWriter(__snake_case, schema=__snake_case ) A__ : Tuple =pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__snake_case ) )] for k in DATA[0]}, schema=__snake_case ) writer.write_table(__snake_case ) writer.close() return path 
@pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : Tuple ) -> str: """simple docstring""" A__ : Union[str, Any] =str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" ) A__ : List[Any] ={"""data""": DATA} with open(__snake_case, """w""" ) as f: json.dump(__snake_case, __snake_case ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : Dict ) -> Optional[int]: """simple docstring""" A__ : Tuple =str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" ) A__ : Dict ={"""data""": DATA_DICT_OF_LISTS} with open(__snake_case, """w""" ) as f: json.dump(__snake_case, __snake_case ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : Dict ) -> int: """simple docstring""" A__ : List[str] =str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" ) with open(__snake_case, """w""" ) as f: for item in DATA: f.write(json.dumps(__snake_case ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : Tuple ) -> str: """simple docstring""" A__ : List[str] =str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" ) with open(__snake_case, """w""" ) as f: for item in DATA: f.write(json.dumps(__snake_case ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : str ) -> Dict: """simple docstring""" A__ : str =str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" ) with open(__snake_case, """w""" ) as f: for item in DATA_312: f.write(json.dumps(__snake_case ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : Dict ) -> Dict: """simple docstring""" A__ : Optional[int] =str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" ) with open(__snake_case, """w""" ) as f: for item in DATA_STR: f.write(json.dumps(__snake_case ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Optional[int] ) -> Optional[int]: """simple docstring""" import gzip A__ : Dict =str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" ) with open(__snake_case, """rb""" ) as orig_file: with gzip.open(__snake_case, """wb""" ) as zipped_file: zipped_file.writelines(__snake_case ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : Optional[int], __snake_case : Tuple ) -> Any: """simple docstring""" import gzip A__ : int =str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" ) with open(__snake_case, """rb""" ) as orig_file: with gzip.open(__snake_case, """wb""" ) as zipped_file: zipped_file.writelines(__snake_case ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : List[str], __snake_case : str, __snake_case : Tuple ) -> Optional[int]: """simple docstring""" A__ : List[Any] =tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip""" with zipfile.ZipFile(__snake_case, """w""" ) as f: f.write(__snake_case, arcname=os.path.basename(__snake_case ) ) f.write(__snake_case, arcname=os.path.basename(__snake_case ) ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : Tuple, __snake_case : Union[str, Any], __snake_case : Dict, __snake_case : Union[str, Any] ) -> Optional[int]: """simple docstring""" A__ : Optional[Any] =tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip""" with zipfile.ZipFile(__snake_case, """w""" ) as f: f.write(__snake_case, 
arcname=os.path.join("""nested""", os.path.basename(__snake_case ) ) ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : str, __snake_case : Dict ) -> Union[str, Any]: """simple docstring""" A__ : Dict =tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip""" with zipfile.ZipFile(__snake_case, """w""" ) as f: f.write(__snake_case, arcname=os.path.join("""main_dir""", os.path.basename(__snake_case ) ) ) f.write(__snake_case, arcname=os.path.join("""main_dir""", os.path.basename(__snake_case ) ) ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : List[Any], __snake_case : str, __snake_case : Any ) -> List[str]: """simple docstring""" A__ : List[Any] =tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar""" with tarfile.TarFile(__snake_case, """w""" ) as f: f.add(__snake_case, arcname=os.path.basename(__snake_case ) ) f.add(__snake_case, arcname=os.path.basename(__snake_case ) ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : Dict, __snake_case : List[str], __snake_case : List[str], __snake_case : Any ) -> Union[str, Any]: """simple docstring""" A__ : Any =tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar""" with tarfile.TarFile(__snake_case, """w""" ) as f: f.add(__snake_case, arcname=os.path.join("""nested""", os.path.basename(__snake_case ) ) ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : Optional[Any] ) -> Any: """simple docstring""" A__ : int =["""0""", """1""", """2""", """3"""] A__ : Any =str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" ) with open(__snake_case, """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : str ) -> Optional[Any]: """simple docstring""" A__ : Union[str, Any] =["""0""", """1""", """2""", """3"""] A__ : Tuple =str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" ) with open(__snake_case, """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : Tuple ) -> Optional[int]: """simple docstring""" A__ : Optional[int] =["""0""", """1""", """2""", """3"""] A__ : Dict =tmp_path_factory.mktemp("""data""" ) / """dataset.abc""" with open(__snake_case, """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : Any, __snake_case : int ) -> str: """simple docstring""" A__ : Union[str, Any] =tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip""" with zipfile.ZipFile(__snake_case, """w""" ) as f: f.write(__snake_case, arcname=os.path.basename(__snake_case ) ) f.write(__snake_case, arcname=os.path.basename(__snake_case ) ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : str, __snake_case : Union[str, Any], __snake_case : Union[str, Any] ) -> List[str]: """simple docstring""" A__ : str =tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip""" with zipfile.ZipFile(__snake_case, """w""" ) as f: f.write(__snake_case, arcname=os.path.join("""main_dir""", os.path.basename(__snake_case ) ) ) f.write(__snake_case, arcname=os.path.join("""main_dir""", os.path.basename(__snake_case ) ) ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : Union[str, 
Any], __snake_case : List[Any], __snake_case : Tuple ) -> int: """simple docstring""" A__ : Any =tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip""" with zipfile.ZipFile(__snake_case, """w""" ) as f: f.write(__snake_case, arcname=os.path.basename("""unsupported.ext""" ) ) f.write(__snake_case, arcname=os.path.basename("""unsupported_2.ext""" ) ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : List[str] ) -> Union[str, Any]: """simple docstring""" A__ : List[str] ="""\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] ) A__ : Tuple =str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" ) with open(__snake_case, """w""", encoding="""utf-8""" ) as f: f.write(__snake_case ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( ) -> Dict: """simple docstring""" return os.path.join("""tests""", """features""", """data""", """test_image_rgb.jpg""" ) @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( ) -> List[str]: """simple docstring""" return os.path.join("""tests""", """features""", """data""", """test_audio_44100.wav""" ) @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : int, __snake_case : List[str] ) -> int: """simple docstring""" A__ : Union[str, Any] =tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip""" with zipfile.ZipFile(__snake_case, """w""" ) as f: f.write(__snake_case, arcname=os.path.basename(__snake_case ) ) f.write(__snake_case, arcname=os.path.basename(__snake_case ).replace(""".jpg""", """2.jpg""" ) ) return path @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( __snake_case : Optional[int] ) -> str: """simple docstring""" A__ : Dict =tmp_path_factory.mktemp("""data_dir""" ) (data_dir / "subdir").mkdir() with open(data_dir / """subdir""" / """train.txt""", """w""" ) as f: f.write("""foo\n""" * 10 ) with open(data_dir / """subdir""" / """test.txt""", """w""" ) as f: f.write("""bar\n""" * 10 ) # hidden file with open(data_dir / """subdir""" / """.test.txt""", """w""" ) as f: f.write("""bar\n""" * 10 ) # hidden directory (data_dir / ".subdir").mkdir() with open(data_dir / """.subdir""" / """train.txt""", """w""" ) as f: f.write("""foo\n""" * 10 ) with open(data_dir / """.subdir""" / """test.txt""", """w""" ) as f: f.write("""bar\n""" * 10 ) return data_dir
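A minimal usage sketch for the fixture suite above, assuming pytest resolves one of the plain-text fixtures under the hypothetical name `text_path` (the dump obfuscates the real fixture names, so this is illustrative only):

def test_text_file_contents(text_path):
    # pytest injects the session-scoped fixture; it returns a filesystem path.
    with open(text_path, encoding="utf-8") as f:
        content = f.read()
    assert "Text data." in content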
687
'''simple docstring''' import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionTextToImagePipeline from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device __snake_case : str = False class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' pass @nightly @require_torch_gpu class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowercase__ ( self : Optional[Any] ) -> Any: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' A__ : List[str] =VersatileDiffusionTextToImagePipeline.from_pretrained("""shi-labs/versatile-diffusion""" ) # remove text_unet pipe.remove_unused_weights() pipe.to(lowerCAmelCase_ ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) A__ : int ="""A painting of a squirrel eating a burger """ A__ : Tuple =torch.manual_seed(0 ) A__ : int =pipe( prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCAmelCase_ ) A__ : str =VersatileDiffusionTextToImagePipeline.from_pretrained(lowerCAmelCase_ ) pipe.to(lowerCAmelCase_ ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) A__ : int =generator.manual_seed(0 ) A__ : Tuple =pipe( prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass" def lowercase__ ( self : Optional[int] ) -> int: '''simple docstring''' A__ : Any =VersatileDiffusionTextToImagePipeline.from_pretrained( """shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa ) pipe.to(lowerCAmelCase_ ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) A__ : Dict ="""A painting of a squirrel eating a burger """ A__ : Optional[int] =torch.manual_seed(0 ) A__ : List[str] =pipe( prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images A__ : List[str] =image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) A__ : Tuple =np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
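Illustrative only: the seeding trick the reproducibility assertion above depends on. Two identically seeded `torch.Generator`s yield identical random draws, which is what makes the outputs of the two pipeline runs comparable:

import torch

gen_a = torch.Generator().manual_seed(0)
gen_b = torch.Generator().manual_seed(0)
a = torch.randn(2, 3, generator=gen_a)
b = torch.randn(2, 3, generator=gen_b)
assert torch.equal(a, b)  # same seed, same draws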
687
1
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
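Assuming the shim above, constructing the deprecated class surfaces the migration warning; a hedged sketch of catching it in a test (assumes the processor is default-constructible, as its arguments all have defaults):

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    _ = YolosFeatureExtractor()  # illustrative only
assert any(issubclass(w.category, FutureWarning) for w in caught)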
687
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = 42 class lowerCamelCase ( lowercase_ , lowercase_ ): '''simple docstring''' @register_to_config def __init__( self : List[str] , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : Tuple[str] = ("DownEncoderBlock2D",) , lowerCAmelCase_ : Tuple[str] = ("UpDecoderBlock2D",) , lowerCAmelCase_ : Tuple[int] = (64,) , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : str = "silu" , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 32 , lowerCAmelCase_ : int = 2_56 , lowerCAmelCase_ : int = 32 , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : float = 0.18215 , lowerCAmelCase_ : str = "group" , ) -> List[str]: '''simple docstring''' super().__init__() # pass init params to Encoder A__ : Optional[Any] =Encoder( in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , down_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , double_z=lowerCAmelCase_ , ) A__ : Dict =vq_embed_dim if vq_embed_dim is not None else latent_channels A__ : Union[str, Any] =nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , 1 ) A__ : Optional[int] =VectorQuantizer(lowerCAmelCase_ , lowerCAmelCase_ , beta=0.25 , remap=lowerCAmelCase_ , sane_index_shape=lowerCAmelCase_ ) A__ : Tuple =nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , 1 ) # pass init params to Decoder A__ : Optional[Any] =Decoder( in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , up_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , norm_type=lowerCAmelCase_ , ) @apply_forward_hook def lowercase__ ( self : List[str] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True ) -> VQEncoderOutput: '''simple docstring''' A__ : Dict =self.encoder(lowerCAmelCase_ ) A__ : Union[str, Any] =self.quant_conv(lowerCAmelCase_ ) if not return_dict: return (h,) return VQEncoderOutput(latents=lowerCAmelCase_ ) @apply_forward_hook def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]: '''simple docstring''' # also go through quantization layer if not force_not_quantize: A__ , A__ , A__ : Tuple =self.quantize(lowerCAmelCase_ ) else: A__ : List[str] =h A__ : Dict =self.post_quant_conv(lowerCAmelCase_ ) A__ : List[Any] =self.decoder(lowerCAmelCase_ , quant if self.config.norm_type == """spatial""" else None ) if not return_dict: return (dec,) return DecoderOutput(sample=lowerCAmelCase_ ) def lowercase__ ( self : str , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]: '''simple docstring''' A__ : Optional[int] =sample A__ : Union[str, Any] =self.encode(lowerCAmelCase_ ).latents A__ : Tuple =self.decode(lowerCAmelCase_ ).sample if not return_dict: return (dec,) return DecoderOutput(sample=lowerCAmelCase_ )
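A hedged round-trip sketch for the VQ autoencoder above, assuming a recent `diffusers` release where `VQModel` exposes `encode`/`decode` as shown (the keyword defaults here are chosen to keep the model tiny):

import torch
from diffusers import VQModel

model = VQModel(block_out_channels=(64,), layers_per_block=1, num_vq_embeddings=256)
model.eval()

x = torch.randn(1, 3, 32, 32)  # (batch, channels, height, width)
with torch.no_grad():
    latents = model.encode(x).latents     # continuous latents before quantization
    recon = model.decode(latents).sample  # quantize, then decode to pixel space
assert recon.shape == x.shape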
687
1
'''simple docstring''' import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowercase__ ( self : Optional[Any] ) -> int: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() def lowercase__ ( self : Union[str, Any] ) -> str: '''simple docstring''' A__ : Any =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) A__ : Optional[Any] =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) A__ : Optional[int] ="""xvjiarui/stable-diffusion-2-inpainting""" A__ , A__ : List[str] =FlaxStableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase_ , safety_checker=lowerCAmelCase_ ) A__ : List[str] ="""Face of a yellow cat, high resolution, sitting on a park bench""" A__ : Optional[Any] =jax.random.PRNGKey(0 ) A__ : List[str] =50 A__ : List[str] =jax.device_count() A__ : List[str] =num_samples * [prompt] A__ : List[str] =num_samples * [init_image] A__ : Tuple =num_samples * [mask_image] A__ , A__ , A__ : List[Any] =pipeline.prepare_inputs(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # shard inputs and rng A__ : Dict =replicate(lowerCAmelCase_ ) A__ : Union[str, Any] =jax.random.split(lowerCAmelCase_ , jax.device_count() ) A__ : List[Any] =shard(lowerCAmelCase_ ) A__ : Union[str, Any] =shard(lowerCAmelCase_ ) A__ : str =shard(lowerCAmelCase_ ) A__ : List[str] =pipeline( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ) A__ : List[Any] =output.images.reshape(lowerCAmelCase_ , 5_12 , 5_12 , 3 ) A__ : str =images[0, 2_53:2_56, 2_53:2_56, -1] A__ : Tuple =jnp.asarray(jax.device_get(image_slice.flatten() ) ) A__ : Optional[int] =jnp.array( [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] ) print(f"output_slice: {output_slice}" ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
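An illustrative sketch of the `replicate`/`shard` pattern the Flax test relies on: parameters are copied to every local device, the batch's leading axis is split across devices, and a `pmap`ped function runs once per device:

import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

params = replicate({"w": jnp.ones((3,))})  # one copy per local device
n = jax.local_device_count()
batch = shard(jnp.ones((n * 2, 3)))        # -> (num_devices, 2, 3)

out = jax.pmap(lambda p, x: x @ p["w"])(params, batch)
print(out.shape)                           # (num_devices, 2)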
687
'''simple docstring''' import os import re from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __snake_case : Optional[int] = logging.get_logger(__name__) __snake_case : Tuple = { 'vocab_file': 'vocab.txt', 'merges_file': 'bpe.codes', } __snake_case : str = { 'vocab_file': { 'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt', 'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt', }, 'merges_file': { 'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes', 'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes', }, } __snake_case : List[Any] = { 'vinai/phobert-base': 256, 'vinai/phobert-large': 256, } def __lowerCamelCase ( __snake_case : Union[str, Any] ) -> str: """simple docstring""" A__ : Optional[int] =set() A__ : Optional[int] =word[0] for char in word[1:]: pairs.add((prev_char, char) ) A__ : str =char A__ : List[Any] =set(__snake_case ) return pairs class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = VOCAB_FILES_NAMES __snake_case = PRETRAINED_VOCAB_FILES_MAP __snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any]="<s>" , lowerCAmelCase_ : List[str]="</s>" , lowerCAmelCase_ : str="</s>" , lowerCAmelCase_ : int="<s>" , lowerCAmelCase_ : List[str]="<unk>" , lowerCAmelCase_ : Any="<pad>" , lowerCAmelCase_ : Tuple="<mask>" , **lowerCAmelCase_ : Dict , ) -> Dict: '''simple docstring''' super().__init__( bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , **lowerCAmelCase_ , ) A__ : int =vocab_file A__ : Any =merges_file A__ : Union[str, Any] ={} A__ : Optional[int] =0 A__ : List[Any] =1 A__ : Tuple =2 A__ : Dict =3 self.add_from_file(lowerCAmelCase_ ) A__ : List[str] ={v: k for k, v in self.encoder.items()} with open(lowerCAmelCase_ , encoding="""utf-8""" ) as merges_handle: A__ : str =merges_handle.read().split("""\n""" )[:-1] A__ : Tuple =[tuple(merge.split()[:-1] ) for merge in merges] A__ : Optional[Any] =dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) A__ : Dict ={} def lowercase__ ( self : Tuple , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A__ : Dict =[self.cls_token_id] A__ : Union[str, Any] =[self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowercase__ ( self : str , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None , lowerCAmelCase_ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase_ )) + [1] return [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1] + ([0] * len(lowerCAmelCase_ )) + [1] def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' A__ : Tuple =[self.sep_token_id] A__ : Dict =[self.cls_token_id] if 
token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowercase__ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' return len(self.encoder ) def lowercase__ ( self : Any ) -> Tuple: '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def lowercase__ ( self : str , lowerCAmelCase_ : Any ) -> Dict: '''simple docstring''' if token in self.cache: return self.cache[token] A__ : int =tuple(lowerCAmelCase_ ) A__ : Optional[int] =tuple(list(word[:-1] ) + [word[-1] + """</w>"""] ) A__ : Tuple =get_pairs(lowerCAmelCase_ ) if not pairs: return token while True: A__ : List[Any] =min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break A__ , A__ : Tuple =bigram A__ : Optional[int] =[] A__ : Tuple =0 while i < len(lowerCAmelCase_ ): try: A__ : str =word.index(lowerCAmelCase_ , lowerCAmelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) A__ : Union[str, Any] =j if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 A__ : Dict =tuple(lowerCAmelCase_ ) A__ : Dict =new_word if len(lowerCAmelCase_ ) == 1: break else: A__ : str =get_pairs(lowerCAmelCase_ ) A__ : Dict ="""@@ """.join(lowerCAmelCase_ ) A__ : Tuple =word[:-4] A__ : Any =word return word def lowercase__ ( self : List[str] , lowerCAmelCase_ : str ) -> Any: '''simple docstring''' A__ : int =[] A__ : Optional[int] =re.findall(R"""\S+\n?""" , lowerCAmelCase_ ) for token in words: split_tokens.extend(list(self.bpe(lowerCAmelCase_ ).split(""" """ ) ) ) return split_tokens def lowercase__ ( self : str , lowerCAmelCase_ : Union[str, Any] ) -> int: '''simple docstring''' return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) ) def lowercase__ ( self : Tuple , lowerCAmelCase_ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' return self.decoder.get(lowerCAmelCase_ , self.unk_token ) def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' A__ : Optional[Any] =""" """.join(lowerCAmelCase_ ).replace("""@@ """ , """""" ).strip() return out_string def lowercase__ ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(lowerCAmelCase_ ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return A__ : Optional[Any] =os.path.join( lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) A__ : Tuple =os.path.join( lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ): copyfile(self.vocab_file , lowerCAmelCase_ ) if os.path.abspath(self.merges_file ) != os.path.abspath(lowerCAmelCase_ ): copyfile(self.merges_file , lowerCAmelCase_ ) return out_vocab_file, out_merge_file def lowercase__ ( self : List[Any] , lowerCAmelCase_ : Optional[Any] ) -> Any: '''simple docstring''' if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): try: with open(lowerCAmelCase_ , """r""" , encoding="""utf-8""" ) as fd: self.add_from_file(lowerCAmelCase_ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise 
Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset" ) return A__ : Union[str, Any] =f.readlines() for lineTmp in lines: A__ : List[Any] =lineTmp.strip() A__ : Dict =line.rfind(""" """ ) if idx == -1: raise ValueError("""Incorrect dictionary format, expected '<token> <cnt>'""" ) A__ : Tuple =line[:idx] A__ : Tuple =len(self.encoder )
687
1
'''simple docstring'''


def perfect(number: int) -> bool:
    """Return True if `number` equals the sum of its proper divisors."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print('Program to check whether a number is a Perfect number or not...')
    number = int(input('Enter number: ').strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
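A quick sanity check of the helper above against the first known perfect numbers:

assert perfect(6)       # 1 + 2 + 3
assert perfect(28)      # 1 + 2 + 4 + 7 + 14
assert perfect(496)
assert not perfect(27)  # proper divisors 1 + 3 + 9 = 13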
687
'''simple docstring''' import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging __snake_case : List[str] = logging.get_logger(__name__) def __lowerCamelCase ( __snake_case : Any, __snake_case : Any ) -> int: """simple docstring""" A__ : Union[str, Any] =nn.functional.normalize(__snake_case ) A__ : Optional[Any] =nn.functional.normalize(__snake_case ) return torch.mm(__snake_case, normalized_text_embeds.t() ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = CLIPConfig __snake_case = ['CLIPEncoderLayer'] def __init__( self : Tuple , lowerCAmelCase_ : CLIPConfig ) -> Dict: '''simple docstring''' super().__init__(lowerCAmelCase_ ) A__ : str =CLIPVisionModel(config.vision_config ) A__ : Optional[Any] =nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=lowerCAmelCase_ ) A__ : List[Any] =nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=lowerCAmelCase_ ) A__ : Any =nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=lowerCAmelCase_ ) A__ : Optional[Any] =nn.Parameter(torch.ones(17 ) , requires_grad=lowerCAmelCase_ ) A__ : int =nn.Parameter(torch.ones(3 ) , requires_grad=lowerCAmelCase_ ) @torch.no_grad() def lowercase__ ( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int ) -> Any: '''simple docstring''' A__ : Any =self.vision_model(lowerCAmelCase_ )[1] # pooled_output A__ : Any =self.visual_projection(lowerCAmelCase_ ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 A__ : Any =cosine_distance(lowerCAmelCase_ , self.special_care_embeds ).cpu().float().numpy() A__ : Optional[int] =cosine_distance(lowerCAmelCase_ , self.concept_embeds ).cpu().float().numpy() A__ : List[str] =[] A__ : Optional[int] =image_embeds.shape[0] for i in range(lowerCAmelCase_ ): A__ : List[Any] ={"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []} # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign images A__ : List[Any] =0.0 for concept_idx in range(len(special_cos_dist[0] ) ): A__ : Optional[Any] =special_cos_dist[i][concept_idx] A__ : Union[str, Any] =self.special_care_embeds_weights[concept_idx].item() A__ : Tuple =round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["special_scores"][concept_idx] > 0: result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]} ) A__ : Dict =0.01 for concept_idx in range(len(cos_dist[0] ) ): A__ : Optional[int] =cos_dist[i][concept_idx] A__ : List[str] =self.concept_embeds_weights[concept_idx].item() A__ : Optional[int] =round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["concept_scores"][concept_idx] > 0: result_img["bad_concepts"].append(lowerCAmelCase_ ) result.append(lowerCAmelCase_ ) A__ : int =[len(res["""bad_concepts"""] ) > 0 for res in result] return images, has_nsfw_concepts @torch.no_grad() def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : torch.FloatTensor ) -> Optional[int]: '''simple docstring''' A__ : Optional[Any] =self.vision_model(lowerCAmelCase_ )[1] # pooled_output A__ : List[Any] =self.visual_projection(lowerCAmelCase_ ) A__ : Union[str, Any] =cosine_distance(lowerCAmelCase_ , self.special_care_embeds ) A__ : Optional[int] =cosine_distance(lowerCAmelCase_ , self.concept_embeds ) # increase this value to 
create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images A__ : Dict =0.0 A__ : Dict =special_cos_dist - self.special_care_embeds_weights + adjustment # special_scores = special_scores.round(decimals=3) A__ : Union[str, Any] =torch.any(special_scores > 0 , dim=1 ) A__ : Tuple =special_care * 0.01 A__ : str =special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] ) A__ : List[Any] =(cos_dist - self.concept_embeds_weights) + special_adjustment # concept_scores = concept_scores.round(decimals=3) A__ : Optional[int] =torch.any(concept_scores > 0 , dim=1 ) return images, has_nsfw_concepts
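A minimal, self-contained sketch of the normalized cosine-similarity matrix the `cosine_distance` helper above computes (rows are image embeddings, columns are concept embeddings):

import torch
import torch.nn as nn

img = torch.randn(2, 8)   # 2 image embeddings of dim 8
txt = torch.randn(5, 8)   # 5 concept embeddings of dim 8
sim = torch.mm(nn.functional.normalize(img), nn.functional.normalize(txt).t())
print(sim.shape)          # torch.Size([2, 5]); every entry lies in [-1, 1]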
687
1
'''simple docstring''' import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Tuple, __snake_case : Optional[Any] ) -> Optional[int]: """simple docstring""" if isinstance(__snake_case, torch.Tensor ): return image elif isinstance(__snake_case, PIL.Image.Image ): A__ : Optional[Any] =[image] if isinstance(image[0], PIL.Image.Image ): A__ : Optional[int] =[np.array(i.resize((w, h), resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image] A__ : List[str] =np.concatenate(__snake_case, axis=0 ) A__ : List[Any] =np.array(__snake_case ).astype(np.floataa ) / 2_55.0 A__ : Tuple =image.transpose(0, 3, 1, 2 ) A__ : Any =2.0 * image - 1.0 A__ : Any =torch.from_numpy(__snake_case ) elif isinstance(image[0], torch.Tensor ): A__ : List[str] =torch.cat(__snake_case, dim=0 ) return image def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Dict, __snake_case : List[Any], __snake_case : List[Any]=0.99_95 ) -> Optional[Any]: """simple docstring""" if not isinstance(__snake_case, np.ndarray ): A__ : List[Any] =True A__ : Tuple =va.device A__ : str =va.cpu().numpy() A__ : List[str] =va.cpu().numpy() A__ : List[str] =np.sum(va * va / (np.linalg.norm(__snake_case ) * np.linalg.norm(__snake_case )) ) if np.abs(__snake_case ) > DOT_THRESHOLD: A__ : str =(1 - t) * va + t * va else: A__ : Optional[int] =np.arccos(__snake_case ) A__ : Optional[int] =np.sin(__snake_case ) A__ : Union[str, Any] =theta_a * t A__ : Union[str, Any] =np.sin(__snake_case ) A__ : Optional[Any] =np.sin(theta_a - theta_t ) / sin_theta_a A__ : Any =sin_theta_t / sin_theta_a A__ : Optional[int] =sa * va + sa * va if inputs_are_torch: A__ : Dict =torch.from_numpy(__snake_case ).to(__snake_case ) return va def __lowerCamelCase ( __snake_case : List[Any], __snake_case : List[str] ) -> Union[str, Any]: """simple docstring""" A__ : Any =F.normalize(__snake_case, dim=-1 ) A__ : List[str] =F.normalize(__snake_case, dim=-1 ) return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 ) def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Any ) -> Tuple: """simple docstring""" for param in model.parameters(): A__ : Any =value class lowerCamelCase ( lowercase_ ): '''simple docstring''' def __init__( self : List[Any] , lowerCAmelCase_ : AutoencoderKL , lowerCAmelCase_ : CLIPTextModel , lowerCAmelCase_ : CLIPModel , lowerCAmelCase_ : CLIPTokenizer , lowerCAmelCase_ : UNetaDConditionModel , lowerCAmelCase_ : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , lowerCAmelCase_ : CLIPFeatureExtractor , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : List[str]=None , ) -> Any: '''simple docstring''' super().__init__() self.register_modules( vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , clip_model=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , 
coca_model=lowerCAmelCase_ , coca_tokenizer=lowerCAmelCase_ , coca_transform=lowerCAmelCase_ , ) A__ : Union[str, Any] =( feature_extractor.size if isinstance(feature_extractor.size , lowerCAmelCase_ ) else feature_extractor.size["""shortest_edge"""] ) A__ : List[str] =transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std ) set_requires_grad(self.text_encoder , lowerCAmelCase_ ) set_requires_grad(self.clip_model , lowerCAmelCase_ ) def lowercase__ ( self : List[str] , lowerCAmelCase_ : Optional[Union[str, int]] = "auto" ) -> int: '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory A__ : Union[str, Any] =self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(lowerCAmelCase_ ) def lowercase__ ( self : str ) -> List[Any]: '''simple docstring''' self.enable_attention_slicing(lowerCAmelCase_ ) def lowercase__ ( self : int ) -> Any: '''simple docstring''' set_requires_grad(self.vae , lowerCAmelCase_ ) def lowercase__ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' set_requires_grad(self.vae , lowerCAmelCase_ ) def lowercase__ ( self : List[Any] ) -> Dict: '''simple docstring''' set_requires_grad(self.unet , lowerCAmelCase_ ) def lowercase__ ( self : Tuple ) -> str: '''simple docstring''' set_requires_grad(self.unet , lowerCAmelCase_ ) def lowercase__ ( self : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] ) -> Tuple: '''simple docstring''' # get the original timestep using init_timestep A__ : List[Any] =min(int(num_inference_steps * strength ) , lowerCAmelCase_ ) A__ : Union[str, Any] =max(num_inference_steps - init_timestep , 0 ) A__ : Any =self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def lowercase__ ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any]=None ) -> Tuple: '''simple docstring''' if not isinstance(lowerCAmelCase_ , torch.Tensor ): raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(lowerCAmelCase_ )}" ) A__ : Optional[int] =image.to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): A__ : Dict =[ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowerCAmelCase_ ) ] A__ : Any =torch.cat(lowerCAmelCase_ , dim=0 ) else: A__ : List[Any] =self.vae.encode(lowerCAmelCase_ ).latent_dist.sample(lowerCAmelCase_ ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor A__ : Dict =0.18215 * init_latents A__ : Optional[Any] =init_latents.repeat_interleave(lowerCAmelCase_ , dim=0 ) A__ : Optional[Any] =randn_tensor(init_latents.shape , generator=lowerCAmelCase_ , device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ) # get latents A__ : Union[str, Any] =self.scheduler.add_noise(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) A__ : Optional[Any] =init_latents return latents def lowercase__ ( self : Tuple , lowerCAmelCase_ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' A__ : Optional[int] =self.coca_transform(lowerCAmelCase_ ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): A__ : Union[str, Any] =self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) ) A__ : Dict =self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return 
generated.split("""<end_of_text>""" )[0].replace("""<start_of_text>""" , """""" ).rstrip(""" .,""" ) def lowercase__ ( self : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] ) -> str: '''simple docstring''' A__ : Union[str, Any] =self.feature_extractor.preprocess(lowerCAmelCase_ ) A__ : Union[str, Any] =torch.from_numpy(clip_image_input["""pixel_values"""][0] ).unsqueeze(0 ).to(self.device ).half() A__ : str =self.clip_model.get_image_features(lowerCAmelCase_ ) A__ : List[Any] =image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowerCAmelCase_ ) A__ : List[Any] =image_embeddings_clip.repeat_interleave(lowerCAmelCase_ , dim=0 ) return image_embeddings_clip @torch.enable_grad() def lowercase__ ( self : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple , ) -> str: '''simple docstring''' A__ : Dict =latents.detach().requires_grad_() A__ : List[str] =self.scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ ) # predict the noise residual A__ : int =self.unet(lowerCAmelCase_ , lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ ).sample if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ): A__ : Union[str, Any] =self.scheduler.alphas_cumprod[timestep] A__ : Union[str, Any] =1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf A__ : Any =(latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 A__ : str =torch.sqrt(lowerCAmelCase_ ) A__ : Optional[int] =pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler , lowerCAmelCase_ ): A__ : Optional[int] =self.scheduler.sigmas[index] A__ : Optional[Any] =latents - sigma * noise_pred else: raise ValueError(f"scheduler type {type(self.scheduler )} not supported" ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor A__ : str =1 / 0.18215 * sample A__ : List[str] =self.vae.decode(lowerCAmelCase_ ).sample A__ : Any =(image / 2 + 0.5).clamp(0 , 1 ) A__ : Union[str, Any] =transforms.Resize(self.feature_extractor_size )(lowerCAmelCase_ ) A__ : int =self.normalize(lowerCAmelCase_ ).to(latents.dtype ) A__ : int =self.clip_model.get_image_features(lowerCAmelCase_ ) A__ : Optional[Any] =image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowerCAmelCase_ ) A__ : Optional[int] =spherical_dist_loss(lowerCAmelCase_ , lowerCAmelCase_ ).mean() * clip_guidance_scale A__ : Union[str, Any] =-torch.autograd.grad(lowerCAmelCase_ , lowerCAmelCase_ )[0] if isinstance(self.scheduler , lowerCAmelCase_ ): A__ : List[Any] =latents.detach() + grads * (sigma**2) A__ : List[str] =noise_pred_original else: A__ : Optional[Any] =noise_pred_original - torch.sqrt(lowerCAmelCase_ ) * grads return noise_pred, latents @torch.no_grad() def __call__( self : List[Any] , lowerCAmelCase_ : Union[torch.FloatTensor, PIL.Image.Image] , lowerCAmelCase_ : Union[torch.FloatTensor, PIL.Image.Image] , lowerCAmelCase_ : Optional[str] = None , lowerCAmelCase_ : Optional[str] = None , lowerCAmelCase_ : Optional[int] = 5_12 , lowerCAmelCase_ : Optional[int] = 5_12 , lowerCAmelCase_ : float = 0.6 , lowerCAmelCase_ : Optional[int] = 50 , lowerCAmelCase_ : Optional[float] = 7.5 , lowerCAmelCase_ : Optional[int] = 1 , lowerCAmelCase_ 
: float = 0.0 , lowerCAmelCase_ : Optional[float] = 1_00 , lowerCAmelCase_ : Optional[torch.Generator] = None , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : float = 0.8 , lowerCAmelCase_ : float = 0.1 , lowerCAmelCase_ : float = 0.1 , ) -> Optional[int]: '''simple docstring''' if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) != batch_size: raise ValueError(f"You have passed {batch_size} batch_size, but only {len(lowerCAmelCase_ )} generators." ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." ) if isinstance(lowerCAmelCase_ , torch.Generator ) and batch_size > 1: A__ : List[str] =[generator] + [None] * (batch_size - 1) A__ : Any =[ ("""model""", self.coca_model is None), ("""tokenizer""", self.coca_tokenizer is None), ("""transform""", self.coca_transform is None), ] A__ : str =[x[0] for x in coca_is_none if x[1]] A__ : Dict =""", """.join(lowerCAmelCase_ ) # generate prompts with coca model if prompt is None if content_prompt is None: if len(lowerCAmelCase_ ): raise ValueError( f"Content prompt is None and CoCa [{coca_is_none_str}] is None." f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." ) A__ : Any =self.get_image_description(lowerCAmelCase_ ) if style_prompt is None: if len(lowerCAmelCase_ ): raise ValueError( f"Style prompt is None and CoCa [{coca_is_none_str}] is None." f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." ) A__ : Tuple =self.get_image_description(lowerCAmelCase_ ) # get prompt text embeddings for content and style A__ : List[str] =self.tokenizer( lowerCAmelCase_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=lowerCAmelCase_ , return_tensors="""pt""" , ) A__ : List[Any] =self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] A__ : Any =self.tokenizer( lowerCAmelCase_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=lowerCAmelCase_ , return_tensors="""pt""" , ) A__ : Dict =self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] A__ : Dict =slerp(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # duplicate text embeddings for each generation per prompt A__ : Optional[Any] =text_embeddings.repeat_interleave(lowerCAmelCase_ , dim=0 ) # set timesteps A__ : Union[str, Any] ="""offset""" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) A__ : str ={} if accepts_offset: A__ : List[str] =1 self.scheduler.set_timesteps(lowerCAmelCase_ , **lowerCAmelCase_ ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) A__ , A__ : List[Any] =self.get_timesteps(lowerCAmelCase_ , lowerCAmelCase_ , self.device ) A__ : str =timesteps[:1].repeat(lowerCAmelCase_ ) # Preprocess image A__ : Optional[int] =preprocess(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) A__ : Optional[int] =self.prepare_latents( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , text_embeddings.dtype , self.device , lowerCAmelCase_ ) A__ : Optional[Any] =preprocess(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) A__ : Any =self.prepare_latents( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , text_embeddings.dtype , self.device , lowerCAmelCase_ ) A__ : List[Any] =slerp(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) if clip_guidance_scale > 0: A__ : 
Optional[int] =self.get_clip_image_embeddings(lowerCAmelCase_ , lowerCAmelCase_ ) A__ : Dict =self.get_clip_image_embeddings(lowerCAmelCase_ , lowerCAmelCase_ ) A__ : Union[str, Any] =slerp( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. A__ : Dict =guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: A__ : Dict =content_text_input.input_ids.shape[-1] A__ : Dict =self.tokenizer([""""""] , padding="""max_length""" , max_length=lowerCAmelCase_ , return_tensors="""pt""" ) A__ : int =self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt A__ : Tuple =uncond_embeddings.repeat_interleave(lowerCAmelCase_ , dim=0 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes A__ : int =torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. A__ : int =(batch_size, self.unet.config.in_channels, height // 8, width // 8) A__ : Dict =text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps A__ : Any =torch.randn(lowerCAmelCase_ , generator=lowerCAmelCase_ , device="""cpu""" , dtype=lowerCAmelCase_ ).to( self.device ) else: A__ : List[str] =torch.randn(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=self.device , dtype=lowerCAmelCase_ ) else: if latents.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" ) A__ : Tuple =latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler A__ : str =latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] A__ : Optional[Any] ="""eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) A__ : Any ={} if accepts_eta: A__ : Union[str, Any] =eta # check if the scheduler accepts generator A__ : str ="""generator""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: A__ : str =generator with self.progress_bar(total=lowerCAmelCase_ ): for i, t in enumerate(lowerCAmelCase_ ): # expand the latents if we are doing classifier free guidance A__ : Any =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents A__ : Dict =self.scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ ) # predict the noise residual A__ : Optional[int] =self.unet(lowerCAmelCase_ , lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ ).sample # perform classifier free guidance if do_classifier_free_guidance: A__ , A__ : Dict =noise_pred.chunk(2 ) A__ : Union[str, Any] =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: A__ : Any =( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) A__ , A__ : List[str] =self.cond_fn( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) # compute the previous noisy sample x_t -> x_t-1 A__ : Optional[int] =self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor A__ : int =1 / 0.18215 * latents A__ : Optional[int] =self.vae.decode(lowerCAmelCase_ ).sample A__ : Tuple =(image / 2 + 0.5).clamp(0 , 1 ) A__ : List[str] =image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": A__ : Optional[Any] =self.numpy_to_pil(lowerCAmelCase_ ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=lowerCAmelCase_ , nsfw_content_detected=lowerCAmelCase_ )
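A self-contained check of the spherical interpolation (`slerp`) used above to blend content and style embeddings; this is a simplified reimplementation that omits the near-parallel linear fallback in the original:

import numpy as np


def slerp(t, v0, v1):
    dot = np.sum(v0 * v1) / (np.linalg.norm(v0) * np.linalg.norm(v1))
    theta = np.arccos(np.clip(dot, -1.0, 1.0))
    return (np.sin((1 - t) * theta) * v0 + np.sin(t * theta) * v1) / np.sin(theta)


v0, v1 = np.array([1.0, 0.0]), np.array([0.0, 1.0])
mid = slerp(0.5, v0, v1)
print(mid, np.linalg.norm(mid))  # [0.707... 0.707...], norm ~1.0 for unit endpoints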
687
'''simple docstring''' from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def __lowerCamelCase ( __snake_case : Tuple, __snake_case : List[Any] ) -> str: """simple docstring""" A__ : Optional[int] =[] for part_id in partition_order: A__ : int =df.where(f"SPARK_PARTITION_ID() = {part_id}" ).collect() for row_idx, row in enumerate(__snake_case ): expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()) ) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def __lowerCamelCase ( ) -> List[Any]: """simple docstring""" A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() A__ : str =spark.range(100 ).repartition(1 ) A__ : List[str] =Spark(__snake_case ) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=16 ) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 50 @require_not_windows @require_dill_gt_0_3_2 def __lowerCamelCase ( ) -> Tuple: """simple docstring""" A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() A__ : Tuple =spark.range(10 ).repartition(2 ) A__ : List[str] =[1, 0] A__ : Tuple =_generate_iterable_examples(__snake_case, __snake_case ) # Reverse the partitions. A__ : Dict =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, __snake_case ) for i, (row_id, row_dict) in enumerate(generate_fn() ): A__ , A__ : Union[str, Any] =expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def __lowerCamelCase ( ) -> List[Any]: """simple docstring""" A__ : Any =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() A__ : Union[str, Any] =spark.range(10 ).repartition(1 ) A__ : List[str] =SparkExamplesIterable(__snake_case ) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(__snake_case ): assert row_id == f"0_{i}" assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def __lowerCamelCase ( ) -> Any: """simple docstring""" A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() A__ : Union[str, Any] =spark.range(30 ).repartition(3 ) # Mock the generator so that shuffle reverses the partition indices. 
with patch("""numpy.random.Generator""" ) as generator_mock: A__ : Tuple =lambda __snake_case : __snake_case.reverse() A__ : List[str] =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [2, 1, 0] ) A__ : Union[str, Any] =SparkExamplesIterable(__snake_case ).shuffle_data_sources(__snake_case ) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(__snake_case ): A__ , A__ : List[Any] =expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def __lowerCamelCase ( ) -> Optional[Any]: """simple docstring""" A__ : List[Any] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() A__ : Any =spark.range(20 ).repartition(4 ) # Partitions 0 and 2 A__ : str =SparkExamplesIterable(__snake_case ).shard_data_sources(worker_id=0, num_workers=2 ) assert shard_it_a.n_shards == 2 A__ : Any =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [0, 2] ) for i, (row_id, row_dict) in enumerate(__snake_case ): A__ , A__ : Dict =expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 A__ : Union[str, Any] =SparkExamplesIterable(__snake_case ).shard_data_sources(worker_id=1, num_workers=2 ) assert shard_it_a.n_shards == 2 A__ : Union[str, Any] =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [1, 3] ) for i, (row_id, row_dict) in enumerate(__snake_case ): A__ , A__ : Optional[int] =expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def __lowerCamelCase ( ) -> Any: """simple docstring""" A__ : Optional[int] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() A__ : List[str] =spark.range(100 ).repartition(1 ) A__ : List[Any] =Spark(__snake_case ) # Choose a small max_shard_size for maximum partitioning. spark_builder._repartition_df_if_needed(max_shard_size=1 ) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 100
687
1
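The first and last Spark tests above hinge on simple arithmetic: 100 int64 rows at 8 bytes each with max_shard_size=16 should give 50 two-row partitions, and max_shard_size=1 caps out at one row per partition. A hedged sketch of that calculation (it mirrors the idea, not the library's exact internals):

import math

def expected_partitions(num_rows: int, row_size_bytes: int, max_shard_size: int) -> int:
    # A shard holds at most max_shard_size bytes, i.e. this many rows:
    rows_per_shard = max(1, max_shard_size // row_size_bytes)
    return math.ceil(num_rows / rows_per_shard)

assert expected_partitions(num_rows=100, row_size_bytes=8, max_shard_size=16) == 50
assert expected_partitions(num_rows=100, row_size_bytes=8, max_shard_size=1) == 100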
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowercase__ ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: A__ : Tuple =AutoConfig.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) A__ : int =TFAutoModel.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) A__ : str =AutoModel.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) @slow def lowercase__ ( self : Optional[int] ) -> Any: '''simple docstring''' # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: A__ : str =AutoConfig.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) A__ : int =TFAutoModelForPreTraining.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) A__ : Dict =AutoModelForPreTraining.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) @slow def lowercase__ ( self : str ) -> Any: '''simple docstring''' for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ : List[Any] =AutoConfig.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) A__ : List[Any] =TFAutoModelForCausalLM.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ ) A__ , A__ : int =TFAutoModelForCausalLM.from_pretrained( lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_pt=lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , 
lowerCAmelCase_ ) A__ : Dict =AutoModelForCausalLM.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ ) A__ , A__ : Optional[int] =AutoModelForCausalLM.from_pretrained( lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_tf=lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) @slow def lowercase__ ( self : List[str] ) -> int: '''simple docstring''' for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ : Dict =AutoConfig.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) A__ : Any =TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) A__ : Tuple =AutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) @slow def lowercase__ ( self : Tuple ) -> List[str]: '''simple docstring''' for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ : Union[str, Any] =AutoConfig.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) A__ : Tuple =TFAutoModelForMaskedLM.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ ) A__ , A__ : List[Any] =TFAutoModelForMaskedLM.from_pretrained( lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_pt=lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) A__ : Any =AutoModelForMaskedLM.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ ) A__ , A__ : str =AutoModelForMaskedLM.from_pretrained( lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_tf=lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) @slow def lowercase__ ( self : Optional[Any] ) -> Tuple: '''simple docstring''' for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ : List[Any] =AutoConfig.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) A__ : List[str] =TFAutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ ) A__ , A__ : List[Any] =TFAutoModelForSeqaSeqLM.from_pretrained( lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_pt=lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) A__ : Optional[int] =AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ ) A__ , A__ : Union[str, Any] =AutoModelForSeqaSeqLM.from_pretrained( lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_tf=lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) @slow def lowercase__ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: A__ : Optional[Any] =AutoConfig.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) A__ : Tuple =TFAutoModelForSequenceClassification.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) A__ : 
Optional[Any] =AutoModelForSequenceClassification.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) @slow def lowercase__ ( self : List[str] ) -> Any: '''simple docstring''' # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: A__ : Tuple =AutoConfig.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) A__ : List[Any] =TFAutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) A__ : Any =AutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) def lowercase__ ( self : List[Any] ) -> str: '''simple docstring''' A__ : Any =TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase_ ) , 1_44_10 ) A__ : Optional[int] =AutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase_ ) , 1_44_10 ) def lowercase__ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' A__ : Any =TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase_ ) , 1_44_10 ) A__ : Optional[Any] =AutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase_ ) , 1_44_10 )
687
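Outside the test harness, the cross-framework loading those tests exercise boils down to two calls; this sketch needs both torch and tensorflow installed and downloads real weights, so treat it as illustrative rather than something to run in CI:

from transformers import AutoModel, TFAutoModel

name = "bert-base-uncased"
# Build a TF model from the PyTorch weights on the Hub...
tf_model = TFAutoModel.from_pretrained(name, from_pt=True)
# ...and a PyTorch model from the TF weights.
pt_model = AutoModel.from_pretrained(name, from_tf=True)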
'''simple docstring''' from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __snake_case : int = { 'configuration_trajectory_transformer': [ 'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrajectoryTransformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : str = [ 'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TrajectoryTransformerModel', 'TrajectoryTransformerPreTrainedModel', 'load_tf_weights_in_trajectory_transformer', ] if TYPE_CHECKING: from .configuration_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TrajectoryTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TrajectoryTransformerModel, TrajectoryTransformerPreTrainedModel, load_tf_weights_in_trajectory_transformer, ) else: import sys __snake_case : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
687
1
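The module above follows the lazy-import convention: names are declared in _import_structure and resolved only on first attribute access. A minimal standalone imitation of the idea (LazyNamespace is an invented class, not the library's _LazyModule):

import importlib

class LazyNamespace:
    """Resolve exported names on first access instead of at import time."""

    def __init__(self, package: str, import_structure: dict):
        self._package = package
        # Map each exported name to the submodule that defines it.
        self._name_to_module = {
            name: module for module, names in import_structure.items() for name in names
        }

    def __getattr__(self, name: str):
        module_name = self._name_to_module.get(name)
        if module_name is None:
            raise AttributeError(name)
        module = importlib.import_module(f".{module_name}", self._package)
        value = getattr(module, name)
        setattr(self, name, value)  # cache so later lookups skip __getattr__
        return value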
'''simple docstring''' import argparse import json import os import re import torch from transformers import BloomConfig, BloomModel from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME from transformers.utils import logging logging.set_verbosity_info() __snake_case : List[Any] = [ 'word_embeddings_layernorm.weight', 'word_embeddings_layernorm.bias', 'input_layernorm.weight', 'input_layernorm.bias', 'post_attention_layernorm.weight', 'post_attention_layernorm.bias', 'self_attention.dense.bias', 'mlp.dense_4h_to_h.bias', 'ln_f.weight', 'ln_f.bias', ] __snake_case : Optional[Any] = [ 'mlp.dense_4h_to_h.weight', 'self_attention.dense.weight', ] def __lowerCamelCase ( __snake_case : int, __snake_case : List[str] ) -> Any: """simple docstring""" A__ : Dict ={ """word_embeddings.weight""": """word_embeddings.weight""", """word_embeddings.norm.weight""": """word_embeddings_layernorm.weight""", """word_embeddings.norm.bias""": """word_embeddings_layernorm.bias""", """weight""": """ln_f.weight""", """bias""": """ln_f.bias""", } if key in layer_rename_map: return layer_rename_map[key] # Handle transformer blocks A__ : int =int(re.match(r""".*layer_(\d*).*""", __snake_case )[1] ) layer_number -= 3 return f"h.{layer_number}." + key def __lowerCamelCase ( __snake_case : Any ) -> Optional[int]: """simple docstring""" if dtype == torch.bool: return 1 / 8 A__ : List[Any] =re.search(r"""[^\d](\d+)$""", str(__snake_case ) ) if bit_search is None: raise ValueError(f"`dtype` is not a valid dtype: {dtype}." ) A__ : Tuple =int(bit_search.groups()[0] ) return bit_size // 8 def __lowerCamelCase ( __snake_case : str, __snake_case : Optional[Any], __snake_case : Union[str, Any], __snake_case : int, __snake_case : List[str] ) -> Union[str, Any]: """simple docstring""" if bloom_config_file == "": A__ : Dict =BloomConfig() else: A__ : List[Any] =BloomConfig.from_json_file(__snake_case ) if shard_model: A__ : Optional[Any] =os.listdir(__snake_case ) A__ : int =sorted(filter(lambda __snake_case : __snake_case.startswith("""layer""" ) and "model_00" in __snake_case, __snake_case ) ) A__ : str ={"""weight_map""": {}, """metadata""": {}} A__ : str =0 A__ : Dict =None A__ : int =BloomConfig() for j, file in enumerate(__snake_case ): print("""Processing file: {}""".format(__snake_case ) ) A__ : Dict =None for i in range(__snake_case ): # load all TP files A__ : Any =file.replace("""model_00""", f"model_0{i}" ) A__ : Tuple =torch.load(os.path.join(__snake_case, __snake_case ), map_location="""cpu""" ) # Rename keys in the transformers names A__ : Optional[int] =list(temp.keys() ) for key in keys: A__ : Optional[int] =temp.pop(__snake_case ) if tensors is None: A__ : Dict =temp else: for key in tensors.keys(): if any(key.endswith(__snake_case ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-DeepSpeed, others are ColumnParallel A__ : Union[str, Any] =1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights across TP ranks A__ : Any =torch.cat([tensors[key], temp[key]], dim=__snake_case ) # Divide the summed weights we want to average by the number of TP ranks for key in tensors.keys(): if any(key.endswith(__snake_case ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): A__ : Dict =tensors[key] / pretraining_tp torch.save( __snake_case, os.path.join(
__snake_case, """pytorch_model_{}-of-{}.bin""".format(str(j + 1 ).zfill(5 ), str(len(__snake_case ) ).zfill(5 ) ), ), ) for key in tensors.keys(): A__ : List[Any] =tensors[key] total_size += value.numel() * get_dtype_size(value.dtype ) if key not in index_dict["weight_map"]: A__ : Any ="""pytorch_model_{}-of-{}.bin""".format( str(j + 1 ).zfill(5 ), str(len(__snake_case ) ).zfill(5 ) ) A__ : List[Any] =BloomConfig() A__ : Union[str, Any] =pytorch_dump_folder_path + """/""" + CONFIG_NAME A__ : Tuple =total_size with open(__snake_case, """w""", encoding="""utf-8""" ) as f: f.write(config.to_json_string() ) with open(os.path.join(__snake_case, WEIGHTS_NAME + """.index.json""" ), """w""", encoding="""utf-8""" ) as f: A__ : Dict =json.dumps(__snake_case, indent=2, sort_keys=__snake_case ) + """\n""" f.write(__snake_case ) else: A__ : Any =BloomModel(__snake_case ) A__ : Any =os.listdir(__snake_case ) A__ : List[str] =sorted(filter(lambda __snake_case : s.startswith("""layer""" ) and "model_00" in s, __snake_case ) ) A__ : Optional[Any] =None for i, file in enumerate(__snake_case ): A__ : Union[str, Any] =None for i in range(__snake_case ): # load all TP files A__ : List[Any] =file.replace("""model_00""", f"model_0{i}" ) A__ : Optional[Any] =torch.load(os.path.join(__snake_case, __snake_case ), map_location="""cpu""" ) # Rename keys in the transformers names A__ : List[str] =list(temp.keys() ) for key in keys: A__ : Optional[Any] =temp.pop(__snake_case ) if tensors is None: A__ : Any =temp else: for key in tensors.keys(): # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) if any(key.endswith(__snake_case ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel A__ : Optional[Any] =1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks A__ : Any =torch.cat([tensors[key], temp[key]], dim=__snake_case ) # Divide by the number of TP the weights we want to average for key in tensors.keys(): if any(key.endswith(__snake_case ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): A__ : List[Any] =tensors[key] / pretraining_tp A__ : Dict =model.load_state_dict(__snake_case, strict=__snake_case ) assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected" if missing_keys is None: A__ : Optional[Any] =set(other_keys.missing_keys ) else: A__ : Tuple =missing_keys.intersection(set(other_keys.missing_keys ) ) assert not missing_keys, f"The keys {missing_keys} are missing" # Save pytorch-model os.makedirs(__snake_case, exist_ok=__snake_case ) A__ : Optional[int] =pytorch_dump_folder_path + """/""" + WEIGHTS_NAME A__ : Any =pytorch_dump_folder_path + """/""" + CONFIG_NAME print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}" ) if config.torch_dtype is not None: A__ : str =model.to(config.torch_dtype ) torch.save(model.state_dict(), __snake_case ) print(f"Save configuration file to {pytorch_config_dump_path}" ) with open(__snake_case, """w""", encoding="""utf-8""" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __snake_case : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--bloom_checkpoint_path', default=None, type=str, required=True, help='Path to the Megatron-LM checkpoint path.', ) 
parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--bloom_config_file', default='', type=str, help=( 'An optional config json file corresponding to the pre-trained model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--shard_model', action='store_true', help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint', ) parser.add_argument( '--pretraining_tp', default=4, type=int, help='Pretraining TP rank that has been used when training the model in Megatron-LM \n', ) __snake_case : Dict = parser.parse_args() convert_bloom_checkpoint_to_pytorch( args.bloom_checkpoint_path, args.bloom_config_file, args.pytorch_dump_folder_path, args.shard_model, args.pretraining_tp, )
687
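The conversion script above merges tensor-parallel shards in two ways: replicated tensors (layer norms, biases) are averaged, while row/column-parallel matrices are concatenated along the split dimension. A toy illustration with plain tensors (all shapes are made up):

import torch

tp = 4
# Replicated parameters exist identically on every rank -> sum and divide.
ln_shards = [torch.ones(8) for _ in range(tp)]
ln_merged = sum(ln_shards) / tp
assert torch.equal(ln_merged, torch.ones(8))

# Column-parallel weights are split on dim 0, row-parallel on dim 1 -> concatenate.
col_shards = [torch.randn(16, 32) for _ in range(tp)]  # each rank holds 16 of 64 rows
row_shards = [torch.randn(64, 8) for _ in range(tp)]   # each rank holds 8 of 32 columns
assert torch.cat(col_shards, dim=0).shape == (64, 32)
assert torch.cat(row_shards, dim=1).shape == (64, 32)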
'''simple docstring''' import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def __lowerCamelCase ( __snake_case : Dict ) -> List[str]: """simple docstring""" if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class lowerCamelCase ( nn.Module ): '''simple docstring''' def __init__( self : Union[str, Any] , lowerCAmelCase_ : nn.Module , lowerCAmelCase_ : int ) -> str: '''simple docstring''' super().__init__() A__ : Union[str, Any] =module A__ : Union[str, Any] =nn.Sequential( nn.Linear(module.in_features , lowerCAmelCase_ , bias=lowerCAmelCase_ ) , nn.Linear(lowerCAmelCase_ , module.out_features , bias=lowerCAmelCase_ ) , ) A__ : Tuple =(2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=lowerCAmelCase_ ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def lowercase__ ( self : List[str] , lowerCAmelCase_ : Optional[int] , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : int ) -> Dict: '''simple docstring''' return self.module(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ) + self.adapter(lowerCAmelCase_ ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' __snake_case = 'bigscience/bloom-1b7' # Constant values __snake_case = 2.109659552692574 __snake_case = 'Hello my name is' __snake_case = set() EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. 
I' ) EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' ) EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' ) __snake_case = 10 def lowercase__ ( self : Optional[int] ) -> Tuple: '''simple docstring''' # Models and tokenizer A__ : List[Any] =AutoTokenizer.from_pretrained(self.model_name ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' def lowercase__ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' super().setUp() # Models and tokenizer A__ : Optional[int] =AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map="""auto""" ) A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) def lowercase__ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' A__ : str =self.model_abit.config self.assertTrue(hasattr(lowerCAmelCase_ , """quantization_config""" ) ) A__ : Union[str, Any] =config.to_dict() A__ : Any =config.to_diff_dict() A__ : Optional[Any] =config.to_json_string() def lowercase__ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' from bitsandbytes.nn import Paramsabit A__ : int =self.model_fpaa.get_memory_footprint() A__ : Optional[Any] =self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) A__ : Tuple =get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def lowercase__ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(lowerCAmelCase_ , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def lowercase__ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' A__ : int =self.tokenizer(self.input_text , return_tensors="""pt""" ) A__ : Union[str, Any] =self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS ) def lowercase__ ( self : Optional[Any] ) -> Tuple: '''simple docstring''' A__ : Tuple =BitsAndBytesConfig() A__ : Tuple =True A__ : Optional[int] =AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=lowerCAmelCase_ , device_map="""auto""" ) A__ : Union[str, Any] =self.tokenizer(self.input_text , return_tensors="""pt""" ) A__ : Optional[Any] =model_abit_from_config.generate( input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS ) def lowercase__ ( self : str ) -> List[str]: '''simple docstring''' with self.assertRaises(lowerCAmelCase_ ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(lowerCAmelCase_ ) def lowercase__ ( self : List[str] ) -> Any: '''simple docstring''' A__ : Tuple =BitsAndBytesConfig() with self.assertRaises(lowerCAmelCase_ ): A__ : Dict =AutoModelForCausalLM.from_pretrained( self.model_name , 
quantization_config=lowerCAmelCase_ , load_in_abit=lowerCAmelCase_ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , ) def lowercase__ ( self : List[Any] ) -> Optional[int]: '''simple docstring''' with self.assertRaises(lowerCAmelCase_ ): # Tries with `str` self.model_abit.to("""cpu""" ) with self.assertRaises(lowerCAmelCase_ ): # Tries with a `dtype` self.model_abit.to(torch.floataa ) with self.assertRaises(lowerCAmelCase_ ): # Tries with a `device` self.model_abit.to(torch.device("""cuda:0""" ) ) with self.assertRaises(lowerCAmelCase_ ): # Tries with a dtype cast self.model_abit.float() with self.assertRaises(lowerCAmelCase_ ): # Tries with a dtype cast self.model_abit.half() # Test if we did not break anything A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" ) A__ : Optional[Any] =self.model_fpaa.to(torch.floataa ) A__ : Dict =self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error A__ : List[str] =self.model_fpaa.to("""cpu""" ) # Check this does not throw an error A__ : List[str] =self.model_fpaa.half() # Check this does not throw an error A__ : int =self.model_fpaa.float() def lowercase__ ( self : int ) -> Dict: '''simple docstring''' A__ : Dict =AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @classmethod def lowercase__ ( cls : List[str] ) -> Union[str, Any]: '''simple docstring''' A__ : Tuple ="""t5-small""" A__ : Optional[Any] ="""google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense A__ : Optional[int] =AutoTokenizer.from_pretrained(cls.model_name ) A__ : Optional[int] ="""Translate in German: Hello, my dog is cute""" def lowercase__ ( self : Optional[int] ) -> Dict: '''simple docstring''' gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : Dict ) -> Optional[Any]: '''simple docstring''' from transformers import TaForConditionalGeneration A__ : Optional[int] =TaForConditionalGeneration._keep_in_fpaa_modules A__ : Optional[Any] =None # test with `t5-small` A__ : str =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) A__ : List[str] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) A__ : Optional[Any] =model.generate(**lowerCAmelCase_ ) # test with `flan-t5-small` A__ : List[str] =TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) A__ : Tuple =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) A__ : Union[str, Any] =model.generate(**lowerCAmelCase_ ) A__ : Dict =modules def lowercase__ ( self : str ) -> Optional[int]: '''simple docstring''' import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` A__ : Optional[int] =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) A__ : Any =model.generate(**lowerCAmelCase_ ) # test with
`flan-t5-small` A__ : Union[str, Any] =TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) A__ : Optional[int] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) A__ : Dict =model.generate(**lowerCAmelCase_ ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' def lowercase__ ( self : List[Any] ) -> int: '''simple docstring''' super().setUp() # model_name A__ : Any ="""bigscience/bloom-560m""" A__ : List[Any] ="""t5-small""" # Different types of model A__ : Dict =AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) # Sequence classification model A__ : List[Any] =AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) # CausalLM model A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) # Seq2seq model A__ : List[str] =AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) def lowercase__ ( self : Dict ) -> int: '''simple docstring''' del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : str ) -> List[Any]: '''simple docstring''' from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' def lowercase__ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' super().setUp() def lowercase__ ( self : Optional[Any] ) -> int: '''simple docstring''' del self.pipe gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : Any ) -> Union[str, Any]: '''simple docstring''' A__ : Dict =pipeline( """text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass A__ : Optional[int] =self.pipe(self.input_text ) self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class lowerCamelCase ( lowercase_ ): '''simple docstring''' def lowercase__ ( self : str ) -> int: '''simple docstring''' super().setUp() def lowercase__ ( self : Tuple ) -> Tuple: '''simple docstring''' A__ : int =AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""balanced""" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model A__ : str =self.tokenizer(self.input_text , return_tensors="""pt""" ) # Second real batch A__ : Any =model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' def lowercase__ ( self : int ) -> Optional[Any]: '''simple docstring''' A__ : Union[str, Any] ="""facebook/opt-350m""" super().setUp() def 
lowercase__ ( self : List[str] ) -> Dict: '''simple docstring''' if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ): return # Step 1: freeze all parameters A__ : Optional[Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): A__ : int =False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability A__ : Dict =param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(lowerCAmelCase_ ) ): A__ : int =LoRALayer(module.q_proj , rank=16 ) A__ : Any =LoRALayer(module.k_proj , rank=16 ) A__ : Union[str, Any] =LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch A__ : List[Any] =self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): A__ : Any =model.forward(**lowerCAmelCase_ ) out.logits.norm().backward() for module in model.modules(): if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(lowerCAmelCase_ , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = 'gpt2-xl' __snake_case = 3.3191854854152187
687
1
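Stripped of the unittest scaffolding, the 4-bit loading recipe those tests exercise looks roughly like this; it needs a CUDA GPU plus the bitsandbytes and accelerate packages, so treat it as a sketch:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

name = "bigscience/bloom-560m"
quant_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModelForCausalLM.from_pretrained(name, quantization_config=quant_config, device_map="auto")

inputs = tokenizer("Hello my name is", return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.decode(output[0], skip_special_tokens=True))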
'''simple docstring''' from typing import Union import fire import torch from tqdm import tqdm def __lowerCamelCase ( __snake_case : str, __snake_case : str = "cpu", __snake_case : Union[str, None] = None ) -> None: """simple docstring""" A__ : Optional[int] =torch.load(__snake_case, map_location=__snake_case ) for k, v in tqdm(state_dict.items() ): if not isinstance(__snake_case, torch.Tensor ): raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""" ) A__ : List[str] =v.half() if save_path is None: # overwrite src_path A__ : Union[str, Any] =src_path torch.save(__snake_case, __snake_case ) if __name__ == "__main__": fire.Fire(convert)
687
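Stripped of the Fire CLI and the progress bar, the transformation above is just a dtype cast over every tensor in a state dict; a toy version:

import torch

state = {"w": torch.randn(2, 2), "b": torch.randn(2)}
fp16_state = {k: v.half() for k, v in state.items()}
assert all(v.dtype == torch.float16 for v in fp16_state.values())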
'''simple docstring''' import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor __snake_case : Optional[int] = logging.get_logger(__name__) class lowerCamelCase ( lowercase_ ): '''simple docstring''' def __init__( self : Tuple , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : int ) -> None: '''simple docstring''' warnings.warn( """The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use YolosImageProcessor instead.""" , lowerCAmelCase_ , ) super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
687
1
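The shim above is the standard deprecation pattern: subclass the replacement and emit a warning in __init__. A generic standalone version (OldProcessor and NewProcessor are invented names):

import warnings

class NewProcessor:
    def __init__(self, size: int = 224):
        self.size = size

class OldProcessor(NewProcessor):
    """Deprecated alias kept for backward compatibility."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldProcessor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)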
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __snake_case : List[Any] = logging.get_logger(__name__) __snake_case : Optional[Any] = { 'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json', } class lowerCamelCase ( lowercase_ , lowercase_ ): '''simple docstring''' __snake_case = 'resnet' __snake_case = ['basic', 'bottleneck'] def __init__( self : Any , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : Union[str, Any]=64 , lowerCAmelCase_ : Optional[int]=[2_56, 5_12, 10_24, 20_48] , lowerCAmelCase_ : Optional[int]=[3, 4, 6, 3] , lowerCAmelCase_ : Optional[Any]="bottleneck" , lowerCAmelCase_ : Optional[Any]="relu" , lowerCAmelCase_ : int=False , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Union[str, Any]=None , **lowerCAmelCase_ : int , ) -> str: '''simple docstring''' super().__init__(**lowerCAmelCase_ ) if layer_type not in self.layer_types: raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" ) A__ : Any =num_channels A__ : Optional[int] =embedding_size A__ : str =hidden_sizes A__ : Dict =depths A__ : List[str] =layer_type A__ : List[Any] =hidden_act A__ : List[Any] =downsample_in_first_stage A__ : Union[str, Any] =["""stem"""] + [f"stage{idx}" for idx in range(1 , len(lowerCAmelCase_ ) + 1 )] A__ , A__ : str =get_aligned_output_features_output_indices( out_features=lowerCAmelCase_ , out_indices=lowerCAmelCase_ , stage_names=self.stage_names ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = version.parse('1.11' ) @property def lowercase__ ( self : Dict ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowercase__ ( self : str ) -> float: '''simple docstring''' return 1e-3
687
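Instantiating the config above with its defaults shows how stage_names is derived from depths; a quick check, assuming transformers is installed:

from transformers import ResNetConfig

cfg = ResNetConfig()  # defaults: four stages with depths [3, 4, 6, 3]
assert cfg.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]
assert cfg.layer_type == "bottleneck"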
'''simple docstring''' import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCamelCase : '''simple docstring''' def __init__( self : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple=13 , lowerCAmelCase_ : Any=7 , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : str=99 , lowerCAmelCase_ : int=0 , lowerCAmelCase_ : str=32 , lowerCAmelCase_ : List[str]=5 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : List[Any]=5_12 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : int=2 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : List[str]="last" , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[str]=0 , ) -> Tuple: '''simple docstring''' A__ : Tuple =parent A__ : Any =batch_size A__ : List[str] =seq_length A__ : Optional[Any] =is_training A__ : Dict =use_input_lengths A__ : int =use_token_type_ids A__ : Union[str, Any] =use_labels A__ : Optional[Any] =gelu_activation A__ : List[Any] =sinusoidal_embeddings A__ : List[Any] =causal A__ : str =asm A__ : Tuple =n_langs A__ : Dict =vocab_size A__ : Optional[Any] =n_special A__ : Tuple =hidden_size A__ : Dict =num_hidden_layers A__ : int =num_attention_heads A__ : Optional[Any] =hidden_dropout_prob A__ : Optional[Any] =attention_probs_dropout_prob A__ : Optional[int] =max_position_embeddings A__ : Optional[int] =type_sequence_label_size A__ : Tuple =initializer_range A__ : Any =num_labels A__ : str =num_choices A__ : Optional[int] =summary_type A__ : int =use_proj A__ : Tuple =scope A__ : Union[str, Any] =bos_token_id def lowercase__ ( self : Any ) -> Optional[Any]: '''simple docstring''' A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ : Dict =random_attention_mask([self.batch_size, self.seq_length] ) A__ : Tuple =None if self.use_input_lengths: A__ : Tuple =( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length A__ : Optional[Any] =None if self.use_token_type_ids: A__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) A__ : Any =None A__ : Tuple =None A__ : Optional[Any] =None if self.use_labels: A__ : List[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A__ : Union[str, Any] =ids_tensor([self.batch_size] , 2 ).float() A__ : str =ids_tensor([self.batch_size] , self.num_choices ) A__ : Union[str, Any] =self.get_config() return ( config, 
input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , ) -> Optional[Any]: '''simple docstring''' A__ : List[str] =XLMModel(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : Dict =model(lowerCAmelCase_ , lengths=lowerCAmelCase_ , langs=lowerCAmelCase_ ) A__ : Any =model(lowerCAmelCase_ , langs=lowerCAmelCase_ ) A__ : Tuple =model(lowerCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , ) -> Union[str, Any]: '''simple docstring''' A__ : List[Any] =XLMWithLMHeadModel(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : Tuple =model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase__ ( self : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] , ) -> str: '''simple docstring''' A__ : Union[str, Any] =XLMForQuestionAnsweringSimple(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : List[str] =model(lowerCAmelCase_ ) A__ : Optional[int] =model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ ) A__ : List[Any] =outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase__ ( self : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int , ) -> Any: '''simple docstring''' A__ : str =XLMForQuestionAnswering(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : List[str] =model(lowerCAmelCase_ ) A__ : Tuple =model( lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ 
, cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , p_mask=lowerCAmelCase_ , ) A__ : Optional[Any] =model( lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , ) ((A__) , ) : List[Any] =result_with_labels.to_tuple() A__ : Tuple =model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ ) ((A__) , ) : Tuple =result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def lowercase__ ( self : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int , ) -> Any: '''simple docstring''' A__ : Union[str, Any] =XLMForSequenceClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : str =model(lowerCAmelCase_ ) A__ : List[Any] =model(lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowercase__ ( self : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , ) -> Dict: '''simple docstring''' A__ : int =self.num_labels A__ : Tuple =XLMForTokenClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : Any =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , ) -> List[str]: '''simple docstring''' A__ : Optional[Any] =self.num_choices A__ : Optional[int] =XLMForMultipleChoice(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : Optional[int] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A__ : str =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A__ : Union[str, Any] =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A__ : Union[str, Any] =model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowercase__ ( self : str ) -> List[str]: '''simple docstring''' A__ : Dict =self.prepare_config_and_inputs() ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , 
( A__ ) , ( A__ ) , ) : Optional[int] =config_and_inputs A__ : Any ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths} return config, inputs_dict @require_torch class lowerCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ): '''simple docstring''' __snake_case = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) __snake_case = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable __snake_case = ( { 'feature-extraction': XLMModel, 'fill-mask': XLMWithLMHeadModel, 'question-answering': XLMForQuestionAnsweringSimple, 'text-classification': XLMForSequenceClassification, 'text-generation': XLMWithLMHeadModel, 'token-classification': XLMForTokenClassification, 'zero-shot': XLMForSequenceClassification, } if is_torch_available() else {} ) def lowercase__ ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] ) -> Union[str, Any]: '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("""Fast""" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str]=False ) -> int: '''simple docstring''' A__ : Tuple =super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": A__ : List[str] =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ ) A__ : Any =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ ) return inputs_dict def lowercase__ ( self : Union[str, Any] ) -> str: '''simple docstring''' A__ : Dict =XLMModelTester(self ) A__ : List[str] =ConfigTester(self , config_class=lowerCAmelCase_ , emb_dim=37 ) def lowercase__ ( self : Tuple ) -> List[str]: '''simple docstring''' self.config_tester.run_common_tests() def lowercase__ ( self : str ) -> Optional[int]: '''simple docstring''' A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*lowerCAmelCase_ ) def lowercase__ ( self : Dict ) -> Optional[int]: '''simple docstring''' A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase_ ) def lowercase__ ( self : List[str] ) -> Dict: '''simple docstring''' A__ : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase_ ) def lowercase__ ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase_ ) def lowercase__ ( self : List[Any] ) -> str: '''simple docstring''' A__ : List[str] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase_ ) def 
lowercase__ ( self : Any ) -> Tuple: '''simple docstring''' A__ : Optional[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase_ ) def lowercase__ ( self : Optional[int] ) -> Any: '''simple docstring''' A__ : Optional[int] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase_ ) def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Tuple=1 ) -> Tuple: '''simple docstring''' self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual( [isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase_ ) ) self.assertEqual(len(lowerCAmelCase_ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(lowerCAmelCase_ ): # adds PAD dummy token A__ : Tuple =min_length + idx + 1 A__ : Tuple =min_length + idx + 1 A__ : Dict =( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase_ ) ) def lowercase__ ( self : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Union[str, Any]=1 ) -> Any: '''simple docstring''' self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual( [isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase_ ) , ) self.assertEqual(len(lowerCAmelCase_ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(lowerCAmelCase_ ): # adds PAD dummy token A__ : str =min_length + idx + 1 A__ : List[Any] =(batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase_ ) , ) pass @slow def lowercase__ ( self : int ) -> List[Any]: '''simple docstring''' for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ : Tuple =XLMModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) @require_torch class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowercase__ ( self : Optional[Any] ) -> int: '''simple docstring''' A__ : Any =XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" ) model.to(lowerCAmelCase_ ) A__ : List[Any] =torch.tensor([[14, 4_47]] , dtype=torch.long , device=lowerCAmelCase_ ) # the president A__ : Optional[Any] =[ 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference A__ : Tuple =model.generate(lowerCAmelCase_ , do_sample=lowerCAmelCase_ ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase_ )
687
1
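The two shape-checking helpers near the end of the XLM test class encode one rule: at generation step idx the attention maps are (batch * num_beam_groups, heads, cur_len, cur_len) and the hidden states are (batch * num_beam_groups, cur_len, hidden_size), with cur_len = min_length + idx + 1. That bookkeeping, isolated (generation_shapes is an invented helper):

def generation_shapes(batch_size, num_beam_groups, num_heads, hidden_size, min_length, idx):
    cur_len = min_length + idx + 1  # one PAD dummy token is added per step
    attn_shape = (batch_size * num_beam_groups, num_heads, cur_len, cur_len)
    hidden_shape = (batch_size * num_beam_groups, cur_len, hidden_size)
    return attn_shape, hidden_shape

attn, hidden = generation_shapes(2, 1, 4, 32, min_length=5, idx=0)
assert attn == (2, 4, 6, 6) and hidden == (2, 6, 32)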
'''simple docstring''' from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging __snake_case : Any = logging.get_logger(__name__) __snake_case : List[Any] = { 't5-small': 'https://huggingface.co/t5-small/resolve/main/config.json', 't5-base': 'https://huggingface.co/t5-base/resolve/main/config.json', 't5-large': 'https://huggingface.co/t5-large/resolve/main/config.json', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/config.json', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/config.json', } class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = 't5' __snake_case = ['past_key_values'] __snake_case = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'} def __init__( self : Any , lowerCAmelCase_ : Optional[Any]=3_21_28 , lowerCAmelCase_ : Dict=5_12 , lowerCAmelCase_ : Tuple=64 , lowerCAmelCase_ : Union[str, Any]=20_48 , lowerCAmelCase_ : List[str]=6 , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Any=8 , lowerCAmelCase_ : Tuple=32 , lowerCAmelCase_ : Dict=1_28 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : Any=1e-6 , lowerCAmelCase_ : Optional[Any]=1.0 , lowerCAmelCase_ : List[str]="relu" , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Dict=0 , lowerCAmelCase_ : int=1 , **lowerCAmelCase_ : Dict , ) -> Tuple: '''simple docstring''' A__ : int =vocab_size A__ : Union[str, Any] =d_model A__ : Dict =d_kv A__ : List[Any] =d_ff A__ : int =num_layers A__ : List[Any] =( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry A__ : Optional[int] =num_heads A__ : Optional[Any] =relative_attention_num_buckets A__ : Tuple =relative_attention_max_distance A__ : List[Any] =dropout_rate A__ : Optional[Any] =layer_norm_epsilon A__ : Any =initializer_factor A__ : List[str] =feed_forward_proj A__ : List[str] =use_cache A__ : int =self.feed_forward_proj.split("""-""" ) A__ : Union[str, Any] =act_info[-1] A__ : int =act_info[0] == """gated""" if len(lowerCAmelCase_ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase_ ) > 2: raise ValueError( f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer." """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """ """'gated-gelu' or 'relu'""" ) # for backwards compatibility if feed_forward_proj == "gated-gelu": A__ : str ="""gelu_new""" super().__init__( pad_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ , ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' @property def lowercase__ ( self : int ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' A__ : List[Any] ={ """input_ids""": {0: """batch""", 1: """encoder_sequence"""}, """attention_mask""": {0: """batch""", 1: """encoder_sequence"""}, } if self.use_past: A__ : Dict ="""past_encoder_sequence + sequence""" A__ : Optional[Any] ={0: """batch"""} A__ : Optional[Any] ={0: """batch""", 1: """past_decoder_sequence + sequence"""} else: A__ : Optional[Any] ={0: """batch""", 1: """decoder_sequence"""} A__ : Dict ={0: """batch""", 1: """decoder_sequence"""} if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase_ , direction="""inputs""" ) return common_inputs @property def lowercase__ ( self : str ) -> int: '''simple docstring''' return 13
687
'''simple docstring''' import contextlib import copy import random from typing import Any, Dict, Iterable, Optional, Union import numpy as np import torch from .utils import deprecate, is_transformers_available if is_transformers_available(): import transformers def __lowerCamelCase ( __snake_case : int ) -> Optional[int]: """simple docstring""" random.seed(__snake_case ) np.random.seed(__snake_case ) torch.manual_seed(__snake_case ) torch.cuda.manual_seed_all(__snake_case ) # ^^ safe to call this function even if cuda is not available class lowerCamelCase : '''simple docstring''' def __init__( self : Optional[Any] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] , lowerCAmelCase_ : float = 0.9999 , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Union[float, int] = 1.0 , lowerCAmelCase_ : Union[float, int] = 2 / 3 , lowerCAmelCase_ : Optional[Any] = None , lowerCAmelCase_ : Dict[str, Any] = None , **lowerCAmelCase_ : Optional[Any] , ) -> List[str]: '''simple docstring''' if isinstance(lowerCAmelCase_ , torch.nn.Module ): A__ : Optional[Any] =( """Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. """ """Please pass the parameters of the module instead.""" ) deprecate( """passing a `torch.nn.Module` to `ExponentialMovingAverage`""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ , ) A__ : List[str] =parameters.parameters() # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility A__ : int =True if kwargs.get("""max_value""" , lowerCAmelCase_ ) is not None: A__ : Tuple ="""The `max_value` argument is deprecated. Please use `decay` instead.""" deprecate("""max_value""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ ) A__ : Union[str, Any] =kwargs["""max_value"""] if kwargs.get("""min_value""" , lowerCAmelCase_ ) is not None: A__ : List[str] ="""The `min_value` argument is deprecated. Please use `min_decay` instead.""" deprecate("""min_value""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ ) A__ : Optional[Any] =kwargs["""min_value"""] A__ : Any =list(lowerCAmelCase_ ) A__ : int =[p.clone().detach() for p in parameters] if kwargs.get("""device""" , lowerCAmelCase_ ) is not None: A__ : List[str] ="""The `device` argument is deprecated. 
Please use `to` instead.""" deprecate("""device""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ ) self.to(device=kwargs["""device"""] ) A__ : Optional[int] =None A__ : Any =decay A__ : List[Any] =min_decay A__ : Optional[int] =update_after_step A__ : List[str] =use_ema_warmup A__ : str =inv_gamma A__ : Union[str, Any] =power A__ : str =0 A__ : str =None # set in `step()` A__ : List[str] =model_cls A__ : Optional[int] =model_config @classmethod def lowercase__ ( cls : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict ) -> "EMAModel": '''simple docstring''' A__ , A__ : Tuple =model_cls.load_config(lowerCAmelCase_ , return_unused_kwargs=lowerCAmelCase_ ) A__ : Optional[Any] =model_cls.from_pretrained(lowerCAmelCase_ ) A__ : Optional[Any] =cls(model.parameters() , model_cls=lowerCAmelCase_ , model_config=model.config ) ema_model.load_state_dict(lowerCAmelCase_ ) return ema_model def lowercase__ ( self : List[str] , lowerCAmelCase_ : Tuple ) -> List[Any]: '''simple docstring''' if self.model_cls is None: raise ValueError("""`save_pretrained` can only be used if `model_cls` was defined at __init__.""" ) if self.model_config is None: raise ValueError("""`save_pretrained` can only be used if `model_config` was defined at __init__.""" ) A__ : Optional[int] =self.model_cls.from_config(self.model_config ) A__ : Optional[Any] =self.state_dict() state_dict.pop("""shadow_params""" , lowerCAmelCase_ ) model.register_to_config(**lowerCAmelCase_ ) self.copy_to(model.parameters() ) model.save_pretrained(lowerCAmelCase_ ) def lowercase__ ( self : Dict , lowerCAmelCase_ : int ) -> float: '''simple docstring''' A__ : Optional[int] =max(0 , optimization_step - self.update_after_step - 1 ) if step <= 0: return 0.0 if self.use_ema_warmup: A__ : List[Any] =1 - (1 + step / self.inv_gamma) ** -self.power else: A__ : Union[str, Any] =(1 + step) / (10 + step) A__ : str =min(lowerCAmelCase_ , self.decay ) # make sure decay is not smaller than min_decay A__ : int =max(lowerCAmelCase_ , self.min_decay ) return cur_decay_value @torch.no_grad() def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> Optional[Any]: '''simple docstring''' if isinstance(lowerCAmelCase_ , torch.nn.Module ): A__ : Any =( """Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. """ """Please pass the parameters of the module instead.""" ) deprecate( """passing a `torch.nn.Module` to `ExponentialMovingAverage.step`""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ , ) A__ : Optional[int] =parameters.parameters() A__ : Dict =list(lowerCAmelCase_ ) self.optimization_step += 1 # Compute the decay factor for the exponential moving average. 
A__ : Any =self.get_decay(self.optimization_step ) A__ : Optional[int] =decay A__ : List[str] =1 - decay A__ : str =contextlib.nullcontext if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): import deepspeed for s_param, param in zip(self.shadow_params , lowerCAmelCase_ ): if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): A__ : List[Any] =deepspeed.zero.GatheredParameters(lowerCAmelCase_ , modifier_rank=lowerCAmelCase_ ) with context_manager(): if param.requires_grad: s_param.sub_(one_minus_decay * (s_param - param) ) else: s_param.copy_(lowerCAmelCase_ ) def lowercase__ ( self : Tuple , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None: '''simple docstring''' A__ : Optional[Any] =list(lowerCAmelCase_ ) for s_param, param in zip(self.shadow_params , lowerCAmelCase_ ): param.data.copy_(s_param.to(param.device ).data ) def lowercase__ ( self : int , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : List[Any]=None ) -> None: '''simple docstring''' A__ : str =[ p.to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ) if p.is_floating_point() else p.to(device=lowerCAmelCase_ ) for p in self.shadow_params ] def lowercase__ ( self : Optional[Any] ) -> dict: '''simple docstring''' return { "decay": self.decay, "min_decay": self.min_decay, "optimization_step": self.optimization_step, "update_after_step": self.update_after_step, "use_ema_warmup": self.use_ema_warmup, "inv_gamma": self.inv_gamma, "power": self.power, "shadow_params": self.shadow_params, } def lowercase__ ( self : Tuple , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None: '''simple docstring''' A__ : List[str] =[param.detach().cpu().clone() for param in parameters] def lowercase__ ( self : List[str] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None: '''simple docstring''' if self.temp_stored_params is None: raise RuntimeError("""This ExponentialMovingAverage has no `store()`ed weights """ """to `restore()`""" ) for c_param, param in zip(self.temp_stored_params , lowerCAmelCase_ ): param.data.copy_(c_param.data ) # Better memory-wise. 
A__ : List[str] =None def lowercase__ ( self : List[str] , lowerCAmelCase_ : dict ) -> None: '''simple docstring''' A__ : List[Any] =copy.deepcopy(lowerCAmelCase_ ) A__ : List[Any] =state_dict.get("""decay""" , self.decay ) if self.decay < 0.0 or self.decay > 1.0: raise ValueError("""Decay must be between 0 and 1""" ) A__ : List[Any] =state_dict.get("""min_decay""" , self.min_decay ) if not isinstance(self.min_decay , lowerCAmelCase_ ): raise ValueError("""Invalid min_decay""" ) A__ : Tuple =state_dict.get("""optimization_step""" , self.optimization_step ) if not isinstance(self.optimization_step , lowerCAmelCase_ ): raise ValueError("""Invalid optimization_step""" ) A__ : Any =state_dict.get("""update_after_step""" , self.update_after_step ) if not isinstance(self.update_after_step , lowerCAmelCase_ ): raise ValueError("""Invalid update_after_step""" ) A__ : str =state_dict.get("""use_ema_warmup""" , self.use_ema_warmup ) if not isinstance(self.use_ema_warmup , lowerCAmelCase_ ): raise ValueError("""Invalid use_ema_warmup""" ) A__ : str =state_dict.get("""inv_gamma""" , self.inv_gamma ) if not isinstance(self.inv_gamma , (float, int) ): raise ValueError("""Invalid inv_gamma""" ) A__ : Tuple =state_dict.get("""power""" , self.power ) if not isinstance(self.power , (float, int) ): raise ValueError("""Invalid power""" ) A__ : Tuple =state_dict.get("""shadow_params""" , lowerCAmelCase_ ) if shadow_params is not None: A__ : List[str] =shadow_params if not isinstance(self.shadow_params , lowerCAmelCase_ ): raise ValueError("""shadow_params must be a list""" ) if not all(isinstance(lowerCAmelCase_ , torch.Tensor ) for p in self.shadow_params ): raise ValueError("""shadow_params must all be Tensors""" )
687
1
'''simple docstring''' import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowercase__ ( self : Tuple ) -> Any: '''simple docstring''' A__ : Union[str, Any] ="""ylacombe/bark-small""" A__ : Dict =tempfile.mkdtemp() A__ : str ="""en_speaker_1""" A__ : Any ="""This is a test string""" A__ : List[Any] ="""speaker_embeddings_path.json""" A__ : Union[str, Any] ="""speaker_embeddings""" def lowercase__ ( self : int , **lowerCAmelCase_ : Any ) -> Union[str, Any]: '''simple docstring''' return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCAmelCase_ ) def lowercase__ ( self : Dict ) -> Any: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowercase__ ( self : Dict ) -> List[str]: '''simple docstring''' A__ : Tuple =self.get_tokenizer() A__ : Optional[Any] =BarkProcessor(tokenizer=lowerCAmelCase_ ) processor.save_pretrained(self.tmpdirname ) A__ : str =BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def lowercase__ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' A__ : Optional[int] =BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) A__ : Tuple =self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) A__ : Tuple =BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def lowercase__ ( self : Tuple ) -> Tuple: '''simple docstring''' A__ : Union[str, Any] =BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) A__ : Dict =35 A__ : Union[str, Any] =2 A__ : Dict =8 A__ : Dict ={ """semantic_prompt""": np.ones(lowerCAmelCase_ ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset A__ : str =processor(text=self.input_string , voice_preset=lowerCAmelCase_ ) A__ : Optional[int] =inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCAmelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file A__ : Optional[Any] =os.path.join(self.tmpdirname , """file.npz""" ) np.savez(lowerCAmelCase_ , **lowerCAmelCase_ ) A__ : str =processor(text=self.input_string , voice_preset=lowerCAmelCase_ ) A__ : int =inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCAmelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from the hub A__ : Union[str, Any] =processor(text=self.input_string , voice_preset=self.voice_preset ) def lowercase__ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' A__ : Any =self.get_tokenizer() A__ : str =BarkProcessor(tokenizer=lowerCAmelCase_ ) A__ : List[Any] =processor(text=self.input_string ) A__ : 
List[Any] =tokenizer( self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
687
from __future__ import annotations

import requests

valid_terms = set(
    "approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports".split()
)


def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Fetch `limit` posts from a subreddit, optionally keeping only the `wanted_data` fields."""
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict


if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try after some time.
    print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
687
1
'''simple docstring''' import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu __snake_case : str = get_tests_dir() + '/test_data/fsmt/fsmt_val_data.json' with io.open(filename, 'r', encoding='utf-8') as f: __snake_case : int = json.load(f) @require_torch class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Optional[int] ) -> List[Any]: '''simple docstring''' return FSMTTokenizer.from_pretrained(lowerCAmelCase_ ) def lowercase__ ( self : str , lowerCAmelCase_ : Tuple ) -> List[Any]: '''simple docstring''' A__ : Tuple =FSMTForConditionalGeneration.from_pretrained(lowerCAmelCase_ ).to(lowerCAmelCase_ ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ["""en-ru""", 26.0], ["""ru-en""", 22.0], ["""en-de""", 22.0], ["""de-en""", 29.0], ] ) @slow def lowercase__ ( self : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple ) -> Union[str, Any]: '''simple docstring''' # note: this test is not testing the best performance since it only evals a small batch # but it should be enough to detect a regression in the output quality A__ : Optional[Any] =f"facebook/wmt19-{pair}" A__ : Tuple =self.get_tokenizer(lowerCAmelCase_ ) A__ : Any =self.get_model(lowerCAmelCase_ ) A__ : List[str] =bleu_data[pair]["""src"""] A__ : Any =bleu_data[pair]["""tgt"""] A__ : str =tokenizer(lowerCAmelCase_ , return_tensors="""pt""" , truncation=lowerCAmelCase_ , padding="""longest""" ).to(lowerCAmelCase_ ) A__ : Dict =model.generate( input_ids=batch.input_ids , num_beams=8 , ) A__ : str =tokenizer.batch_decode( lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) A__ : Dict =calculate_bleu(lowerCAmelCase_ , lowerCAmelCase_ ) print(lowerCAmelCase_ ) self.assertGreaterEqual(scores["""bleu"""] , lowerCAmelCase_ )
687
'''simple docstring''' import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) __snake_case : Union[str, Any] = logging.getLogger(__name__) __snake_case : int = tf.data.AUTOTUNE def __lowerCamelCase ( ) -> List[Any]: """simple docstring""" A__ : str =argparse.ArgumentParser(description="""Train a masked language model on TPU.""" ) parser.add_argument( """--pretrained_model_config""", type=__snake_case, default="""roberta-base""", help="""The model config to use. Note that we don't copy the model's weights, only the config!""", ) parser.add_argument( """--tokenizer""", type=__snake_case, default="""unigram-tokenizer-wikitext""", help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""", ) parser.add_argument( """--per_replica_batch_size""", type=__snake_case, default=8, help="""Batch size per TPU core.""", ) parser.add_argument( """--no_tpu""", action="""store_true""", help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""", ) parser.add_argument( """--tpu_name""", type=__snake_case, help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""", default="""local""", ) parser.add_argument( """--tpu_zone""", type=__snake_case, help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""", ) parser.add_argument( """--gcp_project""", type=__snake_case, help="""Google cloud project name. Only used for non-Colab TPU nodes.""" ) parser.add_argument( """--bfloat16""", action="""store_true""", help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""", ) parser.add_argument( """--train_dataset""", type=__snake_case, help="""Path to training dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""", ) parser.add_argument( """--shuffle_buffer_size""", type=__snake_case, default=2**18, help="""Size of the shuffle buffer (in samples)""", ) parser.add_argument( """--eval_dataset""", type=__snake_case, help="""Path to evaluation dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""", ) parser.add_argument( """--num_epochs""", type=__snake_case, default=1, help="""Number of epochs to train for.""", ) parser.add_argument( """--learning_rate""", type=__snake_case, default=1E-4, help="""Learning rate to use for training.""", ) parser.add_argument( """--weight_decay_rate""", type=__snake_case, default=1E-3, help="""Weight decay rate to use for training.""", ) parser.add_argument( """--max_length""", type=__snake_case, default=512, help="""Maximum length of tokenized sequences. 
Should match the setting used in prepare_tfrecord_shards.py""", ) parser.add_argument( """--mlm_probability""", type=__snake_case, default=0.15, help="""Fraction of tokens to mask during training.""", ) parser.add_argument("""--output_dir""", type=__snake_case, required=__snake_case, help="""Path to save model checkpoints to.""" ) parser.add_argument("""--hub_model_id""", type=__snake_case, help="""Model ID to upload to on the Hugging Face Hub.""" ) A__ : Optional[Any] =parser.parse_args() return args def __lowerCamelCase ( __snake_case : Optional[Any] ) -> Union[str, Any]: """simple docstring""" try: if args.tpu_name: A__ : List[Any] =tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name, zone=args.tpu_zone, project=args.gcp_project ) else: A__ : Optional[int] =tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( """Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """ """--gcp_project. When running on a TPU VM, use --tpu_name local.""" ) tf.config.experimental_connect_to_cluster(__snake_case ) tf.tpu.experimental.initialize_tpu_system(__snake_case ) return tpu def __lowerCamelCase ( __snake_case : Optional[int] ) -> Dict: """simple docstring""" A__ : Any =0 for file in file_list: A__ : Optional[int] =file.split("""/""" )[-1] A__ : Union[str, Any] =re.search(r"""-\d+-(\d+)\.tfrecord""", __snake_case ).group(1 ) A__ : str =int(__snake_case ) num_samples += sample_count return num_samples def __lowerCamelCase ( __snake_case : List[str], __snake_case : int, __snake_case : Any, __snake_case : List[Any], __snake_case : int, __snake_case : List[Any]=None ) -> Optional[int]: """simple docstring""" A__ : List[str] =count_samples(__snake_case ) A__ : Union[str, Any] =tf.data.Dataset.from_tensor_slices(__snake_case ) if shuffle: A__ : Optional[int] =dataset.shuffle(len(__snake_case ) ) A__ : List[str] =tf.data.TFRecordDataset(__snake_case, num_parallel_reads=__snake_case ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here A__ : int =dataset.apply(tf.data.experimental.assert_cardinality(__snake_case ) ) A__ : Any =dataset.map(__snake_case, num_parallel_calls=__snake_case ) if shuffle: assert shuffle_buffer_size is not None A__ : List[Any] =dataset.shuffle(args.shuffle_buffer_size ) A__ : int =dataset.batch(__snake_case, drop_remainder=__snake_case ) A__ : Optional[int] =dataset.map(__snake_case, num_parallel_calls=__snake_case ) A__ : Tuple =dataset.prefetch(__snake_case ) return dataset def __lowerCamelCase ( __snake_case : List[Any] ) -> Tuple: """simple docstring""" if not args.no_tpu: A__ : Dict =initialize_tpu(__snake_case ) A__ : int =tf.distribute.TPUStrategy(__snake_case ) else: A__ : List[str] =tf.distribute.OneDeviceStrategy(device="""/gpu:0""" ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" ) A__ : Tuple =AutoTokenizer.from_pretrained(args.tokenizer ) A__ : List[str] =AutoConfig.from_pretrained(args.pretrained_model_config ) A__ : Optional[Any] =tokenizer.vocab_size A__ : Tuple =tf.io.gfile.glob(os.path.join(args.train_dataset, """*.tfrecord""" ) ) if not training_records: raise ValueError(f"No .tfrecord files found in {args.train_dataset}." ) A__ : Optional[Any] =tf.io.gfile.glob(os.path.join(args.eval_dataset, """*.tfrecord""" ) ) if not eval_records: raise ValueError(f"No .tfrecord files found in {args.eval_dataset}." 
) A__ : Optional[Any] =count_samples(__snake_case ) A__ : str =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) A__ : str =steps_per_epoch * args.num_epochs with strategy.scope(): A__ : List[str] =TFAutoModelForMaskedLM.from_config(__snake_case ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built A__ , A__ : Optional[Any] =create_optimizer( num_train_steps=__snake_case, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=__snake_case, metrics=["""accuracy"""] ) def decode_fn(__snake_case : Tuple ): A__ : Dict ={ """input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa, shape=(args.max_length,) ), """attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa, shape=(args.max_length,) ), } return tf.io.parse_single_example(__snake_case, __snake_case ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. A__ : List[Any] =DataCollatorForLanguageModeling( tokenizer=__snake_case, mlm_probability=args.mlm_probability, mlm=__snake_case, return_tensors="""tf""" ) def mask_with_collator(__snake_case : Optional[int] ): # TF really needs an isin() function A__ : Union[str, Any] =( ~tf.cast(batch["""attention_mask"""], tf.bool ) | (batch["""input_ids"""] == tokenizer.cls_token_id) | (batch["""input_ids"""] == tokenizer.sep_token_id) ) A__ , A__ : List[str] =data_collator.tf_mask_tokens( batch["""input_ids"""], vocab_size=len(__snake_case ), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=__snake_case, ) return batch A__ : List[Any] =args.per_replica_batch_size * strategy.num_replicas_in_sync A__ : List[str] =prepare_dataset( __snake_case, decode_fn=__snake_case, mask_fn=__snake_case, batch_size=__snake_case, shuffle=__snake_case, shuffle_buffer_size=args.shuffle_buffer_size, ) A__ : List[str] =prepare_dataset( __snake_case, decode_fn=__snake_case, mask_fn=__snake_case, batch_size=__snake_case, shuffle=__snake_case, ) A__ : Tuple =[] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=__snake_case ) ) model.fit( __snake_case, validation_data=__snake_case, epochs=args.num_epochs, callbacks=__snake_case, ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": __snake_case : str = parse_args() main(args)
687
1
from __future__ import annotations

from collections import namedtuple


def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Given any two of voltage, current and power (the third passed as 0), compute the missing one."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError("Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
687
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]


if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
687
1
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    # Reuse the txt2img components to assemble the image-variation pipeline.
    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
687
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
687
1
'''simple docstring''' from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo __snake_case : Optional[Any] = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n' __snake_case : List[Any] = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n' __snake_case : Optional[Any] = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... 
\'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase ( datasets.Metric ): '''simple docstring''' def lowercase__ ( self : List[Any] ) -> MetricInfo: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ), } ) , ) def lowercase__ ( self : Any , lowerCAmelCase_ : List[List[List[str]]] , lowerCAmelCase_ : List[List[str]] , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : int = 4 , ) -> Dict[str, float]: '''simple docstring''' return { "google_bleu": gleu_score.corpus_gleu( list_of_references=lowerCAmelCase_ , hypotheses=lowerCAmelCase_ , min_len=lowerCAmelCase_ , max_len=lowerCAmelCase_ ) }
687
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() __snake_case : List[Any] = logging.get_logger(__name__) def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : List[str]=False ) -> str: """simple docstring""" A__ : int =[] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """vit.embeddings.cls_token"""), ("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" A__ : int =[(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Optional[Any], __snake_case : Tuple=False ) -> Optional[Any]: """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: A__ : Any ="""""" else: A__ : Optional[int] ="""vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A__ : str =state_dict.pop(f"blocks.{i}.attn.qkv.weight" ) A__ : Optional[Any] =state_dict.pop(f"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict A__ : Optional[int] =in_proj_weight[ : config.hidden_size, : ] A__ : str =in_proj_bias[: config.hidden_size] A__ : Optional[Any] =in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A__ : Dict =in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A__ : List[Any] =in_proj_weight[ -config.hidden_size :, : ] A__ : Optional[Any] =in_proj_bias[-config.hidden_size :] def 
__lowerCamelCase ( __snake_case : Optional[Any] ) -> Union[str, Any]: """simple docstring""" A__ : List[Any] =["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(__snake_case, __snake_case ) def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : List[Any], __snake_case : List[str] ) -> Union[str, Any]: """simple docstring""" A__ : Dict =dct.pop(__snake_case ) A__ : Tuple =val def __lowerCamelCase ( ) -> int: """simple docstring""" A__ : Tuple ="""http://images.cocodataset.org/val2017/000000039769.jpg""" A__ : Tuple =Image.open(requests.get(__snake_case, stream=__snake_case ).raw ) return im @torch.no_grad() def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Tuple, __snake_case : List[str]=True ) -> str: """simple docstring""" A__ : Tuple =ViTConfig() # patch_size if model_name[-1] == "8": A__ : Optional[Any] =8 # set labels if required if not base_model: A__ : Optional[Any] =1_000 A__ : str ="""huggingface/label-files""" A__ : Any ="""imagenet-1k-id2label.json""" A__ : Tuple =json.load(open(hf_hub_download(__snake_case, __snake_case, repo_type="""dataset""" ), """r""" ) ) A__ : List[str] ={int(__snake_case ): v for k, v in idalabel.items()} A__ : List[Any] =idalabel A__ : List[Any] ={v: k for k, v in idalabel.items()} # size of the architecture if model_name in ["dino_vits8", "dino_vits16"]: A__ : str =384 A__ : Optional[Any] =1_536 A__ : Optional[Any] =12 A__ : Union[str, Any] =6 # load original model from torch hub A__ : List[Any] =torch.hub.load("""facebookresearch/dino:main""", __snake_case ) original_model.eval() # load state_dict of original model, remove and rename some keys A__ : List[str] =original_model.state_dict() if base_model: remove_classification_head_(__snake_case ) A__ : Union[str, Any] =create_rename_keys(__snake_case, base_model=__snake_case ) for src, dest in rename_keys: rename_key(__snake_case, __snake_case, __snake_case ) read_in_q_k_v(__snake_case, __snake_case, __snake_case ) # load HuggingFace model if base_model: A__ : List[str] =ViTModel(__snake_case, add_pooling_layer=__snake_case ).eval() else: A__ : List[str] =ViTForImageClassification(__snake_case ).eval() model.load_state_dict(__snake_case ) # Check outputs on an image, prepared by ViTImageProcessor A__ : Union[str, Any] =ViTImageProcessor() A__ : Optional[int] =image_processor(images=prepare_img(), return_tensors="""pt""" ) A__ : Union[str, Any] =encoding["""pixel_values"""] A__ : Union[str, Any] =model(__snake_case ) if base_model: A__ : List[str] =original_model(__snake_case ) assert torch.allclose(__snake_case, outputs.last_hidden_state[:, 0, :], atol=1E-1 ) else: A__ : Optional[int] =original_model(__snake_case ) assert logits.shape == outputs.logits.shape assert torch.allclose(__snake_case, outputs.logits, atol=1E-3 ) Path(__snake_case ).mkdir(exist_ok=__snake_case ) print(f"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(__snake_case ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(__snake_case ) if __name__ == "__main__": __snake_case : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='dino_vitb16', type=str, help='Name of the model trained with DINO you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' 
) parser.add_argument( '--base_model', action='store_true', help='Whether to only convert the base model (no projection head weights).', ) parser.set_defaults(base_model=True) __snake_case : Tuple = parser.parse_args() convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
687
1
def xnor_gate(input_1: int, input_2: int) -> int:
    """XNOR: returns 1 when both inputs are equal, else 0."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
687
'''simple docstring''' import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging __snake_case : List[Any] = logging.get_logger(__name__) class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = 'linear' __snake_case = 'cosine' __snake_case = 'cosine_with_restarts' __snake_case = 'polynomial' __snake_case = 'constant' __snake_case = 'constant_with_warmup' __snake_case = 'piecewise_constant' def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int = -1 ) -> List[str]: """simple docstring""" return LambdaLR(__snake_case, lambda __snake_case : 1, last_epoch=__snake_case ) def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int = -1 ) -> Dict: """simple docstring""" def lr_lambda(__snake_case : int ): if current_step < num_warmup_steps: return float(__snake_case ) / float(max(1.0, __snake_case ) ) return 1.0 return LambdaLR(__snake_case, __snake_case, last_epoch=__snake_case ) def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : str, __snake_case : int = -1 ) -> Optional[Any]: """simple docstring""" A__ : str ={} A__ : Tuple =step_rules.split(""",""" ) for rule_str in rule_list[:-1]: A__ , A__ : int =rule_str.split(""":""" ) A__ : Optional[int] =int(__snake_case ) A__ : List[Any] =float(__snake_case ) A__ : Union[str, Any] =value A__ : int =float(rule_list[-1] ) def create_rules_function(__snake_case : int, __snake_case : Dict ): def rule_func(__snake_case : int ) -> float: A__ : Any =sorted(rules_dict.keys() ) for i, sorted_step in enumerate(__snake_case ): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func A__ : Any =create_rules_function(__snake_case, __snake_case ) return LambdaLR(__snake_case, __snake_case, last_epoch=__snake_case ) def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Dict, __snake_case : List[Any], __snake_case : Any=-1 ) -> int: """simple docstring""" def lr_lambda(__snake_case : int ): if current_step < num_warmup_steps: return float(__snake_case ) / float(max(1, __snake_case ) ) return max( 0.0, float(num_training_steps - current_step ) / float(max(1, num_training_steps - num_warmup_steps ) ) ) return LambdaLR(__snake_case, __snake_case, __snake_case ) def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int, __snake_case : float = 0.5, __snake_case : int = -1 ) -> Dict: """simple docstring""" def lr_lambda(__snake_case : Dict ): if current_step < num_warmup_steps: return float(__snake_case ) / float(max(1, __snake_case ) ) A__ : List[str] =float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) ) return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(__snake_case ) * 2.0 * progress )) ) return LambdaLR(__snake_case, __snake_case, __snake_case ) def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int, __snake_case : int = 1, __snake_case : int = -1 ) -> Dict: """simple docstring""" def lr_lambda(__snake_case : int ): if current_step < num_warmup_steps: return float(__snake_case ) / float(max(1, __snake_case ) ) A__ : Union[str, Any] =float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) ) if progress >= 1.0: return 0.0 return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(__snake_case ) * progress) % 1.0) )) ) return LambdaLR(__snake_case, __snake_case, __snake_case ) def 
__lowerCamelCase ( __snake_case : int, __snake_case : int, __snake_case : Optional[int], __snake_case : Optional[int]=1E-7, __snake_case : List[Any]=1.0, __snake_case : Any=-1 ) -> List[Any]: """simple docstring""" A__ : Optional[int] =optimizer.defaults["""lr"""] if not (lr_init > lr_end): raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})" ) def lr_lambda(__snake_case : int ): if current_step < num_warmup_steps: return float(__snake_case ) / float(max(1, __snake_case ) ) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: A__ : List[Any] =lr_init - lr_end A__ : Any =num_training_steps - num_warmup_steps A__ : Tuple =1 - (current_step - num_warmup_steps) / decay_steps A__ : List[str] =lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(__snake_case, __snake_case, __snake_case ) __snake_case : int = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def __lowerCamelCase ( __snake_case : Union[str, SchedulerType], __snake_case : Optimizer, __snake_case : Optional[str] = None, __snake_case : Optional[int] = None, __snake_case : Optional[int] = None, __snake_case : int = 1, __snake_case : float = 1.0, __snake_case : int = -1, ) -> Tuple: """simple docstring""" A__ : Tuple =SchedulerType(__snake_case ) A__ : List[Any] =TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(__snake_case, last_epoch=__snake_case ) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(__snake_case, step_rules=__snake_case, last_epoch=__snake_case ) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument." ) if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(__snake_case, num_warmup_steps=__snake_case, last_epoch=__snake_case ) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(f"{name} requires `num_training_steps`, please provide that argument." ) if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( __snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, num_cycles=__snake_case, last_epoch=__snake_case, ) if name == SchedulerType.POLYNOMIAL: return schedule_func( __snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, power=__snake_case, last_epoch=__snake_case, ) return schedule_func( __snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, last_epoch=__snake_case )
687
1
import json
import os
import unittest

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
687
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
687
1
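The row above pairs an XLM tokenizer test with a SqueezeBERT import-structure file. A minimal sketch of the fixture pattern that test uses, assuming transformers and sacremoses are installed; the toy vocabulary and merge rules below come from the test itself, not from any released checkpoint.

import json
import os
import tempfile

from transformers import XLMTokenizer

tmpdir = tempfile.mkdtemp()
# Toy BPE vocabulary and merge rules, mirroring the test fixture above.
vocab = {tok: i for i, tok in enumerate(
    ["l", "o", "w", "e", "r", "lo", "low", "er</w>", "low</w>", "<unk>"]
)}
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

vocab_file = os.path.join(tmpdir, "vocab.json")
merges_file = os.path.join(tmpdir, "merges.txt")
with open(vocab_file, "w") as fp:
    json.dump(vocab, fp)
with open(merges_file, "w") as fp:
    fp.write("\n".join(merges))

tokenizer = XLMTokenizer(vocab_file, merges_file)
print(tokenizer.tokenize("lower"))  # ["low", "er</w>"], as the test asserts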
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __snake_case : Any = { 'configuration_rag': ['RagConfig'], 'retrieval_rag': ['RagRetriever'], 'tokenization_rag': ['RagTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Any = [ 'RagModel', 'RagPreTrainedModel', 'RagSequenceForGeneration', 'RagTokenForGeneration', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Tuple = [ 'TFRagModel', 'TFRagPreTrainedModel', 'TFRagSequenceForGeneration', 'TFRagTokenForGeneration', ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys __snake_case : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
687
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __snake_case : Optional[int] = { 'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'], 'tokenization_convbert': ['ConvBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Tuple = ['ConvBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : int = [ 'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConvBertForMaskedLM', 'ConvBertForMultipleChoice', 'ConvBertForQuestionAnswering', 'ConvBertForSequenceClassification', 'ConvBertForTokenClassification', 'ConvBertLayer', 'ConvBertModel', 'ConvBertPreTrainedModel', 'load_tf_weights_in_convbert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Union[str, Any] = [ 'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFConvBertForMaskedLM', 'TFConvBertForMultipleChoice', 'TFConvBertForQuestionAnswering', 'TFConvBertForSequenceClassification', 'TFConvBertForTokenClassification', 'TFConvBertLayer', 'TFConvBertModel', 'TFConvBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig from .tokenization_convbert import ConvBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_convbert_fast import ConvBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convbert import ( CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convbert import ( TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) else: import sys __snake_case : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
687
1
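Both files in the row above follow the same `_LazyModule` pattern. A small illustration of what that buys, assuming transformers is installed: importing the package is cheap, and the heavy torch/TF-backed submodules load only when an attribute is first touched.

import importlib

rag = importlib.import_module("transformers.models.rag")
print(type(rag).__name__)     # _LazyModule: nothing heavy imported yet

config_cls = rag.RagConfig    # first attribute access triggers the real import
print(config_cls.model_type)  # "rag"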
'''simple docstring''' from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging __snake_case : Optional[Any] = logging.get_logger(__name__) def __lowerCamelCase ( __snake_case : Union[tf.Tensor, np.ndarray] ) -> List[int]: """simple docstring""" if isinstance(__snake_case, np.ndarray ): return list(tensor.shape ) A__ : List[str] =tf.shape(__snake_case ) if tensor.shape == tf.TensorShape(__snake_case ): return dynamic A__ : Dict =tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(__snake_case )] def __lowerCamelCase ( __snake_case : tf.Tensor, __snake_case : Optional[int] = None, __snake_case : Optional[str] = None ) -> tf.Tensor: """simple docstring""" return tf.nn.softmax(logits=logits + 1E-9, axis=__snake_case, name=__snake_case ) def __lowerCamelCase ( __snake_case : Optional[int], __snake_case : int, __snake_case : int, __snake_case : Optional[int]=1E-5, __snake_case : str=-1 ) -> int: """simple docstring""" if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(__snake_case, __snake_case ): raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" ) # Get mean and variance on the axis to be normalized A__ , A__ : List[Any] =tf.nn.moments(__snake_case, axes=[axis], keepdims=__snake_case ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis A__ : str =[1] * inputs.shape.rank A__ : List[str] =shape_list(__snake_case )[axis] A__ : Dict =tf.reshape(__snake_case, __snake_case ) A__ : List[Any] =tf.reshape(__snake_case, __snake_case ) # Compute layer normalization using the batch_normalization # function. A__ : Optional[Any] =tf.nn.batch_normalization( __snake_case, __snake_case, __snake_case, offset=__snake_case, scale=__snake_case, variance_epsilon=__snake_case, ) return outputs def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : List[Any]=0, __snake_case : Union[str, Any]=-1 ) -> int: """simple docstring""" if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input A__ : Any =tf.shape(__snake_case ) A__ : Dict =tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) A__ : Tuple =tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0 ) return tf.reshape(__snake_case, __snake_case ) def __lowerCamelCase ( __snake_case : tf.Tensor ) -> tf.Tensor: """simple docstring""" if not isinstance(__snake_case, tf.Tensor ): A__ : List[Any] =tf.convert_to_tensor(__snake_case ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: A__ : Optional[Any] =encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: A__ : Any =encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
# https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) A__ : Optional[int] =( tf.cast(1, encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def __lowerCamelCase ( __snake_case : tf.Tensor, __snake_case : int, __snake_case : str = "input_ids" ) -> None: """simple docstring""" tf.debugging.assert_less( __snake_case, tf.cast(__snake_case, dtype=tensor.dtype ), message=( f"The maximum value of {tensor_name} ({tf.math.reduce_max(__snake_case )}) must be smaller than the embedding " f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time." ), ) def __lowerCamelCase ( __snake_case : Any, __snake_case : Dict, __snake_case : str ) -> int: """simple docstring""" A__ : Union[str, Any] =64_512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. A__ : str =[x for x in data if len(__snake_case ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( """The following attributes cannot be saved to HDF5 file because """ f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} " f"bytes: {bad_attributes}" ) A__ : int =np.asarray(__snake_case ) A__ : Optional[Any] =1 A__ : Any =np.array_split(__snake_case, __snake_case ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 A__ : Any =np.array_split(__snake_case, __snake_case ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(__snake_case ): A__ : int =chunk_data else: A__ : int =data def __lowerCamelCase ( __snake_case : Dict, __snake_case : List[str] ) -> int: """simple docstring""" if name in group.attrs: A__ : List[str] =[n.decode("""utf8""" ) if hasattr(__snake_case, """decode""" ) else n for n in group.attrs[name]] else: A__ : List[str] =[] A__ : Dict =0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode("""utf8""" ) if hasattr(__snake_case, """decode""" ) else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] ) chunk_id += 1 return data def __lowerCamelCase ( __snake_case : List[str] ) -> Tuple: """simple docstring""" def _expand_single_ad_tensor(__snake_case : Tuple ): if isinstance(__snake_case, tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(__snake_case, axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor, __snake_case )
687
'''simple docstring''' import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowercase__ ( self : Optional[Any] ) -> int: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() def lowercase__ ( self : Union[str, Any] ) -> str: '''simple docstring''' A__ : Any =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) A__ : Optional[Any] =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) A__ : Optional[int] ="""xvjiarui/stable-diffusion-2-inpainting""" A__ , A__ : List[str] =FlaxStableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase_ , safety_checker=lowerCAmelCase_ ) A__ : List[str] ="""Face of a yellow cat, high resolution, sitting on a park bench""" A__ : Optional[Any] =jax.random.PRNGKey(0 ) A__ : List[str] =50 A__ : List[str] =jax.device_count() A__ : List[str] =num_samples * [prompt] A__ : List[str] =num_samples * [init_image] A__ : Tuple =num_samples * [mask_image] A__ , A__ , A__ : List[Any] =pipeline.prepare_inputs(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # shard inputs and rng A__ : Dict =replicate(lowerCAmelCase_ ) A__ : Union[str, Any] =jax.random.split(lowerCAmelCase_ , jax.device_count() ) A__ : List[Any] =shard(lowerCAmelCase_ ) A__ : Union[str, Any] =shard(lowerCAmelCase_ ) A__ : str =shard(lowerCAmelCase_ ) A__ : List[str] =pipeline( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ) A__ : List[Any] =output.images.reshape(lowerCAmelCase_ , 5_12 , 5_12 , 3 ) A__ : str =images[0, 2_53:2_56, 2_53:2_56, -1] A__ : Tuple =jnp.asarray(jax.device_get(image_slice.flatten() ) ) A__ : Optional[int] =jnp.array( [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] ) print(f"output_slice: {output_slice}" ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
687
1
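A quick check of the shape_list helper defined in the row above (requires tensorflow and transformers): in eager mode every dimension is known, so plain Python ints come back; inside a tf.function with an unbound batch dimension, that entry would be a scalar tensor instead.

import tensorflow as tf
from transformers.tf_utils import shape_list

x = tf.zeros((2, 4))
print(shape_list(x))  # [2, 4] — fully static in eager mode

# Under tf.function with an unknown batch size, shape_list mixes static
# ints with dynamic tensors, e.g. [<tf.Tensor ...>, 4].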
'''simple docstring''' import os from bleurt import score # From: git+https://github.com/google-research/bleurt.git import datasets __snake_case : Dict = datasets.logging.get_logger(__name__) __snake_case : Dict = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n' __snake_case : List[str] = '\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n' __snake_case : Tuple = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n' __snake_case : Optional[Any] = { 'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip', 'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip', 'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip', 'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip', 'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip', 'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip', 'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip', 'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip', 'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip', 'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip', } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase ( datasets.Metric ): '''simple docstring''' def lowercase__ ( self : List[str] ) -> List[Any]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/google-research/bleurt""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/google-research/bleurt"""] , reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] , ) def lowercase__ ( self : int , lowerCAmelCase_ : Any ) -> Union[str, Any]: '''simple docstring''' # check that config name specifies a valid BLEURT model if self.config_name == "default": logger.warning( """Using default BLEURT-Base checkpoint for sequence maximum length 128. 
""" """You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').""" ) A__ : List[str] ="""bleurt-base-128""" if self.config_name.lower() in CHECKPOINT_URLS: A__ : int =self.config_name.lower() elif self.config_name.upper() in CHECKPOINT_URLS: A__ : Optional[Any] =self.config_name.upper() else: raise KeyError( f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}" ) # download the model checkpoint specified by self.config_name and set up the scorer A__ : str =dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] ) A__ : Dict =score.BleurtScorer(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) ) def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] ) -> Optional[Any]: '''simple docstring''' A__ : Optional[Any] =self.scorer.score(references=lowerCAmelCase_ , candidates=lowerCAmelCase_ ) return {"scores": scores}
687
'''simple docstring''' import copy from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING __snake_case : List[Any] = logging.get_logger(__name__) __snake_case : Dict = { 'microsoft/conditional-detr-resnet-50': ( 'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json' ), } class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = 'conditional_detr' __snake_case = ['past_key_values'] __snake_case = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : int , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : Tuple=3_00 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : str=20_48 , lowerCAmelCase_ : Union[str, Any]=8 , lowerCAmelCase_ : Any=6 , lowerCAmelCase_ : Any=20_48 , lowerCAmelCase_ : Union[str, Any]=8 , lowerCAmelCase_ : str=0.0 , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Optional[Any]="relu" , lowerCAmelCase_ : Union[str, Any]=2_56 , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : Optional[Any]=1.0 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : List[Any]="sine" , lowerCAmelCase_ : Optional[int]="resnet50" , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : Optional[Any]=5 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Any=5 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : int=0.25 , **lowerCAmelCase_ : int , ) -> Dict: '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.""" ) A__ : Optional[int] =CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] ) elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): A__ : Tuple =backbone_config.get("""model_type""" ) A__ : List[str] =CONFIG_MAPPING[backbone_model_type] A__ : Dict =config_class.from_dict(lowerCAmelCase_ ) A__ : int =use_timm_backbone A__ : List[Any] =backbone_config A__ : Optional[int] =num_channels A__ : Optional[int] =num_queries A__ : Union[str, Any] =d_model A__ : Optional[int] =encoder_ffn_dim A__ : Optional[Any] =encoder_layers A__ : int =encoder_attention_heads A__ : Optional[Any] =decoder_ffn_dim A__ : Tuple =decoder_layers A__ : Optional[Any] =decoder_attention_heads A__ : Tuple =dropout A__ : int =attention_dropout A__ : Dict =activation_dropout A__ : Union[str, Any] =activation_function A__ : List[str] =init_std A__ : str =init_xavier_std A__ : int =encoder_layerdrop A__ : List[Any] =decoder_layerdrop A__ : Tuple =encoder_layers A__ : Tuple =auxiliary_loss A__ : List[Any] =position_embedding_type A__ : int =backbone A__ : Optional[int] =use_pretrained_backbone A__ : str =dilation # Hungarian matcher A__ : Any =class_cost A__ : str =bbox_cost A__ : str =giou_cost # Loss coefficients A__ : Union[str, Any] =mask_loss_coefficient A__ : int =dice_loss_coefficient A__ : Union[str, Any] =cls_loss_coefficient A__ : List[str] =bbox_loss_coefficient A__ : str =giou_loss_coefficient A__ : Optional[Any] =focal_alpha super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ ) @property def lowercase__ ( self : str ) -> int: '''simple docstring''' return self.encoder_attention_heads @property def lowercase__ ( self : Any ) -> int: '''simple docstring''' return self.d_model def lowercase__ ( self : Dict ) -> Union[str, Any]: '''simple docstring''' A__ : int =copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: A__ : str =self.backbone_config.to_dict() A__ : int =self.__class__.model_type return output class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = version.parse('1.11' ) @property def lowercase__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""pixel_mask""", {0: """batch"""}), ] ) @property def lowercase__ ( self : Any ) -> float: '''simple docstring''' return 1e-5 @property def lowercase__ ( self : Any ) -> int: '''simple docstring''' return 12
687
1
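The checkpoint lookup in the BLEURT metric above is worth restating on its own: config names are matched case-insensitively against the URL table, with bleurt-base-128 as the default. A self-contained sketch with a trimmed table (the two URLs are copied from the metric):

CHECKPOINT_URLS = {
    "bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
    "BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}

def resolve_checkpoint(config_name: str) -> str:
    if config_name == "default":
        config_name = "bleurt-base-128"
    for candidate in (config_name, config_name.lower(), config_name.upper()):
        if candidate in CHECKPOINT_URLS:
            return CHECKPOINT_URLS[candidate]
    raise KeyError(f"{config_name} is not a known BLEURT checkpoint")

print(resolve_checkpoint("default"))    # the base-128 zip
print(resolve_checkpoint("bleurt-20"))  # matched after upper-casing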
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __snake_case : List[str] = { 'configuration_squeezebert': [ 'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SqueezeBertConfig', 'SqueezeBertOnnxConfig', ], 'tokenization_squeezebert': ['SqueezeBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Optional[Any] = ['SqueezeBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : int = [ 'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'SqueezeBertForMaskedLM', 'SqueezeBertForMultipleChoice', 'SqueezeBertForQuestionAnswering', 'SqueezeBertForSequenceClassification', 'SqueezeBertForTokenClassification', 'SqueezeBertModel', 'SqueezeBertModule', 'SqueezeBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_squeezebert import ( SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertOnnxConfig, ) from .tokenization_squeezebert import SqueezeBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_squeezebert import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, SqueezeBertModule, SqueezeBertPreTrainedModel, ) else: import sys __snake_case : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
687
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __snake_case : Union[str, Any] = logging.get_logger(__name__) __snake_case : Optional[int] = { 'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json', } class lowerCamelCase ( lowercase_ , lowercase_ ): '''simple docstring''' __snake_case = 'bit' __snake_case = ['preactivation', 'bottleneck'] __snake_case = ['SAME', 'VALID'] def __init__( self : List[str] , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : int=64 , lowerCAmelCase_ : Optional[int]=[2_56, 5_12, 10_24, 20_48] , lowerCAmelCase_ : str=[3, 4, 6, 3] , lowerCAmelCase_ : Optional[Any]="preactivation" , lowerCAmelCase_ : str="relu" , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Dict=32 , lowerCAmelCase_ : Tuple=0.0 , lowerCAmelCase_ : int=False , lowerCAmelCase_ : Optional[Any]=32 , lowerCAmelCase_ : Tuple=1 , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : int , ) -> Optional[Any]: '''simple docstring''' super().__init__(**lowerCAmelCase_ ) if layer_type not in self.layer_types: raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" ) if global_padding is not None: if global_padding.upper() in self.supported_padding: A__ : List[Any] =global_padding.upper() else: raise ValueError(f"Padding strategy {global_padding} not supported" ) A__ : List[Any] =num_channels A__ : Tuple =embedding_size A__ : Union[str, Any] =hidden_sizes A__ : List[str] =depths A__ : Optional[Any] =layer_type A__ : int =hidden_act A__ : int =global_padding A__ : int =num_groups A__ : str =drop_path_rate A__ : str =embedding_dynamic_padding A__ : Dict =output_stride A__ : Optional[int] =width_factor A__ : List[str] =["""stem"""] + [f"stage{idx}" for idx in range(1 , len(lowerCAmelCase_ ) + 1 )] A__ , A__ : Union[str, Any] =get_aligned_output_features_output_indices( out_features=lowerCAmelCase_ , out_indices=lowerCAmelCase_ , stage_names=self.stage_names )
687
1
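A quick sketch of the BitConfig validation shown in the row above, assuming a transformers version that ships the BiT model: padding strategies are upper-cased against the supported set, stage names are derived from `depths`, and an unknown layer_type raises.

from transformers import BitConfig

cfg = BitConfig(depths=[2, 2], hidden_sizes=[128, 256], global_padding="same")
print(cfg.stage_names)     # ['stem', 'stage1', 'stage2']
print(cfg.global_padding)  # 'SAME' — normalized by the validation branch

try:
    BitConfig(layer_type="dense")
except ValueError as err:
    print(err)  # layer_type=dense is not one of preactivation,bottleneck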
'''simple docstring''' import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCamelCase : '''simple docstring''' def __init__( self : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple=13 , lowerCAmelCase_ : Any=7 , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : str=99 , lowerCAmelCase_ : int=0 , lowerCAmelCase_ : str=32 , lowerCAmelCase_ : List[str]=5 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : List[Any]=5_12 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : int=2 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : List[str]="last" , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[str]=0 , ) -> Tuple: '''simple docstring''' A__ : Tuple =parent A__ : Any =batch_size A__ : List[str] =seq_length A__ : Optional[Any] =is_training A__ : Dict =use_input_lengths A__ : int =use_token_type_ids A__ : Union[str, Any] =use_labels A__ : Optional[Any] =gelu_activation A__ : List[Any] =sinusoidal_embeddings A__ : List[Any] =causal A__ : str =asm A__ : Tuple =n_langs A__ : Dict =vocab_size A__ : Optional[Any] =n_special A__ : Tuple =hidden_size A__ : Dict =num_hidden_layers A__ : int =num_attention_heads A__ : Optional[Any] =hidden_dropout_prob A__ : Optional[Any] =attention_probs_dropout_prob A__ : Optional[int] =max_position_embeddings A__ : Optional[int] =type_sequence_label_size A__ : Tuple =initializer_range A__ : Any =num_labels A__ : str =num_choices A__ : Optional[int] =summary_type A__ : int =use_proj A__ : Tuple =scope A__ : Union[str, Any] =bos_token_id def lowercase__ ( self : Any ) -> Optional[Any]: '''simple docstring''' A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ : Dict =random_attention_mask([self.batch_size, self.seq_length] ) A__ : Tuple =None if self.use_input_lengths: A__ : Tuple =( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length A__ : Optional[Any] =None if self.use_token_type_ids: A__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) A__ : Any =None A__ : Tuple =None A__ : Optional[Any] =None if self.use_labels: A__ : List[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A__ : Union[str, Any] =ids_tensor([self.batch_size] , 2 ).float() A__ : str =ids_tensor([self.batch_size] , self.num_choices ) A__ : Union[str, Any] =self.get_config() return ( config, 
input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , ) -> Optional[Any]: '''simple docstring''' A__ : List[str] =XLMModel(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : Dict =model(lowerCAmelCase_ , lengths=lowerCAmelCase_ , langs=lowerCAmelCase_ ) A__ : Any =model(lowerCAmelCase_ , langs=lowerCAmelCase_ ) A__ : Tuple =model(lowerCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , ) -> Union[str, Any]: '''simple docstring''' A__ : List[Any] =XLMWithLMHeadModel(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : Tuple =model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase__ ( self : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] , ) -> str: '''simple docstring''' A__ : Union[str, Any] =XLMForQuestionAnsweringSimple(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : List[str] =model(lowerCAmelCase_ ) A__ : Optional[int] =model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ ) A__ : List[Any] =outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase__ ( self : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int , ) -> Any: '''simple docstring''' A__ : str =XLMForQuestionAnswering(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : List[str] =model(lowerCAmelCase_ ) A__ : Tuple =model( lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ 
, cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , p_mask=lowerCAmelCase_ , ) A__ : Optional[Any] =model( lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , ) ((A__) , ) : List[Any] =result_with_labels.to_tuple() A__ : Tuple =model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ ) ((A__) , ) : Tuple =result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def lowercase__ ( self : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int , ) -> Any: '''simple docstring''' A__ : Union[str, Any] =XLMForSequenceClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : str =model(lowerCAmelCase_ ) A__ : List[Any] =model(lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowercase__ ( self : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , ) -> Dict: '''simple docstring''' A__ : int =self.num_labels A__ : Tuple =XLMForTokenClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : Any =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , ) -> List[str]: '''simple docstring''' A__ : Optional[Any] =self.num_choices A__ : Optional[int] =XLMForMultipleChoice(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : Optional[int] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A__ : str =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A__ : Union[str, Any] =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A__ : Union[str, Any] =model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowercase__ ( self : str ) -> List[str]: '''simple docstring''' A__ : Dict =self.prepare_config_and_inputs() ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , 
( A__ ) , ( A__ ) , ) : Optional[int] =config_and_inputs A__ : Any ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths} return config, inputs_dict @require_torch class lowerCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ): '''simple docstring''' __snake_case = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) __snake_case = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable __snake_case = ( { 'feature-extraction': XLMModel, 'fill-mask': XLMWithLMHeadModel, 'question-answering': XLMForQuestionAnsweringSimple, 'text-classification': XLMForSequenceClassification, 'text-generation': XLMWithLMHeadModel, 'token-classification': XLMForTokenClassification, 'zero-shot': XLMForSequenceClassification, } if is_torch_available() else {} ) def lowercase__ ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] ) -> Union[str, Any]: '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("""Fast""" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str]=False ) -> int: '''simple docstring''' A__ : Tuple =super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": A__ : List[str] =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ ) A__ : Any =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ ) return inputs_dict def lowercase__ ( self : Union[str, Any] ) -> str: '''simple docstring''' A__ : Dict =XLMModelTester(self ) A__ : List[str] =ConfigTester(self , config_class=lowerCAmelCase_ , emb_dim=37 ) def lowercase__ ( self : Tuple ) -> List[str]: '''simple docstring''' self.config_tester.run_common_tests() def lowercase__ ( self : str ) -> Optional[int]: '''simple docstring''' A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*lowerCAmelCase_ ) def lowercase__ ( self : Dict ) -> Optional[int]: '''simple docstring''' A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase_ ) def lowercase__ ( self : List[str] ) -> Dict: '''simple docstring''' A__ : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase_ ) def lowercase__ ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase_ ) def lowercase__ ( self : List[Any] ) -> str: '''simple docstring''' A__ : List[str] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase_ ) def 
lowercase__ ( self : Any ) -> Tuple: '''simple docstring''' A__ : Optional[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase_ ) def lowercase__ ( self : Optional[int] ) -> Any: '''simple docstring''' A__ : Optional[int] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase_ ) def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Tuple=1 ) -> Tuple: '''simple docstring''' self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual( [isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase_ ) ) self.assertEqual(len(lowerCAmelCase_ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(lowerCAmelCase_ ): # adds PAD dummy token A__ : Tuple =min_length + idx + 1 A__ : Tuple =min_length + idx + 1 A__ : Dict =( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase_ ) ) def lowercase__ ( self : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Union[str, Any]=1 ) -> Any: '''simple docstring''' self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual( [isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase_ ) , ) self.assertEqual(len(lowerCAmelCase_ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(lowerCAmelCase_ ): # adds PAD dummy token A__ : str =min_length + idx + 1 A__ : List[Any] =(batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase_ ) , ) pass @slow def lowercase__ ( self : int ) -> List[Any]: '''simple docstring''' for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ : Tuple =XLMModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) @require_torch class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowercase__ ( self : Optional[Any] ) -> int: '''simple docstring''' A__ : Any =XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" ) model.to(lowerCAmelCase_ ) A__ : List[Any] =torch.tensor([[14, 4_47]] , dtype=torch.long , device=lowerCAmelCase_ ) # the president A__ : Optional[Any] =[ 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference A__ : Tuple =model.generate(lowerCAmelCase_ , do_sample=lowerCAmelCase_ ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase_ )
687
'''simple docstring''' import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin __snake_case : int = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.plbart.modeling_plbart import shift_tokens_right __snake_case : List[str] = 5_0003 __snake_case : Dict = 5_0002 @require_sentencepiece @require_tokenizers class lowerCamelCase ( lowercase_ , unittest.TestCase ): '''simple docstring''' __snake_case = PLBartTokenizer __snake_case = None __snake_case = False def lowercase__ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing A__ : Tuple =PLBartTokenizer(lowerCAmelCase_ , language_codes="""base""" , keep_accents=lowerCAmelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase__ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' A__ : Union[str, Any] =PLBartTokenizer(lowerCAmelCase_ , language_codes="""base""" , keep_accents=lowerCAmelCase_ ) A__ : Optional[Any] =tokenizer.tokenize("""This is a test""" ) self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) A__ : Tuple =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) A__ : Any =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) A__ : str =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) A__ : Optional[Any] =tokenizer.vocab_size A__ : Dict =[tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 4 , lowerCAmelCase_ )] self.assertListEqual(lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] ) A__ : Dict ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go""" A__ : int =tokenizer(lowerCAmelCase_ ).input_ids self.assertEqual( tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , ) def lowercase__ ( self : Any ) -> str: '''simple docstring''' A__ : int =PLBartTokenizer(lowerCAmelCase_ , language_codes="""multi""" , keep_accents=lowerCAmelCase_ ) A__ : Dict 
=tokenizer.tokenize("""This is a test""" ) self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) A__ : Dict =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) A__ : str =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) A__ : Dict =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) A__ : Tuple =tokenizer.vocab_size A__ : Dict =[tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 7 , lowerCAmelCase_ )] self.assertListEqual( lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] ) A__ : Any ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go""" A__ : int =tokenizer(lowerCAmelCase_ ).input_ids self.assertEqual( tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , ) @require_torch @require_sentencepiece @require_tokenizers class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' __snake_case = 'uclanlp/plbart-python-en_XX' __snake_case = [ 'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])', 'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])', ] __snake_case = [ 'Returns the maximum value of a b c.', 'Sums the values of a b c.', ] __snake_case = [ 134, 5452, 3_3460, 3_3441, 3_3463, 3_3465, 3_3463, 3_3449, 988, 20, 3_3456, 19, 3_3456, 771, 39, 4258, 889, 3318, 3_3441, 3_3463, 3_3465, 3_3463, 3_3449, 2471, 2, PYTHON_CODE, ] @classmethod def lowercase__ ( cls : Optional[int] ) -> str: '''simple docstring''' A__ : PLBartTokenizer =PLBartTokenizer.from_pretrained( cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" ) A__ : Optional[Any] =1 return cls def lowercase__ ( self : str ) -> Optional[Any]: '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 5_00_01 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 5_00_02 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 5_00_03 ) def lowercase__ ( self : int ) -> List[str]: '''simple docstring''' A__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ ) def lowercase__ ( self : int ) -> 
Optional[int]: '''simple docstring''' self.assertIn(lowerCAmelCase_ , self.tokenizer.all_special_ids ) A__ : Tuple =[EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2] A__ : Any =self.tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ ) A__ : Optional[int] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase_ ) def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' A__ : Optional[int] =["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20] self.assertIsInstance(src_text[0] , lowerCAmelCase_ ) A__ : str =10 A__ : Optional[Any] =self.tokenizer(lowerCAmelCase_ , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , lowerCAmelCase_ ) self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ ) def lowercase__ ( self : str ) -> List[Any]: '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [5_00_04, 5_00_01] ) def lowercase__ ( self : Tuple ) -> str: '''simple docstring''' A__ : Tuple =tempfile.mkdtemp() A__ : Tuple =self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowerCAmelCase_ ) A__ : Optional[Any] =PLBartTokenizer.from_pretrained(lowerCAmelCase_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase_ ) @require_torch def lowercase__ ( self : Any ) -> Any: '''simple docstring''' A__ : List[str] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , return_tensors="""pt""" ) A__ : str =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] ) self.assertEqual(batch.decoder_input_ids[1][0] , lowerCAmelCase_ ) self.assertEqual(batch.decoder_input_ids[1][-1] , 2 ) self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] ) @require_torch def lowercase__ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' A__ : Union[str, Any] =self.tokenizer( self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , ) A__ : Any =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual((2, 26) , batch.input_ids.shape ) self.assertEqual((2, 26) , batch.attention_mask.shape ) A__ : List[Any] =batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] ) def lowercase__ ( self : Any ) -> Dict: '''simple docstring''' A__ : Any =self.tokenizer(self.src_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=3 , return_tensors="""pt""" ) A__ : Optional[int] =self.tokenizer( text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=10 , return_tensors="""pt""" ) A__ : Optional[Any] =targets["""input_ids"""] A__ : List[str] =shift_tokens_right(lowerCAmelCase_ , self.tokenizer.pad_token_id ) 
self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def lowercase__ ( self : Any ) -> str: '''simple docstring''' A__ : Any =self.tokenizer._build_translation_inputs( """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" ) self.assertEqual( nested_simplify(lowerCAmelCase_ ) , { # A, test, EOS, en_XX """input_ids""": [[1_50, 2_42, 2, 5_00_03]], """attention_mask""": [[1, 1, 1, 1]], # java """forced_bos_token_id""": 5_00_01, } , )
687
1
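The PLBart tests in the row above lean on shift_tokens_right to build decoder inputs. A minimal illustration (requires torch and transformers); the ids below reuse the language-code values from the test purely as example data.

import torch
from transformers.models.plbart.modeling_plbart import shift_tokens_right

pad_token_id = 1
labels = torch.tensor([[150, 242, 2, 50003]])  # tokens, EOS, __en_XX__ code
print(shift_tokens_right(labels, pad_token_id))
# tensor([[50003, 150, 242, 2]]) — the language code moves to position 0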
'''simple docstring''' import dataclasses import re import string from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple import numpy as np from . import residue_constants __snake_case : Dict = Mapping[str, np.ndarray] __snake_case : Optional[Any] = Mapping[str, Any] # Is a nested dict. __snake_case : Optional[Any] = 0.01 @dataclasses.dataclass(frozen=lowercase_ ) class lowerCamelCase : '''simple docstring''' __snake_case = 42 # [num_res, num_atom_type, 3] # Amino-acid type for each residue represented as an integer between 0 and # 20, where 20 is 'X'. __snake_case = 42 # [num_res] # Binary float mask to indicate presence of a particular atom. 1.0 if an atom # is present and 0.0 if not. This should be used for loss masking. __snake_case = 42 # [num_res, num_atom_type] # Residue index as used in PDB. It is not necessarily continuous or 0-indexed. __snake_case = 42 # [num_res] # B-factors, or temperature factors, of each residue (in sq. angstroms units), # representing the displacement of the residue from its ground truth mean # value. __snake_case = 42 # [num_res, num_atom_type] # Chain indices for multi-chain predictions __snake_case = None # Optional remark about the protein. Included as a comment in output PDB # files __snake_case = None # Templates used to generate this protein (prediction-only) __snake_case = None # Chain corresponding to each parent __snake_case = None def __lowerCamelCase ( __snake_case : str ) -> Protein: """simple docstring""" A__ : str =r"""(\[[A-Z]+\]\n)""" A__ : List[str] =[tag.strip() for tag in re.split(__snake_case, __snake_case ) if len(__snake_case ) > 0] A__ : Iterator[Tuple[str, List[str]]] =zip(tags[0::2], [l.split("""\n""" ) for l in tags[1::2]] ) A__ : List[str] =["N", "CA", "C"] A__ : Optional[int] =None A__ : int =None A__ : Dict =None for g in groups: if "[PRIMARY]" == g[0]: A__ : Optional[int] =g[1][0].strip() for i in range(len(__snake_case ) ): if seq[i] not in residue_constants.restypes: A__ : List[str] ="""X""" # FIXME: strings are immutable A__ : Optional[Any] =np.array( [residue_constants.restype_order.get(__snake_case, residue_constants.restype_num ) for res_symbol in seq] ) elif "[TERTIARY]" == g[0]: A__ : List[List[float]] =[] for axis in range(3 ): tertiary.append(list(map(__snake_case, g[1][axis].split() ) ) ) A__ : Optional[int] =np.array(__snake_case ) A__ : Dict =np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa ) for i, atom in enumerate(__snake_case ): A__ : Optional[int] =np.transpose(tertiary_np[:, i::3] ) atom_positions *= PICO_TO_ANGSTROM elif "[MASK]" == g[0]: A__ : Tuple =np.array(list(map({"""-""": 0, """+""": 1}.get, g[1][0].strip() ) ) ) A__ : Dict =np.zeros( ( len(__snake_case ), residue_constants.atom_type_num, ) ).astype(np.floataa ) for i, atom in enumerate(__snake_case ): A__ : Optional[int] =1 atom_mask *= mask[..., None] assert aatype is not None return Protein( atom_positions=__snake_case, atom_mask=__snake_case, aatype=__snake_case, residue_index=np.arange(len(__snake_case ) ), b_factors=__snake_case, ) def __lowerCamelCase ( __snake_case : Protein, __snake_case : int = 0 ) -> List[str]: """simple docstring""" A__ : List[str] =[] A__ : str =prot.remark if remark is not None: pdb_headers.append(f"REMARK {remark}" ) A__ : Optional[Any] =prot.parents A__ : Any =prot.parents_chain_index if parents is not None and parents_chain_index is not None: A__ : Tuple =[p for i, p in zip(__snake_case, __snake_case ) if i == chain_id] if parents is None or 
len(__snake_case ) == 0: A__ : Optional[Any] =["""N/A"""] pdb_headers.append(f"PARENT {' '.join(__snake_case )}" ) return pdb_headers def __lowerCamelCase ( __snake_case : Protein, __snake_case : str ) -> str: """simple docstring""" A__ : List[str] =[] A__ : Union[str, Any] =pdb_str.split("""\n""" ) A__ : Dict =prot.remark if remark is not None: out_pdb_lines.append(f"REMARK {remark}" ) A__ : List[List[str]] if prot.parents is not None and len(prot.parents ) > 0: A__ : str =[] if prot.parents_chain_index is not None: A__ : Dict[str, List[str]] ={} for p, i in zip(prot.parents, prot.parents_chain_index ): parent_dict.setdefault(str(__snake_case ), [] ) parent_dict[str(__snake_case )].append(__snake_case ) A__ : Any =max([int(__snake_case ) for chain_idx in parent_dict] ) for i in range(max_idx + 1 ): A__ : List[str] =parent_dict.get(str(__snake_case ), ["""N/A"""] ) parents_per_chain.append(__snake_case ) else: parents_per_chain.append(list(prot.parents ) ) else: A__ : Dict =[["""N/A"""]] def make_parent_line(__snake_case : Sequence[str] ) -> str: return f"PARENT {' '.join(__snake_case )}" out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) ) A__ : Any =0 for i, l in enumerate(__snake_case ): if "PARENT" not in l and "REMARK" not in l: out_pdb_lines.append(__snake_case ) if "TER" in l and "END" not in lines[i + 1]: chain_counter += 1 if not chain_counter >= len(__snake_case ): A__ : Dict =parents_per_chain[chain_counter] else: A__ : List[str] =["""N/A"""] out_pdb_lines.append(make_parent_line(__snake_case ) ) return "\n".join(__snake_case ) def __lowerCamelCase ( __snake_case : Protein ) -> str: """simple docstring""" A__ : Any =residue_constants.restypes + ["""X"""] def res_atoa(__snake_case : int ) -> str: return residue_constants.restype_atoa.get(restypes[r], """UNK""" ) A__ : Any =residue_constants.atom_types A__ : List[str] =[] A__ : Union[str, Any] =prot.atom_mask A__ : List[str] =prot.aatype A__ : Union[str, Any] =prot.atom_positions A__ : List[Any] =prot.residue_index.astype(np.intaa ) A__ : Union[str, Any] =prot.b_factors A__ : int =prot.chain_index if np.any(aatype > residue_constants.restype_num ): raise ValueError("""Invalid aatypes.""" ) A__ : Dict =get_pdb_headers(__snake_case ) if len(__snake_case ) > 0: pdb_lines.extend(__snake_case ) A__ : Tuple =aatype.shape[0] A__ : Optional[Any] =1 A__ : List[str] =0 A__ : Union[str, Any] =string.ascii_uppercase A__ : List[str] =None # Add all atom sites. for i in range(__snake_case ): A__ : Tuple =res_atoa(aatype[i] ) for atom_name, pos, mask, b_factor in zip(__snake_case, atom_positions[i], atom_mask[i], b_factors[i] ): if mask < 0.5: continue A__ : List[Any] ="""ATOM""" A__ : Union[str, Any] =atom_name if len(__snake_case ) == 4 else f" {atom_name}" A__ : List[str] ="""""" A__ : Any ="""""" A__ : Optional[Any] =1.00 A__ : str =atom_name[0] # Protein supports only C, N, O, S, this works. A__ : Tuple ="""""" A__ : List[str] ="""A""" if chain_index is not None: A__ : int =chain_tags[chain_index[i]] # PDB is a columnar format, every space matters here! 
A__ : List[str] =( f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}" f"{res_name_a:>3} {chain_tag:>1}" f"{residue_index[i]:>4}{insertion_code:>1} " f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}" f"{occupancy:>6.2f}{b_factor:>6.2f} " f"{element:>2}{charge:>2}" ) pdb_lines.append(__snake_case ) atom_index += 1 A__ : List[str] =i == n - 1 if chain_index is not None: if i != n - 1 and chain_index[i + 1] != prev_chain_index: A__ : Optional[Any] =True A__ : int =chain_index[i + 1] if should_terminate: # Close the chain. A__ : int ="""TER""" A__ : Optional[int] =( f"{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}" ) pdb_lines.append(__snake_case ) atom_index += 1 if i != n - 1: # "prev" is a misnomer here. This happens at the beginning of # each new chain. pdb_lines.extend(get_pdb_headers(__snake_case, __snake_case ) ) pdb_lines.append("""END""" ) pdb_lines.append("""""" ) return "\n".join(__snake_case ) def __lowerCamelCase ( __snake_case : Protein ) -> np.ndarray: """simple docstring""" return residue_constants.STANDARD_ATOM_MASK[prot.aatype] def __lowerCamelCase ( __snake_case : FeatureDict, __snake_case : ModelOutput, __snake_case : Optional[np.ndarray] = None, __snake_case : Optional[np.ndarray] = None, __snake_case : Optional[str] = None, __snake_case : Optional[Sequence[str]] = None, __snake_case : Optional[Sequence[int]] = None, ) -> Protein: """simple docstring""" return Protein( aatype=features["""aatype"""], atom_positions=result["""final_atom_positions"""], atom_mask=result["""final_atom_mask"""], residue_index=features["""residue_index"""] + 1, b_factors=b_factors if b_factors is not None else np.zeros_like(result["""final_atom_mask"""] ), chain_index=__snake_case, remark=__snake_case, parents=__snake_case, parents_chain_index=__snake_case, )
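Since the parser and PDB writer above form the whole round trip, a small smoke test makes the expected shapes concrete. This is a sketch only: `protein` stands in for wherever this module is importable from (a hypothetical import path), the parser is assumed to be exposed as `from_proteinnet_string` as in OpenFold, and the two-residue ProteinNet record is synthetic.

# Synthetic two-residue ProteinNet record: one [PRIMARY] line, three
# [TERTIARY] axis lines with 3 backbone atoms * 2 residues = 6 values each,
# and a [MASK] line marking both residues as resolved.
import protein  # hypothetical module name for the file above
from protein import residue_constants  # re-exported by the module above

record = (
    "[PRIMARY]\nGG\n"
    "[TERTIARY]\n" + "\n".join(" ".join(["0.0"] * 6) for _ in range(3)) + "\n"
    "[MASK]\n++\n"
)
prot = protein.from_proteinnet_string(record)
assert prot.aatype.shape == (2,)
assert prot.atom_positions.shape == (2, residue_constants.atom_type_num, 3)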
'''simple docstring''' import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionTextToImagePipeline from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device __snake_case : str = False class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' pass @nightly @require_torch_gpu class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowercase__ ( self : Optional[Any] ) -> Any: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' A__ : List[str] =VersatileDiffusionTextToImagePipeline.from_pretrained("""shi-labs/versatile-diffusion""" ) # remove text_unet pipe.remove_unused_weights() pipe.to(lowerCAmelCase_ ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) A__ : int ="""A painting of a squirrel eating a burger """ A__ : Tuple =torch.manual_seed(0 ) A__ : int =pipe( prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCAmelCase_ ) A__ : str =VersatileDiffusionTextToImagePipeline.from_pretrained(lowerCAmelCase_ ) pipe.to(lowerCAmelCase_ ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) A__ : int =generator.manual_seed(0 ) A__ : Tuple =pipe( prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass" def lowercase__ ( self : Optional[int] ) -> int: '''simple docstring''' A__ : Any =VersatileDiffusionTextToImagePipeline.from_pretrained( """shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa ) pipe.to(lowerCAmelCase_ ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) A__ : Dict ="""A painting of a squirrel eating a burger """ A__ : Optional[int] =torch.manual_seed(0 ) A__ : List[str] =pipe( prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images A__ : List[str] =image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) A__ : Tuple =np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
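Outside the test harness, the same pipeline boils down to a few lines. A hedged sketch mirroring the fp16 test above; it requires a CUDA device and downloads the "shi-labs/versatile-diffusion" weights.

import torch
from diffusers import VersatileDiffusionTextToImagePipeline

pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
    "shi-labs/versatile-diffusion", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")
generator = torch.manual_seed(0)
# Same prompt and settings as the slow test above.
image = pipe(
    prompt="A painting of a squirrel eating a burger",
    generator=generator,
    guidance_scale=7.5,
    num_inference_steps=50,
).images[0]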
'''simple docstring''' def __lowerCamelCase ( __snake_case : int = 3, __snake_case : int = 7, __snake_case : int = 1_000_000 ) -> int: """simple docstring""" A__ : int =0 A__ : Tuple =1 for current_denominator in range(1, limit + 1 ): A__ : Union[str, Any] =current_denominator * numerator // denominator if current_denominator % denominator == 0: current_numerator -= 1 if current_numerator * max_denominator > current_denominator * max_numerator: A__ : Optional[Any] =current_numerator A__ : int =current_denominator return max_numerator if __name__ == "__main__": print(solution(numerator=3, denominator=7, limit=100_0000))
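This is Project Euler 71: find the numerator of the largest fraction a/b strictly below 3/7 with b <= limit, walking every denominator and comparing candidates by cross-multiplication so no floats are involved. A quick worked check, assuming the function is exported as `solution` as the __main__ guard above suggests:

# For denominators up to 8 the best fraction below 3/7 is 2/5, and the known
# Project Euler 71 answer for the default limit of 1_000_000 is 428570.
assert solution(numerator=3, denominator=7, limit=8) == 2
assert solution() == 428_570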
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = 42 class lowerCamelCase ( lowercase_ , lowercase_ ): '''simple docstring''' @register_to_config def __init__( self : List[str] , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : Tuple[str] = ("DownEncoderBlock2D",) , lowerCAmelCase_ : Tuple[str] = ("UpDecoderBlock2D",) , lowerCAmelCase_ : Tuple[int] = (64,) , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : str = "silu" , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 32 , lowerCAmelCase_ : int = 2_56 , lowerCAmelCase_ : int = 32 , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : float = 0.18215 , lowerCAmelCase_ : str = "group" , ) -> List[str]: '''simple docstring''' super().__init__() # pass init params to Encoder A__ : Optional[Any] =Encoder( in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , down_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , double_z=lowerCAmelCase_ , ) A__ : Dict =vq_embed_dim if vq_embed_dim is not None else latent_channels A__ : Union[str, Any] =nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , 1 ) A__ : Optional[int] =VectorQuantizer(lowerCAmelCase_ , lowerCAmelCase_ , beta=0.25 , remap=lowerCAmelCase_ , sane_index_shape=lowerCAmelCase_ ) A__ : Tuple =nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , 1 ) # pass init params to Decoder A__ : Optional[Any] =Decoder( in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , up_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , norm_type=lowerCAmelCase_ , ) @apply_forward_hook def lowercase__ ( self : List[str] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True ) -> VQEncoderOutput: '''simple docstring''' A__ : Dict =self.encoder(lowerCAmelCase_ ) A__ : Union[str, Any] =self.quant_conv(lowerCAmelCase_ ) if not return_dict: return (h,) return VQEncoderOutput(latents=lowerCAmelCase_ ) @apply_forward_hook def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]: '''simple docstring''' # also go through quantization layer if not force_not_quantize: A__ , A__ , A__ : Tuple =self.quantize(lowerCAmelCase_ ) else: A__ : List[str] =h A__ : Dict =self.post_quant_conv(lowerCAmelCase_ ) A__ : List[Any] =self.decoder(lowerCAmelCase_ , quant if self.config.norm_type == """spatial""" else None ) if not return_dict: return (dec,) return DecoderOutput(sample=lowerCAmelCase_ ) def lowercase__ ( self : str , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]: '''simple docstring''' A__ : Optional[int] =sample A__ : Union[str, Any] =self.encode(lowerCAmelCase_ ).latents A__ : Tuple =self.decode(lowerCAmelCase_ ).sample if not return_dict: return (dec,) return DecoderOutput(sample=lowerCAmelCase_ )
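A quick shape check shows how the pieces compose: encode, then quantize on decode, then reconstruct. This is a hedged sketch assuming the class above is diffusers' `VQModel` with its default single-block, no-downsampling config; the model is untrained, so only the shapes are meaningful.

import torch
from diffusers import VQModel

model = VQModel()  # default config: one down block, one up block
model.eval()
x = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    latents = model.encode(x).latents          # (1, 3, 32, 32): no downsampling here
    reconstruction = model.decode(latents).sample
assert reconstruction.shape == x.shape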
'''simple docstring''' import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) __snake_case : Tuple = { 'sample_size': 32, 'in_channels': 3, 'out_channels': 3, 'layers_per_block': 2, 'num_class_embeds': 1000, 'block_out_channels': [32, 64], 'attention_head_dim': 8, 'down_block_types': [ 'ResnetDownsampleBlock2D', 'AttnDownBlock2D', ], 'up_block_types': [ 'AttnUpBlock2D', 'ResnetUpsampleBlock2D', ], 'resnet_time_scale_shift': 'scale_shift', 'upsample_type': 'resnet', 'downsample_type': 'resnet', } __snake_case : Optional[Any] = { 'sample_size': 64, 'in_channels': 3, 'out_channels': 3, 'layers_per_block': 3, 'num_class_embeds': 1000, 'block_out_channels': [192, 192 * 2, 192 * 3, 192 * 4], 'attention_head_dim': 64, 'down_block_types': [ 'ResnetDownsampleBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', ], 'up_block_types': [ 'AttnUpBlock2D', 'AttnUpBlock2D', 'AttnUpBlock2D', 'ResnetUpsampleBlock2D', ], 'resnet_time_scale_shift': 'scale_shift', 'upsample_type': 'resnet', 'downsample_type': 'resnet', } __snake_case : Any = { 'sample_size': 256, 'in_channels': 3, 'out_channels': 3, 'layers_per_block': 2, 'num_class_embeds': None, 'block_out_channels': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4], 'attention_head_dim': 64, 'down_block_types': [ 'ResnetDownsampleBlock2D', 'ResnetDownsampleBlock2D', 'ResnetDownsampleBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', ], 'up_block_types': [ 'AttnUpBlock2D', 'AttnUpBlock2D', 'AttnUpBlock2D', 'ResnetUpsampleBlock2D', 'ResnetUpsampleBlock2D', 'ResnetUpsampleBlock2D', ], 'resnet_time_scale_shift': 'default', 'upsample_type': 'resnet', 'downsample_type': 'resnet', } __snake_case : Optional[int] = { 'num_train_timesteps': 40, 'sigma_min': 0.002, 'sigma_max': 80.0, } __snake_case : Union[str, Any] = { 'num_train_timesteps': 201, 'sigma_min': 0.002, 'sigma_max': 80.0, } __snake_case : Optional[Any] = { 'num_train_timesteps': 151, 'sigma_min': 0.002, 'sigma_max': 80.0, } def __lowerCamelCase ( __snake_case : str ) -> Any: """simple docstring""" if isinstance(__snake_case, __snake_case ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError("""boolean value expected""" ) def __lowerCamelCase ( __snake_case : Optional[int], __snake_case : List[str], __snake_case : Union[str, Any], __snake_case : Tuple, __snake_case : Dict=False ) -> int: """simple docstring""" A__ : Tuple =checkpoint[f"{old_prefix}.in_layers.0.weight"] A__ : List[Any] =checkpoint[f"{old_prefix}.in_layers.0.bias"] A__ : Optional[Any] =checkpoint[f"{old_prefix}.in_layers.2.weight"] A__ : Dict =checkpoint[f"{old_prefix}.in_layers.2.bias"] A__ : List[Any] =checkpoint[f"{old_prefix}.emb_layers.1.weight"] A__ : Any =checkpoint[f"{old_prefix}.emb_layers.1.bias"] A__ : List[Any] =checkpoint[f"{old_prefix}.out_layers.0.weight"] A__ : str =checkpoint[f"{old_prefix}.out_layers.0.bias"] A__ : int =checkpoint[f"{old_prefix}.out_layers.3.weight"] A__ : str =checkpoint[f"{old_prefix}.out_layers.3.bias"] if has_skip: A__ : Union[str, Any] =checkpoint[f"{old_prefix}.skip_connection.weight"] A__ : Any =checkpoint[f"{old_prefix}.skip_connection.bias"] return new_checkpoint def __lowerCamelCase ( __snake_case : str, __snake_case : Optional[int], __snake_case : int, __snake_case : Union[str, Any], __snake_case : Optional[Any]=None ) -> Any: """simple docstring""" A__ , A__ , A__ : 
Optional[int] =checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0 ) A__ , A__ , A__ : str =checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0 ) A__ : str =checkpoint[f"{old_prefix}.norm.weight"] A__ : Dict =checkpoint[f"{old_prefix}.norm.bias"] A__ : str =weight_q.squeeze(-1 ).squeeze(-1 ) A__ : Any =bias_q.squeeze(-1 ).squeeze(-1 ) A__ : Optional[Any] =weight_k.squeeze(-1 ).squeeze(-1 ) A__ : List[str] =bias_k.squeeze(-1 ).squeeze(-1 ) A__ : Optional[Any] =weight_v.squeeze(-1 ).squeeze(-1 ) A__ : Optional[Any] =bias_v.squeeze(-1 ).squeeze(-1 ) A__ : int =( checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1 ).squeeze(-1 ) ) A__ : Dict =checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def __lowerCamelCase ( __snake_case : str, __snake_case : List[Any] ) -> Any: """simple docstring""" A__ : int =torch.load(__snake_case, map_location="""cpu""" ) A__ : Union[str, Any] ={} A__ : Dict =checkpoint["""time_embed.0.weight"""] A__ : Optional[int] =checkpoint["""time_embed.0.bias"""] A__ : Optional[int] =checkpoint["""time_embed.2.weight"""] A__ : Any =checkpoint["""time_embed.2.bias"""] if unet_config["num_class_embeds"] is not None: A__ : List[str] =checkpoint["""label_emb.weight"""] A__ : Union[str, Any] =checkpoint["""input_blocks.0.0.weight"""] A__ : Dict =checkpoint["""input_blocks.0.0.bias"""] A__ : Union[str, Any] =unet_config["""down_block_types"""] A__ : List[str] =unet_config["""layers_per_block"""] A__ : Union[str, Any] =unet_config["""attention_head_dim"""] A__ : List[Any] =unet_config["""block_out_channels"""] A__ : List[Any] =1 A__ : Tuple =channels_list[0] for i, layer_type in enumerate(__snake_case ): A__ : List[Any] =channels_list[i] A__ : Union[str, Any] =current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(__snake_case ): A__ : str =f"down_blocks.{i}.resnets.{j}" A__ : Dict =f"input_blocks.{current_layer}.0" A__ : Dict =True if j == 0 and downsample_block_has_skip else False A__ : Union[str, Any] =convert_resnet(__snake_case, __snake_case, __snake_case, __snake_case, has_skip=__snake_case ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(__snake_case ): A__ : List[Any] =f"down_blocks.{i}.resnets.{j}" A__ : Dict =f"input_blocks.{current_layer}.0" A__ : List[Any] =True if j == 0 and downsample_block_has_skip else False A__ : str =convert_resnet(__snake_case, __snake_case, __snake_case, __snake_case, has_skip=__snake_case ) A__ : Dict =f"down_blocks.{i}.attentions.{j}" A__ : Any =f"input_blocks.{current_layer}.1" A__ : Dict =convert_attention( __snake_case, __snake_case, __snake_case, __snake_case, __snake_case ) current_layer += 1 if i != len(__snake_case ) - 1: A__ : Union[str, Any] =f"down_blocks.{i}.downsamplers.0" A__ : int =f"input_blocks.{current_layer}.0" A__ : Dict =convert_resnet(__snake_case, __snake_case, __snake_case, __snake_case ) current_layer += 1 A__ : int =current_channels # hardcoded the mid-block for now A__ : Optional[Any] ="""mid_block.resnets.0""" A__ : Dict ="""middle_block.0""" A__ : Union[str, Any] =convert_resnet(__snake_case, __snake_case, __snake_case, __snake_case ) A__ : Union[str, Any] ="""mid_block.attentions.0""" A__ : Tuple ="""middle_block.1""" A__ : int =convert_attention(__snake_case, __snake_case, __snake_case, __snake_case, __snake_case ) A__ : Optional[Any] ="""mid_block.resnets.1""" A__ : List[str] ="""middle_block.2""" A__ : Optional[int] =convert_resnet(__snake_case, __snake_case, __snake_case, __snake_case ) A__ : Tuple =0 A__ 
: Dict =unet_config["""up_block_types"""] for i, layer_type in enumerate(__snake_case ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): A__ : Any =f"up_blocks.{i}.resnets.{j}" A__ : Any =f"output_blocks.{current_layer}.0" A__ : List[Any] =convert_resnet(__snake_case, __snake_case, __snake_case, __snake_case, has_skip=__snake_case ) current_layer += 1 if i != len(__snake_case ) - 1: A__ : str =f"up_blocks.{i}.upsamplers.0" A__ : Dict =f"output_blocks.{current_layer-1}.1" A__ : Union[str, Any] =convert_resnet(__snake_case, __snake_case, __snake_case, __snake_case ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): A__ : Tuple =f"up_blocks.{i}.resnets.{j}" A__ : Dict =f"output_blocks.{current_layer}.0" A__ : str =convert_resnet(__snake_case, __snake_case, __snake_case, __snake_case, has_skip=__snake_case ) A__ : List[Any] =f"up_blocks.{i}.attentions.{j}" A__ : List[str] =f"output_blocks.{current_layer}.1" A__ : Any =convert_attention( __snake_case, __snake_case, __snake_case, __snake_case, __snake_case ) current_layer += 1 if i != len(__snake_case ) - 1: A__ : List[str] =f"up_blocks.{i}.upsamplers.0" A__ : Optional[Any] =f"output_blocks.{current_layer-1}.2" A__ : str =convert_resnet(__snake_case, __snake_case, __snake_case, __snake_case ) A__ : Optional[int] =checkpoint["""out.0.weight"""] A__ : Any =checkpoint["""out.0.bias"""] A__ : Optional[int] =checkpoint["""out.2.weight"""] A__ : Any =checkpoint["""out.2.bias"""] return new_checkpoint if __name__ == "__main__": __snake_case : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.') parser.add_argument( '--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.' ) parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.') __snake_case : str = parser.parse_args() __snake_case : Optional[int] = strabool(args.class_cond) __snake_case : Optional[int] = os.path.basename(args.unet_path) print(F"""Checkpoint: {ckpt_name}""") # Get U-Net config if "imagenet64" in ckpt_name: __snake_case : List[str] = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): __snake_case : List[Any] = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: __snake_case : Any = TEST_UNET_CONFIG else: raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""") if not args.class_cond: __snake_case : List[Any] = None __snake_case : int = con_pt_to_diffuser(args.unet_path, unet_config) __snake_case : Tuple = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: __snake_case : Union[str, Any] = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: __snake_case : Optional[int] = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): __snake_case : Optional[int] = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""") __snake_case : Optional[Any] = CMStochasticIterativeScheduler(**scheduler_config) __snake_case : List[str] = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
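Once the script has written the converted pipeline to `--dump_path`, it can be reloaded like any diffusers pipeline; single-step sampling is the point of consistency models. A hedged sketch (the dump path below is hypothetical):

import torch
from diffusers import ConsistencyModelPipeline

pipe = ConsistencyModelPipeline.from_pretrained("./cd_imagenet64_l2_diffusers")  # hypothetical --dump_path
image = pipe(num_inference_steps=1, generator=torch.manual_seed(0)).images[0]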
'''simple docstring''' import os import re from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __snake_case : Optional[int] = logging.get_logger(__name__) __snake_case : Tuple = { 'vocab_file': 'vocab.txt', 'merges_file': 'bpe.codes', } __snake_case : str = { 'vocab_file': { 'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt', 'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt', }, 'merges_file': { 'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes', 'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes', }, } __snake_case : List[Any] = { 'vinai/phobert-base': 256, 'vinai/phobert-large': 256, } def __lowerCamelCase ( __snake_case : Union[str, Any] ) -> str: """simple docstring""" A__ : Optional[int] =set() A__ : Optional[int] =word[0] for char in word[1:]: pairs.add((prev_char, char) ) A__ : str =char A__ : List[Any] =set(__snake_case ) return pairs class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = VOCAB_FILES_NAMES __snake_case = PRETRAINED_VOCAB_FILES_MAP __snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any]="<s>" , lowerCAmelCase_ : List[str]="</s>" , lowerCAmelCase_ : str="</s>" , lowerCAmelCase_ : int="<s>" , lowerCAmelCase_ : List[str]="<unk>" , lowerCAmelCase_ : Any="<pad>" , lowerCAmelCase_ : Tuple="<mask>" , **lowerCAmelCase_ : Dict , ) -> Dict: '''simple docstring''' super().__init__( bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , **lowerCAmelCase_ , ) A__ : int =vocab_file A__ : Any =merges_file A__ : Union[str, Any] ={} A__ : Optional[int] =0 A__ : List[Any] =1 A__ : Tuple =2 A__ : Dict =3 self.add_from_file(lowerCAmelCase_ ) A__ : List[str] ={v: k for k, v in self.encoder.items()} with open(lowerCAmelCase_ , encoding="""utf-8""" ) as merges_handle: A__ : str =merges_handle.read().split("""\n""" )[:-1] A__ : Tuple =[tuple(merge.split()[:-1] ) for merge in merges] A__ : Optional[Any] =dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) A__ : Dict ={} def lowercase__ ( self : Tuple , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A__ : Dict =[self.cls_token_id] A__ : Union[str, Any] =[self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowercase__ ( self : str , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None , lowerCAmelCase_ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase_ )) + [1] return [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1] + ([0] * len(lowerCAmelCase_ )) + [1] def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' A__ : Tuple =[self.sep_token_id] A__ : Dict =[self.cls_token_id] if 
token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowercase__ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' return len(self.encoder ) def lowercase__ ( self : Any ) -> Tuple: '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def lowercase__ ( self : str , lowerCAmelCase_ : Any ) -> Dict: '''simple docstring''' if token in self.cache: return self.cache[token] A__ : int =tuple(lowerCAmelCase_ ) A__ : Optional[int] =tuple(list(word[:-1] ) + [word[-1] + """</w>"""] ) A__ : Tuple =get_pairs(lowerCAmelCase_ ) if not pairs: return token while True: A__ : List[Any] =min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break A__ , A__ : Tuple =bigram A__ : Optional[int] =[] A__ : Tuple =0 while i < len(lowerCAmelCase_ ): try: A__ : str =word.index(lowerCAmelCase_ , lowerCAmelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) A__ : Union[str, Any] =j if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 A__ : Dict =tuple(lowerCAmelCase_ ) A__ : Dict =new_word if len(lowerCAmelCase_ ) == 1: break else: A__ : str =get_pairs(lowerCAmelCase_ ) A__ : Dict ="""@@ """.join(lowerCAmelCase_ ) A__ : Tuple =word[:-4] A__ : Any =word return word def lowercase__ ( self : List[str] , lowerCAmelCase_ : str ) -> Any: '''simple docstring''' A__ : int =[] A__ : Optional[int] =re.findall(R"""\S+\n?""" , lowerCAmelCase_ ) for token in words: split_tokens.extend(list(self.bpe(lowerCAmelCase_ ).split(""" """ ) ) ) return split_tokens def lowercase__ ( self : str , lowerCAmelCase_ : Union[str, Any] ) -> int: '''simple docstring''' return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) ) def lowercase__ ( self : Tuple , lowerCAmelCase_ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' return self.decoder.get(lowerCAmelCase_ , self.unk_token ) def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' A__ : Optional[Any] =""" """.join(lowerCAmelCase_ ).replace("""@@ """ , """""" ).strip() return out_string def lowercase__ ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(lowerCAmelCase_ ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return A__ : Optional[Any] =os.path.join( lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) A__ : Tuple =os.path.join( lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ): copyfile(self.vocab_file , lowerCAmelCase_ ) if os.path.abspath(self.merges_file ) != os.path.abspath(lowerCAmelCase_ ): copyfile(self.merges_file , lowerCAmelCase_ ) return out_vocab_file, out_merge_file def lowercase__ ( self : List[Any] , lowerCAmelCase_ : Optional[Any] ) -> Any: '''simple docstring''' if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): try: with open(lowerCAmelCase_ , """r""" , encoding="""utf-8""" ) as fd: self.add_from_file(lowerCAmelCase_ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise 
Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset" ) return A__ : Union[str, Any] =f.readlines() for lineTmp in lines: A__ : List[Any] =lineTmp.strip() A__ : Dict =line.rfind(""" """ ) if idx == -1: raise ValueError("""Incorrect dictionary format, expected '<token> <cnt>'""" ) A__ : Tuple =line[:idx] A__ : Tuple =len(self.encoder )
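End to end, the tokenizer above is used like any other slow transformers tokenizer; note that PhoBERT expects word-segmented Vietnamese, with multi-word tokens joined by underscores. A short sketch against the public checkpoint referenced in the pretrained maps:

from transformers import PhobertTokenizer

tokenizer = PhobertTokenizer.from_pretrained("vinai/phobert-base")
line = "Tôi là sinh_viên trường đại_học Công_nghệ ."  # pre-segmented input
input_ids = tokenizer(line).input_ids
print(tokenizer.convert_ids_to_tokens(input_ids))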
'''simple docstring''' import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging __snake_case : List[Any] = logging.get_logger(__name__) class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = 'linear' __snake_case = 'cosine' __snake_case = 'cosine_with_restarts' __snake_case = 'polynomial' __snake_case = 'constant' __snake_case = 'constant_with_warmup' __snake_case = 'piecewise_constant' def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int = -1 ) -> List[str]: """simple docstring""" return LambdaLR(__snake_case, lambda __snake_case : 1, last_epoch=__snake_case ) def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int = -1 ) -> Dict: """simple docstring""" def lr_lambda(__snake_case : int ): if current_step < num_warmup_steps: return float(__snake_case ) / float(max(1.0, __snake_case ) ) return 1.0 return LambdaLR(__snake_case, __snake_case, last_epoch=__snake_case ) def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : str, __snake_case : int = -1 ) -> Optional[Any]: """simple docstring""" A__ : str ={} A__ : Tuple =step_rules.split(""",""" ) for rule_str in rule_list[:-1]: A__ , A__ : int =rule_str.split(""":""" ) A__ : Optional[int] =int(__snake_case ) A__ : List[Any] =float(__snake_case ) A__ : Union[str, Any] =value A__ : int =float(rule_list[-1] ) def create_rules_function(__snake_case : int, __snake_case : Dict ): def rule_func(__snake_case : int ) -> float: A__ : Any =sorted(rules_dict.keys() ) for i, sorted_step in enumerate(__snake_case ): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func A__ : Any =create_rules_function(__snake_case, __snake_case ) return LambdaLR(__snake_case, __snake_case, last_epoch=__snake_case ) def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Dict, __snake_case : List[Any], __snake_case : Any=-1 ) -> int: """simple docstring""" def lr_lambda(__snake_case : int ): if current_step < num_warmup_steps: return float(__snake_case ) / float(max(1, __snake_case ) ) return max( 0.0, float(num_training_steps - current_step ) / float(max(1, num_training_steps - num_warmup_steps ) ) ) return LambdaLR(__snake_case, __snake_case, __snake_case ) def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int, __snake_case : float = 0.5, __snake_case : int = -1 ) -> Dict: """simple docstring""" def lr_lambda(__snake_case : Dict ): if current_step < num_warmup_steps: return float(__snake_case ) / float(max(1, __snake_case ) ) A__ : List[str] =float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) ) return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(__snake_case ) * 2.0 * progress )) ) return LambdaLR(__snake_case, __snake_case, __snake_case ) def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int, __snake_case : int = 1, __snake_case : int = -1 ) -> Dict: """simple docstring""" def lr_lambda(__snake_case : int ): if current_step < num_warmup_steps: return float(__snake_case ) / float(max(1, __snake_case ) ) A__ : Union[str, Any] =float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) ) if progress >= 1.0: return 0.0 return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(__snake_case ) * progress) % 1.0) )) ) return LambdaLR(__snake_case, __snake_case, __snake_case ) def 
__lowerCamelCase ( __snake_case : int, __snake_case : int, __snake_case : Optional[int], __snake_case : Optional[int]=1E-7, __snake_case : List[Any]=1.0, __snake_case : Any=-1 ) -> List[Any]: """simple docstring""" A__ : Optional[int] =optimizer.defaults["""lr"""] if not (lr_init > lr_end): raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})" ) def lr_lambda(__snake_case : int ): if current_step < num_warmup_steps: return float(__snake_case ) / float(max(1, __snake_case ) ) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: A__ : List[Any] =lr_init - lr_end A__ : Any =num_training_steps - num_warmup_steps A__ : Tuple =1 - (current_step - num_warmup_steps) / decay_steps A__ : List[str] =lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(__snake_case, __snake_case, __snake_case ) __snake_case : int = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def __lowerCamelCase ( __snake_case : Union[str, SchedulerType], __snake_case : Optimizer, __snake_case : Optional[str] = None, __snake_case : Optional[int] = None, __snake_case : Optional[int] = None, __snake_case : int = 1, __snake_case : float = 1.0, __snake_case : int = -1, ) -> Tuple: """simple docstring""" A__ : Tuple =SchedulerType(__snake_case ) A__ : List[Any] =TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(__snake_case, last_epoch=__snake_case ) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(__snake_case, step_rules=__snake_case, last_epoch=__snake_case ) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument." ) if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(__snake_case, num_warmup_steps=__snake_case, last_epoch=__snake_case ) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(f"{name} requires `num_training_steps`, please provide that argument." ) if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( __snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, num_cycles=__snake_case, last_epoch=__snake_case, ) if name == SchedulerType.POLYNOMIAL: return schedule_func( __snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, power=__snake_case, last_epoch=__snake_case, ) return schedule_func( __snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, last_epoch=__snake_case )
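The final factory dispatches on the schedule name and forwards only the arguments each schedule needs, which is why the warmup- and training-step checks are staged the way they are. A minimal usage sketch, assuming the factory is exported as `get_scheduler` (the name is obfuscated above):

import torch

model = torch.nn.Linear(4, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
lr_scheduler = get_scheduler(
    "linear", optimizer=optimizer, num_warmup_steps=10, num_training_steps=100
)
for _ in range(100):
    optimizer.step()      # lr ramps up for 10 steps, then decays linearly to 0
    lr_scheduler.step()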
'''simple docstring''' import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging __snake_case : List[str] = logging.get_logger(__name__) def __lowerCamelCase ( __snake_case : Any, __snake_case : Any ) -> int: """simple docstring""" A__ : Union[str, Any] =nn.functional.normalize(__snake_case ) A__ : Optional[Any] =nn.functional.normalize(__snake_case ) return torch.mm(__snake_case, normalized_text_embeds.t() ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = CLIPConfig __snake_case = ['CLIPEncoderLayer'] def __init__( self : Tuple , lowerCAmelCase_ : CLIPConfig ) -> Dict: '''simple docstring''' super().__init__(lowerCAmelCase_ ) A__ : str =CLIPVisionModel(config.vision_config ) A__ : Optional[Any] =nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=lowerCAmelCase_ ) A__ : List[Any] =nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=lowerCAmelCase_ ) A__ : Any =nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=lowerCAmelCase_ ) A__ : Optional[Any] =nn.Parameter(torch.ones(17 ) , requires_grad=lowerCAmelCase_ ) A__ : int =nn.Parameter(torch.ones(3 ) , requires_grad=lowerCAmelCase_ ) @torch.no_grad() def lowercase__ ( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int ) -> Any: '''simple docstring''' A__ : Any =self.vision_model(lowerCAmelCase_ )[1] # pooled_output A__ : Any =self.visual_projection(lowerCAmelCase_ ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 A__ : Any =cosine_distance(lowerCAmelCase_ , self.special_care_embeds ).cpu().float().numpy() A__ : Optional[int] =cosine_distance(lowerCAmelCase_ , self.concept_embeds ).cpu().float().numpy() A__ : List[str] =[] A__ : Optional[int] =image_embeds.shape[0] for i in range(lowerCAmelCase_ ): A__ : List[Any] ={"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []} # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign images A__ : List[Any] =0.0 for concept_idx in range(len(special_cos_dist[0] ) ): A__ : Optional[Any] =special_cos_dist[i][concept_idx] A__ : Union[str, Any] =self.special_care_embeds_weights[concept_idx].item() A__ : Tuple =round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["special_scores"][concept_idx] > 0: result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]} ) A__ : Dict =0.01 for concept_idx in range(len(cos_dist[0] ) ): A__ : Optional[int] =cos_dist[i][concept_idx] A__ : List[str] =self.concept_embeds_weights[concept_idx].item() A__ : Optional[int] =round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["concept_scores"][concept_idx] > 0: result_img["bad_concepts"].append(lowerCAmelCase_ ) result.append(lowerCAmelCase_ ) A__ : int =[len(res["""bad_concepts"""] ) > 0 for res in result] return images, has_nsfw_concepts @torch.no_grad() def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : torch.FloatTensor ) -> Optional[int]: '''simple docstring''' A__ : Optional[Any] =self.vision_model(lowerCAmelCase_ )[1] # pooled_output A__ : List[Any] =self.visual_projection(lowerCAmelCase_ ) A__ : Union[str, Any] =cosine_distance(lowerCAmelCase_ , self.special_care_embeds ) A__ : Optional[int] =cosine_distance(lowerCAmelCase_ , self.concept_embeds ) # increase this value to 
create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images A__ : Dict =0.0 A__ : Dict =special_cos_dist - self.special_care_embeds_weights + adjustment # special_scores = special_scores.round(decimals=3) A__ : Union[str, Any] =torch.any(special_scores > 0 , dim=1 ) A__ : Tuple =special_care * 0.01 A__ : str =special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] ) A__ : List[Any] =(cos_dist - self.concept_embeds_weights) + special_adjustment # concept_scores = concept_scores.round(decimals=3) A__ : Optional[int] =torch.any(concept_scores > 0 , dim=1 ) return images, has_nsfw_concepts
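In the standard Stable Diffusion pipeline this module receives CLIP-preprocessed pixel values plus the decoded images and returns the images together with per-image NSFW flags. A hedged wiring sketch, assuming the class above is diffusers' `StableDiffusionSafetyChecker` and using its public checkpoint; the zeroed array is a stand-in decoded image.

import numpy as np
from transformers import CLIPImageProcessor
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker

checker = StableDiffusionSafetyChecker.from_pretrained(
    "CompVis/stable-diffusion-safety-checker"
)
processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
dummy = np.zeros((512, 512, 3), dtype=np.uint8)  # stand-in decoded image
clip_input = processor(images=dummy, return_tensors="pt").pixel_values
images, has_nsfw = checker(images=[dummy], clip_input=clip_input)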
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __snake_case : Union[str, Any] = { 'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'], 'tokenization_electra': ['ElectraTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : List[Any] = ['ElectraTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Any = [ 'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'ElectraForCausalLM', 'ElectraForMaskedLM', 'ElectraForMultipleChoice', 'ElectraForPreTraining', 'ElectraForQuestionAnswering', 'ElectraForSequenceClassification', 'ElectraForTokenClassification', 'ElectraModel', 'ElectraPreTrainedModel', 'load_tf_weights_in_electra', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : List[str] = [ 'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFElectraForMaskedLM', 'TFElectraForMultipleChoice', 'TFElectraForPreTraining', 'TFElectraForQuestionAnswering', 'TFElectraForSequenceClassification', 'TFElectraForTokenClassification', 'TFElectraModel', 'TFElectraPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : List[str] = [ 'FlaxElectraForCausalLM', 'FlaxElectraForMaskedLM', 'FlaxElectraForMultipleChoice', 'FlaxElectraForPreTraining', 'FlaxElectraForQuestionAnswering', 'FlaxElectraForSequenceClassification', 'FlaxElectraForTokenClassification', 'FlaxElectraModel', 'FlaxElectraPreTrainedModel', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys 
__snake_case : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
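The `_LazyModule` wiring above keeps `import transformers` cheap: the heavy torch/TF/Flax submodules are only imported when one of the registered attributes is first touched. A small illustration:

import transformers

# Each access below resolves through _LazyModule on first touch and is cached
# on the package afterwards; neither pulls in the TF or Flax variants.
config = transformers.ElectraConfig()
tok_cls = transformers.ElectraTokenizer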
'''simple docstring''' from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def __lowerCamelCase ( __snake_case : Tuple, __snake_case : List[Any] ) -> str: """simple docstring""" A__ : Optional[int] =[] for part_id in partition_order: A__ : int =df.where(f"SPARK_PARTITION_ID() = {part_id}" ).collect() for row_idx, row in enumerate(__snake_case ): expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()) ) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def __lowerCamelCase ( ) -> List[Any]: """simple docstring""" A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() A__ : str =spark.range(100 ).repartition(1 ) A__ : List[str] =Spark(__snake_case ) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=16 ) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 50 @require_not_windows @require_dill_gt_0_3_2 def __lowerCamelCase ( ) -> Tuple: """simple docstring""" A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() A__ : Tuple =spark.range(10 ).repartition(2 ) A__ : List[str] =[1, 0] A__ : Tuple =_generate_iterable_examples(__snake_case, __snake_case ) # Reverse the partitions. A__ : Dict =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, __snake_case ) for i, (row_id, row_dict) in enumerate(generate_fn() ): A__ , A__ : Union[str, Any] =expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def __lowerCamelCase ( ) -> List[Any]: """simple docstring""" A__ : Any =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() A__ : Union[str, Any] =spark.range(10 ).repartition(1 ) A__ : List[str] =SparkExamplesIterable(__snake_case ) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(__snake_case ): assert row_id == f"0_{i}" assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def __lowerCamelCase ( ) -> Any: """simple docstring""" A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() A__ : Union[str, Any] =spark.range(30 ).repartition(3 ) # Mock the generator so that shuffle reverses the partition indices. 
with patch("""numpy.random.Generator""" ) as generator_mock: A__ : Tuple =lambda __snake_case : x.reverse() A__ : List[str] =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [2, 1, 0] ) A__ : Union[str, Any] =SparkExamplesIterable(__snake_case ).shuffle_data_sources(__snake_case ) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(__snake_case ): A__ , A__ : List[Any] =expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def __lowerCamelCase ( ) -> Optional[Any]: """simple docstring""" A__ : List[Any] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() A__ : Any =spark.range(20 ).repartition(4 ) # Partitions 0 and 2 A__ : str =SparkExamplesIterable(__snake_case ).shard_data_sources(worker_id=0, num_workers=2 ) assert shard_it_a.n_shards == 2 A__ : Any =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [0, 2] ) for i, (row_id, row_dict) in enumerate(__snake_case ): A__ , A__ : Dict =expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 A__ : Union[str, Any] =SparkExamplesIterable(__snake_case ).shard_data_sources(worker_id=1, num_workers=2 ) assert shard_it_a.n_shards == 2 A__ : Union[str, Any] =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [1, 3] ) for i, (row_id, row_dict) in enumerate(__snake_case ): A__ , A__ : Optional[int] =expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def __lowerCamelCase ( ) -> Any: """simple docstring""" A__ : Optional[int] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() A__ : List[str] =spark.range(100 ).repartition(1 ) A__ : List[Any] =Spark(__snake_case ) # Choose a small max_shard_size for maximum partitioning. spark_builder._repartition_df_if_needed(max_shard_size=1 ) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 100
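These tests exercise the internals behind `datasets.Dataset.from_spark`, which is the public way to materialize a Spark DataFrame as a dataset. A hedged sketch; it requires `pyspark` and a `datasets` release that ships the Spark builder.

import pyspark
from datasets import Dataset

spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
df = spark.range(100).repartition(2)
ds = Dataset.from_spark(df)
print(ds[0])  # {'id': 0}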
'''simple docstring''' import os try: from .build_directory_md import good_file_paths except ImportError: from build_directory_md import good_file_paths # type: ignore __snake_case : Optional[int] = list(good_file_paths()) assert filepaths, "good_file_paths() failed!" __snake_case : Tuple = [file for file in filepaths if file != file.lower()] if upper_files: print(F"""{len(upper_files)} files contain uppercase characters:""") print('\n'.join(upper_files) + '\n') __snake_case : int = [file for file in filepaths if ' ' in file] if space_files: print(F"""{len(space_files)} files contain space characters:""") print('\n'.join(space_files) + '\n') __snake_case : Optional[Any] = [file for file in filepaths if '-' in file] if hyphen_files: print(F"""{len(hyphen_files)} files contain hyphen characters:""") print('\n'.join(hyphen_files) + '\n') __snake_case : Dict = [file for file in filepaths if os.sep not in file] if nodir_files: print(F"""{len(nodir_files)} files are not in a directory:""") print('\n'.join(nodir_files) + '\n') __snake_case : Tuple = len(upper_files + space_files + hyphen_files + nodir_files) if bad_files: import sys sys.exit(bad_files)
'''simple docstring''' from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __snake_case : int = { 'configuration_trajectory_transformer': [ 'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrajectoryTransformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : str = [ 'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TrajectoryTransformerModel', 'TrajectoryTransformerPreTrainedModel', 'load_tf_weights_in_trajectory_transformer', ] if TYPE_CHECKING: from .configuration_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TrajectoryTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TrajectoryTransformerModel, TrajectoryTransformerPreTrainedModel, load_tf_weights_in_trajectory_transformer, ) else: import sys __snake_case : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring''' import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 __snake_case : Optional[Any] = get_tests_dir('fixtures') __snake_case : Optional[int] = get_tests_dir('fixtures/dummy_feature_extractor_config.json') __snake_case : Union[str, Any] = get_tests_dir('fixtures/dummy-config.json') class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowercase__ ( self : str ) -> List[Any]: '''simple docstring''' A__ : Tuple =0 def lowercase__ ( self : Union[str, Any] ) -> Optional[int]: '''simple docstring''' A__ : Union[str, Any] =AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) def lowercase__ ( self : List[str] ) -> Tuple: '''simple docstring''' A__ : Tuple =AutoFeatureExtractor.from_pretrained(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) def lowercase__ ( self : Optional[int] ) -> Any: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: A__ : List[Any] =WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally A__ : Tuple =AutoFeatureExtractor.from_pretrained(lowerCAmelCase_ ).to_dict() config_dict.pop("""feature_extractor_type""" ) A__ : Any =WavaVecaFeatureExtractor(**lowerCAmelCase_ ) # save in new folder model_config.save_pretrained(lowerCAmelCase_ ) config.save_pretrained(lowerCAmelCase_ ) A__ : Union[str, Any] =AutoFeatureExtractor.from_pretrained(lowerCAmelCase_ ) # make sure private variable is not incorrectly saved A__ : Any =json.loads(config.to_json_string() ) self.assertTrue("""_processor_class""" not in dict_as_saved ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) def lowercase__ ( self : int ) -> Dict: '''simple docstring''' A__ : List[Any] =AutoFeatureExtractor.from_pretrained(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) def lowercase__ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' with self.assertRaisesRegex( lowerCAmelCase_ , """bert-base is not a local folder and is not a valid model identifier""" ): A__ : Optional[int] =AutoFeatureExtractor.from_pretrained("""bert-base""" ) def lowercase__ ( self : List[str] ) -> Tuple: '''simple docstring''' with self.assertRaisesRegex( lowerCAmelCase_ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): A__ : int =AutoFeatureExtractor.from_pretrained(lowerCAmelCase_ , revision="""aaaaaa""" ) def lowercase__ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' with self.assertRaisesRegex( lowerCAmelCase_ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ): A__ : Optional[int] =AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" ) def lowercase__ ( self : List[str] ) -> Optional[int]: '''simple docstring''' # If remote code is not set, we will time out when asking whether to load the model. 
with self.assertRaises(lowerCAmelCase_ ): A__ : Dict =AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(lowerCAmelCase_ ): A__ : Union[str, Any] =AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCAmelCase_ ) A__ : int =AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCAmelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCAmelCase_ ) A__ : Any =AutoFeatureExtractor.from_pretrained(lowerCAmelCase_ , trust_remote_code=lowerCAmelCase_ ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) def lowercase__ ( self : Optional[int] ) -> List[str]: '''simple docstring''' try: AutoConfig.register("""custom""" , lowerCAmelCase_ ) AutoFeatureExtractor.register(lowerCAmelCase_ , lowerCAmelCase_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCAmelCase_ ): AutoFeatureExtractor.register(lowerCAmelCase_ , lowerCAmelCase_ ) # Now that the config is registered, it can be used as any other config with the auto-API A__ : List[str] =CustomFeatureExtractor.from_pretrained(lowerCAmelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCAmelCase_ ) A__ : List[Any] =AutoFeatureExtractor.from_pretrained(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def lowercase__ ( self : Any ) -> str: '''simple docstring''' class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = True try: AutoConfig.register("""custom""" , lowerCAmelCase_ ) AutoFeatureExtractor.register(lowerCAmelCase_ , lowerCAmelCase_ ) # If remote code is not set, the default is to use local A__ : Union[str, Any] =AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. A__ : Dict =AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCAmelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub A__ : Optional[int] =AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCAmelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(not hasattr(lowerCAmelCase_ , """is_local""" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
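From user code, the registration round trip tested above looks like this: pair a custom config class with a custom feature extractor so the Auto classes can resolve both. A hedged minimal sketch; the "my-model" names are made up.

from transformers import AutoConfig, AutoFeatureExtractor, PretrainedConfig
from transformers.feature_extraction_utils import FeatureExtractionMixin

class MyConfig(PretrainedConfig):
    model_type = "my-model"  # hypothetical model type

class MyFeatureExtractor(FeatureExtractionMixin):
    pass

AutoConfig.register("my-model", MyConfig)
AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)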
'''simple docstring''' import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def __lowerCamelCase ( __snake_case : Dict ) -> List[str]: """simple docstring""" if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class lowerCamelCase ( nn.Module ): '''simple docstring''' def __init__( self : Union[str, Any] , lowerCAmelCase_ : nn.Module , lowerCAmelCase_ : int ) -> str: '''simple docstring''' super().__init__() A__ : Union[str, Any] =module A__ : Union[str, Any] =nn.Sequential( nn.Linear(module.in_features , lowerCAmelCase_ , bias=lowerCAmelCase_ ) , nn.Linear(lowerCAmelCase_ , module.out_features , bias=lowerCAmelCase_ ) , ) A__ : Tuple =(2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=lowerCAmelCase_ ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def lowercase__ ( self : List[str] , lowerCAmelCase_ : Optional[int] , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : int ) -> Dict: '''simple docstring''' return self.module(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ) + self.adapter(lowerCAmelCase_ ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' __snake_case = 'bigscience/bloom-1b7' # Constant values __snake_case = 2.109659552692574 __snake_case = 'Hello my name is' __snake_case = set() EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. 
I' ) EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' ) EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' ) __snake_case = 10 def lowercase__ ( self : Optional[int] ) -> Tuple: '''simple docstring''' # Models and tokenizer A__ : List[Any] =AutoTokenizer.from_pretrained(self.model_name ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' def lowercase__ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' super().setUp() # Models and tokenizer A__ : Optional[int] =AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map="""auto""" ) A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) def lowercase__ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' A__ : str =self.model_abit.config self.assertTrue(hasattr(lowerCAmelCase_ , """quantization_config""" ) ) A__ : Union[str, Any] =config.to_dict() A__ : Any =config.to_diff_dict() A__ : Optional[Any] =config.to_json_string() def lowercase__ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' from bitsandbytes.nn import Paramsabit A__ : int =self.model_fpaa.get_memory_footprint() A__ : Optional[Any] =self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) A__ : Tuple =get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def lowercase__ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(lowerCAmelCase_ , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def lowercase__ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' A__ : int =self.tokenizer(self.input_text , return_tensors="""pt""" ) A__ : Union[str, Any] =self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS ) def lowercase__ ( self : Optional[Any] ) -> Tuple: '''simple docstring''' A__ : Tuple =BitsAndBytesConfig() A__ : Tuple =True A__ : Optional[int] =AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=lowerCAmelCase_ , device_map="""auto""" ) A__ : Union[str, Any] =self.tokenizer(self.input_text , return_tensors="""pt""" ) A__ : Optional[Any] =model_abit_from_config.generate( input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS ) def lowercase__ ( self : str ) -> List[str]: '''simple docstring''' with self.assertRaises(lowerCAmelCase_ ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(lowerCAmelCase_ ) def lowercase__ ( self : List[str] ) -> Any: '''simple docstring''' A__ : Tuple =BitsAndBytesConfig() with self.assertRaises(lowerCAmelCase_ ): A__ : Dict =AutoModelForCausalLM.from_pretrained( self.model_name , 
quantization_config=lowerCAmelCase_ , load_in_abit=lowerCAmelCase_ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , ) def lowercase__ ( self : List[Any] ) -> Optional[int]: '''simple docstring''' with self.assertRaises(lowerCAmelCase_ ): # Tries with `str` self.model_abit.to("""cpu""" ) with self.assertRaises(lowerCAmelCase_ ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(lowerCAmelCase_ ): # Tries with a `device` self.model_abit.to(torch.device("""cuda:0""" ) ) with self.assertRaises(lowerCAmelCase_ ): # Tries with a `device` self.model_abit.float() with self.assertRaises(lowerCAmelCase_ ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" ) A__ : Optional[Any] =self.model_fpaa.to(torch.floataa ) A__ : Dict =self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error A__ : List[str] =self.model_fpaa.to("""cpu""" ) # Check this does not throw an error A__ : List[str] =self.model_fpaa.half() # Check this does not throw an error A__ : int =self.model_fpaa.float() def lowercase__ ( self : int ) -> Dict: '''simple docstring''' A__ : Dict =AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @classmethod def lowercase__ ( cls : List[str] ) -> Union[str, Any]: '''simple docstring''' A__ : Tuple ="""t5-small""" A__ : Optional[Any] ="""google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense A__ : Optional[int] =AutoTokenizer.from_pretrained(cls.model_name ) A__ : Optional[int] ="""Translate in German: Hello, my dog is cute""" def lowercase__ ( self : Optional[int] ) -> Dict: '''simple docstring''' gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : Dict ) -> Optional[Any]: '''simple docstring''' from transformers import TaForConditionalGeneration A__ : Optional[int] =TaForConditionalGeneration._keep_in_fpaa_modules A__ : Optional[Any] =None # test with `t5-small` A__ : str =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) A__ : List[str] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) A__ : Optional[Any] =model.generate(**lowerCAmelCase_ ) # test with `flan-t5-small` A__ : List[str] =TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) A__ : Tuple =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) A__ : Union[str, Any] =model.generate(**lowerCAmelCase_ ) A__ : Dict =modules def lowercase__ ( self : str ) -> Optional[int]: '''simple docstring''' import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` A__ : Optional[int] =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) A__ : Any =model.generate(**lowerCAmelCase_ ) # test with 
`flan-t5-small` A__ : Union[str, Any] =TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) A__ : Optional[int] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) A__ : Dict =model.generate(**lowerCAmelCase_ ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' def lowercase__ ( self : List[Any] ) -> int: '''simple docstring''' super().setUp() # model_name A__ : Any ="""bigscience/bloom-560m""" A__ : List[Any] ="""t5-small""" # Different types of model A__ : Dict =AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) # Sequence classification model A__ : List[Any] =AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) # CausalLM model A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) # Seq2seq model A__ : List[str] =AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) def lowercase__ ( self : Dict ) -> int: '''simple docstring''' del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : str ) -> List[Any]: '''simple docstring''' from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' def lowercase__ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' super().setUp() def lowercase__ ( self : Optional[Any] ) -> int: '''simple docstring''' del self.pipe gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : Any ) -> Union[str, Any]: '''simple docstring''' A__ : Dict =pipeline( """text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass A__ : Optional[int] =self.pipe(self.input_text ) self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class lowerCamelCase ( lowercase_ ): '''simple docstring''' def lowercase__ ( self : str ) -> int: '''simple docstring''' super().setUp() def lowercase__ ( self : Tuple ) -> Tuple: '''simple docstring''' A__ : int =AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""balanced""" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model A__ : str =self.tokenizer(self.input_text , return_tensors="""pt""" ) # Second real batch A__ : Any =model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' def lowercase__ ( self : int ) -> Optional[Any]: '''simple docstring''' A__ : Union[str, Any] ="""facebook/opt-350m""" super().setUp() def 
lowercase__ ( self : List[str] ) -> Dict: '''simple docstring''' if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ): return # Step 1: freeze all parameters A__ : Optional[Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): A__ : int =False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability A__ : Dict =param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(lowerCAmelCase_ ) ): A__ : int =LoRALayer(module.q_proj , rank=16 ) A__ : Any =LoRALayer(module.k_proj , rank=16 ) A__ : Union[str, Any] =LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch A__ : List[Any] =self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): A__ : Any =model.forward(**lowerCAmelCase_ ) out.logits.norm().backward() for module in model.modules(): if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(lowerCAmelCase_ , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = 'gpt2-xl' __snake_case = 3.3191854854152187
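A usage sketch of the quantized-loading API these tests cover, assuming a CUDA device with the bitsandbytes and accelerate packages installed; the checkpoint name is illustrative.

from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

quant_config = BitsAndBytesConfig(load_in_8bit=True)  # or load_in_4bit=True
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
model = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-560m", quantization_config=quant_config, device_map="auto"
)
inputs = tokenizer("Hello my name is", return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.decode(output[0], skip_special_tokens=True))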
'''simple docstring''' from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class lowerCamelCase ( lowercase_ ): '''simple docstring''' def __init__( self : List[str] , lowerCAmelCase_ : Callable , lowerCAmelCase_ : Optional[Features] = None , lowerCAmelCase_ : str = None , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional[dict] = None , lowerCAmelCase_ : Optional[int] = None , **lowerCAmelCase_ : Optional[Any] , ) -> Union[str, Any]: '''simple docstring''' super().__init__( features=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , keep_in_memory=lowerCAmelCase_ , streaming=lowerCAmelCase_ , num_proc=lowerCAmelCase_ , **lowerCAmelCase_ , ) A__ : Optional[int] =Generator( cache_dir=lowerCAmelCase_ , features=lowerCAmelCase_ , generator=lowerCAmelCase_ , gen_kwargs=lowerCAmelCase_ , **lowerCAmelCase_ , ) def lowercase__ ( self : Tuple ) -> Tuple: '''simple docstring''' # Build iterable dataset if self.streaming: A__ : List[Any] =self.builder.as_streaming_dataset(split="""train""" ) # Build regular (map-style) dataset else: A__ : List[Any] =None A__ : List[Any] =None A__ : int =None A__ : str =None self.builder.download_and_prepare( download_config=lowerCAmelCase_ , download_mode=lowerCAmelCase_ , verification_mode=lowerCAmelCase_ , base_path=lowerCAmelCase_ , num_proc=self.num_proc , ) A__ : str =self.builder.as_dataset( split="""train""" , verification_mode=lowerCAmelCase_ , in_memory=self.keep_in_memory ) return dataset
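For context, a short sketch of the public entry point that wraps this reader, datasets.Dataset.from_generator (available in datasets >= 2.4); the generator contents are illustrative.

from datasets import Dataset

def gen():
    for i in range(3):
        yield {"id": i, "text": f"example {i}"}

ds = Dataset.from_generator(gen)  # builds a map-style dataset from the callable
print(ds[0])  # {'id': 0, 'text': 'example 0'}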
'''simple docstring''' import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor __snake_case : Optional[int] = logging.get_logger(__name__) class lowerCamelCase ( lowercase_ ): '''simple docstring''' def __init__( self : Tuple , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase__ : int ) -> None: '''simple docstring''' warnings.warn( """The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use YolosImageProcessor instead.""" , FutureWarning , ) super().__init__(*lowerCAmelCase_ , **lowerCAmelCase__ )
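Migration sketch: the deprecated class above only forwards to YolosImageProcessor with a warning, so new code can instantiate the processor directly; the checkpoint name and image path are illustrative.

from PIL import Image
from transformers import YolosImageProcessor

image_processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
image = Image.open("example.jpg")  # illustrative local file
inputs = image_processor(images=image, return_tensors="pt")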
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version __snake_case : Tuple = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt') __snake_case : Dict = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) __snake_case : List[str] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class lowerCamelCase : '''simple docstring''' __snake_case = field( default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} ) __snake_case = field( default=lowercase_ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} ) __snake_case = field( default=lowercase_ , metadata={'help': 'The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'} , ) __snake_case = field(default=lowercase_ , metadata={'help': 'A folder containing the training data.'} ) __snake_case = field(default=lowercase_ , metadata={'help': 'A folder containing the validation data.'} ) __snake_case = field( default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} ) __snake_case = field(default=32 , metadata={'help': 'The size of the square patches to use for masking.'} ) __snake_case = field( default=0.6 , metadata={'help': 'Percentage of patches to mask.'} , ) __snake_case = field( default=lowercase_ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) } , ) __snake_case = field( default=lowercase_ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' ) } , ) def lowercase__ ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' A__ : Dict ={} if self.train_dir is not None: A__ : Dict =self.train_dir if self.validation_dir is not None: A__ : str =self.validation_dir A__ : Any =data_files if data_files else None @dataclass class lowerCamelCase : '''simple docstring''' __snake_case = field( default=lowercase_ , metadata={ 'help': ( 'The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ' 'checkpoint identifier on the hub. ' 'Don\'t set if you want to train a model from scratch.' ) } , ) __snake_case = field( default=lowercase_ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(lowercase_ )} , ) __snake_case = field( default=lowercase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) __snake_case = field( default=lowercase_ , metadata={ 'help': ( 'Override some existing default config settings when a model is trained from scratch. 
Example: ' 'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index' ) } , ) __snake_case = field( default=lowercase_ , metadata={'help': 'Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'} , ) __snake_case = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) __snake_case = field(default=lowercase_ , metadata={'help': 'Name or path of preprocessor config.'} ) __snake_case = field( default=lowercase_ , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) } , ) __snake_case = field( default=lowercase_ , metadata={ 'help': ( 'The size (resolution) of each image. If not specified, will use `image_size` of the configuration.' ) } , ) __snake_case = field( default=lowercase_ , metadata={ 'help': ( 'The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.' ) } , ) __snake_case = field( default=lowercase_ , metadata={'help': 'Stride to use for the encoder.'} , ) class lowerCamelCase : '''simple docstring''' def __init__( self : int , lowerCAmelCase_ : List[str]=1_92 , lowerCAmelCase_ : Dict=32 , lowerCAmelCase_ : Tuple=4 , lowerCAmelCase_ : List[Any]=0.6 ) -> str: '''simple docstring''' A__ : List[Any] =input_size A__ : Union[str, Any] =mask_patch_size A__ : List[Any] =model_patch_size A__ : str =mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError("""Input size must be divisible by mask patch size""" ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError("""Mask patch size must be divisible by model patch size""" ) A__ : List[str] =self.input_size // self.mask_patch_size A__ : Optional[Any] =self.mask_patch_size // self.model_patch_size A__ : Optional[Any] =self.rand_size**2 A__ : Tuple =int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self : Optional[int] ) -> Any: '''simple docstring''' A__ : Union[str, Any] =np.random.permutation(self.token_count )[: self.mask_count] A__ : Any =np.zeros(self.token_count , dtype=lowerCAmelCase_ ) A__ : Tuple =1 A__ : Dict =mask.reshape((self.rand_size, self.rand_size) ) A__ : str =mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def __lowerCamelCase ( __snake_case : List[Any] ) -> Optional[Any]: """simple docstring""" A__ : List[Any] =torch.stack([example["""pixel_values"""] for example in examples] ) A__ : int =torch.stack([example["""mask"""] for example in examples] ) return {"pixel_values": pixel_values, "bool_masked_pos": mask} def __lowerCamelCase ( ) -> Optional[int]: """simple docstring""" A__ : List[Any] =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. A__ , A__ , A__ : Any =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: A__ , A__ , A__ : List[str] =parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. 
send_example_telemetry("""run_mim""", __snake_case, __snake_case ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", handlers=[logging.StreamHandler(sys.stdout )], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() A__ : List[Any] =training_args.get_process_log_level() logger.setLevel(__snake_case ) transformers.utils.logging.set_verbosity(__snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(f"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. A__ : Optional[int] =None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: A__ : int =get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset. A__ : Dict =load_dataset( data_args.dataset_name, data_args.dataset_config_name, data_files=data_args.data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) # If we don't have a validation split, split off a percentage of train as validation. A__ : Any =None if """validation""" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split, __snake_case ) and data_args.train_val_split > 0.0: A__ : Tuple =ds["""train"""].train_test_split(data_args.train_val_split ) A__ : Union[str, Any] =split["""train"""] A__ : Optional[int] =split["""test"""] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
A__ : Union[str, Any] ={ """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name_or_path: A__ : Union[str, Any] =AutoConfig.from_pretrained(model_args.config_name_or_path, **__snake_case ) elif model_args.model_name_or_path: A__ : List[Any] =AutoConfig.from_pretrained(model_args.model_name_or_path, **__snake_case ) else: A__ : str =CONFIG_MAPPING[model_args.model_type]() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(f"Overriding config: {model_args.config_overrides}" ) config.update_from_string(model_args.config_overrides ) logger.info(f"New config: {config}" ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(__snake_case, """decoder_type""" ): A__ : Tuple ="""simmim""" # adapt config A__ : Tuple =model_args.image_size if model_args.image_size is not None else config.image_size A__ : int =model_args.patch_size if model_args.patch_size is not None else config.patch_size A__ : List[str] =( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { """image_size""": model_args.image_size, """patch_size""": model_args.patch_size, """encoder_stride""": model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: A__ : str =AutoImageProcessor.from_pretrained(model_args.image_processor_name, **__snake_case ) elif model_args.model_name_or_path: A__ : Tuple =AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **__snake_case ) else: A__ : List[Any] ={ conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } A__ : List[str] =IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: A__ : List[str] =AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path, from_tf=bool(""".ckpt""" in model_args.model_name_or_path ), config=__snake_case, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) else: logger.info("""Training new model from scratch""" ) A__ : Dict =AutoModelForMaskedImageModeling.from_config(__snake_case ) if training_args.do_train: A__ : Any =ds["""train"""].column_names else: A__ : Dict =ds["""validation"""].column_names if data_args.image_column_name is not None: A__ : Tuple =data_args.image_column_name elif "image" in column_names: A__ : List[Any] ="""image""" elif "img" in column_names: A__ : Dict ="""img""" else: A__ : str =column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py A__ : str =Compose( [ Lambda(lambda __snake_case : img.convert("""RGB""" ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean, std=image_processor.image_std ), ] ) # create mask generator A__ : Union[str, Any] =MaskGenerator( input_size=model_args.image_size, mask_patch_size=data_args.mask_patch_size, model_patch_size=model_args.patch_size, mask_ratio=data_args.mask_ratio, ) def preprocess_images(__snake_case : Union[str, Any] ): A__ : List[str] =[transforms(__snake_case ) for image in examples[image_column_name]] A__ : List[str] =[mask_generator() for i in 
range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: A__ : int =ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(__snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: A__ : Union[str, Any] =( ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(__snake_case ) # Initialize our trainer A__ : List[str] =Trainer( model=__snake_case, args=__snake_case, train_dataset=ds["""train"""] if training_args.do_train else None, eval_dataset=ds["""validation"""] if training_args.do_eval else None, tokenizer=__snake_case, data_collator=__snake_case, ) # Training if training_args.do_train: A__ : Tuple =None if training_args.resume_from_checkpoint is not None: A__ : Optional[int] =training_args.resume_from_checkpoint elif last_checkpoint is not None: A__ : Tuple =last_checkpoint A__ : List[Any] =trainer.train(resume_from_checkpoint=__snake_case ) trainer.save_model() trainer.log_metrics("""train""", train_result.metrics ) trainer.save_metrics("""train""", train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: A__ : List[str] =trainer.evaluate() trainer.log_metrics("""eval""", __snake_case ) trainer.save_metrics("""eval""", __snake_case ) # Write model card and (optionally) push to hub A__ : List[Any] ={ """finetuned_from""": model_args.model_name_or_path, """tasks""": """masked-image-modeling""", """dataset""": data_args.dataset_name, """tags""": ["""masked-image-modeling"""], } if training_args.push_to_hub: trainer.push_to_hub(**__snake_case ) else: trainer.create_model_card(**__snake_case ) if __name__ == "__main__": main()
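The mask generator defined above samples mask_count of the token_count patch positions and upsamples the boolean grid to the model's patch resolution; a self-contained restatement of that logic, with the default sizes from the script (all values illustrative):

import numpy as np

input_size, mask_patch_size, model_patch_size, mask_ratio = 192, 32, 4, 0.6
rand_size = input_size // mask_patch_size             # 6x6 grid of maskable patches
scale = mask_patch_size // model_patch_size           # upsample factor to model patches
token_count = rand_size**2                            # 36 candidate positions
mask_count = int(np.ceil(token_count * mask_ratio))   # 22 positions get masked

idx = np.random.permutation(token_count)[:mask_count]
mask = np.zeros(token_count, dtype=int)
mask[idx] = 1
mask = mask.reshape(rand_size, rand_size).repeat(scale, axis=0).repeat(scale, axis=1)
print(mask.shape)  # (48, 48): boolean mask at model-patch resolution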
'''simple docstring''' import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCamelCase : '''simple docstring''' def __init__( self : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple=13 , lowerCAmelCase_ : Any=7 , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : str=99 , lowerCAmelCase_ : int=0 , lowerCAmelCase_ : str=32 , lowerCAmelCase_ : List[str]=5 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : List[Any]=5_12 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : int=2 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : List[str]="last" , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[str]=0 , ) -> Tuple: '''simple docstring''' A__ : Tuple =parent A__ : Any =batch_size A__ : List[str] =seq_length A__ : Optional[Any] =is_training A__ : Dict =use_input_lengths A__ : int =use_token_type_ids A__ : Union[str, Any] =use_labels A__ : Optional[Any] =gelu_activation A__ : List[Any] =sinusoidal_embeddings A__ : List[Any] =causal A__ : str =asm A__ : Tuple =n_langs A__ : Dict =vocab_size A__ : Optional[Any] =n_special A__ : Tuple =hidden_size A__ : Dict =num_hidden_layers A__ : int =num_attention_heads A__ : Optional[Any] =hidden_dropout_prob A__ : Optional[Any] =attention_probs_dropout_prob A__ : Optional[int] =max_position_embeddings A__ : Optional[int] =type_sequence_label_size A__ : Tuple =initializer_range A__ : Any =num_labels A__ : str =num_choices A__ : Optional[int] =summary_type A__ : int =use_proj A__ : Tuple =scope A__ : Union[str, Any] =bos_token_id def lowercase__ ( self : Any ) -> Optional[Any]: '''simple docstring''' A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ : Dict =random_attention_mask([self.batch_size, self.seq_length] ) A__ : Tuple =None if self.use_input_lengths: A__ : Tuple =( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length A__ : Optional[Any] =None if self.use_token_type_ids: A__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) A__ : Any =None A__ : Tuple =None A__ : Optional[Any] =None if self.use_labels: A__ : List[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A__ : Union[str, Any] =ids_tensor([self.batch_size] , 2 ).float() A__ : str =ids_tensor([self.batch_size] , self.num_choices ) A__ : Union[str, Any] =self.get_config() return ( config, 
input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , ) -> Optional[Any]: '''simple docstring''' A__ : List[str] =XLMModel(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : Dict =model(lowerCAmelCase_ , lengths=lowerCAmelCase_ , langs=lowerCAmelCase_ ) A__ : Any =model(lowerCAmelCase_ , langs=lowerCAmelCase_ ) A__ : Tuple =model(lowerCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , ) -> Union[str, Any]: '''simple docstring''' A__ : List[Any] =XLMWithLMHeadModel(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : Tuple =model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase__ ( self : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] , ) -> str: '''simple docstring''' A__ : Union[str, Any] =XLMForQuestionAnsweringSimple(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : List[str] =model(lowerCAmelCase_ ) A__ : Optional[int] =model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ ) A__ : List[Any] =outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase__ ( self : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int , ) -> Any: '''simple docstring''' A__ : str =XLMForQuestionAnswering(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : List[str] =model(lowerCAmelCase_ ) A__ : Tuple =model( lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ 
, cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , p_mask=lowerCAmelCase_ , ) A__ : Optional[Any] =model( lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , ) ((A__) , ) : List[Any] =result_with_labels.to_tuple() A__ : Tuple =model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ ) ((A__) , ) : Tuple =result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def lowercase__ ( self : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int , ) -> Any: '''simple docstring''' A__ : Union[str, Any] =XLMForSequenceClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : str =model(lowerCAmelCase_ ) A__ : List[Any] =model(lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowercase__ ( self : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , ) -> Dict: '''simple docstring''' A__ : int =self.num_labels A__ : Tuple =XLMForTokenClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : Any =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , ) -> List[str]: '''simple docstring''' A__ : Optional[Any] =self.num_choices A__ : Optional[int] =XLMForMultipleChoice(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() A__ : Optional[int] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A__ : str =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A__ : Union[str, Any] =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A__ : Union[str, Any] =model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowercase__ ( self : str ) -> List[str]: '''simple docstring''' A__ : Dict =self.prepare_config_and_inputs() ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , 
( A__ ) , ( A__ ) , ) : Optional[int] =config_and_inputs A__ : Any ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths} return config, inputs_dict @require_torch class lowerCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ): '''simple docstring''' __snake_case = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) __snake_case = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable __snake_case = ( { 'feature-extraction': XLMModel, 'fill-mask': XLMWithLMHeadModel, 'question-answering': XLMForQuestionAnsweringSimple, 'text-classification': XLMForSequenceClassification, 'text-generation': XLMWithLMHeadModel, 'token-classification': XLMForTokenClassification, 'zero-shot': XLMForSequenceClassification, } if is_torch_available() else {} ) def lowercase__ ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] ) -> Union[str, Any]: '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("""Fast""" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str]=False ) -> int: '''simple docstring''' A__ : Tuple =super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": A__ : List[str] =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ ) A__ : Any =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ ) return inputs_dict def lowercase__ ( self : Union[str, Any] ) -> str: '''simple docstring''' A__ : Dict =XLMModelTester(self ) A__ : List[str] =ConfigTester(self , config_class=lowerCAmelCase_ , emb_dim=37 ) def lowercase__ ( self : Tuple ) -> List[str]: '''simple docstring''' self.config_tester.run_common_tests() def lowercase__ ( self : str ) -> Optional[int]: '''simple docstring''' A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*lowerCAmelCase_ ) def lowercase__ ( self : Dict ) -> Optional[int]: '''simple docstring''' A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase_ ) def lowercase__ ( self : List[str] ) -> Dict: '''simple docstring''' A__ : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase_ ) def lowercase__ ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase_ ) def lowercase__ ( self : List[Any] ) -> str: '''simple docstring''' A__ : List[str] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase_ ) def 
lowercase__ ( self : Any ) -> Tuple: '''simple docstring''' A__ : Optional[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase_ ) def lowercase__ ( self : Optional[int] ) -> Any: '''simple docstring''' A__ : Optional[int] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase_ ) def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Tuple=1 ) -> Tuple: '''simple docstring''' self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual( [isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase_ ) ) self.assertEqual(len(lowerCAmelCase_ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(lowerCAmelCase_ ): # adds PAD dummy token A__ : Tuple =min_length + idx + 1 A__ : Tuple =min_length + idx + 1 A__ : Dict =( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase_ ) ) def lowercase__ ( self : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Union[str, Any]=1 ) -> Any: '''simple docstring''' self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual( [isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase_ ) , ) self.assertEqual(len(lowerCAmelCase_ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(lowerCAmelCase_ ): # adds PAD dummy token A__ : str =min_length + idx + 1 A__ : List[Any] =(batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase_ ) , ) pass @slow def lowercase__ ( self : int ) -> List[Any]: '''simple docstring''' for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ : Tuple =XLMModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) @require_torch class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowercase__ ( self : Optional[Any] ) -> int: '''simple docstring''' A__ : Any =XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" ) model.to(lowerCAmelCase_ ) A__ : List[Any] =torch.tensor([[14, 4_47]] , dtype=torch.long , device=lowerCAmelCase_ ) # the president A__ : Optional[Any] =[ 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference A__ : Tuple =model.generate(lowerCAmelCase_ , do_sample=lowerCAmelCase_ ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase_ )
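A minimal inference sketch for the model family under test, mirroring the slow integration test above; it assumes network access to the public xlm-mlm-en-2048 checkpoint.

import torch
from transformers import XLMTokenizer, XLMWithLMHeadModel

tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
inputs = tokenizer("the president", return_tensors="pt")
with torch.no_grad():
    generated = model.generate(**inputs, do_sample=False, max_new_tokens=5)
print(tokenizer.decode(generated[0]))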
'''simple docstring''' import contextlib import copy import random from typing import Any, Dict, Iterable, Optional, Union import numpy as np import torch from .utils import deprecate, is_transformers_available if is_transformers_available(): import transformers def __lowerCamelCase ( __snake_case : int ) -> Optional[int]: """simple docstring""" random.seed(__snake_case ) np.random.seed(__snake_case ) torch.manual_seed(__snake_case ) torch.cuda.manual_seed_all(__snake_case ) # ^^ safe to call this function even if cuda is not available class lowerCamelCase : '''simple docstring''' def __init__( self : Optional[Any] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] , lowerCAmelCase_ : float = 0.9999 , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Union[float, int] = 1.0 , lowerCAmelCase_ : Union[float, int] = 2 / 3 , lowerCAmelCase_ : Optional[Any] = None , lowerCAmelCase_ : Dict[str, Any] = None , **lowerCAmelCase_ : Optional[Any] , ) -> List[str]: '''simple docstring''' if isinstance(lowerCAmelCase_ , torch.nn.Module ): A__ : Optional[Any] =( """Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. """ """Please pass the parameters of the module instead.""" ) deprecate( """passing a `torch.nn.Module` to `ExponentialMovingAverage`""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ , ) A__ : List[str] =parameters.parameters() # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility A__ : int =True if kwargs.get("""max_value""" , lowerCAmelCase_ ) is not None: A__ : Tuple ="""The `max_value` argument is deprecated. Please use `decay` instead.""" deprecate("""max_value""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ ) A__ : Union[str, Any] =kwargs["""max_value"""] if kwargs.get("""min_value""" , lowerCAmelCase_ ) is not None: A__ : List[str] ="""The `min_value` argument is deprecated. Please use `min_decay` instead.""" deprecate("""min_value""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ ) A__ : Optional[Any] =kwargs["""min_value"""] A__ : Any =list(lowerCAmelCase_ ) A__ : int =[p.clone().detach() for p in parameters] if kwargs.get("""device""" , lowerCAmelCase_ ) is not None: A__ : List[str] ="""The `device` argument is deprecated. 
Please use `to` instead.""" deprecate("""device""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ ) self.to(device=kwargs["""device"""] ) A__ : Optional[int] =None A__ : Any =decay A__ : List[Any] =min_decay A__ : Optional[int] =update_after_step A__ : List[str] =use_ema_warmup A__ : str =inv_gamma A__ : Union[str, Any] =power A__ : str =0 A__ : str =None # set in `step()` A__ : List[str] =model_cls A__ : Optional[int] =model_config @classmethod def lowercase__ ( cls : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict ) -> "EMAModel": '''simple docstring''' A__ , A__ : Tuple =model_cls.load_config(lowerCAmelCase_ , return_unused_kwargs=lowerCAmelCase_ ) A__ : Optional[Any] =model_cls.from_pretrained(lowerCAmelCase_ ) A__ : Optional[Any] =cls(model.parameters() , model_cls=lowerCAmelCase_ , model_config=model.config ) ema_model.load_state_dict(lowerCAmelCase_ ) return ema_model def lowercase__ ( self : List[str] , lowerCAmelCase_ : Tuple ) -> List[Any]: '''simple docstring''' if self.model_cls is None: raise ValueError("""`save_pretrained` can only be used if `model_cls` was defined at __init__.""" ) if self.model_config is None: raise ValueError("""`save_pretrained` can only be used if `model_config` was defined at __init__.""" ) A__ : Optional[int] =self.model_cls.from_config(self.model_config ) A__ : Optional[Any] =self.state_dict() state_dict.pop("""shadow_params""" , lowerCAmelCase_ ) model.register_to_config(**lowerCAmelCase_ ) self.copy_to(model.parameters() ) model.save_pretrained(lowerCAmelCase_ ) def lowercase__ ( self : Dict , lowerCAmelCase_ : int ) -> float: '''simple docstring''' A__ : Optional[int] =max(0 , optimization_step - self.update_after_step - 1 ) if step <= 0: return 0.0 if self.use_ema_warmup: A__ : List[Any] =1 - (1 + step / self.inv_gamma) ** -self.power else: A__ : Union[str, Any] =(1 + step) / (10 + step) A__ : str =min(lowerCAmelCase_ , self.decay ) # make sure decay is not smaller than min_decay A__ : int =max(lowerCAmelCase_ , self.min_decay ) return cur_decay_value @torch.no_grad() def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> Optional[Any]: '''simple docstring''' if isinstance(lowerCAmelCase_ , torch.nn.Module ): A__ : Any =( """Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. """ """Please pass the parameters of the module instead.""" ) deprecate( """passing a `torch.nn.Module` to `ExponentialMovingAverage.step`""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ , ) A__ : Optional[int] =parameters.parameters() A__ : Dict =list(lowerCAmelCase_ ) self.optimization_step += 1 # Compute the decay factor for the exponential moving average. 
A__ : Any =self.get_decay(self.optimization_step ) A__ : Optional[int] =decay A__ : List[str] =1 - decay A__ : str =contextlib.nullcontext if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): import deepspeed for s_param, param in zip(self.shadow_params , lowerCAmelCase_ ): if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): A__ : List[Any] =deepspeed.zero.GatheredParameters(lowerCAmelCase_ , modifier_rank=lowerCAmelCase_ ) with context_manager(): if param.requires_grad: s_param.sub_(one_minus_decay * (s_param - param) ) else: s_param.copy_(lowerCAmelCase_ ) def lowercase__ ( self : Tuple , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None: '''simple docstring''' A__ : Optional[Any] =list(lowerCAmelCase_ ) for s_param, param in zip(self.shadow_params , lowerCAmelCase_ ): param.data.copy_(s_param.to(param.device ).data ) def lowercase__ ( self : int , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : List[Any]=None ) -> None: '''simple docstring''' A__ : str =[ p.to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ) if p.is_floating_point() else p.to(device=lowerCAmelCase_ ) for p in self.shadow_params ] def lowercase__ ( self : Optional[Any] ) -> dict: '''simple docstring''' return { "decay": self.decay, "min_decay": self.min_decay, "optimization_step": self.optimization_step, "update_after_step": self.update_after_step, "use_ema_warmup": self.use_ema_warmup, "inv_gamma": self.inv_gamma, "power": self.power, "shadow_params": self.shadow_params, } def lowercase__ ( self : Tuple , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None: '''simple docstring''' A__ : List[str] =[param.detach().cpu().clone() for param in parameters] def lowercase__ ( self : List[str] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None: '''simple docstring''' if self.temp_stored_params is None: raise RuntimeError("""This ExponentialMovingAverage has no `store()`ed weights """ """to `restore()`""" ) for c_param, param in zip(self.temp_stored_params , lowerCAmelCase_ ): param.data.copy_(c_param.data ) # Better memory-wise. 
A__ : List[str] =None def lowercase__ ( self : List[str] , lowerCAmelCase_ : dict ) -> None: '''simple docstring''' A__ : List[Any] =copy.deepcopy(lowerCAmelCase_ ) A__ : List[Any] =state_dict.get("""decay""" , self.decay ) if self.decay < 0.0 or self.decay > 1.0: raise ValueError("""Decay must be between 0 and 1""" ) A__ : List[Any] =state_dict.get("""min_decay""" , self.min_decay ) if not isinstance(self.min_decay , lowerCAmelCase_ ): raise ValueError("""Invalid min_decay""" ) A__ : Tuple =state_dict.get("""optimization_step""" , self.optimization_step ) if not isinstance(self.optimization_step , lowerCAmelCase_ ): raise ValueError("""Invalid optimization_step""" ) A__ : Any =state_dict.get("""update_after_step""" , self.update_after_step ) if not isinstance(self.update_after_step , lowerCAmelCase_ ): raise ValueError("""Invalid update_after_step""" ) A__ : str =state_dict.get("""use_ema_warmup""" , self.use_ema_warmup ) if not isinstance(self.use_ema_warmup , lowerCAmelCase_ ): raise ValueError("""Invalid use_ema_warmup""" ) A__ : str =state_dict.get("""inv_gamma""" , self.inv_gamma ) if not isinstance(self.inv_gamma , (float, int) ): raise ValueError("""Invalid inv_gamma""" ) A__ : Tuple =state_dict.get("""power""" , self.power ) if not isinstance(self.power , (float, int) ): raise ValueError("""Invalid power""" ) A__ : Tuple =state_dict.get("""shadow_params""" , lowerCAmelCase_ ) if shadow_params is not None: A__ : List[str] =shadow_params if not isinstance(self.shadow_params , lowerCAmelCase_ ): raise ValueError("""shadow_params must be a list""" ) if not all(isinstance(lowerCAmelCase_ , torch.Tensor ) for p in self.shadow_params ): raise ValueError("""shadow_params must all be Tensors""" )
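A usage sketch for the EMA helper defined above, assuming the public diffusers.training_utils.EMAModel interface that this class mirrors.

import torch
from diffusers.training_utils import EMAModel

net = torch.nn.Linear(4, 4)
ema = EMAModel(net.parameters(), decay=0.9999)
optimizer = torch.optim.SGD(net.parameters(), lr=1e-2)

for _ in range(3):  # toy training loop
    loss = net(torch.randn(8, 4)).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    ema.step(net.parameters())   # update shadow weights after each optimizer step

ema.store(net.parameters())      # stash the raw weights
ema.copy_to(net.parameters())    # evaluate with the averaged weights
ema.restore(net.parameters())    # put the raw weights back for further training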
687
'''simple docstring''' import contextlib import copy import random from typing import Any, Dict, Iterable, Optional, Union import numpy as np import torch from .utils import deprecate, is_transformers_available if is_transformers_available(): import transformers def __lowerCamelCase ( __snake_case : int ) -> Optional[int]: """simple docstring""" random.seed(__snake_case ) np.random.seed(__snake_case ) torch.manual_seed(__snake_case ) torch.cuda.manual_seed_all(__snake_case ) # ^^ safe to call this function even if cuda is not available class lowerCamelCase : '''simple docstring''' def __init__( self : Optional[Any] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] , lowerCAmelCase_ : float = 0.9999 , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Union[float, int] = 1.0 , lowerCAmelCase_ : Union[float, int] = 2 / 3 , lowerCAmelCase_ : Optional[Any] = None , lowerCAmelCase_ : Dict[str, Any] = None , **lowerCAmelCase_ : Optional[Any] , ) -> List[str]: '''simple docstring''' if isinstance(lowerCAmelCase_ , torch.nn.Module ): A__ : Optional[Any] =( """Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. """ """Please pass the parameters of the module instead.""" ) deprecate( """passing a `torch.nn.Module` to `ExponentialMovingAverage`""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ , ) A__ : List[str] =parameters.parameters() # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility A__ : int =True if kwargs.get("""max_value""" , lowerCAmelCase_ ) is not None: A__ : Tuple ="""The `max_value` argument is deprecated. Please use `decay` instead.""" deprecate("""max_value""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ ) A__ : Union[str, Any] =kwargs["""max_value"""] if kwargs.get("""min_value""" , lowerCAmelCase_ ) is not None: A__ : List[str] ="""The `min_value` argument is deprecated. Please use `min_decay` instead.""" deprecate("""min_value""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ ) A__ : Optional[Any] =kwargs["""min_value"""] A__ : Any =list(lowerCAmelCase_ ) A__ : int =[p.clone().detach() for p in parameters] if kwargs.get("""device""" , lowerCAmelCase_ ) is not None: A__ : List[str] ="""The `device` argument is deprecated. 
Please use `to` instead.""" deprecate("""device""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ ) self.to(device=kwargs["""device"""] ) A__ : Optional[int] =None A__ : Any =decay A__ : List[Any] =min_decay A__ : Optional[int] =update_after_step A__ : List[str] =use_ema_warmup A__ : str =inv_gamma A__ : Union[str, Any] =power A__ : str =0 A__ : str =None # set in `step()` A__ : List[str] =model_cls A__ : Optional[int] =model_config @classmethod def lowercase__ ( cls : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict ) -> "EMAModel": '''simple docstring''' A__ , A__ : Tuple =model_cls.load_config(lowerCAmelCase_ , return_unused_kwargs=lowerCAmelCase_ ) A__ : Optional[Any] =model_cls.from_pretrained(lowerCAmelCase_ ) A__ : Optional[Any] =cls(model.parameters() , model_cls=lowerCAmelCase_ , model_config=model.config ) ema_model.load_state_dict(lowerCAmelCase_ ) return ema_model def lowercase__ ( self : List[str] , lowerCAmelCase_ : Tuple ) -> List[Any]: '''simple docstring''' if self.model_cls is None: raise ValueError("""`save_pretrained` can only be used if `model_cls` was defined at __init__.""" ) if self.model_config is None: raise ValueError("""`save_pretrained` can only be used if `model_config` was defined at __init__.""" ) A__ : Optional[int] =self.model_cls.from_config(self.model_config ) A__ : Optional[Any] =self.state_dict() state_dict.pop("""shadow_params""" , lowerCAmelCase_ ) model.register_to_config(**lowerCAmelCase_ ) self.copy_to(model.parameters() ) model.save_pretrained(lowerCAmelCase_ ) def lowercase__ ( self : Dict , lowerCAmelCase_ : int ) -> float: '''simple docstring''' A__ : Optional[int] =max(0 , optimization_step - self.update_after_step - 1 ) if step <= 0: return 0.0 if self.use_ema_warmup: A__ : List[Any] =1 - (1 + step / self.inv_gamma) ** -self.power else: A__ : Union[str, Any] =(1 + step) / (10 + step) A__ : str =min(lowerCAmelCase_ , self.decay ) # make sure decay is not smaller than min_decay A__ : int =max(lowerCAmelCase_ , self.min_decay ) return cur_decay_value @torch.no_grad() def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> Optional[Any]: '''simple docstring''' if isinstance(lowerCAmelCase_ , torch.nn.Module ): A__ : Any =( """Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. """ """Please pass the parameters of the module instead.""" ) deprecate( """passing a `torch.nn.Module` to `ExponentialMovingAverage.step`""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ , ) A__ : Optional[int] =parameters.parameters() A__ : Dict =list(lowerCAmelCase_ ) self.optimization_step += 1 # Compute the decay factor for the exponential moving average. 
A__ : Any =self.get_decay(self.optimization_step ) A__ : Optional[int] =decay A__ : List[str] =1 - decay A__ : str =contextlib.nullcontext if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled(): import deepspeed for s_param, param in zip(self.shadow_params , lowerCAmelCase_ ): if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled(): A__ : List[Any] =deepspeed.zero.GatheredParameters(lowerCAmelCase_ , modifier_rank=lowerCAmelCase_ ) with context_manager(): if param.requires_grad: s_param.sub_(one_minus_decay * (s_param - param) ) else: s_param.copy_(lowerCAmelCase_ ) def lowercase__ ( self : Tuple , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None: '''simple docstring''' A__ : Optional[Any] =list(lowerCAmelCase_ ) for s_param, param in zip(self.shadow_params , lowerCAmelCase_ ): param.data.copy_(s_param.to(param.device ).data ) def lowercase__ ( self : int , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : List[Any]=None ) -> None: '''simple docstring''' A__ : str =[ p.to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ) if p.is_floating_point() else p.to(device=lowerCAmelCase_ ) for p in self.shadow_params ] def lowercase__ ( self : Optional[Any] ) -> dict: '''simple docstring''' return { "decay": self.decay, "min_decay": self.min_decay, "optimization_step": self.optimization_step, "update_after_step": self.update_after_step, "use_ema_warmup": self.use_ema_warmup, "inv_gamma": self.inv_gamma, "power": self.power, "shadow_params": self.shadow_params, } def lowercase__ ( self : Tuple , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None: '''simple docstring''' A__ : List[str] =[param.detach().cpu().clone() for param in parameters] def lowercase__ ( self : List[str] , lowerCAmelCase_ : Iterable[torch.nn.Parameter] ) -> None: '''simple docstring''' if self.temp_stored_params is None: raise RuntimeError("""This ExponentialMovingAverage has no `store()`ed weights """ """to `restore()`""" ) for c_param, param in zip(self.temp_stored_params , lowerCAmelCase_ ): param.data.copy_(c_param.data ) # Better memory-wise. 
A__ : List[str] =None def lowercase__ ( self : List[str] , lowerCAmelCase_ : dict ) -> None: '''simple docstring''' A__ : List[Any] =copy.deepcopy(lowerCAmelCase_ ) A__ : List[Any] =state_dict.get("""decay""" , self.decay ) if self.decay < 0.0 or self.decay > 1.0: raise ValueError("""Decay must be between 0 and 1""" ) A__ : List[Any] =state_dict.get("""min_decay""" , self.min_decay ) if not isinstance(self.min_decay , lowerCAmelCase_ ): raise ValueError("""Invalid min_decay""" ) A__ : Tuple =state_dict.get("""optimization_step""" , self.optimization_step ) if not isinstance(self.optimization_step , lowerCAmelCase_ ): raise ValueError("""Invalid optimization_step""" ) A__ : Any =state_dict.get("""update_after_step""" , self.update_after_step ) if not isinstance(self.update_after_step , lowerCAmelCase_ ): raise ValueError("""Invalid update_after_step""" ) A__ : str =state_dict.get("""use_ema_warmup""" , self.use_ema_warmup ) if not isinstance(self.use_ema_warmup , lowerCAmelCase_ ): raise ValueError("""Invalid use_ema_warmup""" ) A__ : str =state_dict.get("""inv_gamma""" , self.inv_gamma ) if not isinstance(self.inv_gamma , (float, int) ): raise ValueError("""Invalid inv_gamma""" ) A__ : Tuple =state_dict.get("""power""" , self.power ) if not isinstance(self.power , (float, int) ): raise ValueError("""Invalid power""" ) A__ : Tuple =state_dict.get("""shadow_params""" , lowerCAmelCase_ ) if shadow_params is not None: A__ : List[str] =shadow_params if not isinstance(self.shadow_params , lowerCAmelCase_ ): raise ValueError("""shadow_params must be a list""" ) if not all(isinstance(lowerCAmelCase_ , torch.Tensor ) for p in self.shadow_params ): raise ValueError("""shadow_params must all be Tensors""" )
687
1
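# A minimal, self-contained sketch of the shadow-parameter update that
# `step()` above performs: s <- s - (1 - decay) * (s - p). The toy model
# and the decay value are illustrative assumptions, not part of the dataset.
import torch

model = torch.nn.Linear(4, 4)
shadow_params = [p.clone().detach() for p in model.parameters()]
decay = 0.9999

with torch.no_grad():
    for s_param, param in zip(shadow_params, model.parameters()):
        if param.requires_grad:
            s_param.sub_((1 - decay) * (s_param - param))
        else:
            s_param.copy_(param)

# Copying the averaged weights back into the model mirrors `copy_to()` above.
for s_param, param in zip(shadow_params, model.parameters()):
    param.data.copy_(s_param.to(param.device).data)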
'''simple docstring''' import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml __snake_case : Tuple = logging.get_logger(__name__) def __lowerCamelCase ( __snake_case : bool, __snake_case : bool ) -> Any: """simple docstring""" def run_func(__snake_case : Dict ): @wraps(__snake_case ) def run_in_eager_mode(*__snake_case : Union[str, Any], **__snake_case : Union[str, Any] ): return func(*__snake_case, **__snake_case ) @wraps(__snake_case ) @tf.function(experimental_compile=__snake_case ) def run_in_graph_mode(*__snake_case : Any, **__snake_case : Optional[int] ): return func(*__snake_case, **__snake_case ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def __lowerCamelCase ( __snake_case : int, __snake_case : int, __snake_case : int ) -> ["tf.Tensor"]: """simple docstring""" A__ : Union[str, Any] =random.Random() A__ : Dict =[rng.randint(0, vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(__snake_case, shape=(batch_size, sequence_length), dtype=tf.intaa ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = 42 __snake_case = 42 __snake_case = "TensorFlow" @property def lowercase__ ( self : Any ) -> Union[str, Any]: '''simple docstring''' return tf.__version__ def lowercase__ ( self : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> float: '''simple docstring''' # initialize GPU on separate process A__ : List[Any] =self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) A__ : str =self._prepare_inference_func(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) return self._measure_speed(_inference ) def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> float: '''simple docstring''' A__ : Any =self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) A__ : List[str] =self._prepare_train_func(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) return self._measure_speed(_train ) def lowercase__ ( self : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> [Memory, Optional[MemorySummary]]: '''simple docstring''' # initialize GPU on separate process if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCAmelCase_ ) A__ : Dict =self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) A__ : Tuple =self._prepare_inference_func(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) return self._measure_memory(_inference ) def lowercase__ 
( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> [Memory, Optional[MemorySummary]]: '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCAmelCase_ ) A__ : Dict =self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) A__ : Union[str, Any] =self._prepare_train_func(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) return self._measure_memory(_train ) def lowercase__ ( self : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> Callable[[], None]: '''simple docstring''' A__ : Union[str, Any] =self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) A__ : int =( hasattr(lowerCAmelCase_ , """architectures""" ) and isinstance(config.architectures , lowerCAmelCase_ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: A__ : Dict ="""TF""" + config.architectures[0] # prepend 'TF' for tensorflow model A__ : Optional[int] =__import__("""transformers""" , fromlist=[model_class] ) A__ : int =getattr(lowerCAmelCase_ , lowerCAmelCase_ ) A__ : Dict =model_cls(lowerCAmelCase_ ) except ImportError: raise ImportError( f"{model_class} does not exist. If you just want to test the pretrained model, you might want to" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: A__ : Optional[int] =TF_MODEL_MAPPING[config.__class__](lowerCAmelCase_ ) # encoder-decoder has vocab size saved differently A__ : Dict =config.vocab_size if hasattr(lowerCAmelCase_ , """vocab_size""" ) else config.encoder.vocab_size A__ : int =random_input_ids(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(lowerCAmelCase_ , decoder_input_ids=lowerCAmelCase_ , training=lowerCAmelCase_ ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(lowerCAmelCase_ , training=lowerCAmelCase_ ) A__ : int =encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def lowercase__ ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> Callable[[], None]: '''simple docstring''' A__ : Optional[int] =self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) A__ : str =( hasattr(lowerCAmelCase_ , """architectures""" ) and isinstance(config.architectures , lowerCAmelCase_ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: A__ : Optional[Any] ="""TF""" + config.architectures[0] # prepend 'TF' for tensorflow model A__ : int =__import__("""transformers""" , fromlist=[model_class] ) A__ : str =getattr(lowerCAmelCase_ , lowerCAmelCase_ ) A__ : Dict =model_cls(lowerCAmelCase_ ) except ImportError: raise ImportError( f"{model_class} does not exist. 
If you just want to test the pretrained model, you might want to" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: A__ : Dict =TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](lowerCAmelCase_ ) # encoder-decoder has vocab size saved differently A__ : Dict =config.vocab_size if hasattr(lowerCAmelCase_ , """vocab_size""" ) else config.encoder.vocab_size A__ : Any =random_input_ids(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): A__ : int =model(lowerCAmelCase_ , decoder_input_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ )[0] A__ : Tuple =tf.gradients(lowerCAmelCase_ , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): A__ : Dict =model(lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ )[0] A__ : Union[str, Any] =tf.gradients(lowerCAmelCase_ , model.trainable_variables ) return gradients A__ : Optional[Any] =encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Any ) -> float: '''simple docstring''' with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" ) timeit.repeat(lowerCAmelCase_ , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average A__ : Dict =timeit.repeat( lowerCAmelCase_ , repeat=self.args.repeat , number=10 , ) return min(lowerCAmelCase_ ) / 10.0 except ResourceExhaustedError as e: self.print_fn(f"Doesn't fit on GPU. {e}" ) def lowercase__ ( self : int , lowerCAmelCase_ : Callable[[], None] ) -> [Memory, MemorySummary]: '''simple docstring''' logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) A__ : Optional[Any] =start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) A__ : str ="""N/A""" else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() A__ : Optional[int] =nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) A__ : Union[str, Any] =nvml.nvmlDeviceGetMemoryInfo(lowerCAmelCase_ ) A__ : Any =meminfo.used A__ : Tuple =Memory(lowerCAmelCase_ ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) A__ : Dict =None else: A__ : int =measure_peak_memory_cpu(lowerCAmelCase_ ) A__ : Any =Memory(lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else memory_bytes if self.args.trace_memory_line_by_line: A__ : Tuple =stop_memory_tracing(lowerCAmelCase_ ) if memory is None: A__ : Union[str, Any] =summary.total else: A__ : List[Any] =None return memory, summary except ResourceExhaustedError as e: self.print_fn(f"Doesn't fit on GPU. {e}" ) return "N/A", None
687
'''simple docstring''' from __future__ import annotations import requests __snake_case : Union[str, Any] = set( 'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split() ) def __lowerCamelCase ( __snake_case : str, __snake_case : int = 1, __snake_case : str = "new", __snake_case : list | None = None ) -> dict: """simple docstring""" A__ : Union[str, Any] =wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(__snake_case ) - valid_terms ) ): A__ : Optional[int] =f"Invalid search term: {invalid_search_terms}" raise ValueError(__snake_case ) A__ : Tuple =requests.get( f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}", headers={"""User-agent""": """A random string"""}, ) if response.status_code == 429: raise requests.HTTPError A__ : Tuple =response.json() if not wanted_data: return {id_: data["data"]["children"][id_] for id_ in range(__snake_case )} A__ : Tuple ={} for id_ in range(__snake_case ): A__ : List[Any] ={ item: data["""data"""]["""children"""][id_]["""data"""][item] for item in wanted_data } return data_dict if __name__ == "__main__": # If you get Error 429, that means you are rate limited.Try after some time print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
687
1
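# The `_measure_speed` helper above times the benchmark callable with
# `timeit.repeat` and keeps the minimum, as the stdlib docs recommend.
# A standalone sketch; the workload and run counts are illustrative.
import timeit

def measure_min_runtime(func, repeat=3, number=10):
    # min() is preferred over mean(): slower runs reflect interference
    # from other processes, not the code being measured.
    runtimes = timeit.repeat(func, repeat=repeat, number=number)
    return min(runtimes) / number

print(f"{measure_min_runtime(lambda: sum(range(10_000))):.6f} s per call")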
'''simple docstring''' def __lowerCamelCase ( __snake_case : int ) -> int: """simple docstring""" assert isinstance(__snake_case, int ), f"The input value of [n={number}] is not an integer" if number == 1: return 2 elif number < 1: A__ : Any =f"The input value of [n={number}] has to be > 0" raise ValueError(__snake_case ) else: A__ : str =sylvester(number - 1 ) A__ : List[str] =num - 1 A__ : Optional[Any] =num return lower * upper + 1 if __name__ == "__main__": print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
687
'''simple docstring''' import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) __snake_case : Union[str, Any] = logging.getLogger(__name__) __snake_case : int = tf.data.AUTOTUNE def __lowerCamelCase ( ) -> List[Any]: """simple docstring""" A__ : str =argparse.ArgumentParser(description="""Train a masked language model on TPU.""" ) parser.add_argument( """--pretrained_model_config""", type=__snake_case, default="""roberta-base""", help="""The model config to use. Note that we don't copy the model's weights, only the config!""", ) parser.add_argument( """--tokenizer""", type=__snake_case, default="""unigram-tokenizer-wikitext""", help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""", ) parser.add_argument( """--per_replica_batch_size""", type=__snake_case, default=8, help="""Batch size per TPU core.""", ) parser.add_argument( """--no_tpu""", action="""store_true""", help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""", ) parser.add_argument( """--tpu_name""", type=__snake_case, help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""", default="""local""", ) parser.add_argument( """--tpu_zone""", type=__snake_case, help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""", ) parser.add_argument( """--gcp_project""", type=__snake_case, help="""Google cloud project name. Only used for non-Colab TPU nodes.""" ) parser.add_argument( """--bfloat16""", action="""store_true""", help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""", ) parser.add_argument( """--train_dataset""", type=__snake_case, help="""Path to training dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""", ) parser.add_argument( """--shuffle_buffer_size""", type=__snake_case, default=2**18, help="""Size of the shuffle buffer (in samples)""", ) parser.add_argument( """--eval_dataset""", type=__snake_case, help="""Path to evaluation dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""", ) parser.add_argument( """--num_epochs""", type=__snake_case, default=1, help="""Number of epochs to train for.""", ) parser.add_argument( """--learning_rate""", type=__snake_case, default=1E-4, help="""Learning rate to use for training.""", ) parser.add_argument( """--weight_decay_rate""", type=__snake_case, default=1E-3, help="""Weight decay rate to use for training.""", ) parser.add_argument( """--max_length""", type=__snake_case, default=512, help="""Maximum length of tokenized sequences. 
Should match the setting used in prepare_tfrecord_shards.py""", ) parser.add_argument( """--mlm_probability""", type=__snake_case, default=0.15, help="""Fraction of tokens to mask during training.""", ) parser.add_argument("""--output_dir""", type=__snake_case, required=__snake_case, help="""Path to save model checkpoints to.""" ) parser.add_argument("""--hub_model_id""", type=__snake_case, help="""Model ID to upload to on the Hugging Face Hub.""" ) A__ : Optional[Any] =parser.parse_args() return args def __lowerCamelCase ( __snake_case : Optional[Any] ) -> Union[str, Any]: """simple docstring""" try: if args.tpu_name: A__ : List[Any] =tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name, zone=args.tpu_zone, project=args.gcp_project ) else: A__ : Optional[int] =tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( """Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """ """--gcp_project. When running on a TPU VM, use --tpu_name local.""" ) tf.config.experimental_connect_to_cluster(__snake_case ) tf.tpu.experimental.initialize_tpu_system(__snake_case ) return tpu def __lowerCamelCase ( __snake_case : Optional[int] ) -> Dict: """simple docstring""" A__ : Any =0 for file in file_list: A__ : Optional[int] =file.split("""/""" )[-1] A__ : Union[str, Any] =re.search(r"""-\d+-(\d+)\.tfrecord""", __snake_case ).group(1 ) A__ : str =int(__snake_case ) num_samples += sample_count return num_samples def __lowerCamelCase ( __snake_case : List[str], __snake_case : int, __snake_case : Any, __snake_case : List[Any], __snake_case : int, __snake_case : List[Any]=None ) -> Optional[int]: """simple docstring""" A__ : List[str] =count_samples(__snake_case ) A__ : Union[str, Any] =tf.data.Dataset.from_tensor_slices(__snake_case ) if shuffle: A__ : Optional[int] =dataset.shuffle(len(__snake_case ) ) A__ : List[str] =tf.data.TFRecordDataset(__snake_case, num_parallel_reads=__snake_case ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here A__ : int =dataset.apply(tf.data.experimental.assert_cardinality(__snake_case ) ) A__ : Any =dataset.map(__snake_case, num_parallel_calls=__snake_case ) if shuffle: assert shuffle_buffer_size is not None A__ : List[Any] =dataset.shuffle(args.shuffle_buffer_size ) A__ : int =dataset.batch(__snake_case, drop_remainder=__snake_case ) A__ : Optional[int] =dataset.map(__snake_case, num_parallel_calls=__snake_case ) A__ : Tuple =dataset.prefetch(__snake_case ) return dataset def __lowerCamelCase ( __snake_case : List[Any] ) -> Tuple: """simple docstring""" if not args.no_tpu: A__ : Dict =initialize_tpu(__snake_case ) A__ : int =tf.distribute.TPUStrategy(__snake_case ) else: A__ : List[str] =tf.distribute.OneDeviceStrategy(device="""/gpu:0""" ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" ) A__ : Tuple =AutoTokenizer.from_pretrained(args.tokenizer ) A__ : List[str] =AutoConfig.from_pretrained(args.pretrained_model_config ) A__ : Optional[Any] =tokenizer.vocab_size A__ : Tuple =tf.io.gfile.glob(os.path.join(args.train_dataset, """*.tfrecord""" ) ) if not training_records: raise ValueError(f"No .tfrecord files found in {args.train_dataset}." ) A__ : Optional[Any] =tf.io.gfile.glob(os.path.join(args.eval_dataset, """*.tfrecord""" ) ) if not eval_records: raise ValueError(f"No .tfrecord files found in {args.eval_dataset}." 
) A__ : Optional[Any] =count_samples(__snake_case ) A__ : str =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) A__ : str =steps_per_epoch * args.num_epochs with strategy.scope(): A__ : List[str] =TFAutoModelForMaskedLM.from_config(__snake_case ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built A__ , A__ : Optional[Any] =create_optimizer( num_train_steps=__snake_case, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=__snake_case, metrics=["""accuracy"""] ) def decode_fn(__snake_case : Tuple ): A__ : Dict ={ """input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa, shape=(args.max_length,) ), """attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa, shape=(args.max_length,) ), } return tf.io.parse_single_example(__snake_case, __snake_case ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. A__ : List[Any] =DataCollatorForLanguageModeling( tokenizer=__snake_case, mlm_probability=args.mlm_probability, mlm=__snake_case, return_tensors="""tf""" ) def mask_with_collator(__snake_case : Optional[int] ): # TF really needs an isin() function A__ : Union[str, Any] =( ~tf.cast(batch["""attention_mask"""], tf.bool ) | (batch["""input_ids"""] == tokenizer.cls_token_id) | (batch["""input_ids"""] == tokenizer.sep_token_id) ) A__ , A__ : List[str] =data_collator.tf_mask_tokens( batch["""input_ids"""], vocab_size=len(__snake_case ), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=__snake_case, ) return batch A__ : List[Any] =args.per_replica_batch_size * strategy.num_replicas_in_sync A__ : List[str] =prepare_dataset( __snake_case, decode_fn=__snake_case, mask_fn=__snake_case, batch_size=__snake_case, shuffle=__snake_case, shuffle_buffer_size=args.shuffle_buffer_size, ) A__ : List[str] =prepare_dataset( __snake_case, decode_fn=__snake_case, mask_fn=__snake_case, batch_size=__snake_case, shuffle=__snake_case, ) A__ : Tuple =[] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=__snake_case ) ) model.fit( __snake_case, validation_data=__snake_case, epochs=args.num_epochs, callbacks=__snake_case, ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": __snake_case : str = parse_args() main(args)
687
1
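# The recursive `sylvester` above implements a(1) = 2 and
# a(k + 1) = a(k) ** 2 - a(k) + 1 (via `lower * upper + 1`). An iterative
# sketch of the same recurrence, with a quick sanity check:
def sylvester_iterative(n: int) -> int:
    if n < 1:
        raise ValueError(f"The input value of [n={n}] has to be > 0")
    num = 2
    for _ in range(n - 1):
        num = num * num - num + 1
    return num

assert [sylvester_iterative(i) for i in range(1, 5)] == [2, 3, 7, 43]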
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __snake_case : Union[str, Any] = logging.get_logger(__name__) __snake_case : Optional[int] = { 'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json', } class lowerCamelCase ( lowercase_ , lowercase_ ): '''simple docstring''' __snake_case = 'bit' __snake_case = ['preactivation', 'bottleneck'] __snake_case = ['SAME', 'VALID'] def __init__( self : List[str] , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : int=64 , lowerCAmelCase_ : Optional[int]=[2_56, 5_12, 10_24, 20_48] , lowerCAmelCase_ : str=[3, 4, 6, 3] , lowerCAmelCase_ : Optional[Any]="preactivation" , lowerCAmelCase_ : str="relu" , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Dict=32 , lowerCAmelCase_ : Tuple=0.0 , lowerCAmelCase_ : int=False , lowerCAmelCase_ : Optional[Any]=32 , lowerCAmelCase_ : Tuple=1 , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : int , ) -> Optional[Any]: '''simple docstring''' super().__init__(**lowerCAmelCase_ ) if layer_type not in self.layer_types: raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" ) if global_padding is not None: if global_padding.upper() in self.supported_padding: A__ : List[Any] =global_padding.upper() else: raise ValueError(f"Padding strategy {global_padding} not supported" ) A__ : List[Any] =num_channels A__ : Tuple =embedding_size A__ : Union[str, Any] =hidden_sizes A__ : List[str] =depths A__ : Optional[Any] =layer_type A__ : int =hidden_act A__ : int =global_padding A__ : int =num_groups A__ : str =drop_path_rate A__ : str =embedding_dynamic_padding A__ : Dict =output_stride A__ : Optional[int] =width_factor A__ : List[str] =["""stem"""] + [f"stage{idx}" for idx in range(1 , len(lowerCAmelCase_ ) + 1 )] A__ , A__ : Union[str, Any] =get_aligned_output_features_output_indices( out_features=lowerCAmelCase_ , out_indices=lowerCAmelCase_ , stage_names=self.stage_names )
687
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __snake_case : Union[str, Any] = { 'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Any = [ 'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST', 'FalconForCausalLM', 'FalconModel', 'FalconPreTrainedModel', 'FalconForSequenceClassification', 'FalconForTokenClassification', 'FalconForQuestionAnswering', ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys __snake_case : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
687
1
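# The Falcon `__init__` above defers heavy imports through `_LazyModule`.
# The same effect can be sketched with a PEP 562 module-level `__getattr__`;
# the package and symbol names here are hypothetical, not transformers API.
# (imagine this living in a hypothetical mypackage/__init__.py)
import importlib

_import_structure = {"configuration": ["MyConfig"], "modeling": ["MyModel"]}
_name_to_module = {name: mod for mod, names in _import_structure.items() for name in names}

def __getattr__(name):  # only called for attributes not found normally
    module_name = _name_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    submodule = importlib.import_module(f".{module_name}", __name__)
    return getattr(submodule, name)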
'''simple docstring''' from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function __snake_case : Any = 1.0_54_57_18_17E-34 # unit of ℏ : J * s __snake_case : List[Any] = 3E8 # unit of c : m * s^-1 def __lowerCamelCase ( __snake_case : float, __snake_case : float, __snake_case : float ) -> dict[str, float]: """simple docstring""" if (force, area, distance).count(0 ) != 1: raise ValueError("""One and only one argument must be 0""" ) if force < 0: raise ValueError("""Magnitude of force can not be negative""" ) if distance < 0: raise ValueError("""Distance can not be negative""" ) if area < 0: raise ValueError("""Area can not be negative""" ) if force == 0: A__ : Tuple =(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 240 * (distance) ** 4 ) return {"force": force} elif area == 0: A__ : str =(240 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: A__ : int =( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError("""One and only one argument must be 0""" ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
687
'''simple docstring''' import os try: from .build_directory_md import good_file_paths except ImportError: from build_directory_md import good_file_paths # type: ignore __snake_case : Optional[int] = list(good_file_paths()) assert filepaths, "good_file_paths() failed!" __snake_case : Tuple = [file for file in filepaths if file != file.lower()] if upper_files: print(F"""{len(upper_files)} files contain uppercase characters:""") print('\n'.join(upper_files) + '\n') __snake_case : int = [file for file in filepaths if ' ' in file] if space_files: print(F"""{len(space_files)} files contain space characters:""") print('\n'.join(space_files) + '\n') __snake_case : Optional[Any] = [file for file in filepaths if '-' in file] if hyphen_files: print(F"""{len(hyphen_files)} files contain hyphen characters:""") print('\n'.join(hyphen_files) + '\n') __snake_case : Dict = [file for file in filepaths if os.sep not in file] if nodir_files: print(F"""{len(nodir_files)} files are not in a directory:""") print('\n'.join(nodir_files) + '\n') __snake_case : Tuple = len(upper_files + space_files + hyphen_files + nodir_files) if bad_files: import sys sys.exit(bad_files)
687
1
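# A quick numeric check of the Casimir formula above,
# F = (hbar * c * pi^2 * A) / (240 * d^4); the plate geometry below is an
# illustrative assumption.
from math import pi

REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # J * s
SPEED_OF_LIGHT = 3e8  # m * s^-1

area = 4e-4  # m^2: 2 cm x 2 cm plates
distance = 1e-6  # m: one micrometre apart
force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
    240 * distance**4
)
print(f"Attractive Casimir force: {force:.3e} N")  # ~5.2e-7 N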
'''simple docstring''' import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging __snake_case : List[Any] = logging.get_logger(__name__) logging.set_verbosity_info() def __lowerCamelCase ( __snake_case : str, __snake_case : str ) -> Dict: """simple docstring""" if "xprophetnet" in prophetnet_checkpoint_path: A__ : Dict =XLMProphetNetForConditionalGenerationOld.from_pretrained(__snake_case ) A__ , A__ : Tuple =XLMProphetNetForConditionalGeneration.from_pretrained( __snake_case, output_loading_info=__snake_case ) else: A__ : str =ProphetNetForConditionalGenerationOld.from_pretrained(__snake_case ) A__ , A__ : Dict =ProphetNetForConditionalGeneration.from_pretrained( __snake_case, output_loading_info=__snake_case ) A__ : Dict =["""key_proj""", """value_proj""", """query_proj"""] A__ : int ={ """self_attn""": """ngram_self_attn""", """cross_attn""": """encoder_attn""", """cross_attn_layer_norm""": """encoder_attn_layer_norm""", """feed_forward_layer_norm""": """final_layer_norm""", """feed_forward""": """""", """intermediate""": """fc1""", """output""": """fc2""", """key_proj""": """k_proj""", """query_proj""": """q_proj""", """value_proj""": """v_proj""", """word_embeddings""": """embed_tokens""", """embeddings_layer_norm""": """emb_layer_norm""", """relative_pos_embeddings""": """relative_linear""", """ngram_embeddings""": """ngram_input_embed""", """position_embeddings""": """embed_positions""", } for key in loading_info["missing_keys"]: A__ : List[str] =key.split(""".""" ) if attributes[0] == "lm_head": A__ : int =prophet A__ : Optional[Any] =prophet_old else: A__ : str =prophet.prophetnet A__ : List[Any] =prophet_old.model A__ : Any =False for attribute in attributes: if attribute in mapping: A__ : int =mapping[attribute] if not hasattr(__snake_case, __snake_case ) and len(__snake_case ) > 0: A__ : str =attribute elif hasattr(__snake_case, __snake_case ): A__ : Optional[Any] =attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" A__ : Optional[int] =old_model.weight logger.info(f"{attribute} is initialized." ) A__ : Dict =True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
A__ : Union[str, Any] =old_model.bias logger.info(f"{attribute} is initialized" ) A__ : Tuple =True break elif attribute in special_keys and hasattr(__snake_case, """in_proj_weight""" ): A__ : Optional[Any] =old_model.in_proj_weight.shape[0] // 3 A__ : Dict =getattr(__snake_case, __snake_case ) assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": A__ : List[str] =nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) A__ : List[str] =nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": A__ : Tuple =nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) A__ : Optional[Any] =nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": A__ : Optional[Any] =nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) A__ : Tuple =nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) A__ : Dict =True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings." A__ : Optional[Any] =nn.Parameter(old_model.embed_positions.weight[:512, :] ) A__ : int =True break if attribute.isdigit(): A__ : Any =model[int(__snake_case )] A__ : Optional[int] =old_model[int(__snake_case )] else: A__ : int =getattr(__snake_case, __snake_case ) if old_attribute == "": A__ : Tuple =old_model else: if not hasattr(__snake_case, __snake_case ): raise ValueError(f"{old_model} does not have {old_attribute}" ) A__ : Dict =getattr(__snake_case, __snake_case ) if not is_key_init: raise ValueError(f"{key} was not correctly initialized!" ) print(f"Saving model to {pytorch_dump_folder_path}" ) prophet.save_pretrained(__snake_case ) if __name__ == "__main__": __snake_case : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path to the official PyTorch dump.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __snake_case : Optional[int] = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
687
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() __snake_case : List[Any] = logging.get_logger(__name__) def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : List[str]=False ) -> str: """simple docstring""" A__ : int =[] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """vit.embeddings.cls_token"""), ("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" A__ : int =[(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Optional[Any], __snake_case : Tuple=False ) -> Optional[Any]: """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: A__ : Any ="""""" else: A__ : Optional[int] ="""vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A__ : str =state_dict.pop(f"blocks.{i}.attn.qkv.weight" ) A__ : Optional[Any] =state_dict.pop(f"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict A__ : Optional[int] =in_proj_weight[ : config.hidden_size, : ] A__ : str =in_proj_bias[: config.hidden_size] A__ : Optional[Any] =in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A__ : Dict =in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A__ : List[Any] =in_proj_weight[ -config.hidden_size :, : ] A__ : Optional[Any] =in_proj_bias[-config.hidden_size :] def 
__lowerCamelCase ( __snake_case : Optional[Any] ) -> Union[str, Any]: """simple docstring""" A__ : List[Any] =["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(__snake_case, __snake_case ) def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : List[Any], __snake_case : List[str] ) -> Union[str, Any]: """simple docstring""" A__ : Dict =dct.pop(__snake_case ) A__ : Tuple =val def __lowerCamelCase ( ) -> int: """simple docstring""" A__ : Tuple ="""http://images.cocodataset.org/val2017/000000039769.jpg""" A__ : Tuple =Image.open(requests.get(__snake_case, stream=__snake_case ).raw ) return im @torch.no_grad() def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Tuple, __snake_case : List[str]=True ) -> str: """simple docstring""" A__ : Tuple =ViTConfig() # patch_size if model_name[-1] == "8": A__ : Optional[Any] =8 # set labels if required if not base_model: A__ : Optional[Any] =1_000 A__ : str ="""huggingface/label-files""" A__ : Any ="""imagenet-1k-id2label.json""" A__ : Tuple =json.load(open(hf_hub_download(__snake_case, __snake_case, repo_type="""dataset""" ), """r""" ) ) A__ : List[str] ={int(__snake_case ): v for k, v in idalabel.items()} A__ : List[Any] =idalabel A__ : List[Any] ={v: k for k, v in idalabel.items()} # size of the architecture if model_name in ["dino_vits8", "dino_vits16"]: A__ : str =384 A__ : Optional[Any] =1_536 A__ : Optional[Any] =12 A__ : Union[str, Any] =6 # load original model from torch hub A__ : List[Any] =torch.hub.load("""facebookresearch/dino:main""", __snake_case ) original_model.eval() # load state_dict of original model, remove and rename some keys A__ : List[str] =original_model.state_dict() if base_model: remove_classification_head_(__snake_case ) A__ : Union[str, Any] =create_rename_keys(__snake_case, base_model=__snake_case ) for src, dest in rename_keys: rename_key(__snake_case, __snake_case, __snake_case ) read_in_q_k_v(__snake_case, __snake_case, __snake_case ) # load HuggingFace model if base_model: A__ : List[str] =ViTModel(__snake_case, add_pooling_layer=__snake_case ).eval() else: A__ : List[str] =ViTForImageClassification(__snake_case ).eval() model.load_state_dict(__snake_case ) # Check outputs on an image, prepared by ViTImageProcessor A__ : Union[str, Any] =ViTImageProcessor() A__ : Optional[int] =image_processor(images=prepare_img(), return_tensors="""pt""" ) A__ : Union[str, Any] =encoding["""pixel_values"""] A__ : Union[str, Any] =model(__snake_case ) if base_model: A__ : List[str] =original_model(__snake_case ) assert torch.allclose(__snake_case, outputs.last_hidden_state[:, 0, :], atol=1E-1 ) else: A__ : Optional[int] =original_model(__snake_case ) assert logits.shape == outputs.logits.shape assert torch.allclose(__snake_case, outputs.logits, atol=1E-3 ) Path(__snake_case ).mkdir(exist_ok=__snake_case ) print(f"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(__snake_case ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(__snake_case ) if __name__ == "__main__": __snake_case : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='dino_vitb16', type=str, help='Name of the model trained with DINO you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' 
) parser.add_argument( '--base_model', action='store_true', help='Whether to only convert the base model (no projection head weights).', ) parser.set_defaults(base_model=True) __snake_case : Tuple = parser.parse_args() convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
687
1
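# `read_in_q_k_v` above slices timm's fused attention projection, a single
# (3 * hidden, hidden) matrix plus bias, into separate query/key/value
# weights. A self-contained sketch with random tensors (sizes illustrative):
import torch

hidden_size = 768
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
in_proj_bias = torch.randn(3 * hidden_size)

query_w = in_proj_weight[:hidden_size, :]
key_w = in_proj_weight[hidden_size : 2 * hidden_size, :]
value_w = in_proj_weight[-hidden_size:, :]
query_b, key_b, value_b = in_proj_bias.chunk(3)

assert query_w.shape == key_w.shape == value_w.shape == (hidden_size, hidden_size)
assert torch.equal(value_b, in_proj_bias[-hidden_size:])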
'''simple docstring''' import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin __snake_case : int = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.plbart.modeling_plbart import shift_tokens_right __snake_case : List[str] = 5_0003 __snake_case : Dict = 5_0002 @require_sentencepiece @require_tokenizers class lowerCamelCase ( lowercase_ , unittest.TestCase ): '''simple docstring''' __snake_case = PLBartTokenizer __snake_case = None __snake_case = False def lowercase__ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing A__ : Tuple =PLBartTokenizer(lowerCAmelCase_ , language_codes="""base""" , keep_accents=lowerCAmelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase__ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' A__ : Union[str, Any] =PLBartTokenizer(lowerCAmelCase_ , language_codes="""base""" , keep_accents=lowerCAmelCase_ ) A__ : Optional[Any] =tokenizer.tokenize("""This is a test""" ) self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) A__ : Tuple =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) A__ : Any =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) A__ : str =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) A__ : Optional[Any] =tokenizer.vocab_size A__ : Dict =[tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 4 , lowerCAmelCase_ )] self.assertListEqual(lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] ) A__ : Dict ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go""" A__ : int =tokenizer(lowerCAmelCase_ ).input_ids self.assertEqual( tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , ) def lowercase__ ( self : Any ) -> str: '''simple docstring''' A__ : int =PLBartTokenizer(lowerCAmelCase_ , language_codes="""multi""" , keep_accents=lowerCAmelCase_ ) A__ : Dict 
=tokenizer.tokenize("""This is a test""" ) self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) A__ : Dict =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) A__ : str =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) A__ : Dict =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) A__ : Tuple =tokenizer.vocab_size A__ : Dict =[tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 7 , lowerCAmelCase_ )] self.assertListEqual( lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] ) A__ : Any ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go""" A__ : int =tokenizer(lowerCAmelCase_ ).input_ids self.assertEqual( tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , ) @require_torch @require_sentencepiece @require_tokenizers class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' __snake_case = 'uclanlp/plbart-python-en_XX' __snake_case = [ 'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])', 'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])', ] __snake_case = [ 'Returns the maximum value of a b c.', 'Sums the values of a b c.', ] __snake_case = [ 134, 5452, 3_3460, 3_3441, 3_3463, 3_3465, 3_3463, 3_3449, 988, 20, 3_3456, 19, 3_3456, 771, 39, 4258, 889, 3318, 3_3441, 3_3463, 3_3465, 3_3463, 3_3449, 2471, 2, PYTHON_CODE, ] @classmethod def lowercase__ ( cls : Optional[int] ) -> str: '''simple docstring''' A__ : PLBartTokenizer =PLBartTokenizer.from_pretrained( cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" ) A__ : Optional[Any] =1 return cls def lowercase__ ( self : str ) -> Optional[Any]: '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 5_00_01 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 5_00_02 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 5_00_03 ) def lowercase__ ( self : int ) -> List[str]: '''simple docstring''' A__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ ) def lowercase__ ( self : int ) -> 
Optional[int]: '''simple docstring''' self.assertIn(lowerCAmelCase_ , self.tokenizer.all_special_ids ) A__ : Tuple =[EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2] A__ : Any =self.tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ ) A__ : Optional[int] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase_ ) def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' A__ : Optional[int] =["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20] self.assertIsInstance(src_text[0] , lowerCAmelCase_ ) A__ : str =10 A__ : Optional[Any] =self.tokenizer(lowerCAmelCase_ , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , lowerCAmelCase_ ) self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ ) def lowercase__ ( self : str ) -> List[Any]: '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [5_00_04, 5_00_01] ) def lowercase__ ( self : Tuple ) -> str: '''simple docstring''' A__ : Tuple =tempfile.mkdtemp() A__ : Tuple =self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowerCAmelCase_ ) A__ : Optional[Any] =PLBartTokenizer.from_pretrained(lowerCAmelCase_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase_ ) @require_torch def lowercase__ ( self : Any ) -> Any: '''simple docstring''' A__ : List[str] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , return_tensors="""pt""" ) A__ : str =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] ) self.assertEqual(batch.decoder_input_ids[1][0] , lowerCAmelCase_ ) self.assertEqual(batch.decoder_input_ids[1][-1] , 2 ) self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] ) @require_torch def lowercase__ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' A__ : Union[str, Any] =self.tokenizer( self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , ) A__ : Any =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual((2, 26) , batch.input_ids.shape ) self.assertEqual((2, 26) , batch.attention_mask.shape ) A__ : List[Any] =batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] ) def lowercase__ ( self : Any ) -> Dict: '''simple docstring''' A__ : Any =self.tokenizer(self.src_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=3 , return_tensors="""pt""" ) A__ : Optional[int] =self.tokenizer( text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=10 , return_tensors="""pt""" ) A__ : Optional[Any] =targets["""input_ids"""] A__ : List[str] =shift_tokens_right(lowerCAmelCase_ , self.tokenizer.pad_token_id ) 
self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def lowercase__ ( self : Any ) -> str: '''simple docstring''' A__ : Any =self.tokenizer._build_translation_inputs( """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" ) self.assertEqual( nested_simplify(lowerCAmelCase_ ) , { # A, test, EOS, en_XX """input_ids""": [[1_50, 2_42, 2, 5_00_03]], """attention_mask""": [[1, 1, 1, 1]], # java """forced_bos_token_id""": 5_00_01, } , )
687
'''simple docstring''' import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging __snake_case : List[Any] = logging.get_logger(__name__) class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = 'linear' __snake_case = 'cosine' __snake_case = 'cosine_with_restarts' __snake_case = 'polynomial' __snake_case = 'constant' __snake_case = 'constant_with_warmup' __snake_case = 'piecewise_constant' def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int = -1 ) -> List[str]: """simple docstring""" return LambdaLR(__snake_case, lambda __snake_case : 1, last_epoch=__snake_case ) def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int = -1 ) -> Dict: """simple docstring""" def lr_lambda(__snake_case : int ): if current_step < num_warmup_steps: return float(__snake_case ) / float(max(1.0, __snake_case ) ) return 1.0 return LambdaLR(__snake_case, __snake_case, last_epoch=__snake_case ) def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : str, __snake_case : int = -1 ) -> Optional[Any]: """simple docstring""" A__ : str ={} A__ : Tuple =step_rules.split(""",""" ) for rule_str in rule_list[:-1]: A__ , A__ : int =rule_str.split(""":""" ) A__ : Optional[int] =int(__snake_case ) A__ : List[Any] =float(__snake_case ) A__ : Union[str, Any] =value A__ : int =float(rule_list[-1] ) def create_rules_function(__snake_case : int, __snake_case : Dict ): def rule_func(__snake_case : int ) -> float: A__ : Any =sorted(rules_dict.keys() ) for i, sorted_step in enumerate(__snake_case ): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func A__ : Any =create_rules_function(__snake_case, __snake_case ) return LambdaLR(__snake_case, __snake_case, last_epoch=__snake_case ) def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Dict, __snake_case : List[Any], __snake_case : Any=-1 ) -> int: """simple docstring""" def lr_lambda(__snake_case : int ): if current_step < num_warmup_steps: return float(__snake_case ) / float(max(1, __snake_case ) ) return max( 0.0, float(num_training_steps - current_step ) / float(max(1, num_training_steps - num_warmup_steps ) ) ) return LambdaLR(__snake_case, __snake_case, __snake_case ) def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int, __snake_case : float = 0.5, __snake_case : int = -1 ) -> Dict: """simple docstring""" def lr_lambda(__snake_case : Dict ): if current_step < num_warmup_steps: return float(__snake_case ) / float(max(1, __snake_case ) ) A__ : List[str] =float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) ) return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(__snake_case ) * 2.0 * progress )) ) return LambdaLR(__snake_case, __snake_case, __snake_case ) def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int, __snake_case : int = 1, __snake_case : int = -1 ) -> Dict: """simple docstring""" def lr_lambda(__snake_case : int ): if current_step < num_warmup_steps: return float(__snake_case ) / float(max(1, __snake_case ) ) A__ : Union[str, Any] =float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) ) if progress >= 1.0: return 0.0 return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(__snake_case ) * progress) % 1.0) )) ) return LambdaLR(__snake_case, __snake_case, __snake_case ) def 
__lowerCamelCase ( __snake_case : int, __snake_case : int, __snake_case : Optional[int], __snake_case : Optional[int]=1E-7, __snake_case : List[Any]=1.0, __snake_case : Any=-1 ) -> List[Any]: """simple docstring""" A__ : Optional[int] =optimizer.defaults["""lr"""] if not (lr_init > lr_end): raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})" ) def lr_lambda(__snake_case : int ): if current_step < num_warmup_steps: return float(__snake_case ) / float(max(1, __snake_case ) ) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: A__ : List[Any] =lr_init - lr_end A__ : Any =num_training_steps - num_warmup_steps A__ : Tuple =1 - (current_step - num_warmup_steps) / decay_steps A__ : List[str] =lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(__snake_case, __snake_case, __snake_case ) __snake_case : int = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def __lowerCamelCase ( __snake_case : Union[str, SchedulerType], __snake_case : Optimizer, __snake_case : Optional[str] = None, __snake_case : Optional[int] = None, __snake_case : Optional[int] = None, __snake_case : int = 1, __snake_case : float = 1.0, __snake_case : int = -1, ) -> Tuple: """simple docstring""" A__ : Tuple =SchedulerType(__snake_case ) A__ : List[Any] =TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(__snake_case, last_epoch=__snake_case ) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(__snake_case, step_rules=__snake_case, last_epoch=__snake_case ) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument." ) if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(__snake_case, num_warmup_steps=__snake_case, last_epoch=__snake_case ) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(f"{name} requires `num_training_steps`, please provide that argument." ) if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( __snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, num_cycles=__snake_case, last_epoch=__snake_case, ) if name == SchedulerType.POLYNOMIAL: return schedule_func( __snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, power=__snake_case, last_epoch=__snake_case, ) return schedule_func( __snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, last_epoch=__snake_case )
687
1
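A minimal standalone sketch of the step-rule string the piecewise-constant scheduler above parses; it assumes rules of the form "step:multiplier,...,last_multiplier", and the helper name parse_step_rules is hypothetical, not part of the module.

def parse_step_rules(step_rules: str):
    # "1000:0.1,2000:0.01,0.001" -> 0.1 before step 1000, 0.01 before 2000, then 0.001
    rules = {}
    parts = step_rules.split(",")
    for rule in parts[:-1]:
        step, value = rule.split(":")
        rules[int(step)] = float(value)
    last_multiplier = float(parts[-1])
    def multiplier(step: int) -> float:
        for boundary in sorted(rules):
            if step < boundary:
                return rules[boundary]
        return last_multiplier
    return multiplier

lr_mult = parse_step_rules("1000:0.1,2000:0.01,0.001")
assert lr_mult(500) == 0.1 and lr_mult(1500) == 0.01 and lr_mult(5000) == 0.001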
'''simple docstring''' import numpy as np import torch from torch.utils.data import Dataset from utils import logger class lowerCamelCase ( _lowerCamelCase ): '''simple docstring''' def __init__( self : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str] ) -> Any: '''simple docstring''' A__ : Optional[Any] =params A__ : Any =np.array(A__ ) A__ : int =np.array([len(A__ ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self : Any , lowerCAmelCase_ : int ) -> Dict: '''simple docstring''' return (self.token_ids[index], self.lengths[index]) def __len__( self : str ) -> Tuple: '''simple docstring''' return len(self.lengths ) def lowercase__ ( self : List[Any] ) -> Dict: '''simple docstring''' assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def lowercase__ ( self : Any ) -> List[str]: '''simple docstring''' A__ : str =self.params.max_model_input_size A__ : Dict =self.lengths > max_len logger.info(f"Splitting {sum(A__ )} too long sequences." ) def divide_chunks(lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] ): return [l[i : i + n] for i in range(0 , len(A__ ) , A__ )] A__ : int =[] A__ : List[Any] =[] if self.params.mlm: A__ , A__ : Optional[Any] =self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""] else: A__ , A__ : Dict =self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: A__ : List[Any] =[] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: A__ : Optional[int] =np.insert(A__ , 0 , A__ ) if sub_s[-1] != sep_id: A__ : Optional[int] =np.insert(A__ , len(A__ ) , A__ ) assert len(A__ ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(A__ ) new_tok_ids.extend(A__ ) new_lengths.extend([len(A__ ) for l in sub_seqs] ) A__ : Union[str, Any] =np.array(A__ ) A__ : Dict =np.array(A__ ) def lowercase__ ( self : Any ) -> List[str]: '''simple docstring''' A__ : List[str] =len(self ) A__ : str =self.lengths > 11 A__ : Dict =self.token_ids[indices] A__ : Optional[int] =self.lengths[indices] A__ : str =len(self ) logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences." ) def lowercase__ ( self : int ) -> int: '''simple docstring''' if "unk_token" not in self.params.special_tok_ids: return else: A__ : List[str] =self.params.special_tok_ids["""unk_token"""] A__ : Union[str, Any] =len(self ) A__ : List[Any] =np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) A__ : Optional[Any] =(unk_occs / self.lengths) < 0.5 A__ : Tuple =self.token_ids[indices] A__ : Optional[Any] =self.lengths[indices] A__ : Tuple =len(self ) logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%)." 
) def lowercase__ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' if not self.params.is_master: return logger.info(f"{len(self )} sequences" ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : str ) -> Any: '''simple docstring''' A__ : str =[t[0] for t in batch] A__ : Tuple =[t[1] for t in batch] assert len(A__ ) == len(A__ ) # Max for paddings A__ : Any =max(A__ ) # Pad token ids if self.params.mlm: A__ : int =self.params.special_tok_ids["""pad_token"""] else: A__ : str =self.params.special_tok_ids["""unk_token"""] A__ : Tuple =[list(t.astype(A__ ) ) + [pad_idx] * (max_seq_len_ - len(A__ )) for t in token_ids] assert len(tk_ ) == len(A__ ) assert all(len(A__ ) == max_seq_len_ for t in tk_ ) A__ : List[Any] =torch.tensor(tk_ ) # (bs, max_seq_len_) A__ : str =torch.tensor(A__ ) # (bs) return tk_t, lg_t
700
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __snake_case : List[str] = { 'configuration_squeezebert': [ 'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SqueezeBertConfig', 'SqueezeBertOnnxConfig', ], 'tokenization_squeezebert': ['SqueezeBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Optional[Any] = ['SqueezeBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : int = [ 'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'SqueezeBertForMaskedLM', 'SqueezeBertForMultipleChoice', 'SqueezeBertForQuestionAnswering', 'SqueezeBertForSequenceClassification', 'SqueezeBertForTokenClassification', 'SqueezeBertModel', 'SqueezeBertModule', 'SqueezeBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_squeezebert import ( SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertOnnxConfig, ) from .tokenization_squeezebert import SqueezeBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_squeezebert import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, SqueezeBertModule, SqueezeBertPreTrainedModel, ) else: import sys __snake_case : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
687
0
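A simplified, self-contained sketch of the chunking idea used by the long-sequence splitting step above: over-long sequences are cut into windows of max_len - 2 and the special ids are re-attached at each window's edges. split_sequence and its arguments are illustrative names; the real method works on numpy arrays and only inserts boundary tokens where they are missing.

def split_sequence(seq, max_len, cls_id, sep_id):
    body = seq[1:-1]  # drop the existing leading/trailing special tokens
    window = max_len - 2  # leave room to re-add them on every chunk
    chunks = [body[i : i + window] for i in range(0, len(body), window)]
    return [[cls_id] + chunk + [sep_id] for chunk in chunks]

chunks = split_sequence([0, 5, 6, 7, 8, 9, 1], max_len=5, cls_id=0, sep_id=1)
assert all(len(c) <= 5 and c[0] == 0 and c[-1] == 1 for c in chunks)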
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCamelCase ( _snake_case , unittest.TestCase ): '''simple docstring''' __snake_case = DiTPipeline __snake_case = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS __snake_case = PipelineTesterMixin.required_optional_params - { 'latents', 'num_images_per_prompt', 'callback', 'callback_steps', } __snake_case = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS __snake_case = False def lowercase__ ( self : List[Any] ) -> Any: '''simple docstring''' torch.manual_seed(0 ) A__ : List[Any] =TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=lowerCAmelCase__ , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=10_00 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=lowerCAmelCase__ , ) A__ : Union[str, Any] =AutoencoderKL() A__ : Optional[int] =DDIMScheduler() A__ : int ={"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler} return components def lowercase__ ( self : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any]=0 ) -> Tuple: '''simple docstring''' if str(lowerCAmelCase__ ).startswith("""mps""" ): A__ : Union[str, Any] =torch.manual_seed(lowerCAmelCase__ ) else: A__ : Tuple =torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ ) A__ : Optional[int] ={ """class_labels""": [1], """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def lowercase__ ( self : Union[str, Any] ) -> Any: '''simple docstring''' A__ : Tuple ="""cpu""" A__ : Optional[int] =self.get_dummy_components() A__ : Union[str, Any] =self.pipeline_class(**lowerCAmelCase__ ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) A__ : int =self.get_dummy_inputs(lowerCAmelCase__ ) A__ : List[Any] =pipe(**lowerCAmelCase__ ).images A__ : Optional[int] =image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) A__ : Union[str, Any] =np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) A__ : int =np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowerCAmelCase__ , 1e-3 ) def lowercase__ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' self._test_inference_batch_single_identical(relax_max_difference=lowerCAmelCase__ , expected_max_diff=1e-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def lowercase__ ( self : Any ) -> str: '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @require_torch_gpu @slow class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowercase__ ( self : str ) -> Dict: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : List[str] ) -> Optional[Any]: '''simple 
docstring''' A__ : str =torch.manual_seed(0 ) A__ : Dict =DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" ) pipe.to("""cuda""" ) A__ : Optional[int] =["""vase""", """umbrella""", """white shark""", """white wolf"""] A__ : int =pipe.get_label_ids(lowerCAmelCase__ ) A__ : str =pipe(lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=40 , output_type="""np""" ).images for word, image in zip(lowerCAmelCase__ , lowerCAmelCase__ ): A__ : int =load_numpy( f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" ) assert np.abs((expected_image - image).max() ) < 1e-2 def lowercase__ ( self : Optional[int] ) -> List[Any]: '''simple docstring''' A__ : Tuple =DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" ) A__ : Tuple =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("""cuda""" ) A__ : int =["""vase""", """umbrella"""] A__ : Optional[Any] =pipe.get_label_ids(lowerCAmelCase__ ) A__ : Dict =torch.manual_seed(0 ) A__ : Optional[int] =pipe(lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=25 , output_type="""np""" ).images for word, image in zip(lowerCAmelCase__ , lowerCAmelCase__ ): A__ : int =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" f"/dit/{word}_512.npy" ) assert np.abs((expected_image - image).max() ) < 1e-1
701
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __snake_case : Optional[int] = { 'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'], 'tokenization_convbert': ['ConvBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Tuple = ['ConvBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : int = [ 'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConvBertForMaskedLM', 'ConvBertForMultipleChoice', 'ConvBertForQuestionAnswering', 'ConvBertForSequenceClassification', 'ConvBertForTokenClassification', 'ConvBertLayer', 'ConvBertModel', 'ConvBertPreTrainedModel', 'load_tf_weights_in_convbert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Union[str, Any] = [ 'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFConvBertForMaskedLM', 'TFConvBertForMultipleChoice', 'TFConvBertForQuestionAnswering', 'TFConvBertForSequenceClassification', 'TFConvBertForTokenClassification', 'TFConvBertLayer', 'TFConvBertModel', 'TFConvBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig from .tokenization_convbert import ConvBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_convbert_fast import ConvBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convbert import ( CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convbert import ( TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) else: import sys __snake_case : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
687
0
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def __lowerCamelCase ( __snake_case : str, __snake_case : Optional[int]=False ) -> Tuple: """simple docstring""" A__ : Optional[Any] =[] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append( (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") ) # projection layer + position embeddings rename_keys.extend( [ ("""module.cls_token""", """vit.embeddings.cls_token"""), ("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""module.pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""module.norm.weight""", """layernorm.weight"""), ("""module.norm.bias""", """layernorm.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" A__ : Dict =[(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def __lowerCamelCase ( __snake_case : Dict, __snake_case : Optional[Any], __snake_case : Any=False ) -> int: """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: A__ : Optional[int] ="""""" else: A__ : int ="""vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A__ : List[Any] =state_dict.pop(f"module.blocks.{i}.attn.qkv.weight" ) A__ : Dict =state_dict.pop(f"module.blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict A__ : List[Any] =in_proj_weight[ : config.hidden_size, : ] A__ : Any =in_proj_bias[: config.hidden_size] A__ : int =in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A__ : Dict =in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A__ : Tuple =in_proj_weight[ -config.hidden_size :, : ] A__ : Any =in_proj_bias[-config.hidden_size :] def 
__lowerCamelCase ( __snake_case : Tuple ) -> List[str]: """simple docstring""" A__ : List[Any] =["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( __snake_case : Union[str, Any] ) -> List[Any]: """simple docstring""" A__ : int =[ """module.fc.fc1.weight""", """module.fc.fc1.bias""", """module.fc.bn1.weight""", """module.fc.bn1.bias""", """module.fc.bn1.running_mean""", """module.fc.bn1.running_var""", """module.fc.bn1.num_batches_tracked""", """module.fc.fc2.weight""", """module.fc.fc2.bias""", """module.fc.bn2.weight""", """module.fc.bn2.bias""", """module.fc.bn2.running_mean""", """module.fc.bn2.running_var""", """module.fc.bn2.num_batches_tracked""", """module.fc.fc3.weight""", """module.fc.fc3.bias""", ] for k in ignore_keys: state_dict.pop(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( __snake_case : int, __snake_case : int, __snake_case : Dict ) -> Optional[int]: """simple docstring""" A__ : Any =dct.pop(_SCREAMING_SNAKE_CASE ) A__ : Optional[int] =val def __lowerCamelCase ( __snake_case : str, __snake_case : Any ) -> List[str]: """simple docstring""" A__ : int =ViTMSNConfig() A__ : Optional[int] =1_000 A__ : str ="""datasets/huggingface/label-files""" A__ : Optional[int] ="""imagenet-1k-id2label.json""" A__ : List[Any] =json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ), """r""" ) ) A__ : Dict ={int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} A__ : Optional[Any] =idalabel A__ : Dict ={v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: A__ : str =384 A__ : str =1_536 A__ : List[str] =6 elif "l16" in checkpoint_url: A__ : int =1_024 A__ : Tuple =4_096 A__ : Tuple =24 A__ : Optional[int] =16 A__ : List[Any] =0.1 elif "b4" in checkpoint_url: A__ : List[str] =4 elif "l7" in checkpoint_url: A__ : Tuple =7 A__ : str =1_024 A__ : int =4_096 A__ : int =24 A__ : int =16 A__ : List[str] =0.1 A__ : Optional[Any] =ViTMSNModel(_SCREAMING_SNAKE_CASE ) A__ : Any =torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE, map_location="""cpu""" )["""target_encoder"""] A__ : Tuple =ViTImageProcessor(size=config.image_size ) remove_projection_head(_SCREAMING_SNAKE_CASE ) A__ : Union[str, Any] =create_rename_keys(_SCREAMING_SNAKE_CASE, base_model=_SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ) read_in_q_k_v(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, base_model=_SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) model.eval() A__ : int ="""http://images.cocodataset.org/val2017/000000039769.jpg""" A__ : Tuple =Image.open(requests.get(_SCREAMING_SNAKE_CASE, stream=_SCREAMING_SNAKE_CASE ).raw ) A__ : Dict =ViTImageProcessor( size=config.image_size, image_mean=_SCREAMING_SNAKE_CASE, image_std=_SCREAMING_SNAKE_CASE ) A__ : int =image_processor(images=_SCREAMING_SNAKE_CASE, return_tensors="""pt""" ) # forward pass torch.manual_seed(2 ) A__ : int =model(**_SCREAMING_SNAKE_CASE ) A__ : str =outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: A__ : List[str] =torch.tensor([[-1.09_15, -1.48_76, -1.18_09]] ) elif "b16" in checkpoint_url: A__ : Optional[Any] =torch.tensor([[14.28_89, -18.90_45, 11.72_81]] ) elif "l16" in checkpoint_url: A__ : List[str] =torch.tensor([[41.50_28, 
-22.86_81, 45.64_75]] ) elif "b4" in checkpoint_url: A__ : List[Any] =torch.tensor([[-4.38_68, 5.29_32, -0.41_37]] ) else: A__ : Union[str, Any] =torch.tensor([[-0.17_92, -0.64_65, 2.42_63]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3], _SCREAMING_SNAKE_CASE, atol=1E-4 ) print(f"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __snake_case : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar', type=str, help='URL of the checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) __snake_case : Any = parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
702
'''simple docstring''' import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowercase__ ( self : Optional[Any] ) -> int: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() def lowercase__ ( self : Union[str, Any] ) -> str: '''simple docstring''' A__ : Any =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) A__ : Optional[Any] =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) A__ : Optional[int] ="""xvjiarui/stable-diffusion-2-inpainting""" A__ , A__ : List[str] =FlaxStableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase_ , safety_checker=lowerCAmelCase_ ) A__ : List[str] ="""Face of a yellow cat, high resolution, sitting on a park bench""" A__ : Optional[Any] =jax.random.PRNGKey(0 ) A__ : List[str] =50 A__ : List[str] =jax.device_count() A__ : List[str] =num_samples * [prompt] A__ : List[str] =num_samples * [init_image] A__ : Tuple =num_samples * [mask_image] A__ , A__ , A__ : List[Any] =pipeline.prepare_inputs(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # shard inputs and rng A__ : Dict =replicate(lowerCAmelCase_ ) A__ : Union[str, Any] =jax.random.split(lowerCAmelCase_ , jax.device_count() ) A__ : List[Any] =shard(lowerCAmelCase_ ) A__ : Union[str, Any] =shard(lowerCAmelCase_ ) A__ : str =shard(lowerCAmelCase_ ) A__ : List[str] =pipeline( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ) A__ : List[Any] =output.images.reshape(lowerCAmelCase_ , 5_12 , 5_12 , 3 ) A__ : str =images[0, 2_53:2_56, 2_53:2_56, -1] A__ : Tuple =jnp.asarray(jax.device_get(image_slice.flatten() ) ) A__ : Optional[int] =jnp.array( [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] ) print(f"output_slice: {output_slice}" ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
687
0
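A toy illustration of the fused-qkv split performed by the read_in_q_k_v helper above: the first hidden_size rows of the fused projection become the query weights, the middle block the keys, and the last block the values. The tensor here is synthetic, standing in for a real checkpoint's module.blocks.{i}.attn.qkv.weight.

import torch

hidden = 4
qkv_weight = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
q_w = qkv_weight[:hidden, :]           # query rows
k_w = qkv_weight[hidden : 2 * hidden]  # key rows
v_w = qkv_weight[-hidden:]             # value rows
assert torch.equal(torch.cat([q_w, k_w, v_w]), qkv_weight)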
'''simple docstring''' from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class lowerCamelCase : '''simple docstring''' __snake_case = XGLMConfig __snake_case = {} __snake_case = "gelu" def __init__( self : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any=14 , lowerCAmelCase_ : List[Any]=7 , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : int=99 , lowerCAmelCase_ : Tuple=32 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : List[str]=4 , lowerCAmelCase_ : Dict=37 , lowerCAmelCase_ : List[str]="gelu" , lowerCAmelCase_ : Union[str, Any]=0.1 , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : str=5_12 , lowerCAmelCase_ : List[Any]=0.02 , ) -> Optional[int]: '''simple docstring''' A__ : Union[str, Any] =parent A__ : Any =batch_size A__ : List[str] =seq_length A__ : List[str] =is_training A__ : Union[str, Any] =use_input_mask A__ : Dict =use_labels A__ : Any =vocab_size A__ : Tuple =d_model A__ : Union[str, Any] =num_hidden_layers A__ : List[Any] =num_attention_heads A__ : List[Any] =ffn_dim A__ : Union[str, Any] =activation_function A__ : Dict =activation_dropout A__ : Any =attention_dropout A__ : str =max_position_embeddings A__ : List[str] =initializer_range A__ : Any =None A__ : Optional[int] =0 A__ : str =2 A__ : Any =1 def lowercase__ ( self : int ) -> Union[str, Any]: '''simple docstring''' return XGLMConfig.from_pretrained("""facebook/xglm-564M""" ) def lowercase__ ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' A__ : str =tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) A__ : int =None if self.use_input_mask: A__ : Optional[Any] =random_attention_mask([self.batch_size, self.seq_length] ) A__ : int =self.get_config() A__ : Dict =floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def lowercase__ ( self : Dict ) -> int: '''simple docstring''' return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=A_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=A_ , ) def lowercase__ ( self : Optional[Any] ) -> Tuple: '''simple docstring''' A__ : Optional[int] =self.prepare_config_and_inputs() ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) : Any =config_and_inputs A__ : List[Any] ={ """input_ids""": input_ids, """head_mask""": head_mask, } return config, inputs_dict @require_tf class lowerCamelCase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' __snake_case = (TFXGLMModel, TFXGLMForCausalLM) if 
is_tf_available() else () __snake_case = (TFXGLMForCausalLM,) if is_tf_available() else () __snake_case = ( {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {} ) __snake_case = False __snake_case = False __snake_case = False def lowercase__ ( self : int ) -> Tuple: '''simple docstring''' A__ : int =TFXGLMModelTester(self ) A__ : Dict =ConfigTester(self , config_class=A_ , n_embd=37 ) def lowercase__ ( self : List[str] ) -> Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() @slow def lowercase__ ( self : Optional[int] ) -> Dict: '''simple docstring''' for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ : List[Any] =TFXGLMModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) @unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" ) def lowercase__ ( self : Dict ) -> Any: '''simple docstring''' super().test_resize_token_embeddings() @require_tf class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Optional[Any]=True ) -> List[str]: '''simple docstring''' A__ : List[str] =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) A__ : List[Any] =tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off A__ : List[Any] =[2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81] # fmt: on A__ : Any =model.generate(A_ , do_sample=A_ , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , A_ ) @slow def lowercase__ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' A__ : List[str] =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) A__ : Dict =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) tf.random.set_seed(0 ) A__ : Optional[int] =tokenizer("""Today is a nice day and""" , return_tensors="""tf""" ) A__ : Any =tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(""":/CPU:0""" ): A__ : str =model.generate(A_ , do_sample=A_ , seed=[7, 0] ) A__ : Dict =tokenizer.decode(output_ids[0] , skip_special_tokens=A_ ) A__ : Dict =( """Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due""" ) self.assertEqual(A_ , A_ ) @slow def lowercase__ ( self : List[Any] ) -> Tuple: '''simple docstring''' A__ : Tuple =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) A__ : Union[str, Any] =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) A__ : Optional[int] ="""left""" # use different length sentences to test batching A__ : Optional[int] =[ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. 
When""", """Hello, my dog is a little""", ] A__ : List[Any] =tokenizer(A_ , return_tensors="""tf""" , padding=A_ ) A__ : List[str] =inputs["""input_ids"""] A__ : str =model.generate(input_ids=A_ , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 ) A__ : Any =tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids A__ : Union[str, Any] =model.generate(input_ids=A_ , max_new_tokens=12 ) A__ : Any =tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids A__ : Tuple =model.generate(input_ids=A_ , max_new_tokens=12 ) A__ : Optional[int] =tokenizer.batch_decode(A_ , skip_special_tokens=A_ ) A__ : Union[str, Any] =tokenizer.decode(output_non_padded[0] , skip_special_tokens=A_ ) A__ : int =tokenizer.decode(output_padded[0] , skip_special_tokens=A_ ) A__ : int =[ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """ """a single""", """Hello, my dog is a little bit of a shy one, but he is very friendly""", ] self.assertListEqual(A_ , A_ ) self.assertListEqual(A_ , [non_padded_sentence, padded_sentence] )
703
'''simple docstring''' import copy from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING __snake_case : List[Any] = logging.get_logger(__name__) __snake_case : Dict = { 'microsoft/conditional-detr-resnet-50': ( 'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json' ), } class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = 'conditional_detr' __snake_case = ['past_key_values'] __snake_case = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : int , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : Tuple=3_00 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : str=20_48 , lowerCAmelCase_ : Union[str, Any]=8 , lowerCAmelCase_ : Any=6 , lowerCAmelCase_ : Any=20_48 , lowerCAmelCase_ : Union[str, Any]=8 , lowerCAmelCase_ : str=0.0 , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Optional[Any]="relu" , lowerCAmelCase_ : Union[str, Any]=2_56 , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : Optional[Any]=1.0 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : List[Any]="sine" , lowerCAmelCase_ : Optional[int]="resnet50" , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : Optional[Any]=5 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Any=5 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : int=0.25 , **lowerCAmelCase_ : int , ) -> Dict: '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.""" ) A__ : Optional[int] =CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] ) elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): A__ : Tuple =backbone_config.get("""model_type""" ) A__ : List[str] =CONFIG_MAPPING[backbone_model_type] A__ : Dict =config_class.from_dict(lowerCAmelCase_ ) A__ : int =use_timm_backbone A__ : List[Any] =backbone_config A__ : Optional[int] =num_channels A__ : Optional[int] =num_queries A__ : Union[str, Any] =d_model A__ : Optional[int] =encoder_ffn_dim A__ : Optional[Any] =encoder_layers A__ : int =encoder_attention_heads A__ : Optional[Any] =decoder_ffn_dim A__ : Tuple =decoder_layers A__ : Optional[Any] =decoder_attention_heads A__ : Tuple =dropout A__ : int =attention_dropout A__ : Dict =activation_dropout A__ : Union[str, Any] =activation_function A__ : List[str] =init_std A__ : str =init_xavier_std A__ : int =encoder_layerdrop A__ : List[Any] =decoder_layerdrop A__ : Tuple =encoder_layers A__ : Tuple =auxiliary_loss A__ : List[Any] =position_embedding_type A__ : int =backbone A__ : Optional[int] =use_pretrained_backbone A__ : str =dilation # Hungarian matcher A__ : Any =class_cost A__ : str =bbox_cost A__ : str =giou_cost # Loss coefficients A__ : Union[str, Any] =mask_loss_coefficient A__ : int =dice_loss_coefficient A__ : Union[str, Any] =cls_loss_coefficient A__ : List[str] =bbox_loss_coefficient A__ : str =giou_loss_coefficient A__ : Optional[Any] =focal_alpha super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ ) @property def lowercase__ ( self : str ) -> int: '''simple docstring''' return self.encoder_attention_heads @property def lowercase__ ( self : Any ) -> int: '''simple docstring''' return self.d_model def lowercase__ ( self : Dict ) -> Union[str, Any]: '''simple docstring''' A__ : int =copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: A__ : str =self.backbone_config.to_dict() A__ : int =self.__class__.model_type return output class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = version.parse('1.11' ) @property def lowercase__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""pixel_mask""", {0: """batch"""}), ] ) @property def lowercase__ ( self : Any ) -> float: '''simple docstring''' return 1e-5 @property def lowercase__ ( self : Any ) -> int: '''simple docstring''' return 12
687
0
'''simple docstring''' import os def __lowerCamelCase ( ) -> List[Any]: """simple docstring""" A__ : Optional[int] =os.path.dirname(os.path.realpath(_lowercase ) ) A__ : Tuple =os.path.join(_lowercase, """triangle.txt""" ) with open(_lowercase ) as f: A__ : Dict =f.readlines() A__ : Tuple =[] for line in triangle: A__ : List[Any] =[] for number in line.strip().split(""" """ ): numbers_from_line.append(int(_lowercase ) ) a.append(_lowercase ) for i in range(1, len(_lowercase ) ): for j in range(len(a[i] ) ): A__ : str =a[i - 1][j] if j != len(a[i - 1] ) else 0 A__ : Optional[int] =a[i - 1][j - 1] if j > 0 else 0 a[i][j] += max(_lowercase, _lowercase ) return max(a[-1] ) if __name__ == "__main__": print(solution())
704
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __snake_case : Union[str, Any] = logging.get_logger(__name__) __snake_case : Optional[int] = { 'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json', } class lowerCamelCase ( lowercase_ , lowercase_ ): '''simple docstring''' __snake_case = 'bit' __snake_case = ['preactivation', 'bottleneck'] __snake_case = ['SAME', 'VALID'] def __init__( self : List[str] , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : int=64 , lowerCAmelCase_ : Optional[int]=[2_56, 5_12, 10_24, 20_48] , lowerCAmelCase_ : str=[3, 4, 6, 3] , lowerCAmelCase_ : Optional[Any]="preactivation" , lowerCAmelCase_ : str="relu" , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Dict=32 , lowerCAmelCase_ : Tuple=0.0 , lowerCAmelCase_ : int=False , lowerCAmelCase_ : Optional[Any]=32 , lowerCAmelCase_ : Tuple=1 , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : int , ) -> Optional[Any]: '''simple docstring''' super().__init__(**lowerCAmelCase_ ) if layer_type not in self.layer_types: raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" ) if global_padding is not None: if global_padding.upper() in self.supported_padding: A__ : List[Any] =global_padding.upper() else: raise ValueError(f"Padding strategy {global_padding} not supported" ) A__ : List[Any] =num_channels A__ : Tuple =embedding_size A__ : Union[str, Any] =hidden_sizes A__ : List[str] =depths A__ : Optional[Any] =layer_type A__ : int =hidden_act A__ : int =global_padding A__ : int =num_groups A__ : str =drop_path_rate A__ : str =embedding_dynamic_padding A__ : Dict =output_stride A__ : Optional[int] =width_factor A__ : List[str] =["""stem"""] + [f"stage{idx}" for idx in range(1 , len(lowerCAmelCase_ ) + 1 )] A__ , A__ : Union[str, Any] =get_aligned_output_features_output_indices( out_features=lowerCAmelCase_ , out_indices=lowerCAmelCase_ , stage_names=self.stage_names )
687
0
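For a concrete check of the bottom-up maximum-path recurrence in the triangle solution above, here is the same update applied to a small in-memory triangle instead of triangle.txt; the best path 3 + 7 + 4 + 9 sums to 23.

triangle = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
for i in range(1, len(triangle)):
    for j in range(len(triangle[i])):
        up = triangle[i - 1][j] if j != len(triangle[i - 1]) else 0
        left = triangle[i - 1][j - 1] if j > 0 else 0
        triangle[i][j] += max(up, left)
assert max(triangle[-1]) == 23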
import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging __snake_case : Optional[Any] = logging.get_logger(__name__) class lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : Dict , lowerCAmelCase_ : Union[List[ControlNetModel], Tuple[ControlNetModel]] ) -> Dict: '''simple docstring''' super().__init__() A__ : List[str] =nn.ModuleList(__lowerCamelCase ) def lowercase__ ( self : int , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : Union[torch.Tensor, float, int] , lowerCAmelCase_ : torch.Tensor , lowerCAmelCase_ : List[torch.tensor] , lowerCAmelCase_ : List[float] , lowerCAmelCase_ : Optional[torch.Tensor] = None , lowerCAmelCase_ : Optional[torch.Tensor] = None , lowerCAmelCase_ : Optional[torch.Tensor] = None , lowerCAmelCase_ : Optional[Dict[str, Any]] = None , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True , ) -> List[Any]: '''simple docstring''' for i, (image, scale, controlnet) in enumerate(zip(__lowerCamelCase , __lowerCamelCase , self.nets ) ): A__ : int =controlnet( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) # merge samples if i == 0: A__ : Union[str, Any] =down_samples, mid_sample else: A__ : Tuple =[ samples_prev + samples_curr for samples_prev, samples_curr in zip(__lowerCamelCase , __lowerCamelCase ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : Union[str, os.PathLike] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Callable = None , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional[str] = None , ) -> Dict: '''simple docstring''' A__ : Any =0 A__ : Tuple =save_directory for controlnet in self.nets: controlnet.save_pretrained( __lowerCamelCase , is_main_process=__lowerCamelCase , save_function=__lowerCamelCase , safe_serialization=__lowerCamelCase , variant=__lowerCamelCase , ) idx += 1 A__ : Optional[Any] =model_path_to_save + f"_{idx}" @classmethod def lowercase__ ( cls : Dict , lowerCAmelCase_ : Optional[Union[str, os.PathLike]] , **lowerCAmelCase_ : Optional[int] ) -> Dict: '''simple docstring''' A__ : Optional[int] =0 A__ : List[str] =[] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... A__ : int =pretrained_model_path while os.path.isdir(__lowerCamelCase ): A__ : int =ControlNetModel.from_pretrained(__lowerCamelCase , **__lowerCamelCase ) controlnets.append(__lowerCamelCase ) idx += 1 A__ : int =pretrained_model_path + f"_{idx}" logger.info(f"{len(__lowerCamelCase )} controlnets loaded from {pretrained_model_path}." ) if len(__lowerCamelCase ) == 0: raise ValueError( f"No ControlNets found under {os.path.dirname(__lowerCamelCase )}. Expected at least {pretrained_model_path + '_0'}." ) return cls(__lowerCamelCase )
705
'''simple docstring''' import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin __snake_case : int = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.plbart.modeling_plbart import shift_tokens_right __snake_case : List[str] = 5_0003 __snake_case : Dict = 5_0002 @require_sentencepiece @require_tokenizers class lowerCamelCase ( lowercase_ , unittest.TestCase ): '''simple docstring''' __snake_case = PLBartTokenizer __snake_case = None __snake_case = False def lowercase__ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing A__ : Tuple =PLBartTokenizer(lowerCAmelCase_ , language_codes="""base""" , keep_accents=lowerCAmelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase__ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' A__ : Union[str, Any] =PLBartTokenizer(lowerCAmelCase_ , language_codes="""base""" , keep_accents=lowerCAmelCase_ ) A__ : Optional[Any] =tokenizer.tokenize("""This is a test""" ) self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) A__ : Tuple =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) A__ : Any =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) A__ : str =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) A__ : Optional[Any] =tokenizer.vocab_size A__ : Dict =[tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 4 , lowerCAmelCase_ )] self.assertListEqual(lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] ) A__ : Dict ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go""" A__ : int =tokenizer(lowerCAmelCase_ ).input_ids self.assertEqual( tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , ) def lowercase__ ( self : Any ) -> str: '''simple docstring''' A__ : int =PLBartTokenizer(lowerCAmelCase_ , language_codes="""multi""" , keep_accents=lowerCAmelCase_ ) A__ : Dict 
=tokenizer.tokenize("""This is a test""" ) self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) A__ : Dict =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) A__ : str =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) A__ : Dict =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) A__ : Tuple =tokenizer.vocab_size A__ : Dict =[tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 7 , lowerCAmelCase_ )] self.assertListEqual( lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] ) A__ : Any ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go""" A__ : int =tokenizer(lowerCAmelCase_ ).input_ids self.assertEqual( tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , ) @require_torch @require_sentencepiece @require_tokenizers class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' __snake_case = 'uclanlp/plbart-python-en_XX' __snake_case = [ 'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])', 'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])', ] __snake_case = [ 'Returns the maximum value of a b c.', 'Sums the values of a b c.', ] __snake_case = [ 134, 5452, 3_3460, 3_3441, 3_3463, 3_3465, 3_3463, 3_3449, 988, 20, 3_3456, 19, 3_3456, 771, 39, 4258, 889, 3318, 3_3441, 3_3463, 3_3465, 3_3463, 3_3449, 2471, 2, PYTHON_CODE, ] @classmethod def lowercase__ ( cls : Optional[int] ) -> str: '''simple docstring''' A__ : PLBartTokenizer =PLBartTokenizer.from_pretrained( cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" ) A__ : Optional[Any] =1 return cls def lowercase__ ( self : str ) -> Optional[Any]: '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 5_00_01 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 5_00_02 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 5_00_03 ) def lowercase__ ( self : int ) -> List[str]: '''simple docstring''' A__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ ) def lowercase__ ( self : int ) -> 
Optional[int]: '''simple docstring''' self.assertIn(lowerCAmelCase_ , self.tokenizer.all_special_ids ) A__ : Tuple =[EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2] A__ : Any =self.tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ ) A__ : Optional[int] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase_ ) def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' A__ : Optional[int] =["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20] self.assertIsInstance(src_text[0] , lowerCAmelCase_ ) A__ : str =10 A__ : Optional[Any] =self.tokenizer(lowerCAmelCase_ , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , lowerCAmelCase_ ) self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ ) def lowercase__ ( self : str ) -> List[Any]: '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [5_00_04, 5_00_01] ) def lowercase__ ( self : Tuple ) -> str: '''simple docstring''' A__ : Tuple =tempfile.mkdtemp() A__ : Tuple =self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowerCAmelCase_ ) A__ : Optional[Any] =PLBartTokenizer.from_pretrained(lowerCAmelCase_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase_ ) @require_torch def lowercase__ ( self : Any ) -> Any: '''simple docstring''' A__ : List[str] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , return_tensors="""pt""" ) A__ : str =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] ) self.assertEqual(batch.decoder_input_ids[1][0] , lowerCAmelCase_ ) self.assertEqual(batch.decoder_input_ids[1][-1] , 2 ) self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] ) @require_torch def lowercase__ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' A__ : Union[str, Any] =self.tokenizer( self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , ) A__ : Any =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual((2, 26) , batch.input_ids.shape ) self.assertEqual((2, 26) , batch.attention_mask.shape ) A__ : List[Any] =batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] ) def lowercase__ ( self : Any ) -> Dict: '''simple docstring''' A__ : Any =self.tokenizer(self.src_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=3 , return_tensors="""pt""" ) A__ : Optional[int] =self.tokenizer( text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=10 , return_tensors="""pt""" ) A__ : Optional[Any] =targets["""input_ids"""] A__ : List[str] =shift_tokens_right(lowerCAmelCase_ , self.tokenizer.pad_token_id ) 
self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def lowercase__ ( self : Any ) -> str: '''simple docstring''' A__ : Any =self.tokenizer._build_translation_inputs( """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" ) self.assertEqual( nested_simplify(lowerCAmelCase_ ) , { # A, test, EOS, en_XX """input_ids""": [[1_50, 2_42, 2, 5_00_03]], """attention_mask""": [[1, 1, 1, 1]], # java """forced_bos_token_id""": 5_00_01, } , )
687
0
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    """Print every permutation of `sequence` using backtracking."""
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Explore the state-space tree: fix one element per level, backtrack on return."""
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
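A quick cross-check of the backtracking above (a sketch; collect_permutations is a hypothetical variant that returns results instead of printing them):

import itertools


def collect_permutations(sequence: list[int]) -> list[tuple[int, ...]]:
    # same backtracking idea as create_state_space_tree, but accumulating tuples
    results: list[tuple[int, ...]] = []

    def backtrack(current: list[int], used: list[bool]) -> None:
        if len(current) == len(sequence):
            results.append(tuple(current))
            return
        for i in range(len(sequence)):
            if not used[i]:
                used[i] = True
                current.append(sequence[i])
                backtrack(current, used)
                current.pop()
                used[i] = False

    backtrack([], [False] * len(sequence))
    return results


assert sorted(collect_permutations([3, 1, 2, 4])) == sorted(itertools.permutations([3, 1, 2, 4]))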
706
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device


torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_versatile_diffusion_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
687
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
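A minimal usage sketch (assumes transformers is installed): the attribute_map and the two properties above are what let generic code read num_attention_heads and hidden_size off a Pegasus config.

from transformers import PegasusConfig

config = PegasusConfig(encoder_attention_heads=8, d_model=512)
# both reads are redirected to the encoder-specific fields defined above
assert config.num_attention_heads == 8
assert config.hidden_size == 512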
707
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of the VQModel encoding method."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
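A small shape smoke test for the autoencoder above (a sketch assuming diffusers is installed; the default model is randomly initialized, so only tensor shapes are meaningful):

import torch
from diffusers import VQModel

model = VQModel()  # default config: 3 channels in/out, a single down/up block
x = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    out = model(x).sample
# encode -> quantize -> decode preserves the input shape with the default blocks
assert out.shape == x.shape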
687
0
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller


min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    """Find a primitive root modulo the prime p_val."""
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    """Generate an ElGamal public/private key pair of the given bit size."""
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)

    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
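A self-contained sanity check of the key relation above (a sketch with toy numbers far too small for real use): for public key (e_1, e_2, p) and private exponent d, e_2 is the modular inverse of e_1**d mod p.

p = 23  # toy prime
e_1 = 5  # a primitive root modulo 23
d = 7  # toy private exponent
e_2 = pow(pow(e_1, d, p), -1, p)  # what find_mod_inverse computes (Python 3.8+)
assert (pow(e_1, d, p) * e_2) % p == 1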
708
'''simple docstring''' import os import re from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __snake_case : Optional[int] = logging.get_logger(__name__) __snake_case : Tuple = { 'vocab_file': 'vocab.txt', 'merges_file': 'bpe.codes', } __snake_case : str = { 'vocab_file': { 'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt', 'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt', }, 'merges_file': { 'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes', 'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes', }, } __snake_case : List[Any] = { 'vinai/phobert-base': 256, 'vinai/phobert-large': 256, } def __lowerCamelCase ( __snake_case : Union[str, Any] ) -> str: """simple docstring""" A__ : Optional[int] =set() A__ : Optional[int] =word[0] for char in word[1:]: pairs.add((prev_char, char) ) A__ : str =char A__ : List[Any] =set(__snake_case ) return pairs class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = VOCAB_FILES_NAMES __snake_case = PRETRAINED_VOCAB_FILES_MAP __snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any]="<s>" , lowerCAmelCase_ : List[str]="</s>" , lowerCAmelCase_ : str="</s>" , lowerCAmelCase_ : int="<s>" , lowerCAmelCase_ : List[str]="<unk>" , lowerCAmelCase_ : Any="<pad>" , lowerCAmelCase_ : Tuple="<mask>" , **lowerCAmelCase_ : Dict , ) -> Dict: '''simple docstring''' super().__init__( bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , **lowerCAmelCase_ , ) A__ : int =vocab_file A__ : Any =merges_file A__ : Union[str, Any] ={} A__ : Optional[int] =0 A__ : List[Any] =1 A__ : Tuple =2 A__ : Dict =3 self.add_from_file(lowerCAmelCase_ ) A__ : List[str] ={v: k for k, v in self.encoder.items()} with open(lowerCAmelCase_ , encoding="""utf-8""" ) as merges_handle: A__ : str =merges_handle.read().split("""\n""" )[:-1] A__ : Tuple =[tuple(merge.split()[:-1] ) for merge in merges] A__ : Optional[Any] =dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) A__ : Dict ={} def lowercase__ ( self : Tuple , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A__ : Dict =[self.cls_token_id] A__ : Union[str, Any] =[self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowercase__ ( self : str , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None , lowerCAmelCase_ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase_ )) + [1] return [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1] + ([0] * len(lowerCAmelCase_ )) + [1] def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' A__ : Tuple =[self.sep_token_id] A__ : Dict =[self.cls_token_id] if 
token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowercase__ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' return len(self.encoder ) def lowercase__ ( self : Any ) -> Tuple: '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def lowercase__ ( self : str , lowerCAmelCase_ : Any ) -> Dict: '''simple docstring''' if token in self.cache: return self.cache[token] A__ : int =tuple(lowerCAmelCase_ ) A__ : Optional[int] =tuple(list(word[:-1] ) + [word[-1] + """</w>"""] ) A__ : Tuple =get_pairs(lowerCAmelCase_ ) if not pairs: return token while True: A__ : List[Any] =min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break A__ , A__ : Tuple =bigram A__ : Optional[int] =[] A__ : Tuple =0 while i < len(lowerCAmelCase_ ): try: A__ : str =word.index(lowerCAmelCase_ , lowerCAmelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) A__ : Union[str, Any] =j if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 A__ : Dict =tuple(lowerCAmelCase_ ) A__ : Dict =new_word if len(lowerCAmelCase_ ) == 1: break else: A__ : str =get_pairs(lowerCAmelCase_ ) A__ : Dict ="""@@ """.join(lowerCAmelCase_ ) A__ : Tuple =word[:-4] A__ : Any =word return word def lowercase__ ( self : List[str] , lowerCAmelCase_ : str ) -> Any: '''simple docstring''' A__ : int =[] A__ : Optional[int] =re.findall(R"""\S+\n?""" , lowerCAmelCase_ ) for token in words: split_tokens.extend(list(self.bpe(lowerCAmelCase_ ).split(""" """ ) ) ) return split_tokens def lowercase__ ( self : str , lowerCAmelCase_ : Union[str, Any] ) -> int: '''simple docstring''' return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) ) def lowercase__ ( self : Tuple , lowerCAmelCase_ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' return self.decoder.get(lowerCAmelCase_ , self.unk_token ) def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' A__ : Optional[Any] =""" """.join(lowerCAmelCase_ ).replace("""@@ """ , """""" ).strip() return out_string def lowercase__ ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(lowerCAmelCase_ ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return A__ : Optional[Any] =os.path.join( lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) A__ : Tuple =os.path.join( lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ): copyfile(self.vocab_file , lowerCAmelCase_ ) if os.path.abspath(self.merges_file ) != os.path.abspath(lowerCAmelCase_ ): copyfile(self.merges_file , lowerCAmelCase_ ) return out_vocab_file, out_merge_file def lowercase__ ( self : List[Any] , lowerCAmelCase_ : Optional[Any] ) -> Any: '''simple docstring''' if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): try: with open(lowerCAmelCase_ , """r""" , encoding="""utf-8""" ) as fd: self.add_from_file(lowerCAmelCase_ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise 
Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset" ) return A__ : Union[str, Any] =f.readlines() for lineTmp in lines: A__ : List[Any] =lineTmp.strip() A__ : Dict =line.rfind(""" """ ) if idx == -1: raise ValueError("""Incorrect dictionary format, expected '<token> <cnt>'""" ) A__ : Tuple =line[:idx] A__ : Tuple =len(self.encoder )
687
0
from __future__ import annotations

from math import pow, sqrt


def electrical_impedance(
    resistance: float, reactance: float, impedance: float
) -> dict[str, float]:
    """Solve Z**2 = R**2 + X**2 for whichever of the three arguments is given as 0."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
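A quick worked example for the function above; the 3-4-5 right triangle keeps the arithmetic checkable by hand:

# |Z| = sqrt(R**2 + X**2) = sqrt(9 + 16) = 5
assert electrical_impedance(3, 4, 0) == {"impedance": 5.0}
# solving back for the reactance from |Z| and R: sqrt(25 - 9) = 4
assert electrical_impedance(3, 0, 5) == {"reactance": 4.0}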
709
'''simple docstring''' import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging __snake_case : List[str] = logging.get_logger(__name__) def __lowerCamelCase ( __snake_case : Any, __snake_case : Any ) -> int: """simple docstring""" A__ : Union[str, Any] =nn.functional.normalize(__snake_case ) A__ : Optional[Any] =nn.functional.normalize(__snake_case ) return torch.mm(__snake_case, normalized_text_embeds.t() ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = CLIPConfig __snake_case = ['CLIPEncoderLayer'] def __init__( self : Tuple , lowerCAmelCase_ : CLIPConfig ) -> Dict: '''simple docstring''' super().__init__(lowerCAmelCase_ ) A__ : str =CLIPVisionModel(config.vision_config ) A__ : Optional[Any] =nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=lowerCAmelCase_ ) A__ : List[Any] =nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=lowerCAmelCase_ ) A__ : Any =nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=lowerCAmelCase_ ) A__ : Optional[Any] =nn.Parameter(torch.ones(17 ) , requires_grad=lowerCAmelCase_ ) A__ : int =nn.Parameter(torch.ones(3 ) , requires_grad=lowerCAmelCase_ ) @torch.no_grad() def lowercase__ ( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int ) -> Any: '''simple docstring''' A__ : Any =self.vision_model(lowerCAmelCase_ )[1] # pooled_output A__ : Any =self.visual_projection(lowerCAmelCase_ ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 A__ : Any =cosine_distance(lowerCAmelCase_ , self.special_care_embeds ).cpu().float().numpy() A__ : Optional[int] =cosine_distance(lowerCAmelCase_ , self.concept_embeds ).cpu().float().numpy() A__ : List[str] =[] A__ : Optional[int] =image_embeds.shape[0] for i in range(lowerCAmelCase_ ): A__ : List[Any] ={"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []} # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign images A__ : List[Any] =0.0 for concept_idx in range(len(special_cos_dist[0] ) ): A__ : Optional[Any] =special_cos_dist[i][concept_idx] A__ : Union[str, Any] =self.special_care_embeds_weights[concept_idx].item() A__ : Tuple =round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["special_scores"][concept_idx] > 0: result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]} ) A__ : Dict =0.01 for concept_idx in range(len(cos_dist[0] ) ): A__ : Optional[int] =cos_dist[i][concept_idx] A__ : List[str] =self.concept_embeds_weights[concept_idx].item() A__ : Optional[int] =round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["concept_scores"][concept_idx] > 0: result_img["bad_concepts"].append(lowerCAmelCase_ ) result.append(lowerCAmelCase_ ) A__ : int =[len(res["""bad_concepts"""] ) > 0 for res in result] return images, has_nsfw_concepts @torch.no_grad() def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : torch.FloatTensor ) -> Optional[int]: '''simple docstring''' A__ : Optional[Any] =self.vision_model(lowerCAmelCase_ )[1] # pooled_output A__ : List[Any] =self.visual_projection(lowerCAmelCase_ ) A__ : Union[str, Any] =cosine_distance(lowerCAmelCase_ , self.special_care_embeds ) A__ : Optional[int] =cosine_distance(lowerCAmelCase_ , self.concept_embeds ) # increase this value to 
create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images A__ : Dict =0.0 A__ : Dict =special_cos_dist - self.special_care_embeds_weights + adjustment # special_scores = special_scores.round(decimals=3) A__ : Union[str, Any] =torch.any(special_scores > 0 , dim=1 ) A__ : Tuple =special_care * 0.01 A__ : str =special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] ) A__ : List[Any] =(cos_dist - self.concept_embeds_weights) + special_adjustment # concept_scores = concept_scores.round(decimals=3) A__ : Optional[int] =torch.any(concept_scores > 0 , dim=1 ) return images, has_nsfw_concepts
687
0
'''simple docstring''' from ...utils import deprecate from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401 deprecate( 'stable diffusion controlnet', '0.22.0', 'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.', standard_warn=False, stacklevel=3, )
710
'''simple docstring''' from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def __lowerCamelCase ( __snake_case : Tuple, __snake_case : List[Any] ) -> str: """simple docstring""" A__ : Optional[int] =[] for part_id in partition_order: A__ : int =df.where(f"SPARK_PARTITION_ID() = {part_id}" ).collect() for row_idx, row in enumerate(__snake_case ): expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()) ) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def __lowerCamelCase ( ) -> List[Any]: """simple docstring""" A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() A__ : str =spark.range(100 ).repartition(1 ) A__ : List[str] =Spark(__snake_case ) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=16 ) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 50 @require_not_windows @require_dill_gt_0_3_2 def __lowerCamelCase ( ) -> Tuple: """simple docstring""" A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() A__ : Tuple =spark.range(10 ).repartition(2 ) A__ : List[str] =[1, 0] A__ : Tuple =_generate_iterable_examples(__snake_case, __snake_case ) # Reverse the partitions. A__ : Dict =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, __snake_case ) for i, (row_id, row_dict) in enumerate(generate_fn() ): A__ , A__ : Union[str, Any] =expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def __lowerCamelCase ( ) -> List[Any]: """simple docstring""" A__ : Any =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() A__ : Union[str, Any] =spark.range(10 ).repartition(1 ) A__ : List[str] =SparkExamplesIterable(__snake_case ) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(__snake_case ): assert row_id == f"0_{i}" assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def __lowerCamelCase ( ) -> Any: """simple docstring""" A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() A__ : Union[str, Any] =spark.range(30 ).repartition(3 ) # Mock the generator so that shuffle reverses the partition indices. 
with patch("""numpy.random.Generator""" ) as generator_mock: A__ : Tuple =lambda __snake_case : x.reverse() A__ : List[str] =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [2, 1, 0] ) A__ : Union[str, Any] =SparkExamplesIterable(__snake_case ).shuffle_data_sources(__snake_case ) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(__snake_case ): A__ , A__ : List[Any] =expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def __lowerCamelCase ( ) -> Optional[Any]: """simple docstring""" A__ : List[Any] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() A__ : Any =spark.range(20 ).repartition(4 ) # Partitions 0 and 2 A__ : str =SparkExamplesIterable(__snake_case ).shard_data_sources(worker_id=0, num_workers=2 ) assert shard_it_a.n_shards == 2 A__ : Any =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [0, 2] ) for i, (row_id, row_dict) in enumerate(__snake_case ): A__ , A__ : Dict =expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 A__ : Union[str, Any] =SparkExamplesIterable(__snake_case ).shard_data_sources(worker_id=1, num_workers=2 ) assert shard_it_a.n_shards == 2 A__ : Union[str, Any] =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [1, 3] ) for i, (row_id, row_dict) in enumerate(__snake_case ): A__ , A__ : Optional[int] =expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def __lowerCamelCase ( ) -> Any: """simple docstring""" A__ : Optional[int] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() A__ : List[str] =spark.range(100 ).repartition(1 ) A__ : List[Any] =Spark(__snake_case ) # Choose a small max_shard_size for maximum partitioning. spark_builder._repartition_df_if_needed(max_shard_size=1 ) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 100
687
0
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation


def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length from letters, digits and symbols."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    """Generate a password guaranteed to contain the characters in `chars_incl`."""
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


# Passwords should contain uppercase, lowercase, numbers, and special characters
def strong_password_detector(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char


def main() -> None:
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, You better save it.]")


if __name__ == "__main__":
    main()
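A brief usage sketch for the strength checker above:

assert strong_password_detector("Hunter2!isSafe") is True
assert strong_password_detector("password") is False  # no uppercase, digit, or symbol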
711
'''simple docstring''' from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __snake_case : int = { 'configuration_trajectory_transformer': [ 'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrajectoryTransformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : str = [ 'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TrajectoryTransformerModel', 'TrajectoryTransformerPreTrainedModel', 'load_tf_weights_in_trajectory_transformer', ] if TYPE_CHECKING: from .configuration_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TrajectoryTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TrajectoryTransformerModel, TrajectoryTransformerPreTrainedModel, load_tf_weights_in_trajectory_transformer, ) else: import sys __snake_case : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
687
0
'''simple docstring''' import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : List[str] ) -> Any: """simple docstring""" assert isinstance(__lowerCAmelCase, __lowerCAmelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""", [False, True] ) def __lowerCamelCase ( __snake_case : int, __snake_case : Dict, __snake_case : Tuple ) -> Any: """simple docstring""" A__ : List[Any] =tmp_path / """cache""" A__ : str ={"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): A__ : Optional[int] =ParquetDatasetReader(__lowerCAmelCase, cache_dir=__lowerCAmelCase, keep_in_memory=__lowerCAmelCase ).read() _check_parquet_dataset(__lowerCAmelCase, __lowerCAmelCase ) @pytest.mark.parametrize( """features""", [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ], ) def __lowerCamelCase ( __snake_case : int, __snake_case : List[str], __snake_case : Any ) -> List[Any]: """simple docstring""" A__ : int =tmp_path / """cache""" A__ : List[str] ={"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} A__ : int =features.copy() if features else default_expected_features A__ : List[str] =( Features({feature: Value(__lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) A__ : List[Any] =ParquetDatasetReader(__lowerCAmelCase, features=__lowerCAmelCase, cache_dir=__lowerCAmelCase ).read() _check_parquet_dataset(__lowerCAmelCase, __lowerCAmelCase ) @pytest.mark.parametrize("""split""", [None, NamedSplit("""train""" ), """train""", """test"""] ) def __lowerCamelCase ( __snake_case : List[str], __snake_case : Optional[int], __snake_case : Optional[int] ) -> str: """simple docstring""" A__ : Union[str, Any] =tmp_path / """cache""" A__ : Optional[int] ={"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} A__ : Optional[int] =ParquetDatasetReader(__lowerCAmelCase, cache_dir=__lowerCAmelCase, split=__lowerCAmelCase ).read() _check_parquet_dataset(__lowerCAmelCase, __lowerCAmelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""", [str, list] ) def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Dict, __snake_case : Any ) -> Tuple: """simple docstring""" if issubclass(__lowerCAmelCase, __lowerCAmelCase ): A__ : Dict =parquet_path elif issubclass(__lowerCAmelCase, __lowerCAmelCase ): A__ : int =[parquet_path] A__ : List[str] =tmp_path / """cache""" A__ : List[Any] ={"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} A__ : str 
=ParquetDatasetReader(__lowerCAmelCase, cache_dir=__lowerCAmelCase ).read() _check_parquet_dataset(__lowerCAmelCase, __lowerCAmelCase ) def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Optional[int], __snake_case : Tuple=("train",) ) -> Dict: """simple docstring""" assert isinstance(__lowerCAmelCase, __lowerCAmelCase ) for split in splits: A__ : List[Any] =dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""", [False, True] ) def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Optional[int], __snake_case : str ) -> str: """simple docstring""" A__ : List[Any] =tmp_path / """cache""" A__ : int ={"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): A__ : str =ParquetDatasetReader( {"""train""": parquet_path}, cache_dir=__lowerCAmelCase, keep_in_memory=__lowerCAmelCase ).read() _check_parquet_datasetdict(__lowerCAmelCase, __lowerCAmelCase ) @pytest.mark.parametrize( """features""", [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ], ) def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : str, __snake_case : Any ) -> Union[str, Any]: """simple docstring""" A__ : Dict =tmp_path / """cache""" A__ : List[str] ={"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} A__ : List[Any] =features.copy() if features else default_expected_features A__ : Tuple =( Features({feature: Value(__lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) A__ : int =ParquetDatasetReader({"""train""": parquet_path}, features=__lowerCAmelCase, cache_dir=__lowerCAmelCase ).read() _check_parquet_datasetdict(__lowerCAmelCase, __lowerCAmelCase ) @pytest.mark.parametrize("""split""", [None, NamedSplit("""train""" ), """train""", """test"""] ) def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : Any, __snake_case : Dict ) -> List[str]: """simple docstring""" if split: A__ : Optional[Any] ={split: parquet_path} else: A__ : Union[str, Any] ="""train""" A__ : str ={"""train""": parquet_path, """test""": parquet_path} A__ : Any =tmp_path / """cache""" A__ : str ={"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} A__ : Optional[Any] =ParquetDatasetReader(__lowerCAmelCase, cache_dir=__lowerCAmelCase ).read() _check_parquet_datasetdict(__lowerCAmelCase, __lowerCAmelCase, splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def __lowerCamelCase ( __snake_case : Dict, __snake_case : List[str] ) -> int: """simple docstring""" A__ : List[str] =ParquetDatasetWriter(__lowerCAmelCase, tmp_path / """foo.parquet""" ) assert writer.write() > 0 A__ : Tuple =pq.ParquetFile(tmp_path / """foo.parquet""" ) A__ : List[str] =pf.read() assert dataset.data.table == output_table def __lowerCamelCase ( __snake_case : Optional[int], __snake_case : Union[str, Any] ) -> Any: """simple docstring""" A__ : Tuple 
=str(shared_datadir / """test_image_rgb.jpg""" ) A__ : List[str] ={"""image""": [image_path]} A__ : str =Features({"""image""": Image()} ) A__ : List[Any] =Dataset.from_dict(__lowerCAmelCase, features=__lowerCAmelCase ) A__ : str =ParquetDatasetWriter(__lowerCAmelCase, tmp_path / """foo.parquet""" ) assert writer.write() > 0 A__ : int =Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) ) assert dataset.features == reloaded_dataset.features A__ : str =ParquetDatasetReader(str(tmp_path / """foo.parquet""" ), streaming=__lowerCAmelCase ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( """feature, expected""", [ (Features({"""foo""": Value("""int32""" )} ), None), (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ], ) def __lowerCamelCase ( __snake_case : List[str], __snake_case : Tuple ) -> Optional[Any]: """simple docstring""" assert get_writer_batch_size(__lowerCAmelCase ) == expected
712
'''simple docstring''' import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def __lowerCamelCase ( __snake_case : Dict ) -> List[str]: """simple docstring""" if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class lowerCamelCase ( nn.Module ): '''simple docstring''' def __init__( self : Union[str, Any] , lowerCAmelCase_ : nn.Module , lowerCAmelCase_ : int ) -> str: '''simple docstring''' super().__init__() A__ : Union[str, Any] =module A__ : Union[str, Any] =nn.Sequential( nn.Linear(module.in_features , lowerCAmelCase_ , bias=lowerCAmelCase_ ) , nn.Linear(lowerCAmelCase_ , module.out_features , bias=lowerCAmelCase_ ) , ) A__ : Tuple =(2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=lowerCAmelCase_ ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def lowercase__ ( self : List[str] , lowerCAmelCase_ : Optional[int] , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : int ) -> Dict: '''simple docstring''' return self.module(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ) + self.adapter(lowerCAmelCase_ ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' __snake_case = 'bigscience/bloom-1b7' # Constant values __snake_case = 2.109659552692574 __snake_case = 'Hello my name is' __snake_case = set() EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. 
I' ) EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' ) EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' ) __snake_case = 10 def lowercase__ ( self : Optional[int] ) -> Tuple: '''simple docstring''' # Models and tokenizer A__ : List[Any] =AutoTokenizer.from_pretrained(self.model_name ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' def lowercase__ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' super().setUp() # Models and tokenizer A__ : Optional[int] =AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map="""auto""" ) A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) def lowercase__ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' A__ : str =self.model_abit.config self.assertTrue(hasattr(lowerCAmelCase_ , """quantization_config""" ) ) A__ : Union[str, Any] =config.to_dict() A__ : Any =config.to_diff_dict() A__ : Optional[Any] =config.to_json_string() def lowercase__ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' from bitsandbytes.nn import Paramsabit A__ : int =self.model_fpaa.get_memory_footprint() A__ : Optional[Any] =self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) A__ : Tuple =get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def lowercase__ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(lowerCAmelCase_ , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def lowercase__ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' A__ : int =self.tokenizer(self.input_text , return_tensors="""pt""" ) A__ : Union[str, Any] =self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS ) def lowercase__ ( self : Optional[Any] ) -> Tuple: '''simple docstring''' A__ : Tuple =BitsAndBytesConfig() A__ : Tuple =True A__ : Optional[int] =AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=lowerCAmelCase_ , device_map="""auto""" ) A__ : Union[str, Any] =self.tokenizer(self.input_text , return_tensors="""pt""" ) A__ : Optional[Any] =model_abit_from_config.generate( input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS ) def lowercase__ ( self : str ) -> List[str]: '''simple docstring''' with self.assertRaises(lowerCAmelCase_ ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(lowerCAmelCase_ ) def lowercase__ ( self : List[str] ) -> Any: '''simple docstring''' A__ : Tuple =BitsAndBytesConfig() with self.assertRaises(lowerCAmelCase_ ): A__ : Dict =AutoModelForCausalLM.from_pretrained( self.model_name , 
quantization_config=lowerCAmelCase_ , load_in_abit=lowerCAmelCase_ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , ) def lowercase__ ( self : List[Any] ) -> Optional[int]: '''simple docstring''' with self.assertRaises(lowerCAmelCase_ ): # Tries with `str` self.model_abit.to("""cpu""" ) with self.assertRaises(lowerCAmelCase_ ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(lowerCAmelCase_ ): # Tries with a `device` self.model_abit.to(torch.device("""cuda:0""" ) ) with self.assertRaises(lowerCAmelCase_ ): # Tries with a `device` self.model_abit.float() with self.assertRaises(lowerCAmelCase_ ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" ) A__ : Optional[Any] =self.model_fpaa.to(torch.floataa ) A__ : Dict =self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error A__ : List[str] =self.model_fpaa.to("""cpu""" ) # Check this does not throw an error A__ : List[str] =self.model_fpaa.half() # Check this does not throw an error A__ : int =self.model_fpaa.float() def lowercase__ ( self : int ) -> Dict: '''simple docstring''' A__ : Dict =AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @classmethod def lowercase__ ( cls : List[str] ) -> Union[str, Any]: '''simple docstring''' A__ : Tuple ="""t5-small""" A__ : Optional[Any] ="""google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense A__ : Optional[int] =AutoTokenizer.from_pretrained(cls.model_name ) A__ : Optional[int] ="""Translate in German: Hello, my dog is cute""" def lowercase__ ( self : Optional[int] ) -> Dict: '''simple docstring''' gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : Dict ) -> Optional[Any]: '''simple docstring''' from transformers import TaForConditionalGeneration A__ : Optional[int] =TaForConditionalGeneration._keep_in_fpaa_modules A__ : Optional[Any] =None # test with `t5-small` A__ : str =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) A__ : List[str] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) A__ : Optional[Any] =model.generate(**lowerCAmelCase_ ) # test with `flan-t5-small` A__ : List[str] =TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) A__ : Tuple =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) A__ : Union[str, Any] =model.generate(**lowerCAmelCase_ ) A__ : Dict =modules def lowercase__ ( self : str ) -> Optional[int]: '''simple docstring''' import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` A__ : Optional[int] =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) A__ : Any =model.generate(**lowerCAmelCase_ ) # test with 
`flan-t5-small` A__ : Union[str, Any] =TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) A__ : Optional[int] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) A__ : Dict =model.generate(**lowerCAmelCase_ ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' def lowercase__ ( self : List[Any] ) -> int: '''simple docstring''' super().setUp() # model_name A__ : Any ="""bigscience/bloom-560m""" A__ : List[Any] ="""t5-small""" # Different types of model A__ : Dict =AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) # Sequence classification model A__ : List[Any] =AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) # CausalLM model A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) # Seq2seq model A__ : List[str] =AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" ) def lowercase__ ( self : Dict ) -> int: '''simple docstring''' del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : str ) -> List[Any]: '''simple docstring''' from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' def lowercase__ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' super().setUp() def lowercase__ ( self : Optional[Any] ) -> int: '''simple docstring''' del self.pipe gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : Any ) -> Union[str, Any]: '''simple docstring''' A__ : Dict =pipeline( """text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass A__ : Optional[int] =self.pipe(self.input_text ) self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class lowerCamelCase ( lowercase_ ): '''simple docstring''' def lowercase__ ( self : str ) -> int: '''simple docstring''' super().setUp() def lowercase__ ( self : Tuple ) -> Tuple: '''simple docstring''' A__ : int =AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""balanced""" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model A__ : str =self.tokenizer(self.input_text , return_tensors="""pt""" ) # Second real batch A__ : Any =model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' def lowercase__ ( self : int ) -> Optional[Any]: '''simple docstring''' A__ : Union[str, Any] ="""facebook/opt-350m""" super().setUp() def 
lowercase__ ( self : List[str] ) -> Dict: '''simple docstring''' if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ): return # Step 1: freeze all parameters A__ : Optional[Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): A__ : int =False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability A__ : Dict =param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(lowerCAmelCase_ ) ): A__ : int =LoRALayer(module.q_proj , rank=16 ) A__ : Any =LoRALayer(module.k_proj , rank=16 ) A__ : Union[str, Any] =LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch A__ : List[Any] =self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): A__ : Any =model.forward(**lowerCAmelCase_ ) out.logits.norm().backward() for module in model.modules(): if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(lowerCAmelCase_ , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = 'gpt2-xl' __snake_case = 3.3191854854152187
687
0