Dataset schema:

  column                    type      range
  code                      string    lengths 87 to 55.2k
  code_codestyle            int64     0 to 349
  style_context             string    lengths 135 to 49.1k
  style_context_codestyle   int64     0 to 349
  label                     int64     0 to 1
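For orientation, here is a minimal sketch of loading and inspecting rows with this schema via the `datasets` library; the repository id "your-org/code-style-pairs" is a placeholder, since the actual dataset path is not given here.

from datasets import load_dataset

# Placeholder dataset id -- substitute the real repository path.
ds = load_dataset("your-org/code-style-pairs", split="train")

print(ds.features)  # code, code_codestyle, style_context, style_context_codestyle, label
row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # first 200 characters of the obfuscated source

Each row below is reproduced field by field in schema order, starting with the row's code field.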
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}


class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
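The class above is the configuration for `roberta-prelayernorm`; as a quick sanity check, here is a minimal usage sketch, assuming the top-level exports available in recent `transformers` releases:

from transformers import RobertaPreLayerNormConfig, RobertaPreLayerNormModel

# Build a small configuration and a randomly initialized (not pretrained) model.
config = RobertaPreLayerNormConfig(
    hidden_size=256, num_hidden_layers=4, num_attention_heads=4, intermediate_size=1024
)
model = RobertaPreLayerNormModel(config)
print(model.config.model_type)  # "roberta-prelayernorm"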
code_codestyle: 101

style_context:
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } __SCREAMING_SNAKE_CASE : List[Any] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = {} with open(_SCREAMING_SNAKE_CASE , """r""" ) as file: for line_number, line in enumerate(_SCREAMING_SNAKE_CASE ): snake_case_ = line.strip() if line: snake_case_ = line.split() snake_case_ = line_number snake_case_ = words[0] snake_case_ = value return result def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: for attribute in key.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_SCREAMING_SNAKE_CASE ): snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]] snake_case_ = """param""" if weight_type is not None and weight_type != "param": snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape elif weight_type is not None and weight_type == "param": snake_case_ = hf_pointer for attribute in hf_param_name.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = shape_pointer.shape # let's reduce dimension snake_case_ = value[0] else: snake_case_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": snake_case_ = value elif weight_type == "weight_g": snake_case_ = value elif weight_type == "weight_v": snake_case_ = value elif weight_type == "bias": snake_case_ = value elif weight_type == "param": for attribute in hf_param_name.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = value else: snake_case_ = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: snake_case_ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_SCREAMING_SNAKE_CASE ): snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]] snake_case_ = """param""" if weight_type is not None and weight_type != "param": snake_case_ = """.""".join([key, weight_type] ) elif weight_type is not None and weight_type == "param": snake_case_ = """.""".join([key, hf_param_name] ) else: snake_case_ = key snake_case_ = value if """lm_head""" in full_key else value[0] __SCREAMING_SNAKE_CASE : int = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> List[str]: snake_case_ = False for key, mapped_key in MAPPING.items(): snake_case_ = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: snake_case_ = True if "*" in mapped_key: snake_case_ = name.split(_SCREAMING_SNAKE_CASE )[0].split(""".""" )[-2] snake_case_ = mapped_key.replace("""*""" , _SCREAMING_SNAKE_CASE ) if "weight_g" in name: snake_case_ = """weight_g""" elif "weight_v" in name: snake_case_ = """weight_v""" elif "bias" in name: snake_case_ = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj snake_case_ = """weight""" else: snake_case_ = None if hf_dict is not None: rename_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return is_used return is_used def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = [] snake_case_ = fairseq_model.state_dict() snake_case_ = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): snake_case_ = False if "conv_layers" in name: load_conv_layer( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == """group""" , ) snake_case_ = True else: snake_case_ = load_wavaveca_layer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not is_used: unused_weights.append(_SCREAMING_SNAKE_CASE ) logger.warning(f"""Unused weights: {unused_weights}""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: snake_case_ = full_name.split("""conv_layers.""" )[-1] snake_case_ = name.split(""".""" ) snake_case_ = int(items[0] ) snake_case_ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size 
{value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_SCREAMING_SNAKE_CASE ) @torch.no_grad() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False ) -> int: if config_path is not None: snake_case_ = WavaVecaConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) else: snake_case_ = WavaVecaConfig() if is_seq_class: snake_case_ = read_txt_into_dict(_SCREAMING_SNAKE_CASE ) snake_case_ = idalabel snake_case_ = WavaVecaForSequenceClassification(_SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE ) elif is_finetuned: if dict_path: snake_case_ = Dictionary.load(_SCREAMING_SNAKE_CASE ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq snake_case_ = target_dict.pad_index snake_case_ = target_dict.bos_index snake_case_ = target_dict.eos_index snake_case_ = len(target_dict.symbols ) snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , """vocab.json""" ) if not os.path.isdir(_SCREAMING_SNAKE_CASE ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_SCREAMING_SNAKE_CASE ) ) return os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) snake_case_ = target_dict.indices # fairseq has the <pad> and <s> switched snake_case_ = 0 snake_case_ = 1 with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaCTCTokenizer( _SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_SCREAMING_SNAKE_CASE , ) snake_case_ = True if config.feat_extract_norm == """layer""" else False snake_case_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) snake_case_ = WavaVecaProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE ) processor.save_pretrained(_SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaForCTC(_SCREAMING_SNAKE_CASE ) else: snake_case_ = 
WavaVecaForPreTraining(_SCREAMING_SNAKE_CASE ) if is_finetuned or is_seq_class: snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: snake_case_ = argparse.Namespace(task="""audio_pretraining""" ) snake_case_ = fairseq.tasks.setup_task(_SCREAMING_SNAKE_CASE ) snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_SCREAMING_SNAKE_CASE ) snake_case_ = model[0].eval() recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , not is_finetuned ) hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) parser.add_argument( '--is_seq_class', action='store_true', help='Whether the model to convert is a fine-tuned sequence classification model or not', ) __SCREAMING_SNAKE_CASE : Any = parser.parse_args() __SCREAMING_SNAKE_CASE : List[Any] = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
style_context_codestyle: 347
label: 0

code:
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING lowerCamelCase__ =TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Union[str, Any] = TextaTextGenerationPipeline(model=a_ , tokenizer=a_ ) return generator, ["Something to write", "Something else"] def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : Union[str, Any] = generator('''Something there''' ) self.assertEqual(a_ , [{'''generated_text''': ANY(a_ )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]['''generated_text'''].startswith('''Something there''' ) ) __snake_case : List[Any] = generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=a_ ) self.assertEqual( a_ , [ [{'''generated_text''': ANY(a_ )}, {'''generated_text''': ANY(a_ )}], [{'''generated_text''': ANY(a_ )}, {'''generated_text''': ANY(a_ )}], ] , ) __snake_case : Optional[int] = generator( ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=a_ ) self.assertEqual( a_ , [ [{'''generated_text''': ANY(a_ )}, {'''generated_text''': ANY(a_ )}], [{'''generated_text''': ANY(a_ )}, {'''generated_text''': ANY(a_ )}], ] , ) with self.assertRaises(a_ ): generator(4 ) @require_torch def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''pt''' ) # do_sample=False necessary for reproducibility __snake_case : int = generator('''Something there''' , do_sample=a_ ) self.assertEqual(a_ , [{'''generated_text''': ''''''}] ) __snake_case : Optional[int] = 3 __snake_case : int = generator( '''Something there''' , num_return_sequences=a_ , num_beams=a_ , ) __snake_case : Dict = [ {'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide Beide'''}, {'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide'''}, {'''generated_text''': ''''''}, ] self.assertEqual(a_ , a_ ) __snake_case : List[Any] = generator('''This is a test''' , do_sample=a_ , num_return_sequences=2 , return_tensors=a_ ) self.assertEqual( a_ , [ {'''generated_token_ids''': ANY(torch.Tensor )}, {'''generated_token_ids''': ANY(torch.Tensor )}, ] , ) __snake_case : Dict = generator.model.config.eos_token_id __snake_case : Any = '''<pad>''' __snake_case : Tuple = generator( ['''This is a test''', '''This is a second test'''] , do_sample=a_ , num_return_sequences=2 , batch_size=2 , return_tensors=a_ , ) self.assertEqual( a_ , [ [ {'''generated_token_ids''': ANY(torch.Tensor )}, {'''generated_token_ids''': ANY(torch.Tensor )}, ], [ {'''generated_token_ids''': ANY(torch.Tensor )}, {'''generated_token_ids''': ANY(torch.Tensor )}, ], ] , ) @require_tf def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''tf''' ) # do_sample=False 
necessary for reproducibility __snake_case : Union[str, Any] = generator('''Something there''' , do_sample=a_ ) self.assertEqual(a_ , [{'''generated_text''': ''''''}] )
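The tests above exercise the `text2text-generation` pipeline; a minimal sketch of the same call outside the test harness, using the tiny random checkpoint named in the test (which produces empty generations, exactly as the assertion expects):

from transformers import pipeline

generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")

# do_sample=False for reproducibility, as in the test
print(generator("Something there", do_sample=False))  # [{'generated_text': ''}]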
code_codestyle: 102

style_context:
"""simple docstring""" import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class __A : '''simple docstring''' def __init__( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=14 , UpperCAmelCase_ : Union[str, Any]=7 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : str=99 , UpperCAmelCase_ : Union[str, Any]=32 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : str=37 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : int=512 , UpperCAmelCase_ : Tuple=0.02 , ) ->List[str]: """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_input_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = rotary_dim snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = initializer_range snake_case_ = None snake_case_ = vocab_size - 1 snake_case_ = vocab_size - 1 snake_case_ = vocab_size - 1 def lowerCAmelCase ( self : int ) ->Optional[int]: """simple docstring""" snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_input_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=UpperCAmelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def lowerCAmelCase ( self : Dict ) ->Tuple: """simple docstring""" snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowerCAmelCase ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict ) ->Tuple: """simple docstring""" snake_case_ = 20 snake_case_ = model_class_name(UpperCAmelCase_ ) snake_case_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ ) snake_case_ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) snake_case_ = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) 
snake_case_ = model( input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , ) snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) snake_case_ = model( input_ids[:, -1:] , attention_mask=UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=UpperCAmelCase_ , ) snake_case_ = model(UpperCAmelCase_ ) snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict: """simple docstring""" snake_case_ = 20 snake_case_ = model_class_name(UpperCAmelCase_ ) snake_case_ = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) snake_case_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ ) snake_case_ = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) snake_case_ = model( input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , ) snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) snake_case_ = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , ) snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ) snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) @require_flax class __A (snake_case__ , snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Any = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () __lowercase: List[str] = (FlaxGPTJForCausalLM,) if is_flax_available() else () def lowerCAmelCase ( self : Tuple ) ->List[str]: """simple docstring""" snake_case_ = FlaxGPTJModelTester(self ) def lowerCAmelCase ( self : int ) ->List[Any]: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : List[str] ) ->Any: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) @tooslow def lowerCAmelCase ( self : List[str] ) ->Optional[Any]: """simple docstring""" snake_case_ = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" ) snake_case_ = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ ) snake_case_ = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" ) snake_case_ = False snake_case_ = model.config.eos_token_id snake_case_ = jax.jit(model.generate ) snake_case_ = jit_generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences snake_case_ = 
tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = [ """Hello this is a long string of text.\n\nI'm trying to get the text of the""", """Hey, I'm a little late to the party. I'm going to""", ] self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @is_pt_flax_cross_test def lowerCAmelCase ( self : int ) ->str: """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning snake_case_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ , snake_case_ = pt_inputs["""input_ids"""].shape snake_case_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(UpperCAmelCase_ ): snake_case_ = 0 snake_case_ = 1 snake_case_ = 0 snake_case_ = 1 snake_case_ = pt_model_class(UpperCAmelCase_ ).eval() snake_case_ = model_class(UpperCAmelCase_ , dtype=jnp.floataa ) snake_case_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCAmelCase_ ) snake_case_ = fx_state with torch.no_grad(): snake_case_ = pt_model(**UpperCAmelCase_ ).to_tuple() snake_case_ = fx_model(**UpperCAmelCase_ ).to_tuple() self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(UpperCAmelCase_ ) snake_case_ = model_class.from_pretrained(UpperCAmelCase_ , from_pt=UpperCAmelCase_ ) snake_case_ = fx_model_loaded(**UpperCAmelCase_ ).to_tuple() self.assertEqual( len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @is_pt_flax_cross_test def lowerCAmelCase ( self : List[Any] ) ->str: """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning snake_case_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = pt_model_class(UpperCAmelCase_ ).eval() snake_case_ = model_class(UpperCAmelCase_ , dtype=jnp.floataa ) snake_case_ = load_flax_weights_in_pytorch_model(UpperCAmelCase_ , fx_model.params ) snake_case_ , snake_case_ = pt_inputs["""input_ids"""].shape snake_case_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(UpperCAmelCase_ ): snake_case_ = 0 snake_case_ = 1 snake_case_ = 0 snake_case_ = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): snake_case_ = pt_model(**UpperCAmelCase_ ).to_tuple() snake_case_ = fx_model(**UpperCAmelCase_ 
).to_tuple() self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(UpperCAmelCase_ ) snake_case_ = pt_model_class.from_pretrained(UpperCAmelCase_ , from_flax=UpperCAmelCase_ ) with torch.no_grad(): snake_case_ = pt_model_loaded(**UpperCAmelCase_ ).to_tuple() self.assertEqual( len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @tooslow def lowerCAmelCase ( self : List[Any] ) ->str: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" ) snake_case_ = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCAmelCase_ )
style_context_codestyle: 347
label: 0

code:
from ..utils import DummyObject, requires_backends class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Optional[int] , *A_ : List[str] , **A_ : List[Any]): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Optional[Any] , *A_ : str , **A_ : Any): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Union[str, Any] , *A_ : Union[str, Any] , **A_ : Dict): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Optional[int] , *A_ : Tuple , **A_ : Tuple): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : int , *A_ : Optional[int] , **A_ : int): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Optional[Any] , *A_ : List[str] , **A_ : List[str]): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Optional[Any] , *A_ : List[Any] , **A_ : str): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : List[str] , *A_ : List[str] , **A_ : Union[str, Any]): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : List[str] , *A_ : Optional[int] , **A_ : Any): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : int , *A_ : Dict , **A_ : int): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Tuple , *A_ : List[Any] , **A_ : Optional[Any]): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : List[str] , *A_ : int , **A_ : Tuple): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Optional[Any] , *A_ : Optional[int] , **A_ : Union[str, Any]): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Union[str, Any] , *A_ : Union[str, Any] , **A_ : List[Any]): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Optional[int] , *A_ : Tuple , **A_ : Tuple): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Optional[int] , *A_ : Dict , **A_ : Union[str, Any]): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : List[Any] , *A_ : Tuple , **A_ : List[str]): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Union[str, Any] , *A_ : Optional[int] , **A_ : Optional[int]): requires_backends(self , 
['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Optional[Any] , *A_ : List[Any] , **A_ : str): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : List[Any] , *A_ : List[Any] , **A_ : Union[str, Any]): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : str , *A_ : Optional[int] , **A_ : List[Any]): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : List[str] , *A_ : List[Any] , **A_ : Optional[Any]): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : str , *A_ : Optional[int] , **A_ : Dict): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : int , *A_ : int , **A_ : Optional[int]): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : List[Any] , *A_ : List[str] , **A_ : Tuple): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Union[str, Any] , *A_ : Any , **A_ : Union[str, Any]): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Dict , *A_ : List[str] , **A_ : List[str]): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : str , *A_ : List[Any] , **A_ : Optional[Any]): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Tuple , *A_ : Optional[Any] , **A_ : int): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Optional[Any] , *A_ : Optional[Any] , **A_ : Dict): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Tuple , *A_ : Optional[int] , **A_ : Union[str, Any]): requires_backends(self , ['''sentencepiece'''])
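The block above defines sentencepiece "dummy objects"; the original class names are obfuscated away and cannot be recovered, but the mechanism matches transformers' dummy-object pattern. A sketch under that assumption (the class name here is hypothetical):

from transformers.utils import DummyObject, requires_backends


class SomeSentencePieceTokenizer(metaclass=DummyObject):  # hypothetical name
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        # Raises an ImportError with install instructions when the
        # sentencepiece backend is missing from the environment.
        requires_backends(self, ["sentencepiece"])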
code_codestyle: 103

style_context:
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING __SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) class __A (snake_case__): '''simple docstring''' __lowercase: int = """upernet""" def __init__( self : str , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : str=512 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Optional[Any]=[1, 2, 3, 6] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Tuple=0.4 , UpperCAmelCase_ : Tuple=384 , UpperCAmelCase_ : Union[str, Any]=256 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : Tuple=255 , **UpperCAmelCase_ : Dict , ) ->Union[str, Any]: """simple docstring""" super().__init__(**UpperCAmelCase_ ) if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) snake_case_ = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): snake_case_ = backbone_config.get("""model_type""" ) snake_case_ = CONFIG_MAPPING[backbone_model_type] snake_case_ = config_class.from_dict(UpperCAmelCase_ ) snake_case_ = backbone_config snake_case_ = hidden_size snake_case_ = initializer_range snake_case_ = pool_scales snake_case_ = use_auxiliary_head snake_case_ = auxiliary_loss_weight snake_case_ = auxiliary_in_channels snake_case_ = auxiliary_channels snake_case_ = auxiliary_num_convs snake_case_ = auxiliary_concat_input snake_case_ = loss_ignore_index def lowerCAmelCase ( self : str ) ->Optional[Any]: """simple docstring""" snake_case_ = copy.deepcopy(self.__dict__ ) snake_case_ = self.backbone_config.to_dict() snake_case_ = self.__class__.model_type return output
style_context_codestyle: 347
label: 0

code:
from __future__ import annotations

from random import random


class Node:
    """Treap node: stores a value and a random heap priority."""

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good by!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
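A short driver for the treap above, using its insert/erase/inorder helpers instead of the interactive loop:

root = None
for value in [5, 3, 8, 3]:
    root = insert(root, value)
inorder(root)  # 3,3,5,8,
print()

root = erase(root, 3)  # removes every node holding 3
inorder(root)  # 5,8,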
code_codestyle: 104

style_context:
"""simple docstring""" import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class __A (unittest.TestCase): '''simple docstring''' def lowerCAmelCase ( self : Optional[int] ) ->Dict: """simple docstring""" snake_case_ = """ylacombe/bark-small""" snake_case_ = tempfile.mkdtemp() snake_case_ = """en_speaker_1""" snake_case_ = """This is a test string""" snake_case_ = """speaker_embeddings_path.json""" snake_case_ = """speaker_embeddings""" def lowerCAmelCase ( self : List[str] , **UpperCAmelCase_ : str ) ->Optional[int]: """simple docstring""" return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase_ ) def lowerCAmelCase ( self : Any ) ->Dict: """simple docstring""" shutil.rmtree(self.tmpdirname ) def lowerCAmelCase ( self : List[Any] ) ->Dict: """simple docstring""" snake_case_ = self.get_tokenizer() snake_case_ = BarkProcessor(tokenizer=UpperCAmelCase_ ) processor.save_pretrained(self.tmpdirname ) snake_case_ = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def lowerCAmelCase ( self : Dict ) ->int: """simple docstring""" snake_case_ = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) snake_case_ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) snake_case_ = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def lowerCAmelCase ( self : Optional[Any] ) ->Any: """simple docstring""" snake_case_ = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) snake_case_ = 35 snake_case_ = 2 snake_case_ = 8 snake_case_ = { """semantic_prompt""": np.ones(UpperCAmelCase_ ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset snake_case_ = processor(text=self.input_string , voice_preset=UpperCAmelCase_ ) snake_case_ = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file snake_case_ = os.path.join(self.tmpdirname , """file.npz""" ) np.savez(UpperCAmelCase_ , **UpperCAmelCase_ ) snake_case_ = processor(text=self.input_string , voice_preset=UpperCAmelCase_ ) snake_case_ = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from the hub snake_case_ = processor(text=self.input_string , voice_preset=self.voice_preset ) def lowerCAmelCase ( self : Tuple ) ->Dict: """simple docstring""" snake_case_ = self.get_tokenizer() snake_case_ = BarkProcessor(tokenizer=UpperCAmelCase_ ) snake_case_ = processor(text=self.input_string ) snake_case_ = tokenizer( self.input_string , padding="""max_length""" 
, max_length=256 , add_special_tokens=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
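Outside the test harness, the processor API exercised above can be used roughly like this (a sketch; "suno/bark-small" is the public checkpoint, while the test uses the "ylacombe/bark-small" mirror):

from transformers import BarkProcessor

processor = BarkProcessor.from_pretrained("suno/bark-small")
inputs = processor(text="This is a test string", voice_preset="en_speaker_1")
print(inputs.keys())  # includes "input_ids" and, with a voice preset, "history_prompt"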
style_context_codestyle: 347
label: 0

code:
"""simple docstring""" a : Tuple = ''' # Transformers 설치 방법 ! pip install transformers datasets # 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요. # ! pip install git+https://github.com/huggingface/transformers.git ''' a : Optional[int] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}] a : Union[str, Any] = { '''{processor_class}''': '''FakeProcessorClass''', '''{model_class}''': '''FakeModelClass''', '''{object_class}''': '''FakeObjectClass''', }
code_codestyle: 105

style_context:
"""simple docstring""" import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 __SCREAMING_SNAKE_CASE : int = sys.version_info >= (3, 10) def _a ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE ) @dataclass class __A : '''simple docstring''' __lowercase: int __lowercase: float __lowercase: str __lowercase: bool @dataclass class __A : '''simple docstring''' __lowercase: int = 42 __lowercase: str = field(default="""toto""" , metadata={"""help""": """help message"""}) @dataclass class __A : '''simple docstring''' __lowercase: bool = False __lowercase: bool = True __lowercase: Optional[bool] = None class __A (snake_case__): '''simple docstring''' __lowercase: str = """titi""" __lowercase: Any = """toto""" class __A (snake_case__): '''simple docstring''' __lowercase: int = """titi""" __lowercase: Optional[Any] = """toto""" __lowercase: List[Any] = 42 @dataclass class __A : '''simple docstring''' __lowercase: BasicEnum = "toto" def lowerCAmelCase ( self : int ) ->List[Any]: """simple docstring""" snake_case_ = BasicEnum(self.foo ) @dataclass class __A : '''simple docstring''' __lowercase: MixedTypeEnum = "toto" def lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]: """simple docstring""" snake_case_ = MixedTypeEnum(self.foo ) @dataclass class __A : '''simple docstring''' __lowercase: Optional[int] = None __lowercase: Optional[float] = field(default=snake_case__ , metadata={"""help""": """help message"""}) __lowercase: Optional[str] = None __lowercase: Optional[List[str]] = list_field(default=[]) __lowercase: Optional[List[int]] = list_field(default=[]) @dataclass class __A : '''simple docstring''' __lowercase: List[int] = list_field(default=[]) __lowercase: List[int] = list_field(default=[1, 2, 3]) __lowercase: List[str] = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) __lowercase: List[float] = list_field(default=[0.1, 0.2, 0.3]) @dataclass class __A : '''simple docstring''' __lowercase: List[int] = field() __lowercase: str = field() __lowercase: BasicEnum = field() def lowerCAmelCase ( self : Any ) ->str: """simple docstring""" snake_case_ = BasicEnum(self.required_enum ) @dataclass class __A : '''simple docstring''' __lowercase: int __lowercase: "BasicEnum" = field() __lowercase: "Optional[bool]" = None __lowercase: "str" = field(default="""toto""" , metadata={"""help""": """help message"""}) __lowercase: "List[str]" = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) if is_python_no_less_than_3_10: @dataclass class __A : '''simple docstring''' __lowercase: bool = False __lowercase: bool = True __lowercase: bool | None = None @dataclass class __A : '''simple docstring''' __lowercase: int | None = None __lowercase: float | None = field(default=snake_case__ , metadata={"""help""": """help message"""}) __lowercase: str | None = None __lowercase: list[str] | None = list_field(default=[]) __lowercase: list[int] | None = list_field(default=[]) class __A (unittest.TestCase): '''simple docstring''' def lowerCAmelCase ( 
self : Optional[int] , UpperCAmelCase_ : argparse.ArgumentParser , UpperCAmelCase_ : argparse.ArgumentParser ) ->Optional[int]: """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""} snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get("""choices""" , UpperCAmelCase_ ) and yy.get("""choices""" , UpperCAmelCase_ ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx["""type"""](UpperCAmelCase_ ) , yy["""type"""](UpperCAmelCase_ ) ) del xx["type"], yy["type"] self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : int ) ->Any: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--bar""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--baz""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--flag""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""] ((snake_case_) , ) = parser.parse_args_into_dataclasses(UpperCAmelCase_ , look_for_args_file=UpperCAmelCase_ ) self.assertFalse(example.flag ) def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , default=42 , type=UpperCAmelCase_ ) expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]: """simple docstring""" snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) expected.add_argument("""--baz""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument("""--no_baz""" , action="""store_false""" , default=UpperCAmelCase_ , dest="""baz""" ) expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ ) snake_case_ = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCAmelCase_ ) for dataclass_type in dataclass_types: snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """--no_baz"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """--baz"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = 
parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) def lowerCAmelCase ( self : int ) ->List[str]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument( """--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(args.foo , """toto""" ) snake_case_ = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) snake_case_ = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo , """titi""" ) snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) snake_case_ = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo , 42 ) snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def lowerCAmelCase ( self : Dict ) ->str: """simple docstring""" @dataclass class __A : '''simple docstring''' __lowercase: Literal["titi", "toto", 42] = "toto" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument( """--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(args.foo , """toto""" ) snake_case_ = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo , """titi""" ) snake_case_ = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo , 42 ) def lowerCAmelCase ( self : Optional[int] ) ->Dict: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=UpperCAmelCase_ ) expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ ) expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual( UpperCAmelCase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , ) snake_case_ = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() ) self.assertEqual(UpperCAmelCase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ ) expected.add_argument("""--bar""" , 
default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="""help message""" ) expected.add_argument("""--baz""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ ) expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) snake_case_ = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCAmelCase_ ) for dataclass_type in dataclass_types: snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , bar=UpperCAmelCase_ , baz=UpperCAmelCase_ , ces=[] , des=[] ) ) snake_case_ = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) ) def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--required_list""" , nargs="""+""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--required_str""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument( """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument( """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , ) expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ ) expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" ) expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict ) ->Tuple: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } snake_case_ = parser.parse_dict(UpperCAmelCase_ )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, """extra""": 42, } self.assertRaises(UpperCAmelCase_ , parser.parse_dict , UpperCAmelCase_ , allow_extra_keys=UpperCAmelCase_ ) def lowerCAmelCase ( self : Any ) ->Dict: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ = os.path.join(UpperCAmelCase_ , """temp_json""" ) os.mkdir(UpperCAmelCase_ ) with open(temp_local_path + """.json""" , """w+""" ) as f: json.dump(UpperCAmelCase_ , UpperCAmelCase_ ) 
snake_case_ = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[int] ) ->List[str]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ = os.path.join(UpperCAmelCase_ , """temp_yaml""" ) os.mkdir(UpperCAmelCase_ ) with open(temp_local_path + """.yaml""" , """w+""" ) as f: yaml.dump(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict ) ->Any: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ )
347
0
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( A_ ): lowerCAmelCase__ : int = [0 for i in range(len(A_ ) )] # initialize interval's left pointer and right pointer lowerCAmelCase__ ,lowerCAmelCase__ : Tuple = 0, 0 for i in range(1 , len(A_ ) ): # case when current index is inside the interval if i <= right_pointer: lowerCAmelCase__ : Optional[int] = min(right_pointer - i + 1 , z_result[i - left_pointer] ) lowerCAmelCase__ : Tuple = min_edge while go_next(A_ , A_ , A_ ): z_result[i] += 1 # if new index's result gives us more right interval, # we've to update left_pointer and right_pointer if i + z_result[i] - 1 > right_pointer: lowerCAmelCase__ ,lowerCAmelCase__ : List[Any] = i, i + z_result[i] - 1 return z_result def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ ): return i + z_result[i] < len(A_ ) and s[z_result[i]] == s[i + z_result[i]] def __SCREAMING_SNAKE_CASE ( A_ , A_ ): lowerCAmelCase__ : Dict = 0 # concatenate 'pattern' and 'input_str' and call z_function # with concatenated string lowerCAmelCase__ : List[str] = z_function(pattern + input_str ) for val in z_result: # if value is greater then length of the pattern string # that means this index is starting position of substring # which is equal to pattern string if val >= len(A_ ): answer += 1 return answer if __name__ == "__main__": import doctest doctest.testmod()
106
"""simple docstring""" import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy __SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , ) -> Optional[Any]: snake_case_ = bnb_quantization_config.load_in_abit snake_case_ = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,""" """ make sure you have the latest version of `bitsandbytes` installed.""" ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,""" """make sure you have the latest version of `bitsandbytes` installed.""" ) snake_case_ = [] # custom device map if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(device_map.keys() ) > 1: snake_case_ = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: snake_case_ = get_keys_to_not_convert(_SCREAMING_SNAKE_CASE ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(_SCREAMING_SNAKE_CASE ) snake_case_ = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: snake_case_ = [] snake_case_ = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(_SCREAMING_SNAKE_CASE ) # compatibility with peft snake_case_ = load_in_abit snake_case_ = load_in_abit snake_case_ = get_parameter_device(_SCREAMING_SNAKE_CASE ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( """It is not recommended to quantize a loaded model. 
""" """The model should be instantiated under the `init_empty_weights` context manager.""" ) snake_case_ = replace_with_bnb_layers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) # convert param to the right dtype snake_case_ = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: snake_case_ = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" ) snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(_SCREAMING_SNAKE_CASE ): param.to(_SCREAMING_SNAKE_CASE ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" ) logger.info( f"""The model device type is {model_device.type}. However, cuda is needed for quantization.""" """We move the model to cuda.""" ) return model elif weights_location is None: raise RuntimeError( f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): snake_case_ = replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) snake_case_ = get_quantized_model_device_map( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_memory=_SCREAMING_SNAKE_CASE , no_split_module_classes=_SCREAMING_SNAKE_CASE , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): snake_case_ = True snake_case_ = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] ) load_checkpoint_in_model( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=_SCREAMING_SNAKE_CASE , offload_state_dict=_SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(_SCREAMING_SNAKE_CASE , device_map=_SCREAMING_SNAKE_CASE , offload_dir=_SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: if device_map is None: if torch.cuda.is_available(): snake_case_ = {"""""": torch.cuda.current_device()} else: raise RuntimeError("""No GPU found. 
A GPU is needed for quantization.""" ) logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( """If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """ """'sequential'.""" ) snake_case_ = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) snake_case_ = {} snake_case_ = special_dtypes snake_case_ = no_split_module_classes snake_case_ = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": snake_case_ = get_balanced_memory( _SCREAMING_SNAKE_CASE , low_zero=(device_map == """balanced_low_0""") , max_memory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) snake_case_ = max_memory snake_case_ = infer_auto_device_map(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # check if don't have any quantized module on the cpu snake_case_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules snake_case_ = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( """ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. """ ) else: logger.info( """Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit""" ) del device_map_without_some_modules return device_map def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: if modules_to_not_convert is None: snake_case_ = [] snake_case_ , snake_case_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return model def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) -> List[Any]: snake_case_ = False for name, module in model.named_children(): if current_key_name is None: snake_case_ = [] current_key_name.append(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` snake_case_ = """.""".join(_SCREAMING_SNAKE_CASE ) snake_case_ = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: snake_case_ = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: snake_case_ = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: snake_case_ = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" ) snake_case_ = module.weight.data if module.bias is not None: snake_case_ = module.bias.data bnb_module.requires_grad_(_SCREAMING_SNAKE_CASE ) setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = True if len(list(module.children() ) ) > 0: snake_case_ , snake_case_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _a ( _SCREAMING_SNAKE_CASE ) -> Any: # Create a copy of the model with init_empty_weights(): snake_case_ = deepcopy(_SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside `init_empty_weights` context manager` snake_case_ = find_tied_parameters(_SCREAMING_SNAKE_CASE ) # For compatibility with Accelerate < 0.18 if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): snake_case_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: snake_case_ = sum(_SCREAMING_SNAKE_CASE , [] ) snake_case_ = len(_SCREAMING_SNAKE_CASE ) > 0 # Check if it is a base model snake_case_ = False if hasattr(_SCREAMING_SNAKE_CASE , """base_model_prefix""" ): snake_case_ = not hasattr(_SCREAMING_SNAKE_CASE , model.base_model_prefix 
) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head snake_case_ = list(model.named_children() ) snake_case_ = [list_modules[-1][0]] # add last module together with tied weights snake_case_ = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE ) snake_case_ = list(set(_SCREAMING_SNAKE_CASE ) ) + list(_SCREAMING_SNAKE_CASE ) # remove ".weight" from the keys snake_case_ = [""".weight""", """.bias"""] snake_case_ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: snake_case_ = name.replace(_SCREAMING_SNAKE_CASE , """""" ) filtered_module_names.append(_SCREAMING_SNAKE_CASE ) return filtered_module_names def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: for m in model.modules(): if isinstance(_SCREAMING_SNAKE_CASE , bnb.nn.Linearabit ): return True return False def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: return next(parameter.parameters() ).device def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0 , dtype=_SCREAMING_SNAKE_CASE , value=_SCREAMING_SNAKE_CASE ) snake_case_ = param_name snake_case_ = model if "." in tensor_name: snake_case_ = tensor_name.split(""".""" ) for split in splits[:-1]: snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if new_module is None: raise ValueError(f"""{module} has no attribute {split}.""" ) snake_case_ = new_module snake_case_ = splits[-1] # offload weights snake_case_ = False offload_weight(module._parameters[tensor_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) if hasattr(module._parameters[tensor_name] , """SCB""" ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE , ) else: offload_weight(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) offload_weight(_SCREAMING_SNAKE_CASE , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """meta""" , dtype=_SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
347
0
def logical_left_shift(number: int, shift_amount: int) -> str:
    '''simple docstring'''
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    '''simple docstring'''
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    '''simple docstring'''
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])
        # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
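# Hedged usage sketch, assuming the three shift helpers reconstructed above.
# Logical shifts treat the value as an unsigned bit string; the arithmetic
# right shift copies the sign bit of the two's-complement encoding.
assert logical_left_shift(0b1010, 2) == "0b101000"   # 10 << 2 == 40
assert logical_right_shift(0b1010, 2) == "0b10"      # 10 >> 2 == 2
assert arithmetic_right_shift(-8, 2) == "0b11110"    # -8 >> 2 == -2 in 5 bits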
107
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { 'microsoft/beit-base-patch16-224-pt22k': ( 'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class __A (snake_case__): '''simple docstring''' __lowercase: Optional[int] = """beit""" def __init__( self : List[str] , UpperCAmelCase_ : List[Any]=8_192 , UpperCAmelCase_ : Dict=768 , UpperCAmelCase_ : int=12 , UpperCAmelCase_ : Tuple=12 , UpperCAmelCase_ : List[Any]=3_072 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : Dict=0.0 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : Optional[Any]=1E-12 , UpperCAmelCase_ : int=224 , UpperCAmelCase_ : Tuple=16 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Dict=[3, 5, 7, 11] , UpperCAmelCase_ : Tuple=[1, 2, 3, 6] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : List[Any]=0.4 , UpperCAmelCase_ : Optional[Any]=256 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Tuple=255 , **UpperCAmelCase_ : List[str] , ) ->Optional[Any]: """simple docstring""" super().__init__(**UpperCAmelCase_ ) snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = image_size snake_case_ = patch_size snake_case_ = num_channels snake_case_ = use_mask_token snake_case_ = use_absolute_position_embeddings snake_case_ = use_relative_position_bias snake_case_ = use_shared_relative_position_bias snake_case_ = layer_scale_init_value snake_case_ = drop_path_rate snake_case_ = use_mean_pooling # decode head attributes (semantic segmentation) snake_case_ = out_indices snake_case_ = pool_scales # auxiliary head attributes (semantic segmentation) snake_case_ = use_auxiliary_head snake_case_ = auxiliary_loss_weight snake_case_ = auxiliary_channels snake_case_ = auxiliary_num_convs snake_case_ = auxiliary_concat_input snake_case_ = semantic_loss_ignore_index class __A (snake_case__): '''simple docstring''' __lowercase: List[Any] = version.parse("""1.11""") @property def lowerCAmelCase ( self : Dict ) ->Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCAmelCase ( self : Any ) ->float: """simple docstring""" return 1E-4
347
0
"""simple docstring""" from diffusers.utils.testing_utils import require_onnxruntime @require_onnxruntime class SCREAMING_SNAKE_CASE__ : """simple docstring""" pass
108
"""simple docstring""" import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging __SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : List[Any] = {'vocab_file': 'spiece.model'} __SCREAMING_SNAKE_CASE : int = { 'vocab_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model', 't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model', 't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model', } } # TODO(PVP) - this should be removed in Transformers v5 __SCREAMING_SNAKE_CASE : Dict = { 't5-small': 512, 't5-base': 512, 't5-large': 512, 't5-3b': 512, 't5-11b': 512, } __SCREAMING_SNAKE_CASE : Optional[int] = '▁' class __A (snake_case__): '''simple docstring''' __lowercase: Optional[int] = VOCAB_FILES_NAMES __lowercase: Any = PRETRAINED_VOCAB_FILES_MAP __lowercase: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase: List[str] = ["""input_ids""", """attention_mask"""] def __init__( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any]="</s>" , UpperCAmelCase_ : Optional[Any]="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Tuple=100 , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , UpperCAmelCase_ : Optional[int]=True , **UpperCAmelCase_ : Dict , ) ->None: """simple docstring""" if extra_ids > 0 and additional_special_tokens is None: snake_case_ = [F"""<extra_id_{i}>""" for i in range(UpperCAmelCase_ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens snake_case_ = len(set(filter(lambda UpperCAmelCase_ : bool("""extra_id""" in str(UpperCAmelCase_ ) ) , UpperCAmelCase_ ) ) ) if extra_tokens != extra_ids: raise ValueError( F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are""" """ provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids""" """ tokens""" ) if legacy: logger.warning_once( F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to""" """ read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" ) snake_case_ = legacy snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , extra_ids=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , legacy=UpperCAmelCase_ , **UpperCAmelCase_ , ) snake_case_ = vocab_file snake_case_ = extra_ids snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCAmelCase_ ) @staticmethod def lowerCAmelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] ) ->Union[str, Any]: """simple docstring""" if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: snake_case_ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( """This tokenizer was incorrectly instantiated with a model max length of""" F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this""" """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with""" """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on""" F""" {pretrained_model_name_or_path} automatically truncating your input to""" F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences""" F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with""" """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please""" """ instantiate this tokenizer with `model_max_length` set to your preferred value.""" , UpperCAmelCase_ , ) return max_model_length @property def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]: """simple docstring""" return self.sp_model.get_piece_size() + self._extra_ids def lowerCAmelCase ( self : Any ) ->Optional[int]: """simple docstring""" snake_case_ = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False ) ->List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(UpperCAmelCase_ )) + [1] return ([0] * len(UpperCAmelCase_ )) + [1] + ([0] * len(UpperCAmelCase_ )) + [1] def lowerCAmelCase ( self : Any ) ->List[str]: """simple docstring""" return list( set(filter(lambda UpperCAmelCase_ : bool(re.search(R"""<extra_id_\d+>""" , UpperCAmelCase_ ) ) is not None , self.additional_special_tokens ) ) ) def lowerCAmelCase ( self : Dict ) ->str: """simple docstring""" return [self._convert_token_to_id(UpperCAmelCase_ ) for token in self.get_sentinel_tokens()] def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : List[int] ) ->List[int]: """simple docstring""" if len(UpperCAmelCase_ ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F"""This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated""" """ eos tokens being added.""" ) return token_ids else: return token_ids + [self.eos_token_id] def lowerCAmelCase ( self : str , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]: """simple docstring""" snake_case_ = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]: """simple docstring""" snake_case_ = self._add_eos_if_not_present(UpperCAmelCase_ ) if token_ids_a is None: return token_ids_a else: snake_case_ = self._add_eos_if_not_present(UpperCAmelCase_ ) return token_ids_a + token_ids_a def __getstate__( self : Optional[Any] ) ->Tuple: """simple docstring""" snake_case_ = self.__dict__.copy() snake_case_ = None return state def __setstate__( self : Optional[Any] , UpperCAmelCase_ : List[Any] ) ->List[Any]: """simple docstring""" snake_case_ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): snake_case_ = {} snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCAmelCase ( self : int , UpperCAmelCase_ : "TextInput" , **UpperCAmelCase_ : Tuple ) ->List[str]: """simple docstring""" if not self.legacy: snake_case_ = SPIECE_UNDERLINE + text.replace(UpperCAmelCase_ , """ """ ) return super().tokenize(UpperCAmelCase_ , **UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any ) ->Tuple: """simple docstring""" if not self.legacy: snake_case_ = text.startswith(UpperCAmelCase_ ) if is_first: snake_case_ = text[1:] snake_case_ = self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ ) if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(UpperCAmelCase_ ): snake_case_ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" if token.startswith("""<extra_id_""" ): snake_case_ = re.match(R"""<extra_id_(\d+)>""" , UpperCAmelCase_ ) snake_case_ = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(UpperCAmelCase_ ) def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Optional[Any] ) ->List[Any]: """simple docstring""" if index < self.sp_model.get_piece_size(): snake_case_ = self.sp_model.IdToPiece(UpperCAmelCase_ ) else: snake_case_ = F"""<extra_id_{self.vocab_size - 1 - index}>""" return token def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : List[str] ) ->Optional[Any]: """simple docstring""" snake_case_ = [] snake_case_ = """""" snake_case_ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(UpperCAmelCase_ ) + token snake_case_ = True snake_case_ = [] else: current_sub_tokens.append(UpperCAmelCase_ ) snake_case_ = False out_string += self.sp_model.decode(UpperCAmelCase_ ) return out_string.strip() def lowerCAmelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ) ->Tuple[str]: """simple docstring""" if not os.path.isdir(UpperCAmelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return snake_case_ = 
os.path.join( UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase_ , """wb""" ) as fi: snake_case_ = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase_ ) return (out_vocab_file,)
347
0
"""simple docstring""" def _snake_case ( UpperCamelCase : int ): if length <= 0 or not isinstance(UpperCamelCase , UpperCamelCase ): raise ValueError("""Length must be a positive integer.""" ) return [n * (2 * n - 1) for n in range(UpperCamelCase )] if __name__ == "__main__": print(hexagonal_numbers(length=5)) print(hexagonal_numbers(length=1_0))
109
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE = 1_000_000 ) -> int: snake_case_ = [i - 1 for i in range(limit + 1 )] for i in range(2 , limit + 1 ): if phi[i] == i - 1: for j in range(2 * i , limit + 1 , _SCREAMING_SNAKE_CASE ): phi[j] -= phi[j] // i return sum(phi[2 : limit + 1] ) if __name__ == "__main__": print(solution())
347
0
from __future__ import annotations

from fractions import Fraction
from math import gcd, sqrt


def is_sq(number: int) -> bool:
    """simple docstring"""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """simple docstring"""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    """simple docstring"""
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
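# Hedged illustration of the reduction step used in solution() above:
# dividing out the gcd canonicalises each fraction, so equal values
# collide in the unique_s set instead of being counted twice.
from fractions import Fraction
from math import gcd

z_num, z_den = 6, 8
hcf = gcd(z_num, z_den)
assert (z_num // hcf, z_den // hcf) == (3, 4)
assert Fraction(6, 8) == Fraction(3, 4)  # Fraction normalises the same way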
305
"""simple docstring""" from __future__ import annotations def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: print(f"""Vertex\tShortest Distance from vertex {src}""" ) for i, d in enumerate(_SCREAMING_SNAKE_CASE ): print(f"""{i}\t\t{d}""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: for j in range(_SCREAMING_SNAKE_CASE ): snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: return True return False def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> list[float]: snake_case_ = [float("""inf""" )] * vertex_count snake_case_ = 0.0 for _ in range(vertex_count - 1 ): for j in range(_SCREAMING_SNAKE_CASE ): snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: snake_case_ = distance[u] + w snake_case_ = check_negative_cycle(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if negative_cycle_exists: raise Exception("""Negative cycle found""" ) return distance if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE : int = int(input('Enter number of vertices: ').strip()) __SCREAMING_SNAKE_CASE : Dict = int(input('Enter number of edges: ').strip()) __SCREAMING_SNAKE_CASE : list[dict[str, int]] = [{} for _ in range(E)] for i in range(E): print('Edge ', i + 1) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = ( int(x) for x in input('Enter source, destination, weight: ').strip().split(' ') ) __SCREAMING_SNAKE_CASE : Union[str, Any] = {'src': src, 'dst': dest, 'weight': weight} __SCREAMING_SNAKE_CASE : Union[str, Any] = int(input('\nEnter shortest path source:').strip()) __SCREAMING_SNAKE_CASE : str = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
347
0
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
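# Hedged usage sketch for are_collinear above (the public function name is a
# reconstruction): points on the line x = y = z give a zero cross product.
assert are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2)) is True
assert are_collinear((0, 0, 0), (1, 0, 0), (0, 1, 0)) is False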
62
"""simple docstring""" import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) __SCREAMING_SNAKE_CASE : List[str] = logging.getLogger(__name__) __SCREAMING_SNAKE_CASE : str = tf.data.AUTOTUNE def _a ( ) -> List[str]: snake_case_ = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" ) parser.add_argument( """--pretrained_model_config""" , type=_SCREAMING_SNAKE_CASE , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , ) parser.add_argument( """--tokenizer""" , type=_SCREAMING_SNAKE_CASE , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , ) parser.add_argument( """--per_replica_batch_size""" , type=_SCREAMING_SNAKE_CASE , default=8 , help="""Batch size per TPU core.""" , ) parser.add_argument( """--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , ) parser.add_argument( """--tpu_name""" , type=_SCREAMING_SNAKE_CASE , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , ) parser.add_argument( """--tpu_zone""" , type=_SCREAMING_SNAKE_CASE , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , ) parser.add_argument( """--gcp_project""" , type=_SCREAMING_SNAKE_CASE , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" ) parser.add_argument( """--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , ) parser.add_argument( """--train_dataset""" , type=_SCREAMING_SNAKE_CASE , help="""Path to training dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , ) parser.add_argument( """--shuffle_buffer_size""" , type=_SCREAMING_SNAKE_CASE , default=2**18 , help="""Size of the shuffle buffer (in samples)""" , ) parser.add_argument( """--eval_dataset""" , type=_SCREAMING_SNAKE_CASE , help="""Path to evaluation dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , ) parser.add_argument( """--num_epochs""" , type=_SCREAMING_SNAKE_CASE , default=1 , help="""Number of epochs to train for.""" , ) parser.add_argument( """--learning_rate""" , type=_SCREAMING_SNAKE_CASE , default=1E-4 , help="""Learning rate to use for training.""" , ) parser.add_argument( """--weight_decay_rate""" , type=_SCREAMING_SNAKE_CASE , default=1E-3 , help="""Weight decay rate to use for training.""" , ) parser.add_argument( """--max_length""" , type=_SCREAMING_SNAKE_CASE , default=512 , help="""Maximum length of tokenized sequences. 
Should match the setting used in prepare_tfrecord_shards.py""" , ) parser.add_argument( """--mlm_probability""" , type=_SCREAMING_SNAKE_CASE , default=0.15 , help="""Fraction of tokens to mask during training.""" , ) parser.add_argument("""--output_dir""" , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""Path to save model checkpoints to.""" ) parser.add_argument("""--hub_model_id""" , type=_SCREAMING_SNAKE_CASE , help="""Model ID to upload to on the Hugging Face Hub.""" ) snake_case_ = parser.parse_args() return args def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]: try: if args.tpu_name: snake_case_ = tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name , zone=args.tpu_zone , project=args.gcp_project ) else: snake_case_ = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( """Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """ """--gcp_project. When running on a TPU VM, use --tpu_name local.""" ) tf.config.experimental_connect_to_cluster(_SCREAMING_SNAKE_CASE ) tf.tpu.experimental.initialize_tpu_system(_SCREAMING_SNAKE_CASE ) return tpu def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = 0 for file in file_list: snake_case_ = file.split("""/""" )[-1] snake_case_ = re.search(r"""-\d+-(\d+)\.tfrecord""" , _SCREAMING_SNAKE_CASE ).group(1 ) snake_case_ = int(_SCREAMING_SNAKE_CASE ) num_samples += sample_count return num_samples def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]: snake_case_ = count_samples(_SCREAMING_SNAKE_CASE ) snake_case_ = tf.data.Dataset.from_tensor_slices(_SCREAMING_SNAKE_CASE ) if shuffle: snake_case_ = dataset.shuffle(len(_SCREAMING_SNAKE_CASE ) ) snake_case_ = tf.data.TFRecordDataset(_SCREAMING_SNAKE_CASE , num_parallel_reads=_SCREAMING_SNAKE_CASE ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here snake_case_ = dataset.apply(tf.data.experimental.assert_cardinality(_SCREAMING_SNAKE_CASE ) ) snake_case_ = dataset.map(_SCREAMING_SNAKE_CASE , num_parallel_calls=_SCREAMING_SNAKE_CASE ) if shuffle: assert shuffle_buffer_size is not None snake_case_ = dataset.shuffle(args.shuffle_buffer_size ) snake_case_ = dataset.batch(_SCREAMING_SNAKE_CASE , drop_remainder=_SCREAMING_SNAKE_CASE ) snake_case_ = dataset.map(_SCREAMING_SNAKE_CASE , num_parallel_calls=_SCREAMING_SNAKE_CASE ) snake_case_ = dataset.prefetch(_SCREAMING_SNAKE_CASE ) return dataset def _a ( _SCREAMING_SNAKE_CASE ) -> List[Any]: if not args.no_tpu: snake_case_ = initialize_tpu(_SCREAMING_SNAKE_CASE ) snake_case_ = tf.distribute.TPUStrategy(_SCREAMING_SNAKE_CASE ) else: snake_case_ = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" ) snake_case_ = AutoTokenizer.from_pretrained(args.tokenizer ) snake_case_ = AutoConfig.from_pretrained(args.pretrained_model_config ) snake_case_ = tokenizer.vocab_size snake_case_ = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) ) if not training_records: raise ValueError(f"""No .tfrecord files found in {args.train_dataset}.""" ) snake_case_ = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) ) if not eval_records: raise ValueError(f"""No .tfrecord files found in {args.eval_dataset}.""" ) snake_case_ = count_samples(_SCREAMING_SNAKE_CASE ) snake_case_ = 
num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) snake_case_ = steps_per_epoch * args.num_epochs with strategy.scope(): snake_case_ = TFAutoModelForMaskedLM.from_config(_SCREAMING_SNAKE_CASE ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built snake_case_ , snake_case_ = create_optimizer( num_train_steps=_SCREAMING_SNAKE_CASE , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=_SCREAMING_SNAKE_CASE , metrics=["""accuracy"""] ) def decode_fn(_SCREAMING_SNAKE_CASE ): snake_case_ = { """input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), """attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), } return tf.io.parse_single_example(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. snake_case_ = DataCollatorForLanguageModeling( tokenizer=_SCREAMING_SNAKE_CASE , mlm_probability=args.mlm_probability , mlm=_SCREAMING_SNAKE_CASE , return_tensors="""tf""" ) def mask_with_collator(_SCREAMING_SNAKE_CASE ): # TF really needs an isin() function snake_case_ = ( ~tf.cast(batch["""attention_mask"""] , tf.bool ) | (batch["""input_ids"""] == tokenizer.cls_token_id) | (batch["""input_ids"""] == tokenizer.sep_token_id) ) snake_case_ , snake_case_ = data_collator.tf_mask_tokens( batch["""input_ids"""] , vocab_size=len(_SCREAMING_SNAKE_CASE ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=_SCREAMING_SNAKE_CASE , ) return batch snake_case_ = args.per_replica_batch_size * strategy.num_replicas_in_sync snake_case_ = prepare_dataset( _SCREAMING_SNAKE_CASE , decode_fn=_SCREAMING_SNAKE_CASE , mask_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , shuffle_buffer_size=args.shuffle_buffer_size , ) snake_case_ = prepare_dataset( _SCREAMING_SNAKE_CASE , decode_fn=_SCREAMING_SNAKE_CASE , mask_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , ) snake_case_ = [] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=_SCREAMING_SNAKE_CASE ) ) model.fit( _SCREAMING_SNAKE_CASE , validation_data=_SCREAMING_SNAKE_CASE , epochs=args.num_epochs , callbacks=_SCREAMING_SNAKE_CASE , ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Union[str, Any] = parse_args() main(args)
347
0
"""simple docstring""" import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class __A ( snake_case__ ): '''simple docstring''' def __init__( self : int ,_snake_case : Any ,_snake_case : int ,_snake_case : List[str] ) -> List[str]: """simple docstring""" lowercase__ : Optional[int] = dataset lowercase__ : str = process lowercase__ : Tuple = params def __len__( self : str ) -> Union[str, Any]: """simple docstring""" return len(self.dataset ) def __getitem__( self : Dict ,_snake_case : Any ) -> str: """simple docstring""" lowercase__ : Dict = self.dataset[i] lowercase__ : int = self.process(UpperCAmelCase_ ,**self.params ) return processed class __A ( snake_case__ ): '''simple docstring''' def __init__( self : Any ,_snake_case : List[Any] ,_snake_case : List[str] ,_snake_case : Any ,_snake_case : Tuple=None ) -> Dict: """simple docstring""" lowercase__ : Optional[Any] = loader lowercase__ : Dict = infer lowercase__ : Any = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether lowercase__ : Optional[Any] = None lowercase__ : List[str] = loader_batch_size # Internal bookkeeping lowercase__ : Optional[Any] = None lowercase__ : List[Any] = None def __len__( self : int ) -> Tuple: """simple docstring""" return len(self.loader ) def __iter__( self : Union[str, Any] ) -> Tuple: """simple docstring""" lowercase__ : Tuple = iter(self.loader ) return self def UpperCAmelCase ( self : Optional[int] ) -> str: """simple docstring""" if isinstance(self._loader_batch_data ,torch.Tensor ): # Batch data is simple tensor, just fetch the slice lowercase__ : Tuple = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) lowercase__ : Optional[Any] = {} for k, element in self._loader_batch_data.items(): if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ): # Convert ModelOutput to tuple first lowercase__ : int = element.to_tuple() if isinstance(element[0] ,torch.Tensor ): lowercase__ : str = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] ,np.ndarray ): lowercase__ : str = tuple(np.expand_dims(el[self._loader_batch_index] ,0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] ,torch.Tensor ): lowercase__ : Dict = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] ,np.ndarray ): lowercase__ : Tuple = tuple(np.expand_dims(el[self._loader_batch_index] ,0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around lowercase__ : Any = None elif isinstance(element[self._loader_batch_index] ,torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers lowercase__ : Dict = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] ,np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers lowercase__ : Tuple = np.expand_dims(element[self._loader_batch_index] ,0 ) else: # This is typically a list, so no need to `unsqueeze`. 
lowercase__ : List[str] = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 lowercase__ : List[str] = self._loader_batch_data.__class__(UpperCAmelCase_ ) self._loader_batch_index += 1 return result def UpperCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch lowercase__ : str = next(self.iterator ) lowercase__ : Optional[int] = self.infer(UpperCAmelCase_ ,**self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(UpperCAmelCase_ ,torch.Tensor ): lowercase__ : List[Any] = processed else: lowercase__ : List[str] = list(processed.keys() )[0] lowercase__ : List[str] = processed[key] if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ): lowercase__ : Any = len(UpperCAmelCase_ ) else: lowercase__ : Dict = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. lowercase__ : int = observed_batch_size # Setting internal index to unwrap the batch lowercase__ : List[Any] = processed lowercase__ : Tuple = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class __A ( snake_case__ ): '''simple docstring''' def __init__( self : Any ,_snake_case : List[str] ,_snake_case : Tuple ,_snake_case : Union[str, Any] ,_snake_case : int=None ) -> int: """simple docstring""" super().__init__(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) def __iter__( self : Optional[Any] ) -> List[str]: """simple docstring""" lowercase__ : Optional[int] = iter(self.loader ) lowercase__ : Tuple = None return self def UpperCAmelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" if self.subiterator is None: lowercase__ : Tuple = self.infer(next(self.iterator ) ,**self.params ) try: # Try to return next item lowercase__ : Optional[Any] = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. 
# # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators lowercase__ : List[Any] = self.infer(next(self.iterator ) ,**self.params ) lowercase__ : Union[str, Any] = next(self.subiterator ) return processed class __A ( snake_case__ ): '''simple docstring''' def __iter__( self : Any ) -> Tuple: """simple docstring""" lowercase__ : str = iter(self.loader ) return self def UpperCAmelCase ( self : int ) -> Any: """simple docstring""" lowercase__ : Any = False lowercase__ : Any = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: lowercase__ : Tuple = self.loader_batch_item() lowercase__ : Union[str, Any] = item.pop('''is_last''' ) accumulator.append(UpperCAmelCase_ ) if is_last: return accumulator while not is_last: lowercase__ : Optional[Any] = self.infer(next(self.iterator ) ,**self.params ) if self.loader_batch_size is not None: if isinstance(UpperCAmelCase_ ,torch.Tensor ): lowercase__ : Optional[Any] = processed else: lowercase__ : Tuple = list(processed.keys() )[0] lowercase__ : Optional[int] = processed[key] if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ): lowercase__ : List[Any] = len(UpperCAmelCase_ ) else: lowercase__ : Any = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. lowercase__ : Optional[int] = observed_batch_size lowercase__ : List[str] = processed lowercase__ : Optional[int] = 0 while self._loader_batch_index < self.loader_batch_size: lowercase__ : int = self.loader_batch_item() lowercase__ : List[str] = item.pop('''is_last''' ) accumulator.append(UpperCAmelCase_ ) if is_last: return accumulator else: lowercase__ : Union[str, Any] = processed lowercase__ : str = item.pop('''is_last''' ) accumulator.append(UpperCAmelCase_ ) return accumulator class __A ( snake_case__ ): '''simple docstring''' def __init__( self : Optional[Any] ,_snake_case : Dataset ,_snake_case : str ) -> Optional[Any]: """simple docstring""" lowercase__ : Union[str, Any] = dataset lowercase__ : List[str] = key def __len__( self : List[str] ) -> Optional[Any]: """simple docstring""" return len(self.dataset ) def __getitem__( self : Any ,_snake_case : List[str] ) -> Tuple: """simple docstring""" return self.dataset[i][self.key] class __A ( snake_case__ ): '''simple docstring''' def __init__( self : List[str] ,_snake_case : Dataset ,_snake_case : str ,_snake_case : str ) -> Optional[Any]: """simple docstring""" lowercase__ : Optional[int] = dataset lowercase__ : int = keya lowercase__ : Optional[Any] = keya def __len__( self : Dict ) -> str: """simple docstring""" return len(self.dataset ) def __getitem__( self : Dict ,_snake_case : Optional[int] ) -> Union[str, Any]: """simple docstring""" return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
16
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> float: if density <= 0: raise ValueError("""Impossible fluid density""" ) if bulk_modulus <= 0: raise ValueError("""Impossible bulk modulus""" ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
347
0
"""Interactive binary tree builder plus recursive and iterative traversals."""
from __future__ import annotations

import queue


class TreeNode:
    def __init__(self, data: int) -> None:
        self.data = data
        self.right: TreeNode | None = None
        self.left: TreeNode | None = None


def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    # The loop above always returns; this line only satisfies type checkers.
    raise RuntimeError("build_tree() exited its input loop without returning a tree")


def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    """Level order traversal that prints each level on its own line."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n: TreeNode | None = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n: TreeNode | None = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1: list[TreeNode] = []
    stack2: list[TreeNode] = []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # popping from stack2 yields the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))

    node: TreeNode = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
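# A minimal non-interactive sketch (the three-node tree and its values are
# illustrative assumptions, not part of the original snippet): wire a small tree
# by hand and run each traversal, so the expected output of every function above
# can be checked without the interactive build_tree().
def _demo_traversals() -> None:
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    pre_order(root)    # prints: 1,2,3,
    print()
    in_order(root)     # prints: 2,1,3,
    print()
    post_order(root)   # prints: 2,3,1,
    print()
    level_order(root)  # prints: 1,2,3,
    print()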
237
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE ) -> bool: if num < 0: return False snake_case_ = num snake_case_ = 0 while num > 0: snake_case_ = rev_num * 10 + (num % 10) num //= 10 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
347
0
"""Digit-reversal palindrome check for integers."""


def is_palindrome(num: int) -> bool:
    """
    >>> is_palindrome(121)
    True
    >>> is_palindrome(-121)
    False
    """
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
139
"""simple docstring""" import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin __SCREAMING_SNAKE_CASE : Tuple = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model') @require_sentencepiece @require_tokenizers class __A (snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Tuple = SpeechTaTokenizer __lowercase: int = False __lowercase: List[str] = True def lowerCAmelCase ( self : Any ) ->str: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing snake_case_ = SpeechTaTokenizer(UpperCAmelCase_ ) snake_case_ = AddedToken("""<mask>""" , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) snake_case_ = mask_token tokenizer.add_special_tokens({"""mask_token""": mask_token} ) tokenizer.add_tokens(["""<ctc_blank>"""] ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict: """simple docstring""" snake_case_ = """this is a test""" snake_case_ = """this is a test""" return input_text, output_text def lowerCAmelCase ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Tuple=20 , UpperCAmelCase_ : Dict=5 ) ->List[Any]: """simple docstring""" snake_case_ , snake_case_ = self.get_input_output_texts(UpperCAmelCase_ ) snake_case_ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) snake_case_ = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ ) return text, ids def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]: """simple docstring""" snake_case_ = """<pad>""" snake_case_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ ) def lowerCAmelCase ( self : int ) ->str: """simple docstring""" snake_case_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(vocab_keys[-4] , """œ""" ) self.assertEqual(vocab_keys[-2] , """<mask>""" ) self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" ) self.assertEqual(len(UpperCAmelCase_ ) , 81 ) def lowerCAmelCase ( self : Optional[int] ) ->int: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = self.get_tokenizers(do_lower_case=UpperCAmelCase_ ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): snake_case_ = tokenizer.vocab_size snake_case_ = len(UpperCAmelCase_ ) self.assertNotEqual(UpperCAmelCase_ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) snake_case_ = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""] snake_case_ = tokenizer.add_tokens(UpperCAmelCase_ ) snake_case_ = tokenizer.vocab_size snake_case_ = len(UpperCAmelCase_ ) self.assertNotEqual(UpperCAmelCase_ , 0 ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) self.assertEqual(UpperCAmelCase_ , all_size + 
len(UpperCAmelCase_ ) ) snake_case_ = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=UpperCAmelCase_ ) self.assertGreaterEqual(len(UpperCAmelCase_ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) snake_case_ = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""} snake_case_ = tokenizer.add_special_tokens(UpperCAmelCase_ ) snake_case_ = tokenizer.vocab_size snake_case_ = len(UpperCAmelCase_ ) self.assertNotEqual(UpperCAmelCase_ , 0 ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) self.assertEqual(UpperCAmelCase_ , all_size_a + len(UpperCAmelCase_ ) ) snake_case_ = tokenizer.encode( """>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=UpperCAmelCase_ ) self.assertGreaterEqual(len(UpperCAmelCase_ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def lowerCAmelCase ( self : Optional[Any] ) ->Tuple: """simple docstring""" pass def lowerCAmelCase ( self : List[str] ) ->Optional[Any]: """simple docstring""" pass def lowerCAmelCase ( self : List[str] ) ->List[str]: """simple docstring""" snake_case_ = self.get_tokenizer() snake_case_ = tokenizer.tokenize("""This is a test""" ) # fmt: off self.assertListEqual(UpperCAmelCase_ , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) snake_case_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] ) snake_case_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) # fmt: off self.assertListEqual(UpperCAmelCase_ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on snake_case_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ ) self.assertListEqual( UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] ) @slow def lowerCAmelCase ( self : str ) ->Dict: """simple docstring""" snake_case_ = [ """Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """ """general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural """ """Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """ """models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""", """BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """ """conditioning on both left and right context in all layers.""", """The quick brown fox jumps over the lazy dog.""", ] # fmt: off snake_case_ = { """input_ids""": [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], """attention_mask""": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase_ , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=UpperCAmelCase_ , )
347
0
from typing import TYPE_CHECKING

import torch

from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
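# A usage sketch under assumptions: `PipelineTool` instances are callable, so the
# tool can be driven directly once instantiated. The image path and question
# below are hypothetical example inputs, not taken from the original file.
#
#     from PIL import Image
#
#     tool = ImageQuestionAnsweringTool()
#     image = Image.open("photo.jpg")  # hypothetical local file
#     print(tool(image, "How many cats are in the picture?"))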
6
"""simple docstring""" import datasets __SCREAMING_SNAKE_CASE : Tuple = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n' __SCREAMING_SNAKE_CASE : Dict = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n' __SCREAMING_SNAKE_CASE : List[str] = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n' def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class __A (datasets.Metric): '''simple docstring''' def lowerCAmelCase ( self : str ) ->Any: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ), """references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ), } ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , ) def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any ) ->int: """simple docstring""" return {"accuracy": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_ )}
347
0
"""simple docstring""" import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __init__(self , a_ , a_=7 , a_=3 , a_=30 , a_=4_00 , a_=True , a_=None , a_=True , a_=[0.5, 0.5, 0.5] , a_=[0.5, 0.5, 0.5] , a_=True , a_=1 / 2_55 , a_=True , ): '''simple docstring''' __snake_case : int = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33} __snake_case : Union[str, Any] = parent __snake_case : int = batch_size __snake_case : List[Any] = num_channels __snake_case : Dict = min_resolution __snake_case : str = max_resolution __snake_case : List[str] = do_resize __snake_case : str = size __snake_case : str = do_normalize __snake_case : int = image_mean __snake_case : Union[str, Any] = image_std __snake_case : Tuple = do_rescale __snake_case : Dict = rescale_factor __snake_case : Optional[Any] = do_pad def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def SCREAMING_SNAKE_CASE (self , a_ , a_=False ): '''simple docstring''' if not batched: __snake_case : Union[str, Any] = image_inputs[0] if isinstance(UpperCAmelCase_ , Image.Image ): __snake_case , __snake_case : Optional[Any] = image.size else: __snake_case , __snake_case : Dict = image.shape[1], image.shape[2] if w < h: __snake_case : Union[str, Any] = int(self.size['''shortest_edge'''] * h / w ) __snake_case : Dict = self.size['''shortest_edge'''] elif w > h: __snake_case : Any = self.size['''shortest_edge'''] __snake_case : Optional[Any] = int(self.size['''shortest_edge'''] * w / h ) else: __snake_case : Dict = self.size['''shortest_edge'''] __snake_case : str = self.size['''shortest_edge'''] else: __snake_case : List[str] = [] for image in image_inputs: __snake_case , __snake_case : Union[str, Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __snake_case : Union[str, Any] = max(UpperCAmelCase_ , key=lambda a_ : item[0] )[0] __snake_case : Tuple = max(UpperCAmelCase_ , key=lambda a_ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class _UpperCAmelCase ( snake_case__, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =ConditionalDetrImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = ConditionalDetrImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase_ , '''image_mean''' ) ) self.assertTrue(hasattr(UpperCAmelCase_ , '''image_std''' ) ) self.assertTrue(hasattr(UpperCAmelCase_ , '''do_normalize''' ) ) self.assertTrue(hasattr(UpperCAmelCase_ , 
'''do_resize''' ) ) self.assertTrue(hasattr(UpperCAmelCase_ , '''size''' ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} ) self.assertEqual(image_processor.do_pad , UpperCAmelCase_ ) __snake_case : Optional[Any] = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCAmelCase_ ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} ) self.assertEqual(image_processor.do_pad , UpperCAmelCase_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __snake_case : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , Image.Image ) # Test not batched input __snake_case : str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __snake_case , __snake_case : List[Any] = self.image_processor_tester.get_expected_values(UpperCAmelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __snake_case , __snake_case : Any = self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ ) __snake_case : List[str] = image_processing(UpperCAmelCase_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __snake_case : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , np.ndarray ) # Test not batched input __snake_case : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __snake_case , __snake_case : int = self.image_processor_tester.get_expected_values(UpperCAmelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __snake_case : List[Any] = image_processing(UpperCAmelCase_ , return_tensors='''pt''' ).pixel_values __snake_case , __snake_case : List[str] = self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __snake_case : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) # Test not batched input __snake_case : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values 
__snake_case , __snake_case : Dict = self.image_processor_tester.get_expected_values(UpperCAmelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __snake_case : List[Any] = image_processing(UpperCAmelCase_ , return_tensors='''pt''' ).pixel_values __snake_case , __snake_case : Optional[int] = self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: __snake_case : Optional[Any] = json.loads(f.read() ) __snake_case : List[Any] = {'''image_id''': 3_97_69, '''annotations''': target} # encode them __snake_case : str = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' ) __snake_case : List[str] = image_processing(images=UpperCAmelCase_ , annotations=UpperCAmelCase_ , return_tensors='''pt''' ) # verify pixel values __snake_case : Union[str, Any] = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase_ ) __snake_case : List[Any] = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase_ , atol=1E-4 ) ) # verify area __snake_case : Union[str, Any] = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase_ ) ) # verify boxes __snake_case : List[str] = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase_ ) __snake_case : Optional[Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase_ , atol=1E-3 ) ) # verify image_id __snake_case : Optional[Any] = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase_ ) ) # verify is_crowd __snake_case : Any = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase_ ) ) # verify class_labels __snake_case : List[str] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase_ ) ) # verify orig_size __snake_case : int = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase_ ) ) # verify size __snake_case : List[str] = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase_ ) ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: __snake_case : List[Any] = json.loads(f.read() ) __snake_case : Dict = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target} __snake_case : Union[str, Any] = 
pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them __snake_case : Any = ConditionalDetrImageProcessor(format='''coco_panoptic''' ) __snake_case : str = image_processing(images=UpperCAmelCase_ , annotations=UpperCAmelCase_ , masks_path=UpperCAmelCase_ , return_tensors='''pt''' ) # verify pixel values __snake_case : str = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase_ ) __snake_case : Optional[Any] = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase_ , atol=1E-4 ) ) # verify area __snake_case : Union[str, Any] = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase_ ) ) # verify boxes __snake_case : List[str] = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase_ ) __snake_case : Tuple = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase_ , atol=1E-3 ) ) # verify image_id __snake_case : str = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase_ ) ) # verify is_crowd __snake_case : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase_ ) ) # verify class_labels __snake_case : Optional[Any] = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase_ ) ) # verify masks __snake_case : int = 82_28_73 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCAmelCase_ ) # verify orig_size __snake_case : Any = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase_ ) ) # verify size __snake_case : int = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase_ ) )
102
"""simple docstring""" from ..utils import DummyObject, requires_backends class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[str] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[str] = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[Any] = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : int ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Union[str, Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Tuple = ["""sentencepiece"""] def 
__init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[Any] ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[str] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Tuple = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Optional[Any] ) ->str: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[Any] ) ->int: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[str] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any] ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : Any , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[Any] ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[int] ) ->Optional[Any]: """simple docstring""" 
requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : str ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[str] ) ->Optional[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ) ->Any: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[str] = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Union[str, Any] ) ->Optional[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->str: """simple docstring""" requires_backends(self , ["""sentencepiece"""] )
347
0
import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


class FeatureExtractionSavingTestMixin:
    # Optional dtype-cast test hook; the attribute name is assumed from the
    # transformers test mixin (the original name was obscured in this dump).
    test_cast_dtype = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
265
"""simple docstring""" import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor __SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) class __A (snake_case__): '''simple docstring''' def __init__( self : str , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->None: """simple docstring""" warnings.warn( """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use MobileViTImageProcessor instead.""" , UpperCAmelCase_ , ) super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
347
0
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)
    else:
        raise ValueError("Unknown mode; expected 'e' or 'd'")

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)


if __name__ == "__main__":
    main()
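# Worked example (computed by hand for illustration): with key "KEY" each letter
# is shifted by the matching key letter, cycling through the key:
#
#     encrypt_message("KEY", "HELLO")  # -> "RIJVS"  (H+K, E+E, L+Y, L+K, O+E)
#     decrypt_message("KEY", "RIJVS")  # -> "HELLO"
#
# Non-letters pass through unchanged and do not advance the key index.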
119
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Any: snake_case_ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """vit.embeddings.cls_token"""), ("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" snake_case_ = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Tuple: for i in range(config.num_hidden_layers ): if base_model: snake_case_ = """""" else: snake_case_ = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict snake_case_ = in_proj_weight[ : config.hidden_size, : ] snake_case_ = in_proj_bias[: config.hidden_size] snake_case_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] snake_case_ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] 
snake_case_ = in_proj_weight[ -config.hidden_size :, : ] snake_case_ = in_proj_bias[-config.hidden_size :] def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: snake_case_ = dct.pop(_SCREAMING_SNAKE_CASE ) snake_case_ = val def _a ( ) -> Any: snake_case_ = """http://images.cocodataset.org/val2017/000000039769.jpg""" snake_case_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = ViTConfig() snake_case_ = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": snake_case_ = True snake_case_ = int(vit_name[-12:-10] ) snake_case_ = int(vit_name[-9:-6] ) else: snake_case_ = 1_000 snake_case_ = """huggingface/label-files""" snake_case_ = """imagenet-1k-id2label.json""" snake_case_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) snake_case_ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} snake_case_ = int(vit_name[-6:-4] ) snake_case_ = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith("""tiny""" ): snake_case_ = 192 snake_case_ = 768 snake_case_ = 12 snake_case_ = 3 elif vit_name[9:].startswith("""small""" ): snake_case_ = 384 snake_case_ = 1_536 snake_case_ = 12 snake_case_ = 6 else: pass else: if vit_name[4:].startswith("""small""" ): snake_case_ = 768 snake_case_ = 2_304 snake_case_ = 8 snake_case_ = 8 elif vit_name[4:].startswith("""base""" ): pass elif vit_name[4:].startswith("""large""" ): snake_case_ = 1_024 snake_case_ = 4_096 snake_case_ = 24 snake_case_ = 16 elif vit_name[4:].startswith("""huge""" ): snake_case_ = 1_280 snake_case_ = 5_120 snake_case_ = 32 snake_case_ = 16 # load original model from timm snake_case_ = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ) timm_model.eval() # load state_dict of original model, remove and rename some keys snake_case_ = timm_model.state_dict() if base_model: remove_classification_head_(_SCREAMING_SNAKE_CASE ) snake_case_ = create_rename_keys(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) read_in_q_k_v(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # load HuggingFace model if vit_name[-5:] == "in21k": snake_case_ = ViTModel(_SCREAMING_SNAKE_CASE ).eval() else: snake_case_ = ViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval() model.load_state_dict(_SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: snake_case_ = DeiTImageProcessor(size=config.image_size ) else: snake_case_ = ViTImageProcessor(size=config.image_size ) snake_case_ = image_processor(images=prepare_img() , return_tensors="""pt""" ) snake_case_ = encoding["""pixel_values"""] snake_case_ = model(_SCREAMING_SNAKE_CASE ) if base_model: snake_case_ = timm_model.forward_features(_SCREAMING_SNAKE_CASE ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(_SCREAMING_SNAKE_CASE , 
outputs.pooler_output , atol=1E-3 ) else: snake_case_ = timm_model(_SCREAMING_SNAKE_CASE ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.logits , atol=1E-3 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--vit_name', default='vit_base_patch16_224', type=str, help='Name of the ViT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) __SCREAMING_SNAKE_CASE : int = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
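# Hypothetical invocation (the script name and output directory are assumed
# example values; `vit_base_patch16_224` is the argparse default above):
#
#     python convert_vit_timm_to_pytorch.py \
#         --vit_name vit_base_patch16_224 \
#         --pytorch_dump_folder_path ./vit-base-patch16-224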
347
0
"""Convert an original ControlNet checkpoint to the diffusers format."""
import argparse

from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--original_config_file",
        type=str,
        required=True,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--image_size",
        default=512,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    def parse_bool(string):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")

    parser.add_argument(
        "--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
    )
    parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)

    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        extract_ema=args.extract_ema,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        use_linear_projection=args.use_linear_projection,
        cross_attention_dim=args.cross_attention_dim,
    )

    controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
349
"""simple docstring""" import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __A (unittest.TestCase): '''simple docstring''' def __init__( self : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple=13 , UpperCAmelCase_ : List[Any]=7 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=99 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : Tuple=5 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Any=37 , UpperCAmelCase_ : int="gelu" , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=512 , UpperCAmelCase_ : Optional[Any]=16 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : str=4 , ) ->Tuple: """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_attention_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = num_choices def lowerCAmelCase ( self : Optional[int] ) ->str: """simple docstring""" snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_attention_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = None if self.use_token_type_ids: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCAmelCase ( self : List[str] ) ->Dict: """simple docstring""" snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class __A (snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Union[str, Any] = True __lowercase: int = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, 
FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def lowerCAmelCase ( self : Optional[Any] ) ->Tuple: """simple docstring""" snake_case_ = FlaxRoFormerModelTester(self ) @slow def lowerCAmelCase ( self : Any ) ->List[str]: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=UpperCAmelCase_ ) snake_case_ = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCAmelCase_ ) @require_flax class __A (unittest.TestCase): '''simple docstring''' @slow def lowerCAmelCase ( self : str ) ->Dict: """simple docstring""" snake_case_ = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) snake_case_ = jnp.array([[0, 1, 2, 3, 4, 5]] ) snake_case_ = model(UpperCAmelCase_ )[0] snake_case_ = 50_000 snake_case_ = (1, 6, vocab_size) self.assertEqual(output.shape , UpperCAmelCase_ ) snake_case_ = jnp.array( [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
347
0
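Editorial aside, not a dataset row: the Flax integration test above compares a 3x3 corner of the model output against hard-coded expected values. A self-contained sketch of that slice-comparison pattern with a made-up array:

import jax.numpy as jnp

# Fabricated stand-in for a (batch, seq_len, hidden) model output.
output = jnp.arange(24, dtype=jnp.float32).reshape(1, 6, 4)
expected_slice = jnp.array([[[0.0, 1.0, 2.0], [4.0, 5.0, 6.0], [8.0, 9.0, 10.0]]])
assert jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4)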
'''simple docstring''' import copy import os import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np import pyarrow as pa import pyarrow.parquet as pq import pytest from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence from datasets.features import ArrayaD, ClassLabel, Features, Image, Value from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects from datasets.keyhash import DuplicatedKeysError, InvalidKeyError from .utils import require_pil class A__ ( snake_case__ ): def snake_case_ ( self ) -> Optional[int]: '''simple docstring''' A_ = pa.array(TypedSequence([1, 2, 3] ) ) self.assertEqual(arr.type , pa.intaa() ) def snake_case_ ( self ) -> int: '''simple docstring''' with self.assertRaises(UpperCAmelCase_ ): A_ = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() ) def snake_case_ ( self ) -> List[Any]: '''simple docstring''' with self.assertRaises(UpperCAmelCase_ ): A_ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) ) def snake_case_ ( self ) -> Any: '''simple docstring''' A_ = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) ) self.assertEqual(arr.type , pa.intaa() ) def snake_case_ ( self ) -> Union[str, Any]: '''simple docstring''' with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): A_ = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) ) def snake_case_ ( self ) -> Tuple: '''simple docstring''' A_ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) ) self.assertEqual(arr.type , pa.intaa() ) def snake_case_ ( self ) -> Optional[int]: '''simple docstring''' A_ = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) ) self.assertEqual(arr.type , pa.string() ) def snake_case_ ( self ) -> Union[str, Any]: '''simple docstring''' A_ = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) ) def snake_case_ ( self ) -> int: '''simple docstring''' with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): A_ = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) ) def snake_case_ ( self ) -> int: '''simple docstring''' A_ = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) ) def snake_case_ ( self ) -> Dict: '''simple docstring''' A_ = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , pa.string() ) @require_pil def snake_case_ ( self ) -> Tuple: '''simple docstring''' import PIL.Image A_ = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) ) with patch( """datasets.arrow_writer.cast_to_python_objects""" , side_effect=UpperCAmelCase_ ) as mock_cast_to_python_objects: A_ = pa.array(TypedSequence([{"""path""": None, """bytes""": B"""image_bytes"""}, pil_image] , type=Image() ) ) A_ , A_ = mock_cast_to_python_objects.call_args_list[-1] self.assertIn("""optimize_list_casting""" , UpperCAmelCase_ ) self.assertFalse(kwargs["""optimize_list_casting"""] ) def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> str: A_ = pa.BufferReader(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE, pa.Buffer ) else pa.memory_map(_SCREAMING_SNAKE_CASE ) A_ = pa.ipc.open_stream(_SCREAMING_SNAKE_CASE ) A_ = f.read_all() assert 
len(pa_table.to_batches() ) == expected_num_chunks assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} del pa_table @pytest.mark.parametrize("""writer_batch_size""", [None, 1, 10] ) @pytest.mark.parametrize( """fields""", [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Any: A_ = pa.BufferOutputStream() A_ = pa.schema(_SCREAMING_SNAKE_CASE ) if fields else None with ArrowWriter(stream=_SCREAMING_SNAKE_CASE, schema=_SCREAMING_SNAKE_CASE, writer_batch_size=_SCREAMING_SNAKE_CASE ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) A_ , A_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: A_ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(_SCREAMING_SNAKE_CASE, metadata=writer._schema.metadata ) _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def UpperCAmelCase__ ( ) -> Optional[Any]: A_ = pa.BufferOutputStream() A_ = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} ) with ArrowWriter(stream=_SCREAMING_SNAKE_CASE, features=_SCREAMING_SNAKE_CASE ) as writer: writer.write({"""labels""": 0} ) writer.write({"""labels""": 1} ) A_ , A_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == features.arrow_schema assert writer._schema.metadata == features.arrow_schema.metadata A_ = pa.BufferReader(output.getvalue() ) A_ = pa.ipc.open_stream(_SCREAMING_SNAKE_CASE ) A_ = f.read_all() A_ = pa_table.schema assert pa_table.num_rows == 2 assert schema == features.arrow_schema assert schema.metadata == features.arrow_schema.metadata assert features == Features.from_arrow_schema(_SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize("""writer_batch_size""", [None, 1, 10] ) def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Any: A_ = pa.BufferOutputStream() with ArrowWriter( stream=_SCREAMING_SNAKE_CASE, writer_batch_size=_SCREAMING_SNAKE_CASE, hash_salt="""split_name""", check_duplicates=_SCREAMING_SNAKE_CASE, ) as writer: with pytest.raises(_SCREAMING_SNAKE_CASE ): writer.write({"""col_1""": """foo""", """col_2""": 1}, key=[1, 2] ) A_ , A_ = writer.finalize() @pytest.mark.parametrize("""writer_batch_size""", [None, 2, 10] ) def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple: A_ = pa.BufferOutputStream() with ArrowWriter( stream=_SCREAMING_SNAKE_CASE, writer_batch_size=_SCREAMING_SNAKE_CASE, hash_salt="""split_name""", check_duplicates=_SCREAMING_SNAKE_CASE, ) as writer: with pytest.raises(_SCREAMING_SNAKE_CASE ): writer.write({"""col_1""": """foo""", """col_2""": 1}, key=10 ) writer.write({"""col_1""": """bar""", """col_2""": 2}, key=10 ) A_ , A_ = writer.finalize() @pytest.mark.parametrize("""writer_batch_size""", [None, 2, 10] ) def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Any: A_ = pa.BufferOutputStream() with ArrowWriter( stream=_SCREAMING_SNAKE_CASE, writer_batch_size=_SCREAMING_SNAKE_CASE, hash_salt="""split_name""", check_duplicates=_SCREAMING_SNAKE_CASE, ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1}, key=1 ) writer.write({"""col_1""": """bar""", """col_2""": 2}, key=2 ) A_ , A_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""", [None, 
1, 10] ) @pytest.mark.parametrize( """fields""", [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Any: A_ = pa.BufferOutputStream() A_ = pa.schema(_SCREAMING_SNAKE_CASE ) if fields else None with ArrowWriter(stream=_SCREAMING_SNAKE_CASE, schema=_SCREAMING_SNAKE_CASE, writer_batch_size=_SCREAMING_SNAKE_CASE ) as writer: writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) writer.write_batch({"""col_1""": [], """col_2""": []} ) A_ , A_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: A_ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(_SCREAMING_SNAKE_CASE, metadata=writer._schema.metadata ) _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""", [None, 1, 10] ) @pytest.mark.parametrize( """fields""", [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Optional[int]: A_ = pa.BufferOutputStream() A_ = pa.schema(_SCREAMING_SNAKE_CASE ) if fields else None with ArrowWriter(stream=_SCREAMING_SNAKE_CASE, schema=_SCREAMING_SNAKE_CASE, writer_batch_size=_SCREAMING_SNAKE_CASE ) as writer: writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) ) A_ , A_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: A_ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(_SCREAMING_SNAKE_CASE, metadata=writer._schema.metadata ) _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""", [None, 1, 10] ) @pytest.mark.parametrize( """fields""", [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Dict: A_ = pa.BufferOutputStream() A_ = pa.schema(_SCREAMING_SNAKE_CASE ) if fields else None with ArrowWriter(stream=_SCREAMING_SNAKE_CASE, schema=_SCREAMING_SNAKE_CASE, writer_batch_size=_SCREAMING_SNAKE_CASE ) as writer: writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) ) writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) ) A_ , A_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: A_ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(_SCREAMING_SNAKE_CASE, metadata=writer._schema.metadata ) _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def UpperCAmelCase__ ( ) -> int: with tempfile.TemporaryDirectory() as tmp_dir: A_ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} A_ = os.path.join(_SCREAMING_SNAKE_CASE, """test.arrow""" ) with ArrowWriter(path=_SCREAMING_SNAKE_CASE, schema=pa.schema(_SCREAMING_SNAKE_CASE ) ) as writer: writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) A_ , A_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == pa.schema(_SCREAMING_SNAKE_CASE, metadata=writer._schema.metadata ) _check_output(_SCREAMING_SNAKE_CASE, 1 ) def UpperCAmelCase__ ( UpperCAmelCase__ ) -> List[Any]: if 
pa.types.is_list(_SCREAMING_SNAKE_CASE ): return get_base_dtype(arr_type.value_type ) else: return arr_type def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Optional[Any]: if isinstance(lst[0], _SCREAMING_SNAKE_CASE ): change_first_primitive_element_in_list(lst[0], _SCREAMING_SNAKE_CASE ) else: A_ = value @pytest.mark.parametrize("""optimized_int_type, expected_dtype""", [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] ) @pytest.mark.parametrize("""sequence""", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Optional[Any]: A_ = pa.array(TypedSequence(_SCREAMING_SNAKE_CASE, optimized_int_type=_SCREAMING_SNAKE_CASE ) ) assert get_base_dtype(arr.type ) == expected_dtype @pytest.mark.parametrize( """col, expected_dtype""", [ ("""attention_mask""", pa.inta()), ("""special_tokens_mask""", pa.inta()), ("""token_type_ids""", pa.inta()), ("""input_ids""", pa.intaa()), ("""other""", pa.intaa()), ], ) @pytest.mark.parametrize("""sequence""", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Dict: # in range A_ = pa.array(OptimizedTypedSequence(_SCREAMING_SNAKE_CASE, col=_SCREAMING_SNAKE_CASE ) ) assert get_base_dtype(arr.type ) == expected_dtype # not in range if col != "other": # avoids errors due to in-place modifications A_ = copy.deepcopy(_SCREAMING_SNAKE_CASE ) A_ = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1 change_first_primitive_element_in_list(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ) A_ = pa.array(OptimizedTypedSequence(_SCREAMING_SNAKE_CASE, col=_SCREAMING_SNAKE_CASE ) ) assert get_base_dtype(arr.type ) == pa.intaa() @pytest.mark.parametrize("""raise_exception""", [False, True] ) def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Union[str, Any]: A_ = str(tmp_path / """dataset-train.arrow""" ) try: with ArrowWriter(path=_SCREAMING_SNAKE_CASE ) as writer: if raise_exception: raise pa.lib.ArrowInvalid() else: writer.stream.close() except pa.lib.ArrowInvalid: pass finally: assert writer.stream.closed def UpperCAmelCase__ ( UpperCAmelCase__ ) -> List[str]: A_ = """mock://dataset-train.arrow""" with ArrowWriter(path=_SCREAMING_SNAKE_CASE, storage_options=mockfs.storage_options ) as writer: assert isinstance(writer._fs, type(_SCREAMING_SNAKE_CASE ) ) assert writer._fs.storage_options == mockfs.storage_options writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) A_ , A_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert mockfs.exists(_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( ) -> Optional[int]: A_ = pa.BufferOutputStream() with ParquetWriter(stream=_SCREAMING_SNAKE_CASE ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) A_ , A_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 A_ = pa.BufferReader(output.getvalue() ) A_ = pq.read_table(_SCREAMING_SNAKE_CASE ) assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} @require_pil @pytest.mark.parametrize("""embed_local_files""", [False, True] ) def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Dict: import PIL.Image A_ = str(tmp_path / """test_image_rgb.jpg""" ) PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uinta ) ).save(_SCREAMING_SNAKE_CASE, format="""png""" ) A_ = pa.BufferOutputStream() with ParquetWriter( stream=_SCREAMING_SNAKE_CASE, 
features=Features({"""image""": Image()} ), embed_local_files=_SCREAMING_SNAKE_CASE ) as writer: writer.write({"""image""": image_path} ) writer.finalize() A_ = pa.BufferReader(output.getvalue() ) A_ = pq.read_table(_SCREAMING_SNAKE_CASE ) A_ = pa_table.to_pydict() if embed_local_files: assert isinstance(out["""image"""][0]["""path"""], _SCREAMING_SNAKE_CASE ) with open(_SCREAMING_SNAKE_CASE, """rb""" ) as f: assert out["image"][0]["bytes"] == f.read() else: assert out["image"][0]["path"] == image_path assert out["image"][0]["bytes"] is None def UpperCAmelCase__ ( ) -> List[Any]: A_ = pa.schema([pa.field("""col_1""", pa.string(), nullable=_SCREAMING_SNAKE_CASE )] ) A_ = pa.BufferOutputStream() with ArrowWriter(stream=_SCREAMING_SNAKE_CASE ) as writer: writer._build_writer(inferred_schema=_SCREAMING_SNAKE_CASE ) assert writer._schema == pa.schema([pa.field("""col_1""", pa.string() )] )
162
"""simple docstring""" from __future__ import annotations def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool: snake_case_ = get_failure_array(_SCREAMING_SNAKE_CASE ) # 2) Step through text searching for pattern snake_case_ , snake_case_ = 0, 0 # index into text, pattern while i < len(_SCREAMING_SNAKE_CASE ): if pattern[j] == text[i]: if j == (len(_SCREAMING_SNAKE_CASE ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: snake_case_ = failure[j - 1] continue i += 1 return False def _a ( _SCREAMING_SNAKE_CASE ) -> list[int]: snake_case_ = [0] snake_case_ = 0 snake_case_ = 1 while j < len(_SCREAMING_SNAKE_CASE ): if pattern[i] == pattern[j]: i += 1 elif i > 0: snake_case_ = failure[i - 1] continue j += 1 failure.append(_SCREAMING_SNAKE_CASE ) return failure if __name__ == "__main__": # Test 1) __SCREAMING_SNAKE_CASE : Optional[int] = 'abc1abc12' __SCREAMING_SNAKE_CASE : Optional[int] = 'alskfjaldsabc1abc1abc12k23adsfabcabc' __SCREAMING_SNAKE_CASE : List[str] = 'alskfjaldsk23adsfabcabc' assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) __SCREAMING_SNAKE_CASE : int = 'ABABX' __SCREAMING_SNAKE_CASE : Optional[Any] = 'ABABZABABYABABX' assert kmp(pattern, text) # Test 3) __SCREAMING_SNAKE_CASE : Any = 'AAAB' __SCREAMING_SNAKE_CASE : List[Any] = 'ABAAAAAB' assert kmp(pattern, text) # Test 4) __SCREAMING_SNAKE_CASE : Optional[int] = 'abcdabcy' __SCREAMING_SNAKE_CASE : str = 'abcxabcdabxabcdabcdabcy' assert kmp(pattern, text) # Test 5) __SCREAMING_SNAKE_CASE : Any = 'aabaabaaa' assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
347
0
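Editorial aside, not a dataset row: the KMP context above builds its failure array with a while loop. For comparison, a minimal self-contained prefix-function sketch that produces the same arrays the tests assert:

def prefix_function(pattern: str) -> list[int]:
    # failure[j] = length of the longest proper prefix of pattern[: j + 1]
    # that is also a suffix of it; on a mismatch at pattern[j], matching
    # resumes at pattern[failure[j - 1]] instead of restarting at 0.
    failure = [0] * len(pattern)
    for j in range(1, len(pattern)):
        i = failure[j - 1]
        while i > 0 and pattern[i] != pattern[j]:
            i = failure[i - 1]
        if pattern[i] == pattern[j]:
            i += 1
        failure[j] = i
    return failure


assert prefix_function("ABABX") == [0, 0, 1, 2, 0]
assert prefix_function("aabaabaaa") == [0, 1, 0, 1, 2, 3, 4, 5, 2]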
import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder A : List[str] = '__DUMMY_TRANSFORMERS_USER__' A : Dict = 'Dummy User' A : Tuple = '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' A : Optional[Any] = 'https://hub-ci.huggingface.co' A : List[str] = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}' A : Optional[int] = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}' A : Union[str, Any] = Path('~/.huggingface/hub_ci_token').expanduser() @pytest.fixture def UpperCamelCase ( __magic_name__ : Tuple ) -> List[Any]: """simple docstring""" monkeypatch.setattr( """huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , _SCREAMING_SNAKE_CASE ) @pytest.fixture def UpperCamelCase ( __magic_name__ : str ) -> str: """simple docstring""" monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , _SCREAMING_SNAKE_CASE ) monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , _SCREAMING_SNAKE_CASE ) @pytest.fixture def UpperCamelCase ( __magic_name__ : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , _SCREAMING_SNAKE_CASE ) @pytest.fixture def UpperCamelCase ( __magic_name__ : Any , __magic_name__ : Any ) -> Tuple: """simple docstring""" HfFolder.save_token(_SCREAMING_SNAKE_CASE ) yield HfFolder.delete_token() @pytest.fixture(scope="""session""" ) def UpperCamelCase ( ) -> List[str]: """simple docstring""" return HfApi(endpoint=_SCREAMING_SNAKE_CASE ) @pytest.fixture(scope="""session""" ) def UpperCamelCase ( __magic_name__ : Optional[Any] ) -> Optional[int]: """simple docstring""" lowercase__ = HfFolder.get_token() HfFolder.save_token(_SCREAMING_SNAKE_CASE ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(_SCREAMING_SNAKE_CASE ) @pytest.fixture def UpperCamelCase ( __magic_name__ : List[Any] ) -> Optional[Any]: """simple docstring""" def _cleanup_repo(__magic_name__ : str ): hf_api.delete_repo(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) return _cleanup_repo @pytest.fixture def UpperCamelCase ( __magic_name__ : Optional[int] ) -> str: """simple docstring""" @contextmanager def _temporary_repo(__magic_name__ : List[str] ): try: yield repo_id finally: cleanup_repo(_SCREAMING_SNAKE_CASE ) return _temporary_repo @pytest.fixture(scope="""session""" ) def UpperCamelCase ( __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] ) -> Union[str, Any]: """simple docstring""" lowercase__ = f'''repo_txt_data-{int(time.time() * 1_0E3 )}''' lowercase__ = f'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" , private=_SCREAMING_SNAKE_CASE ) hf_api.upload_file( token=_SCREAMING_SNAKE_CASE , path_or_fileobj=str(_SCREAMING_SNAKE_CASE ) , path_in_repo="""data/text_data.txt""" , repo_id=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def UpperCamelCase ( __magic_name__ : Tuple , __magic_name__ : int , __magic_name__ : Union[str, Any] ) -> Tuple: """simple docstring""" return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="""session""" ) def UpperCamelCase ( __magic_name__ : List[Any] , 
__magic_name__ : Dict , __magic_name__ : str ) -> Tuple: """simple docstring""" lowercase__ = f'''repo_zipped_txt_data-{int(time.time() * 1_0E3 )}''' lowercase__ = f'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" , private=_SCREAMING_SNAKE_CASE ) hf_api.upload_file( token=_SCREAMING_SNAKE_CASE , path_or_fileobj=str(_SCREAMING_SNAKE_CASE ) , path_in_repo="""data.zip""" , repo_id=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def UpperCamelCase ( __magic_name__ : str , __magic_name__ : int , __magic_name__ : Any ) -> Union[str, Any]: """simple docstring""" return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="""session""" ) def UpperCamelCase ( __magic_name__ : Dict , __magic_name__ : str , __magic_name__ : Tuple ) -> List[str]: """simple docstring""" lowercase__ = f'''repo_zipped_img_data-{int(time.time() * 1_0E3 )}''' lowercase__ = f'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" , private=_SCREAMING_SNAKE_CASE ) hf_api.upload_file( token=_SCREAMING_SNAKE_CASE , path_or_fileobj=str(_SCREAMING_SNAKE_CASE ) , path_in_repo="""data.zip""" , repo_id=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def UpperCamelCase ( __magic_name__ : Any , __magic_name__ : Optional[Any] , __magic_name__ : List[str] ) -> Union[str, Any]: """simple docstring""" return hf_private_dataset_repo_zipped_img_data_
305
"""simple docstring""" from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class __A (snake_case__): '''simple docstring''' @slow @require_torch def lowerCAmelCase ( self : Union[str, Any] ) ->Dict: """simple docstring""" snake_case_ = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" ) snake_case_ = BertTokenizer.from_pretrained("""bert-base-uncased""" ) snake_case_ = bertabert.config.encoder.vocab_size snake_case_ = tokenizer.sep_token_id snake_case_ = tokenizer.cls_token_id snake_case_ = 128 snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" ) snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" ) snake_case_ = train_dataset.select(range(32 ) ) snake_case_ = val_dataset.select(range(16 ) ) snake_case_ = 4 def _map_to_encoder_decoder_inputs(UpperCAmelCase_ : int ): # Tokenizer will automatically set [BOS] <text> [EOS] snake_case_ = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=512 ) snake_case_ = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=128 ) snake_case_ = inputs.input_ids snake_case_ = inputs.attention_mask snake_case_ = outputs.input_ids snake_case_ = outputs.input_ids.copy() snake_case_ = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""] ] snake_case_ = outputs.attention_mask assert all(len(UpperCAmelCase_ ) == 512 for x in inputs.input_ids ) assert all(len(UpperCAmelCase_ ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(UpperCAmelCase_ : Union[str, Any] ): snake_case_ = pred.label_ids snake_case_ = pred.predictions # all unnecessary tokens are removed snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCAmelCase_ ) )] ) / len(UpperCAmelCase_ ) return {"accuracy": accuracy} # map train dataset snake_case_ = train_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , ) train_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) # same for validation dataset snake_case_ = val_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , ) val_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) snake_case_ = self.get_auto_remove_tmp_dir() snake_case_ = SeqaSeqTrainingArguments( output_dir=UpperCAmelCase_ , per_device_train_batch_size=UpperCAmelCase_ , per_device_eval_batch_size=UpperCAmelCase_ , predict_with_generate=UpperCAmelCase_ , evaluation_strategy="""steps""" , do_train=UpperCAmelCase_ , do_eval=UpperCAmelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer snake_case_ = SeqaSeqTrainer( 
model=UpperCAmelCase_ , args=UpperCAmelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCAmelCase_ , eval_dataset=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , ) # start training trainer.train()
347
0
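Editorial aside, not a dataset row: the seq2seq trainer test above replaces padding tokens in the labels with -100 so the cross-entropy loss (ignore_index=-100 in PyTorch) skips them. A tiny sketch of that masking with made-up token ids:

pad_token_id = 0
labels = [[101, 2054, 102, 0, 0], [101, 2003, 2009, 102, 0]]
masked = [[-100 if tok == pad_token_id else tok for tok in seq] for seq in labels]
assert masked == [[101, 2054, 102, -100, -100], [101, 2003, 2009, 102, -100]]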
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging _A = logging.get_logger(__name__) _A = { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json', # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class UpperCAmelCase__ ( snake_case__ ): """simple docstring""" UpperCAmelCase__ : List[str] = """blenderbot-small""" UpperCAmelCase__ : Union[str, Any] = ["""past_key_values"""] UpperCAmelCase__ : str = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self , A_=50265 , A_=512 , A_=8 , A_=2048 , A_=16 , A_=8 , A_=2048 , A_=16 , A_=0.0 , A_=0.0 , A_=True , A_=True , A_="gelu" , A_=512 , A_=0.1 , A_=0.0 , A_=0.0 , A_=0.02 , A_=1 , A_=False , A_=0 , A_=1 , A_=2 , A_=2 , **A_ , ) -> Optional[Any]: __UpperCamelCase =vocab_size __UpperCamelCase =max_position_embeddings __UpperCamelCase =d_model __UpperCamelCase =encoder_ffn_dim __UpperCamelCase =encoder_layers __UpperCamelCase =encoder_attention_heads __UpperCamelCase =decoder_ffn_dim __UpperCamelCase =decoder_layers __UpperCamelCase =decoder_attention_heads __UpperCamelCase =dropout __UpperCamelCase =attention_dropout __UpperCamelCase =activation_dropout __UpperCamelCase =activation_function __UpperCamelCase =init_std __UpperCamelCase =encoder_layerdrop __UpperCamelCase =decoder_layerdrop __UpperCamelCase =use_cache __UpperCamelCase =encoder_layers __UpperCamelCase =scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , decoder_start_token_id=UpperCAmelCase_ , forced_eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ , ) class UpperCAmelCase__ ( snake_case__ ): """simple docstring""" @property def _a ( self ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: __UpperCamelCase =OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ] ) if self.use_past: __UpperCamelCase ={0: 'batch'} __UpperCamelCase ={0: 'batch', 1: 'past_decoder_sequence + sequence'} else: __UpperCamelCase ={0: 'batch', 1: 'decoder_sequence'} __UpperCamelCase ={0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(UpperCAmelCase_ , direction='inputs' ) elif self.task == "causal-lm": # TODO: figure this case out. 
__UpperCamelCase =OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ] ) if self.use_past: __UpperCamelCase , __UpperCamelCase =self.num_layers for i in range(UpperCAmelCase_ ): __UpperCamelCase ={0: 'batch', 2: 'past_sequence + sequence'} __UpperCamelCase ={0: 'batch', 2: 'past_sequence + sequence'} else: __UpperCamelCase =OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}), ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}), ] ) return common_inputs @property def _a ( self ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: __UpperCamelCase =super().outputs else: __UpperCamelCase =super(UpperCAmelCase_ , self ).outputs if self.use_past: __UpperCamelCase , __UpperCamelCase =self.num_layers for i in range(UpperCAmelCase_ ): __UpperCamelCase ={0: 'batch', 2: 'past_sequence + sequence'} __UpperCamelCase ={0: 'batch', 2: 'past_sequence + sequence'} return common_outputs def _a ( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ) -> Mapping[str, Any]: __UpperCamelCase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # Generate decoder inputs __UpperCamelCase =seq_length if not self.use_past else 1 __UpperCamelCase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) __UpperCamelCase ={f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()} __UpperCamelCase =dict(**UpperCAmelCase_ , **UpperCAmelCase_ ) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch __UpperCamelCase , __UpperCamelCase =common_inputs['input_ids'].shape __UpperCamelCase =common_inputs['decoder_input_ids'].shape[1] __UpperCamelCase , __UpperCamelCase =self.num_attention_heads __UpperCamelCase =( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) __UpperCamelCase =decoder_seq_length + 3 __UpperCamelCase =( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) __UpperCamelCase =torch.cat( [common_inputs['decoder_attention_mask'], torch.ones(UpperCAmelCase_ , UpperCAmelCase_ )] , dim=1 ) __UpperCamelCase =[] # If the number of encoder and decoder layers are present in the model configuration, both are considered __UpperCamelCase , __UpperCamelCase =self.num_layers __UpperCamelCase =min(UpperCAmelCase_ , UpperCAmelCase_ ) __UpperCamelCase =max(UpperCAmelCase_ , UpperCAmelCase_ ) - min_num_layers __UpperCamelCase ='encoder' if num_encoder_layers > num_decoder_layers else 'decoder' for _ in range(UpperCAmelCase_ ): common_inputs["past_key_values"].append( ( torch.zeros(UpperCAmelCase_ ), torch.zeros(UpperCAmelCase_ ), torch.zeros(UpperCAmelCase_ ), torch.zeros(UpperCAmelCase_ ), ) ) # TODO: test this. 
__UpperCamelCase =encoder_shape if remaining_side_name == 'encoder' else decoder_shape for _ in range(UpperCAmelCase_ , UpperCAmelCase_ ): common_inputs["past_key_values"].append((torch.zeros(UpperCAmelCase_ ), torch.zeros(UpperCAmelCase_ )) ) return common_inputs def _a ( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ) -> Mapping[str, Any]: __UpperCamelCase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch __UpperCamelCase , __UpperCamelCase =common_inputs['input_ids'].shape # Not using the same length for past_key_values __UpperCamelCase =seqlen + 2 __UpperCamelCase , __UpperCamelCase =self.num_layers __UpperCamelCase , __UpperCamelCase =self.num_attention_heads __UpperCamelCase =( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) __UpperCamelCase =common_inputs['attention_mask'].dtype __UpperCamelCase =torch.cat( [common_inputs['attention_mask'], torch.ones(UpperCAmelCase_ , UpperCAmelCase_ , dtype=UpperCAmelCase_ )] , dim=1 ) __UpperCamelCase =[ (torch.zeros(UpperCAmelCase_ ), torch.zeros(UpperCAmelCase_ )) for _ in range(UpperCAmelCase_ ) ] return common_inputs def _a ( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ) -> Mapping[str, Any]: __UpperCamelCase =compute_effective_axis_dimension( UpperCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __UpperCamelCase =tokenizer.num_special_tokens_to_add(UpperCAmelCase_ ) __UpperCamelCase =compute_effective_axis_dimension( UpperCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCAmelCase_ ) # Generate dummy inputs according to compute batch and sequence __UpperCamelCase =[' '.join([tokenizer.unk_token] ) * seq_length] * batch_size __UpperCamelCase =dict(tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ ) ) return common_inputs def _a ( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: __UpperCamelCase =self._generate_dummy_inputs_for_default_and_seqaseq_lm( UpperCAmelCase_ , batch_size=UpperCAmelCase_ , seq_length=UpperCAmelCase_ , is_pair=UpperCAmelCase_ , framework=UpperCAmelCase_ ) elif self.task == "causal-lm": __UpperCamelCase =self._generate_dummy_inputs_for_causal_lm( UpperCAmelCase_ , batch_size=UpperCAmelCase_ , seq_length=UpperCAmelCase_ , is_pair=UpperCAmelCase_ , framework=UpperCAmelCase_ ) else: __UpperCamelCase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering( UpperCAmelCase_ , batch_size=UpperCAmelCase_ , seq_length=UpperCAmelCase_ , is_pair=UpperCAmelCase_ , framework=UpperCAmelCase_ ) return common_inputs def _a ( self , A_ , A_ , A_ , A_ ) -> Optional[int]: if self.task in ["default", "seq2seq-lm"]: __UpperCamelCase =super()._flatten_past_key_values_(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) else: __UpperCamelCase =super(UpperCAmelCase_ , self )._flatten_past_key_values_( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
62
"""simple docstring""" # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys __SCREAMING_SNAKE_CASE : Tuple = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8') __SCREAMING_SNAKE_CASE : Tuple = subprocess.check_output(f"""git diff --name-only {fork_point_sha}""".split()).decode('utf-8').split() __SCREAMING_SNAKE_CASE : Any = '|'.join(sys.argv[1:]) __SCREAMING_SNAKE_CASE : Optional[Any] = re.compile(Rf"""^({joined_dirs}).*?\.py$""") __SCREAMING_SNAKE_CASE : List[str] = [x for x in modified_files if regex.match(x)] print(' '.join(relevant_modified_files), end='')
347
0
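Editorial aside, not a dataset row: the git helper script above filters `git diff` output with a regex built from the requested top-level directories. A self-contained sketch of that filter on sample paths:

import re

dirs = ["utils", "src", "tests", "examples"]  # sample CLI arguments
regex = re.compile(rf"^({'|'.join(dirs)}).*?\.py$")

candidates = ["src/foo.py", "docs/index.md", "tests/test_bar.py", "setup.py"]
assert [f for f in candidates if regex.match(f)] == ["src/foo.py", "tests/test_bar.py"]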
"""simple docstring""" lowerCAmelCase_ = 'Alexander Joslin' import operator as op from .stack import Stack def __UpperCAmelCase ( __lowerCamelCase ) -> int: lowercase__ : str = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub} lowercase__ : Dict = Stack() lowercase__ : Tuple = Stack() for i in equation: if i.isdigit(): # RULE 1 operand_stack.push(int(_SCREAMING_SNAKE_CASE ) ) elif i in operators: # RULE 2 operator_stack.push(_SCREAMING_SNAKE_CASE ) elif i == ")": # RULE 4 lowercase__ : str = operator_stack.peek() operator_stack.pop() lowercase__ : Any = operand_stack.peek() operand_stack.pop() lowercase__ : str = operand_stack.peek() operand_stack.pop() lowercase__ : List[str] = operators[opr](_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) operand_stack.push(_SCREAMING_SNAKE_CASE ) # RULE 5 return operand_stack.peek() if __name__ == "__main__": lowerCAmelCase_ = '(5 + ((4 * 2) * (2 + 3)))' # answer = 45 print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
16
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } __SCREAMING_SNAKE_CASE : List[Any] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = {} with open(_SCREAMING_SNAKE_CASE , """r""" ) as file: for line_number, line in enumerate(_SCREAMING_SNAKE_CASE ): snake_case_ = line.strip() if line: snake_case_ = line.split() snake_case_ = line_number snake_case_ = words[0] snake_case_ = value return result def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: for attribute in key.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_SCREAMING_SNAKE_CASE ): snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]] snake_case_ = """param""" if weight_type is not None and weight_type != "param": snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape elif weight_type is not None and weight_type == "param": snake_case_ = hf_pointer for attribute in hf_param_name.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = shape_pointer.shape # let's reduce dimension snake_case_ = value[0] else: snake_case_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": snake_case_ = value elif weight_type == "weight_g": snake_case_ = value elif weight_type == "weight_v": snake_case_ = value elif weight_type == "bias": snake_case_ = value elif weight_type == "param": for attribute in hf_param_name.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = value else: snake_case_ = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: snake_case_ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_SCREAMING_SNAKE_CASE ): snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]] snake_case_ = """param""" if weight_type is not None and weight_type != "param": snake_case_ = """.""".join([key, weight_type] ) elif weight_type is not None and weight_type == "param": snake_case_ = """.""".join([key, hf_param_name] ) else: snake_case_ = key snake_case_ = value if """lm_head""" in full_key else value[0] __SCREAMING_SNAKE_CASE : int = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> List[str]: snake_case_ = False for key, mapped_key in MAPPING.items(): snake_case_ = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: snake_case_ = True if "*" in mapped_key: snake_case_ = name.split(_SCREAMING_SNAKE_CASE )[0].split(""".""" )[-2] snake_case_ = mapped_key.replace("""*""" , _SCREAMING_SNAKE_CASE ) if "weight_g" in name: snake_case_ = """weight_g""" elif "weight_v" in name: snake_case_ = """weight_v""" elif "bias" in name: snake_case_ = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj snake_case_ = """weight""" else: snake_case_ = None if hf_dict is not None: rename_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return is_used return is_used def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = [] snake_case_ = fairseq_model.state_dict() snake_case_ = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): snake_case_ = False if "conv_layers" in name: load_conv_layer( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == """group""" , ) snake_case_ = True else: snake_case_ = load_wavaveca_layer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not is_used: unused_weights.append(_SCREAMING_SNAKE_CASE ) logger.warning(f"""Unused weights: {unused_weights}""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: snake_case_ = full_name.split("""conv_layers.""" )[-1] snake_case_ = name.split(""".""" ) snake_case_ = int(items[0] ) snake_case_ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size 
{value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_SCREAMING_SNAKE_CASE ) @torch.no_grad() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False ) -> int: if config_path is not None: snake_case_ = WavaVecaConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) else: snake_case_ = WavaVecaConfig() if is_seq_class: snake_case_ = read_txt_into_dict(_SCREAMING_SNAKE_CASE ) snake_case_ = idalabel snake_case_ = WavaVecaForSequenceClassification(_SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE ) elif is_finetuned: if dict_path: snake_case_ = Dictionary.load(_SCREAMING_SNAKE_CASE ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq snake_case_ = target_dict.pad_index snake_case_ = target_dict.bos_index snake_case_ = target_dict.eos_index snake_case_ = len(target_dict.symbols ) snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , """vocab.json""" ) if not os.path.isdir(_SCREAMING_SNAKE_CASE ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_SCREAMING_SNAKE_CASE ) ) return os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) snake_case_ = target_dict.indices # fairseq has the <pad> and <s> switched snake_case_ = 0 snake_case_ = 1 with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaCTCTokenizer( _SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_SCREAMING_SNAKE_CASE , ) snake_case_ = True if config.feat_extract_norm == """layer""" else False snake_case_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) snake_case_ = WavaVecaProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE ) processor.save_pretrained(_SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaForCTC(_SCREAMING_SNAKE_CASE ) else: snake_case_ = 
WavaVecaForPreTraining(_SCREAMING_SNAKE_CASE ) if is_finetuned or is_seq_class: snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: snake_case_ = argparse.Namespace(task="""audio_pretraining""" ) snake_case_ = fairseq.tasks.setup_task(_SCREAMING_SNAKE_CASE ) snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_SCREAMING_SNAKE_CASE ) snake_case_ = model[0].eval() recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , not is_finetuned ) hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) parser.add_argument( '--is_seq_class', action='store_true', help='Whether the model to convert is a fine-tuned sequence classification model or not', ) __SCREAMING_SNAKE_CASE : Any = parser.parse_args() __SCREAMING_SNAKE_CASE : List[Any] = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
347
0
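Editorial aside, not a dataset row: Dijkstra's two-stack evaluator above pops one operator and two operands at every ')'. A self-contained trace of the same rule set on the sample equation, using plain lists as stacks:

import operator as op

operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
operands, pending = [], []
for ch in "(5 + ((4 * 2) * (2 + 3)))":
    if ch.isdigit():
        operands.append(int(ch))  # RULE 1: push operands
    elif ch in operators:
        pending.append(ch)  # RULE 2: push operators
    elif ch == ")":
        opr = pending.pop()  # RULE 4: apply on closing paren
        right, left = operands.pop(), operands.pop()
        operands.append(operators[opr](left, right))
assert operands[-1] == 45  # 5 + ((4 * 2) * (2 + 3)) = 5 + 40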
'''simple docstring'''
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand


def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": F"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d: dict) -> str:
        return "\n".join([F"- {prop}: {val}" for prop, val in d.items()]) + "\n"
import tempfile
import unittest

import numpy as np

import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )
    from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel

if is_torch_available():
    import torch


class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )

    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)

    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
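# The recipe test_batch_generation encodes, reduced to its essentials (tokenizer and
# model checkpoints are the ones the test itself downloads; treat this as a sketch):
#
#     tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
#     model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
#     jit_generate = jax.jit(model.generate)   # compile once, reuse for every batch
#     inputs = tokenizer(["Hello"], return_tensors="np", padding=True)
#     sequences = jit_generate(
#         inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
#     ).sequences
#     print(tokenizer.batch_decode(sequences, skip_special_tokens=True))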
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
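# Context for the templates above: generated dummy objects let `import transformers`
# succeed without optional backends and only fail when an object is actually used.
# A minimal sketch of the mechanism (the real DummyObject/requires_backends live in
# transformers.utils; treat this as an illustration, not the exact implementation):
#
#     class DummyObject(type):
#         """Metaclass that raises on any public attribute access, including instantiation."""
#         def __getattribute__(cls, key):
#             if key.startswith("_"):
#                 return super().__getattribute__(key)
#             requires_backends(cls, cls._backends)  # raises ImportError naming the missing backend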
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
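# A short usage sketch for the config above (illustrative only; the segmentation
# model class that consumes it is defined elsewhere in the library):
#
#     config = UperNetConfig()                     # falls back to the default ResNet backbone
#     assert config.backbone_config.model_type == "resnet"
#     as_dict = config.to_dict()                   # nested backbone config serialized to a plain dict
#     restored = UperNetConfig(backbone_config=as_dict["backbone_config"])  # exercises the dict branch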
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
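# For reference, the function under test only needs to encode the comparison the
# assertions above describe. A minimal sketch consistent with this test (an
# assumption about datasets/utils/info_utils.py, not a verbatim copy):
#
#     def is_small_dataset(dataset_size):
#         if dataset_size and datasets.config.IN_MEMORY_MAX_SIZE:
#             return dataset_size < datasets.config.IN_MEMORY_MAX_SIZE
#         return False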
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        filepath = os.path.join(self.tmpdirname, "file.npz")
        np.savez(filepath, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=filepath)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
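# Outside the test suite, the same processor is driven like this (checkpoint and
# voice preset names taken from the fixtures above; treat the snippet as a sketch):
#
#     processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
#     inputs = processor(text="This is a test string", voice_preset="en_speaker_1")
#     # inputs["input_ids"] feeds the Bark model; inputs["history_prompt"] carries
#     # the semantic/coarse/fine prompt arrays checked in test_speaker_embeddings.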
from __future__ import annotations


def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Return the shortest distances from ``src`` to every vertex, or raise on a negative cycle."""
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    # Relax all edges vertex_count - 1 times.
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x) for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
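# A tiny worked example, runnable without the interactive prompts above:
#
#     edges = [
#         {"src": 0, "dst": 1, "weight": 2},
#         {"src": 1, "dst": 2, "weight": 3},
#         {"src": 0, "dst": 2, "weight": 10},
#     ]
#     print(bellman_ford(edges, 3, 3, 0))   # -> [0.0, 2.0, 5.0]
#
# The relaxation loop runs vertex_count - 1 times because a shortest simple path
# uses at most that many edges; one further improving pass would mean a negative cycle.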
"""simple docstring""" import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 __SCREAMING_SNAKE_CASE : int = sys.version_info >= (3, 10) def _a ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE ) @dataclass class __A : '''simple docstring''' __lowercase: int __lowercase: float __lowercase: str __lowercase: bool @dataclass class __A : '''simple docstring''' __lowercase: int = 42 __lowercase: str = field(default="""toto""" , metadata={"""help""": """help message"""}) @dataclass class __A : '''simple docstring''' __lowercase: bool = False __lowercase: bool = True __lowercase: Optional[bool] = None class __A (snake_case__): '''simple docstring''' __lowercase: str = """titi""" __lowercase: Any = """toto""" class __A (snake_case__): '''simple docstring''' __lowercase: int = """titi""" __lowercase: Optional[Any] = """toto""" __lowercase: List[Any] = 42 @dataclass class __A : '''simple docstring''' __lowercase: BasicEnum = "toto" def lowerCAmelCase ( self : int ) ->List[Any]: """simple docstring""" snake_case_ = BasicEnum(self.foo ) @dataclass class __A : '''simple docstring''' __lowercase: MixedTypeEnum = "toto" def lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]: """simple docstring""" snake_case_ = MixedTypeEnum(self.foo ) @dataclass class __A : '''simple docstring''' __lowercase: Optional[int] = None __lowercase: Optional[float] = field(default=snake_case__ , metadata={"""help""": """help message"""}) __lowercase: Optional[str] = None __lowercase: Optional[List[str]] = list_field(default=[]) __lowercase: Optional[List[int]] = list_field(default=[]) @dataclass class __A : '''simple docstring''' __lowercase: List[int] = list_field(default=[]) __lowercase: List[int] = list_field(default=[1, 2, 3]) __lowercase: List[str] = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) __lowercase: List[float] = list_field(default=[0.1, 0.2, 0.3]) @dataclass class __A : '''simple docstring''' __lowercase: List[int] = field() __lowercase: str = field() __lowercase: BasicEnum = field() def lowerCAmelCase ( self : Any ) ->str: """simple docstring""" snake_case_ = BasicEnum(self.required_enum ) @dataclass class __A : '''simple docstring''' __lowercase: int __lowercase: "BasicEnum" = field() __lowercase: "Optional[bool]" = None __lowercase: "str" = field(default="""toto""" , metadata={"""help""": """help message"""}) __lowercase: "List[str]" = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) if is_python_no_less_than_3_10: @dataclass class __A : '''simple docstring''' __lowercase: bool = False __lowercase: bool = True __lowercase: bool | None = None @dataclass class __A : '''simple docstring''' __lowercase: int | None = None __lowercase: float | None = field(default=snake_case__ , metadata={"""help""": """help message"""}) __lowercase: str | None = None __lowercase: list[str] | None = list_field(default=[]) __lowercase: list[int] | None = list_field(default=[]) class __A (unittest.TestCase): '''simple docstring''' def lowerCAmelCase ( 
self : Optional[int] , UpperCAmelCase_ : argparse.ArgumentParser , UpperCAmelCase_ : argparse.ArgumentParser ) ->Optional[int]: """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""} snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get("""choices""" , UpperCAmelCase_ ) and yy.get("""choices""" , UpperCAmelCase_ ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx["""type"""](UpperCAmelCase_ ) , yy["""type"""](UpperCAmelCase_ ) ) del xx["type"], yy["type"] self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : int ) ->Any: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--bar""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--baz""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--flag""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""] ((snake_case_) , ) = parser.parse_args_into_dataclasses(UpperCAmelCase_ , look_for_args_file=UpperCAmelCase_ ) self.assertFalse(example.flag ) def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , default=42 , type=UpperCAmelCase_ ) expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]: """simple docstring""" snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) expected.add_argument("""--baz""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument("""--no_baz""" , action="""store_false""" , default=UpperCAmelCase_ , dest="""baz""" ) expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ ) snake_case_ = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCAmelCase_ ) for dataclass_type in dataclass_types: snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """--no_baz"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """--baz"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = 
parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) def lowerCAmelCase ( self : int ) ->List[str]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument( """--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(args.foo , """toto""" ) snake_case_ = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) snake_case_ = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo , """titi""" ) snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) snake_case_ = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo , 42 ) snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def lowerCAmelCase ( self : Dict ) ->str: """simple docstring""" @dataclass class __A : '''simple docstring''' __lowercase: Literal["titi", "toto", 42] = "toto" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument( """--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(args.foo , """toto""" ) snake_case_ = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo , """titi""" ) snake_case_ = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo , 42 ) def lowerCAmelCase ( self : Optional[int] ) ->Dict: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=UpperCAmelCase_ ) expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ ) expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual( UpperCAmelCase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , ) snake_case_ = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() ) self.assertEqual(UpperCAmelCase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ ) expected.add_argument("""--bar""" , 
default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="""help message""" ) expected.add_argument("""--baz""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ ) expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) snake_case_ = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCAmelCase_ ) for dataclass_type in dataclass_types: snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , bar=UpperCAmelCase_ , baz=UpperCAmelCase_ , ces=[] , des=[] ) ) snake_case_ = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) ) def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--required_list""" , nargs="""+""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--required_str""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument( """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument( """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , ) expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ ) expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" ) expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict ) ->Tuple: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } snake_case_ = parser.parse_dict(UpperCAmelCase_ )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, """extra""": 42, } self.assertRaises(UpperCAmelCase_ , parser.parse_dict , UpperCAmelCase_ , allow_extra_keys=UpperCAmelCase_ ) def lowerCAmelCase ( self : Any ) ->Dict: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ = os.path.join(UpperCAmelCase_ , """temp_json""" ) os.mkdir(UpperCAmelCase_ ) with open(temp_local_path + """.json""" , """w+""" ) as f: json.dump(UpperCAmelCase_ , UpperCAmelCase_ ) 
snake_case_ = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[int] ) ->List[str]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ = os.path.join(UpperCAmelCase_ , """temp_yaml""" ) os.mkdir(UpperCAmelCase_ ) with open(temp_local_path + """.yaml""" , """w+""" ) as f: yaml.dump(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict ) ->Any: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ )
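# The dataclass-driven pattern these tests exercise, in one self-contained sketch
# (field names below are illustrative, not taken from the tests):
#
#     from dataclasses import dataclass, field
#     from transformers import HfArgumentParser
#
#     @dataclass
#     class RunArgs:
#         learning_rate: float = field(default=5e-5, metadata={"help": "peak learning rate"})
#         do_eval: bool = False   # bools become string_to_bool flags usable as a bare `--do_eval`
#
#     (run_args,) = HfArgumentParser(RunArgs).parse_args_into_dataclasses(
#         ["--learning_rate", "1e-4", "--do_eval"]
#     )
#     assert run_args.learning_rate == 1e-4 and run_args.do_eval is True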
import itertools
from dataclasses import dataclass
from typing import List, Optional

import pyarrow as pa
import pyarrow.parquet as pq

import datasets
from datasets.table import table_cast


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
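# This builder is what backs `load_dataset("parquet", ...)`; extra keyword arguments
# flow into ParquetConfig. A hedged usage sketch (file paths are placeholders):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("parquet", data_files={"train": "path/to/train.parquet"}, split="train")
#     # Column pruning is pushed down to pyarrow via ParquetConfig.columns:
#     ids_only = load_dataset("parquet", data_files="path/to/train.parquet", columns=["id"], split="train")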
"""simple docstring""" import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy __SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , ) -> Optional[Any]: snake_case_ = bnb_quantization_config.load_in_abit snake_case_ = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,""" """ make sure you have the latest version of `bitsandbytes` installed.""" ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,""" """make sure you have the latest version of `bitsandbytes` installed.""" ) snake_case_ = [] # custom device map if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(device_map.keys() ) > 1: snake_case_ = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: snake_case_ = get_keys_to_not_convert(_SCREAMING_SNAKE_CASE ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(_SCREAMING_SNAKE_CASE ) snake_case_ = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: snake_case_ = [] snake_case_ = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(_SCREAMING_SNAKE_CASE ) # compatibility with peft snake_case_ = load_in_abit snake_case_ = load_in_abit snake_case_ = get_parameter_device(_SCREAMING_SNAKE_CASE ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( """It is not recommended to quantize a loaded model. 
""" """The model should be instantiated under the `init_empty_weights` context manager.""" ) snake_case_ = replace_with_bnb_layers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) # convert param to the right dtype snake_case_ = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: snake_case_ = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" ) snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(_SCREAMING_SNAKE_CASE ): param.to(_SCREAMING_SNAKE_CASE ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" ) logger.info( f"""The model device type is {model_device.type}. However, cuda is needed for quantization.""" """We move the model to cuda.""" ) return model elif weights_location is None: raise RuntimeError( f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): snake_case_ = replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) snake_case_ = get_quantized_model_device_map( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_memory=_SCREAMING_SNAKE_CASE , no_split_module_classes=_SCREAMING_SNAKE_CASE , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): snake_case_ = True snake_case_ = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] ) load_checkpoint_in_model( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=_SCREAMING_SNAKE_CASE , offload_state_dict=_SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(_SCREAMING_SNAKE_CASE , device_map=_SCREAMING_SNAKE_CASE , offload_dir=_SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: if device_map is None: if torch.cuda.is_available(): snake_case_ = {"""""": torch.cuda.current_device()} else: raise RuntimeError("""No GPU found. 
A GPU is needed for quantization.""" ) logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( """If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """ """'sequential'.""" ) snake_case_ = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) snake_case_ = {} snake_case_ = special_dtypes snake_case_ = no_split_module_classes snake_case_ = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": snake_case_ = get_balanced_memory( _SCREAMING_SNAKE_CASE , low_zero=(device_map == """balanced_low_0""") , max_memory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) snake_case_ = max_memory snake_case_ = infer_auto_device_map(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # check if don't have any quantized module on the cpu snake_case_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules snake_case_ = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( """ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. """ ) else: logger.info( """Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit""" ) del device_map_without_some_modules return device_map def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: if modules_to_not_convert is None: snake_case_ = [] snake_case_ , snake_case_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return model def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) -> List[Any]: snake_case_ = False for name, module in model.named_children(): if current_key_name is None: snake_case_ = [] current_key_name.append(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` snake_case_ = """.""".join(_SCREAMING_SNAKE_CASE ) snake_case_ = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: snake_case_ = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: snake_case_ = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: snake_case_ = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" ) snake_case_ = module.weight.data if module.bias is not None: snake_case_ = module.bias.data bnb_module.requires_grad_(_SCREAMING_SNAKE_CASE ) setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = True if len(list(module.children() ) ) > 0: snake_case_ , snake_case_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _a ( _SCREAMING_SNAKE_CASE ) -> Any: # Create a copy of the model with init_empty_weights(): snake_case_ = deepcopy(_SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside `init_empty_weights` context manager` snake_case_ = find_tied_parameters(_SCREAMING_SNAKE_CASE ) # For compatibility with Accelerate < 0.18 if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): snake_case_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: snake_case_ = sum(_SCREAMING_SNAKE_CASE , [] ) snake_case_ = len(_SCREAMING_SNAKE_CASE ) > 0 # Check if it is a base model snake_case_ = False if hasattr(_SCREAMING_SNAKE_CASE , """base_model_prefix""" ): snake_case_ = not hasattr(_SCREAMING_SNAKE_CASE , model.base_model_prefix 
) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head snake_case_ = list(model.named_children() ) snake_case_ = [list_modules[-1][0]] # add last module together with tied weights snake_case_ = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE ) snake_case_ = list(set(_SCREAMING_SNAKE_CASE ) ) + list(_SCREAMING_SNAKE_CASE ) # remove ".weight" from the keys snake_case_ = [""".weight""", """.bias"""] snake_case_ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: snake_case_ = name.replace(_SCREAMING_SNAKE_CASE , """""" ) filtered_module_names.append(_SCREAMING_SNAKE_CASE ) return filtered_module_names def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: for m in model.modules(): if isinstance(_SCREAMING_SNAKE_CASE , bnb.nn.Linearabit ): return True return False def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: return next(parameter.parameters() ).device def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0 , dtype=_SCREAMING_SNAKE_CASE , value=_SCREAMING_SNAKE_CASE ) snake_case_ = param_name snake_case_ = model if "." in tensor_name: snake_case_ = tensor_name.split(""".""" ) for split in splits[:-1]: snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if new_module is None: raise ValueError(f"""{module} has no attribute {split}.""" ) snake_case_ = new_module snake_case_ = splits[-1] # offload weights snake_case_ = False offload_weight(module._parameters[tensor_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) if hasattr(module._parameters[tensor_name] , """SCB""" ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE , ) else: offload_weight(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) offload_weight(_SCREAMING_SNAKE_CASE , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """meta""" , dtype=_SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
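# Downstream, this module is reached through `accelerate.utils.load_and_quantize_model`
# together with `BnbQuantizationConfig`. A hedged usage sketch (the model class and
# checkpoint path below are placeholders):
#
#     from accelerate import init_empty_weights
#     from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True)   # or load_in_4bit=True
#     with init_empty_weights():
#         empty_model = MyModel()   # skeleton on the meta device, no weight memory yet
#     model = load_and_quantize_model(
#         empty_model, bnb_config, weights_location="path/to/checkpoint", device_map="auto"
#     )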
import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase_ ( snake_case__ , unittest.TestCase ): UpperCAmelCase__ : str = CanineTokenizer UpperCAmelCase__ : Union[str, Any] = False def snake_case_ ( self ) -> Any: super().setUp() UpperCamelCase : Optional[Any] = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def snake_case_ ( self ) -> int: return CanineTokenizer.from_pretrained('google/canine-s' ) def snake_case_ ( self, **SCREAMING_SNAKE_CASE_ ) -> CanineTokenizer: UpperCamelCase : List[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname, **UpperCAmelCase_ ) UpperCamelCase : Optional[Any] = 1024 return tokenizer @require_torch def snake_case_ ( self ) -> List[Any]: UpperCamelCase : str = self.canine_tokenizer UpperCamelCase : Optional[Any] = ['Life is like a box of chocolates.', 'You never know what you\'re gonna get.'] # fmt: off UpperCamelCase : Optional[int] = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0] # fmt: on UpperCamelCase : List[str] = tokenizer(UpperCAmelCase_, padding=UpperCAmelCase_, return_tensors='pt' ) self.assertIsInstance(UpperCAmelCase_, UpperCAmelCase_ ) UpperCamelCase : Optional[Any] = list(batch.input_ids.numpy()[0] ) self.assertListEqual(UpperCAmelCase_, UpperCAmelCase_ ) self.assertEqual((2, 39), batch.input_ids.shape ) self.assertEqual((2, 39), batch.attention_mask.shape ) @require_torch def snake_case_ ( self ) -> Union[str, Any]: UpperCamelCase : str = self.canine_tokenizer UpperCamelCase : int = ['Once there was a man.', 'He wrote a test in HuggingFace Tranformers.'] UpperCamelCase : Union[str, Any] = tokenizer(UpperCAmelCase_, padding=UpperCAmelCase_, return_tensors='pt' ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn('input_ids', UpperCAmelCase_ ) self.assertIn('attention_mask', UpperCAmelCase_ ) self.assertIn('token_type_ids', UpperCAmelCase_ ) @require_torch def snake_case_ ( self ) -> List[Any]: UpperCamelCase : Any = self.canine_tokenizer UpperCamelCase : Dict = [ 'What\'s the weater?', 'It\'s about 25 degrees.', ] UpperCamelCase : Optional[int] = tokenizer( text_target=UpperCAmelCase_, max_length=32, padding='max_length', truncation=UpperCAmelCase_, return_tensors='pt' ) self.assertEqual(32, targets['input_ids'].shape[1] ) def snake_case_ ( self ) -> int: UpperCamelCase : List[str] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): self.assertNotEqual(tokenizer.model_max_length, 42 ) # Now let's start the test UpperCamelCase : Any = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc UpperCamelCase : Any = tempfile.mkdtemp() UpperCamelCase : str = ' He is very happy, UNwant\u00E9d,running' UpperCamelCase : int = tokenizer.encode(UpperCAmelCase_, add_special_tokens=UpperCAmelCase_ ) tokenizer.save_pretrained(UpperCAmelCase_ ) UpperCamelCase : Dict = tokenizer.__class__.from_pretrained(UpperCAmelCase_ ) UpperCamelCase : str = 
after_tokenizer.encode(UpperCAmelCase_, add_special_tokens=UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_, UpperCAmelCase_ ) shutil.rmtree(UpperCAmelCase_ ) UpperCamelCase : Any = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc UpperCamelCase : Any = tempfile.mkdtemp() UpperCamelCase : Optional[int] = ' He is very happy, UNwant\u00E9d,running' UpperCamelCase : Optional[int] = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: UpperCamelCase : Dict = chr(0XE007 ) additional_special_tokens.append(UpperCAmelCase_ ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) UpperCamelCase : List[str] = tokenizer.encode(UpperCAmelCase_, add_special_tokens=UpperCAmelCase_ ) tokenizer.save_pretrained(UpperCAmelCase_ ) UpperCamelCase : Tuple = tokenizer.__class__.from_pretrained(UpperCAmelCase_ ) UpperCamelCase : Optional[int] = after_tokenizer.encode(UpperCAmelCase_, add_special_tokens=UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_, UpperCAmelCase_ ) self.assertIn(UpperCAmelCase_, after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length, 42 ) UpperCamelCase : str = tokenizer.__class__.from_pretrained(UpperCAmelCase_, model_max_length=43 ) self.assertEqual(tokenizer.model_max_length, 43 ) shutil.rmtree(UpperCAmelCase_ ) def snake_case_ ( self ) -> int: UpperCamelCase : List[str] = self.get_tokenizers(do_lower_case=UpperCAmelCase_ ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): UpperCamelCase , UpperCamelCase : Any = self.get_clean_sequence(UpperCAmelCase_ ) # a special token for Canine can be defined as follows: UpperCamelCase : Dict = 0XE005 UpperCamelCase : int = chr(UpperCAmelCase_ ) tokenizer.add_special_tokens({'cls_token': special_token} ) UpperCamelCase : List[Any] = tokenizer.encode(UpperCAmelCase_, add_special_tokens=UpperCAmelCase_ ) self.assertEqual(len(UpperCAmelCase_ ), 1 ) UpperCamelCase : Union[str, Any] = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=UpperCAmelCase_ ) UpperCamelCase : Optional[Any] = tokenizer.encode(UpperCAmelCase_, add_special_tokens=UpperCAmelCase_ ) UpperCamelCase : int = tokenizer.encode(UpperCAmelCase_, add_special_tokens=UpperCAmelCase_ ) UpperCamelCase : Tuple = tokenizer.encode(UpperCAmelCase_, add_special_tokens=UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_, input_encoded + special_token_id ) UpperCamelCase : Any = tokenizer.decode(UpperCAmelCase_, skip_special_tokens=UpperCAmelCase_ ) self.assertTrue(special_token not in decoded ) def snake_case_ ( self ) -> Optional[int]: UpperCamelCase : Union[str, Any] = self.get_tokenizers(do_lower_case=UpperCAmelCase_ ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): UpperCamelCase : Dict = chr(0XE005 ) UpperCamelCase : Any = chr(0XE006 ) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=UpperCAmelCase_ ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. 
(in tokenization_utils_base.py) tokenizer.add_special_tokens({'additional_special_tokens': [SPECIAL_TOKEN_2]} ) UpperCamelCase : List[str] = tokenizer.tokenize(UpperCAmelCase_ ) UpperCamelCase : List[str] = tokenizer.tokenize(UpperCAmelCase_ ) self.assertEqual(len(UpperCAmelCase_ ), 1 ) self.assertEqual(len(UpperCAmelCase_ ), 1 ) self.assertEqual(token_a[0], UpperCAmelCase_ ) self.assertEqual(token_a[0], UpperCAmelCase_ ) @require_tokenizers def snake_case_ ( self ) -> Dict: UpperCamelCase : Union[str, Any] = self.get_tokenizers(do_lower_case=UpperCAmelCase_ ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # a special token for Canine can be defined as follows: UpperCamelCase : List[str] = 0XE006 UpperCamelCase : str = chr(UpperCAmelCase_ ) UpperCamelCase : Dict = AddedToken(UpperCAmelCase_, lstrip=UpperCAmelCase_ ) tokenizer.add_special_tokens({'additional_special_tokens': [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(UpperCAmelCase_ ) tokenizer.from_pretrained(UpperCAmelCase_ ) def snake_case_ ( self ) -> List[Any]: UpperCamelCase : List[str] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(UpperCAmelCase_ ) with open(os.path.join(UpperCAmelCase_, 'special_tokens_map.json' ), encoding='utf-8' ) as json_file: UpperCamelCase : List[str] = json.load(UpperCAmelCase_ ) with open(os.path.join(UpperCAmelCase_, 'tokenizer_config.json' ), encoding='utf-8' ) as json_file: UpperCamelCase : Union[str, Any] = json.load(UpperCAmelCase_ ) # a special token for Canine can be defined as follows: UpperCamelCase : Dict = 0XE006 UpperCamelCase : Optional[Any] = chr(UpperCAmelCase_ ) UpperCamelCase : int = [new_token_a] UpperCamelCase : str = [new_token_a] with open(os.path.join(UpperCAmelCase_, 'special_tokens_map.json' ), 'w', encoding='utf-8' ) as outfile: json.dump(UpperCAmelCase_, UpperCAmelCase_ ) with open(os.path.join(UpperCAmelCase_, 'tokenizer_config.json' ), 'w', encoding='utf-8' ) as outfile: json.dump(UpperCAmelCase_, UpperCAmelCase_ ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files UpperCamelCase : Dict = tokenizer_class.from_pretrained(UpperCAmelCase_, extra_ids=0 ) self.assertIn(UpperCAmelCase_, tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ), ) UpperCamelCase : Union[str, Any] = 0XE007 UpperCamelCase : Optional[Any] = chr(UpperCAmelCase_ ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained UpperCamelCase : Optional[Any] = [AddedToken(UpperCAmelCase_, lstrip=UpperCAmelCase_ )] UpperCamelCase : Dict = tokenizer_class.from_pretrained( UpperCAmelCase_, additional_special_tokens=UpperCAmelCase_, extra_ids=0 ) self.assertIn(UpperCAmelCase_, tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def snake_case_ ( self ) -> Optional[int]: UpperCamelCase : List[Any] = self.get_tokenizers(do_lower_case=UpperCAmelCase_ ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): UpperCamelCase : List[Any] = 'hello world' if self.space_between_special_tokens: UpperCamelCase : Optional[Any] = '[CLS] hello world [SEP]' else: UpperCamelCase : Tuple = input UpperCamelCase : List[str] = tokenizer.encode(UpperCAmelCase_, add_special_tokens=UpperCAmelCase_ ) UpperCamelCase : Tuple = tokenizer.decode(UpperCAmelCase_, spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(UpperCAmelCase_, [output, output.lower()] ) def snake_case_ ( self ) -> Any: UpperCamelCase : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): UpperCamelCase : Tuple = [ 'bos_token', 'eos_token', 'unk_token', 'sep_token', 'pad_token', 'cls_token', 'mask_token', ] UpperCamelCase : Union[str, Any] = 'a' UpperCamelCase : Any = ord(UpperCAmelCase_ ) for attr in attributes_list: setattr(UpperCAmelCase_, attr + '_id', UpperCAmelCase_ ) self.assertEqual(getattr(UpperCAmelCase_, UpperCAmelCase_ ), UpperCAmelCase_ ) self.assertEqual(getattr(UpperCAmelCase_, attr + '_id' ), UpperCAmelCase_ ) setattr(UpperCAmelCase_, attr + '_id', UpperCAmelCase_ ) self.assertEqual(getattr(UpperCAmelCase_, UpperCAmelCase_ ), UpperCAmelCase_ ) self.assertEqual(getattr(UpperCAmelCase_, attr + '_id' ), UpperCAmelCase_ ) setattr(UpperCAmelCase_, 'additional_special_tokens_ids', [] ) self.assertListEqual(getattr(UpperCAmelCase_, 'additional_special_tokens' ), [] ) self.assertListEqual(getattr(UpperCAmelCase_, 'additional_special_tokens_ids' ), [] ) UpperCamelCase : Dict = 0XE006 UpperCamelCase : List[str] = chr(UpperCAmelCase_ ) setattr(UpperCAmelCase_, 'additional_special_tokens_ids', [additional_special_token_id] ) self.assertListEqual(getattr(UpperCAmelCase_, 'additional_special_tokens' ), [additional_special_token] ) self.assertListEqual(getattr(UpperCAmelCase_, 'additional_special_tokens_ids' ), [additional_special_token_id] ) def snake_case_ ( self ) -> Dict: pass def snake_case_ ( self ) -> Optional[int]: pass def snake_case_ ( self ) -> 
Any: pass def snake_case_ ( self ) -> List[Any]: pass def snake_case_ ( self ) -> int: pass def snake_case_ ( self ) -> List[str]: pass def snake_case_ ( self ) -> Optional[int]: pass def snake_case_ ( self ) -> str: pass
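# Illustrative note (not part of the original test file): CANINE tokenizes at the
# Unicode code-point level, so the expected ids in the padding test above are just
# ord() values plus special code points from the private-use area
# (0xE000 = 57344 for [CLS], 0xE001 = 57345 for [SEP]). A minimal sketch of that
# mapping, using only the Python standard library:
text = "Life"
ids = [ord(c) for c in text]
assert ids == [76, 105, 102, 101]  # matches the expected encoding above
assert ord("\ue000") == 57344 and ord("\ue001") == 57345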
119
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { 'microsoft/beit-base-patch16-224-pt22k': ( 'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class __A (snake_case__): '''simple docstring''' __lowercase: Optional[int] = """beit""" def __init__( self : List[str] , UpperCAmelCase_ : List[Any]=8_192 , UpperCAmelCase_ : Dict=768 , UpperCAmelCase_ : int=12 , UpperCAmelCase_ : Tuple=12 , UpperCAmelCase_ : List[Any]=3_072 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : Dict=0.0 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : Optional[Any]=1E-12 , UpperCAmelCase_ : int=224 , UpperCAmelCase_ : Tuple=16 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Dict=[3, 5, 7, 11] , UpperCAmelCase_ : Tuple=[1, 2, 3, 6] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : List[Any]=0.4 , UpperCAmelCase_ : Optional[Any]=256 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Tuple=255 , **UpperCAmelCase_ : List[str] , ) ->Optional[Any]: """simple docstring""" super().__init__(**UpperCAmelCase_ ) snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = image_size snake_case_ = patch_size snake_case_ = num_channels snake_case_ = use_mask_token snake_case_ = use_absolute_position_embeddings snake_case_ = use_relative_position_bias snake_case_ = use_shared_relative_position_bias snake_case_ = layer_scale_init_value snake_case_ = drop_path_rate snake_case_ = use_mean_pooling # decode head attributes (semantic segmentation) snake_case_ = out_indices snake_case_ = pool_scales # auxiliary head attributes (semantic segmentation) snake_case_ = use_auxiliary_head snake_case_ = auxiliary_loss_weight snake_case_ = auxiliary_channels snake_case_ = auxiliary_num_convs snake_case_ = auxiliary_concat_input snake_case_ = semantic_loss_ignore_index class __A (snake_case__): '''simple docstring''' __lowercase: List[Any] = version.parse("""1.11""") @property def lowerCAmelCase ( self : Dict ) ->Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCAmelCase ( self : Any ) ->float: """simple docstring""" return 1E-4
347
0
'''simple docstring'''

values = {
    0: "0", 1: "1", 2: "2", 3: "3", 4: "4", 5: "5", 6: "6", 7: "7",
    8: "8", 9: "9", 10: "a", 11: "b", 12: "c", 13: "d", 14: "e", 15: "f",
}


def decimal_to_hexadecimal(decimal):
    '''simple docstring'''
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal


if __name__ == "__main__":
    import doctest

    doctest.testmod()
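# Usage sketch for the converter above (not in the original file); the results
# can be cross-checked against Python's built-in hex():
assert decimal_to_hexadecimal(5) == "0x5" == hex(5)
assert decimal_to_hexadecimal(26) == "0x1a" == hex(26)
assert decimal_to_hexadecimal(-256) == "-0x100" == hex(-256)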
349
"""simple docstring""" import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging __SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : List[Any] = {'vocab_file': 'spiece.model'} __SCREAMING_SNAKE_CASE : int = { 'vocab_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model', 't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model', 't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model', } } # TODO(PVP) - this should be removed in Transformers v5 __SCREAMING_SNAKE_CASE : Dict = { 't5-small': 512, 't5-base': 512, 't5-large': 512, 't5-3b': 512, 't5-11b': 512, } __SCREAMING_SNAKE_CASE : Optional[int] = '▁' class __A (snake_case__): '''simple docstring''' __lowercase: Optional[int] = VOCAB_FILES_NAMES __lowercase: Any = PRETRAINED_VOCAB_FILES_MAP __lowercase: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase: List[str] = ["""input_ids""", """attention_mask"""] def __init__( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any]="</s>" , UpperCAmelCase_ : Optional[Any]="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Tuple=100 , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , UpperCAmelCase_ : Optional[int]=True , **UpperCAmelCase_ : Dict , ) ->None: """simple docstring""" if extra_ids > 0 and additional_special_tokens is None: snake_case_ = [F"""<extra_id_{i}>""" for i in range(UpperCAmelCase_ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens snake_case_ = len(set(filter(lambda UpperCAmelCase_ : bool("""extra_id""" in str(UpperCAmelCase_ ) ) , UpperCAmelCase_ ) ) ) if extra_tokens != extra_ids: raise ValueError( F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are""" """ provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids""" """ tokens""" ) if legacy: logger.warning_once( F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to""" """ read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" ) snake_case_ = legacy snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , extra_ids=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , legacy=UpperCAmelCase_ , **UpperCAmelCase_ , ) snake_case_ = vocab_file snake_case_ = extra_ids snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCAmelCase_ ) @staticmethod def lowerCAmelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] ) ->Union[str, Any]: """simple docstring""" if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: snake_case_ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( """This tokenizer was incorrectly instantiated with a model max length of""" F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this""" """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with""" """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on""" F""" {pretrained_model_name_or_path} automatically truncating your input to""" F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences""" F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with""" """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please""" """ instantiate this tokenizer with `model_max_length` set to your preferred value.""" , UpperCAmelCase_ , ) return max_model_length @property def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]: """simple docstring""" return self.sp_model.get_piece_size() + self._extra_ids def lowerCAmelCase ( self : Any ) ->Optional[int]: """simple docstring""" snake_case_ = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False ) ->List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(UpperCAmelCase_ )) + [1] return ([0] * len(UpperCAmelCase_ )) + [1] + ([0] * len(UpperCAmelCase_ )) + [1] def lowerCAmelCase ( self : Any ) ->List[str]: """simple docstring""" return list( set(filter(lambda UpperCAmelCase_ : bool(re.search(R"""<extra_id_\d+>""" , UpperCAmelCase_ ) ) is not None , self.additional_special_tokens ) ) ) def lowerCAmelCase ( self : Dict ) ->str: """simple docstring""" return [self._convert_token_to_id(UpperCAmelCase_ ) for token in self.get_sentinel_tokens()] def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : List[int] ) ->List[int]: """simple docstring""" if len(UpperCAmelCase_ ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F"""This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated""" """ eos tokens being added.""" ) return token_ids else: return token_ids + [self.eos_token_id] def lowerCAmelCase ( self : str , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]: """simple docstring""" snake_case_ = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]: """simple docstring""" snake_case_ = self._add_eos_if_not_present(UpperCAmelCase_ ) if token_ids_a is None: return token_ids_a else: snake_case_ = self._add_eos_if_not_present(UpperCAmelCase_ ) return token_ids_a + token_ids_a def __getstate__( self : Optional[Any] ) ->Tuple: """simple docstring""" snake_case_ = self.__dict__.copy() snake_case_ = None return state def __setstate__( self : Optional[Any] , UpperCAmelCase_ : List[Any] ) ->List[Any]: """simple docstring""" snake_case_ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): snake_case_ = {} snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCAmelCase ( self : int , UpperCAmelCase_ : "TextInput" , **UpperCAmelCase_ : Tuple ) ->List[str]: """simple docstring""" if not self.legacy: snake_case_ = SPIECE_UNDERLINE + text.replace(UpperCAmelCase_ , """ """ ) return super().tokenize(UpperCAmelCase_ , **UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any ) ->Tuple: """simple docstring""" if not self.legacy: snake_case_ = text.startswith(UpperCAmelCase_ ) if is_first: snake_case_ = text[1:] snake_case_ = self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ ) if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(UpperCAmelCase_ ): snake_case_ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" if token.startswith("""<extra_id_""" ): snake_case_ = re.match(R"""<extra_id_(\d+)>""" , UpperCAmelCase_ ) snake_case_ = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(UpperCAmelCase_ ) def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Optional[Any] ) ->List[Any]: """simple docstring""" if index < self.sp_model.get_piece_size(): snake_case_ = self.sp_model.IdToPiece(UpperCAmelCase_ ) else: snake_case_ = F"""<extra_id_{self.vocab_size - 1 - index}>""" return token def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : List[str] ) ->Optional[Any]: """simple docstring""" snake_case_ = [] snake_case_ = """""" snake_case_ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(UpperCAmelCase_ ) + token snake_case_ = True snake_case_ = [] else: current_sub_tokens.append(UpperCAmelCase_ ) snake_case_ = False out_string += self.sp_model.decode(UpperCAmelCase_ ) return out_string.strip() def lowerCAmelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ) ->Tuple[str]: """simple docstring""" if not os.path.isdir(UpperCAmelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return snake_case_ = 
os.path.join( UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase_ , """wb""" ) as fi: snake_case_ = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase_ ) return (out_vocab_file,)
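# Illustrative note on the sentinel-token arithmetic above (not in the original
# file): `<extra_id_0>` maps to vocab_size - 1, `<extra_id_1>` to vocab_size - 2,
# and so on, mirroring the `self.vocab_size - num - 1` branch of the
# token-to-id conversion. A pure-Python sketch, assuming t5-small's vocab size
# of 32100 (32000 SentencePiece pieces + 100 extra ids):
vocab_size = 32100


def sentinel_id(token: str) -> int:
    num = int(token[len("<extra_id_"):-1])
    return vocab_size - num - 1


assert sentinel_id("<extra_id_0>") == 32099
assert sentinel_id("<extra_id_99>") == 32000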
347
0
'''simple docstring''' import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __lowerCamelCase = logging.get_logger(__name__) __lowerCamelCase = {'vocab_file': 'spiece.model'} __lowerCamelCase = { 'vocab_file': { 'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model', } } __lowerCamelCase = { 'AI-Sweden/gpt-sw3-126m': 2048, 'AI-Sweden/gpt-sw3-350m': 2048, 'AI-Sweden/gpt-sw3-1.6b': 2048, 'AI-Sweden/gpt-sw3-6.7b': 2048, 'AI-Sweden/gpt-sw3-20b': 2048, } class A__ ( snake_case__ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ["""input_ids""", """attention_mask"""] def __init__( self , UpperCamelCase__ , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> None: '''simple docstring''' A_ = {} if sp_model_kwargs is None else sp_model_kwargs A_ = kwargs.get("""name_or_path""" ) if name_or_path is None: logger.warning( """name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,""" """ you are testing the model, this can safely be ignored""" ) A_ = """None""" # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing A_ = """<|endoftext|>""" if eos_token is None else eos_token A_ = """<unk>""" if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: A_ = unk_token if pad_token is None else pad_token A_ = eos_token if bos_token is None else bos_token else: A_ = """<pad>""" if pad_token is None else pad_token A_ = """<s>""" if bos_token is None else bos_token super().__init__( do_lower_case=UpperCAmelCase_ , remove_space=UpperCAmelCase_ , keep_accents=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , ) A_ = do_lower_case A_ = remove_space A_ = keep_accents A_ = vocab_file A_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCAmelCase_ ) # Used for whitespace normalization in input texts # fmt : off A_ = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """„"""} # fmt : on # Regular expression to remove non-printing characters (e.g. 
some unicode control chars) in preprocessing A_ = re.compile( f'''[{"".join(map(UpperCAmelCase_ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]''' ) def __getstate__( self ) -> Tuple: '''simple docstring''' A_ = self.__dict__.copy() A_ = None return state def __setstate__( self , UpperCamelCase__ ) -> Dict: '''simple docstring''' A_ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): A_ = {} A_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def snake_case_ ( self ) -> int: '''simple docstring''' return len(self.sp_model ) def snake_case_ ( self , UpperCamelCase__ ) -> str: '''simple docstring''' A_ = self.non_printing_characters_re.sub("""""" , UpperCAmelCase_ ) # Normalize whitespaces A_ = """""".join([char if char not in self.whitespaces else """ """ for char in text] ) # NFC Unicode normalization A_ = unicodedata.normalize("""NFC""" , UpperCAmelCase_ ) return text def snake_case_ ( self , UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]: '''simple docstring''' A_ = self.preprocess_text(UpperCAmelCase_ ) return self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ ) def snake_case_ ( self , UpperCamelCase__ ) -> int: '''simple docstring''' return self.sp_model.PieceToId(UpperCAmelCase_ ) def snake_case_ ( self , UpperCamelCase__ ) -> str: '''simple docstring''' return self.sp_model.IdToPiece(UpperCAmelCase_ ) @staticmethod def snake_case_ ( UpperCamelCase__ ) -> str: '''simple docstring''' return out_string def snake_case_ ( self , UpperCamelCase__ ) -> str: '''simple docstring''' A_ = [] A_ = """""" A_ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(UpperCAmelCase_ ) + token A_ = True A_ = [] else: current_sub_tokens.append(UpperCAmelCase_ ) A_ = False out_string += self.sp_model.decode(UpperCAmelCase_ ) return out_string def snake_case_ ( self ) -> Dict[str, int]: '''simple docstring''' A_ = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(UpperCAmelCase_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return A_ = os.path.join( UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase_ , """wb""" ) as fi: A_ = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase_ ) return (out_vocab_file,) def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]: '''simple docstring''' if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): A_ = self.preprocess_text(UpperCAmelCase_ ) A_ = self.sp_model.encode(UpperCAmelCase_ ) else: A_ = [self.preprocess_text(UpperCAmelCase_ ) for t 
in text] A_ = self.sp_model.encode(UpperCAmelCase_ ) if return_tensors is True or return_tensors == "pt": A_ = torch.tensor(UpperCAmelCase_ ) return token_ids def snake_case_ ( self , UpperCamelCase__ ) -> str: '''simple docstring''' return self.sp_model.decode(UpperCAmelCase_ ) def snake_case_ ( self , UpperCamelCase__ ) -> List[int]: '''simple docstring''' A_ = [f'''User: {text}''' if is_user else f'''Bot: {text}''' for is_user, text in conversation.iter_texts()] A_ = ( f'''{self.eos_token}{self.bos_token}''' + f'''{self.bos_token}'''.join(UpperCAmelCase_ ) + f'''{self.bos_token}Bot:''' ) return self.encode(text=UpperCAmelCase_ )
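# Sketch of the chat-prompt format produced by the conversation helper above
# (not in the original file); the token strings follow the non-7b defaults
# shown in __init__ (bos_token="<s>", eos_token="<|endoftext|>"):
def build_prompt(turns):  # turns: list of (is_user, text) pairs
    eos, bos = "<|endoftext|>", "<s>"
    texts = [f"User: {t}" if is_user else f"Bot: {t}" for is_user, t in turns]
    return f"{eos}{bos}" + f"{bos}".join(texts) + f"{bos}Bot:"


assert build_prompt([(True, "Hej!")]) == "<|endoftext|><s>User: Hej!<s>Bot:"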
162
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE = 1_000_000 ) -> int: snake_case_ = [i - 1 for i in range(limit + 1 )] for i in range(2 , limit + 1 ): if phi[i] == i - 1: for j in range(2 * i , limit + 1 , _SCREAMING_SNAKE_CASE ): phi[j] -= phi[j] // i return sum(phi[2 : limit + 1] ) if __name__ == "__main__": print(solution())
347
0
import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() A : int = logging.get_logger(__name__) def UpperCamelCase ( __magic_name__ : Tuple ) -> Optional[int]: """simple docstring""" lowercase__ = MobileNetVaConfig(layer_norm_eps=0.0_0_1 ) if "_quant" in model_name: raise ValueError("""Quantized models are not supported.""" ) lowercase__ = re.match(R"""^mobilenet_v1_([^_]*)_([^_]*)$""" , _SCREAMING_SNAKE_CASE ) if matches: lowercase__ = float(matches[1] ) lowercase__ = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". lowercase__ = 1001 lowercase__ = """imagenet-1k-id2label.json""" lowercase__ = """huggingface/label-files""" lowercase__ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) lowercase__ = {int(_SCREAMING_SNAKE_CASE ) + 1: v for k, v in idalabel.items()} lowercase__ = """background""" lowercase__ = idalabel lowercase__ = {v: k for k, v in idalabel.items()} return config def UpperCamelCase ( ) -> Dict: """simple docstring""" lowercase__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowercase__ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def UpperCamelCase ( __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : Dict , __magic_name__ : str=False ) -> Optional[Any]: """simple docstring""" lowercase__ = get_mobilenet_va_config(_SCREAMING_SNAKE_CASE ) # Load 🤗 model lowercase__ = MobileNetVaForImageClassification(_SCREAMING_SNAKE_CASE ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor lowercase__ = MobileNetVaImageProcessor( crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , ) lowercase__ = image_processor(images=prepare_img() , return_tensors="""pt""" ) lowercase__ = model(**_SCREAMING_SNAKE_CASE ) lowercase__ = outputs.logits assert logits.shape == (1, 1001) if model_name == "mobilenet_v1_1.0_224": lowercase__ = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] ) elif model_name == "mobilenet_v1_0.75_192": lowercase__ = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] ) else: lowercase__ = None if expected_logits is not None: assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: print("""Pushing to the hub...""" ) lowercase__ = """google/""" + model_name image_processor.push_to_hub(_SCREAMING_SNAKE_CASE ) model.push_to_hub(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": A : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='mobilenet_v1_1.0_224', type=str, 
help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.', ) parser.add_argument( '--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).' ) parser.add_argument( '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) A : Dict = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
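# Example invocation of the conversion script above (the script filename and
# all paths are hypothetical placeholders; the flags come straight from the
# argparse definition):
#
#   python convert_mobilenet_v1.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path /path/to/mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224 \
#       --push_to_hub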
305
"""simple docstring""" from __future__ import annotations def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: print(f"""Vertex\tShortest Distance from vertex {src}""" ) for i, d in enumerate(_SCREAMING_SNAKE_CASE ): print(f"""{i}\t\t{d}""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: for j in range(_SCREAMING_SNAKE_CASE ): snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: return True return False def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> list[float]: snake_case_ = [float("""inf""" )] * vertex_count snake_case_ = 0.0 for _ in range(vertex_count - 1 ): for j in range(_SCREAMING_SNAKE_CASE ): snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: snake_case_ = distance[u] + w snake_case_ = check_negative_cycle(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if negative_cycle_exists: raise Exception("""Negative cycle found""" ) return distance if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE : int = int(input('Enter number of vertices: ').strip()) __SCREAMING_SNAKE_CASE : Dict = int(input('Enter number of edges: ').strip()) __SCREAMING_SNAKE_CASE : list[dict[str, int]] = [{} for _ in range(E)] for i in range(E): print('Edge ', i + 1) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = ( int(x) for x in input('Enter source, destination, weight: ').strip().split(' ') ) __SCREAMING_SNAKE_CASE : Union[str, Any] = {'src': src, 'dst': dest, 'weight': weight} __SCREAMING_SNAKE_CASE : Union[str, Any] = int(input('\nEnter shortest path source:').strip()) __SCREAMING_SNAKE_CASE : str = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
347
0
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
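# Usage sketch (not in the original file): with no backbone_config the class
# falls back to a default ResNet backbone, as the __init__ above shows.
config = UperNetConfig()
assert config.backbone_config.model_type == "resnet"
assert config.pool_scales == [1, 2, 3, 6]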
62
"""simple docstring""" import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) __SCREAMING_SNAKE_CASE : List[str] = logging.getLogger(__name__) __SCREAMING_SNAKE_CASE : str = tf.data.AUTOTUNE def _a ( ) -> List[str]: snake_case_ = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" ) parser.add_argument( """--pretrained_model_config""" , type=_SCREAMING_SNAKE_CASE , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , ) parser.add_argument( """--tokenizer""" , type=_SCREAMING_SNAKE_CASE , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , ) parser.add_argument( """--per_replica_batch_size""" , type=_SCREAMING_SNAKE_CASE , default=8 , help="""Batch size per TPU core.""" , ) parser.add_argument( """--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , ) parser.add_argument( """--tpu_name""" , type=_SCREAMING_SNAKE_CASE , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , ) parser.add_argument( """--tpu_zone""" , type=_SCREAMING_SNAKE_CASE , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , ) parser.add_argument( """--gcp_project""" , type=_SCREAMING_SNAKE_CASE , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" ) parser.add_argument( """--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , ) parser.add_argument( """--train_dataset""" , type=_SCREAMING_SNAKE_CASE , help="""Path to training dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , ) parser.add_argument( """--shuffle_buffer_size""" , type=_SCREAMING_SNAKE_CASE , default=2**18 , help="""Size of the shuffle buffer (in samples)""" , ) parser.add_argument( """--eval_dataset""" , type=_SCREAMING_SNAKE_CASE , help="""Path to evaluation dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , ) parser.add_argument( """--num_epochs""" , type=_SCREAMING_SNAKE_CASE , default=1 , help="""Number of epochs to train for.""" , ) parser.add_argument( """--learning_rate""" , type=_SCREAMING_SNAKE_CASE , default=1E-4 , help="""Learning rate to use for training.""" , ) parser.add_argument( """--weight_decay_rate""" , type=_SCREAMING_SNAKE_CASE , default=1E-3 , help="""Weight decay rate to use for training.""" , ) parser.add_argument( """--max_length""" , type=_SCREAMING_SNAKE_CASE , default=512 , help="""Maximum length of tokenized sequences. 
Should match the setting used in prepare_tfrecord_shards.py""" , ) parser.add_argument( """--mlm_probability""" , type=_SCREAMING_SNAKE_CASE , default=0.15 , help="""Fraction of tokens to mask during training.""" , ) parser.add_argument("""--output_dir""" , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""Path to save model checkpoints to.""" ) parser.add_argument("""--hub_model_id""" , type=_SCREAMING_SNAKE_CASE , help="""Model ID to upload to on the Hugging Face Hub.""" ) snake_case_ = parser.parse_args() return args def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]: try: if args.tpu_name: snake_case_ = tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name , zone=args.tpu_zone , project=args.gcp_project ) else: snake_case_ = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( """Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """ """--gcp_project. When running on a TPU VM, use --tpu_name local.""" ) tf.config.experimental_connect_to_cluster(_SCREAMING_SNAKE_CASE ) tf.tpu.experimental.initialize_tpu_system(_SCREAMING_SNAKE_CASE ) return tpu def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = 0 for file in file_list: snake_case_ = file.split("""/""" )[-1] snake_case_ = re.search(r"""-\d+-(\d+)\.tfrecord""" , _SCREAMING_SNAKE_CASE ).group(1 ) snake_case_ = int(_SCREAMING_SNAKE_CASE ) num_samples += sample_count return num_samples def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]: snake_case_ = count_samples(_SCREAMING_SNAKE_CASE ) snake_case_ = tf.data.Dataset.from_tensor_slices(_SCREAMING_SNAKE_CASE ) if shuffle: snake_case_ = dataset.shuffle(len(_SCREAMING_SNAKE_CASE ) ) snake_case_ = tf.data.TFRecordDataset(_SCREAMING_SNAKE_CASE , num_parallel_reads=_SCREAMING_SNAKE_CASE ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here snake_case_ = dataset.apply(tf.data.experimental.assert_cardinality(_SCREAMING_SNAKE_CASE ) ) snake_case_ = dataset.map(_SCREAMING_SNAKE_CASE , num_parallel_calls=_SCREAMING_SNAKE_CASE ) if shuffle: assert shuffle_buffer_size is not None snake_case_ = dataset.shuffle(args.shuffle_buffer_size ) snake_case_ = dataset.batch(_SCREAMING_SNAKE_CASE , drop_remainder=_SCREAMING_SNAKE_CASE ) snake_case_ = dataset.map(_SCREAMING_SNAKE_CASE , num_parallel_calls=_SCREAMING_SNAKE_CASE ) snake_case_ = dataset.prefetch(_SCREAMING_SNAKE_CASE ) return dataset def _a ( _SCREAMING_SNAKE_CASE ) -> List[Any]: if not args.no_tpu: snake_case_ = initialize_tpu(_SCREAMING_SNAKE_CASE ) snake_case_ = tf.distribute.TPUStrategy(_SCREAMING_SNAKE_CASE ) else: snake_case_ = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" ) snake_case_ = AutoTokenizer.from_pretrained(args.tokenizer ) snake_case_ = AutoConfig.from_pretrained(args.pretrained_model_config ) snake_case_ = tokenizer.vocab_size snake_case_ = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) ) if not training_records: raise ValueError(f"""No .tfrecord files found in {args.train_dataset}.""" ) snake_case_ = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) ) if not eval_records: raise ValueError(f"""No .tfrecord files found in {args.eval_dataset}.""" ) snake_case_ = count_samples(_SCREAMING_SNAKE_CASE ) snake_case_ = 
num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) snake_case_ = steps_per_epoch * args.num_epochs with strategy.scope(): snake_case_ = TFAutoModelForMaskedLM.from_config(_SCREAMING_SNAKE_CASE ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built snake_case_ , snake_case_ = create_optimizer( num_train_steps=_SCREAMING_SNAKE_CASE , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=_SCREAMING_SNAKE_CASE , metrics=["""accuracy"""] ) def decode_fn(_SCREAMING_SNAKE_CASE ): snake_case_ = { """input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), """attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), } return tf.io.parse_single_example(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. snake_case_ = DataCollatorForLanguageModeling( tokenizer=_SCREAMING_SNAKE_CASE , mlm_probability=args.mlm_probability , mlm=_SCREAMING_SNAKE_CASE , return_tensors="""tf""" ) def mask_with_collator(_SCREAMING_SNAKE_CASE ): # TF really needs an isin() function snake_case_ = ( ~tf.cast(batch["""attention_mask"""] , tf.bool ) | (batch["""input_ids"""] == tokenizer.cls_token_id) | (batch["""input_ids"""] == tokenizer.sep_token_id) ) snake_case_ , snake_case_ = data_collator.tf_mask_tokens( batch["""input_ids"""] , vocab_size=len(_SCREAMING_SNAKE_CASE ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=_SCREAMING_SNAKE_CASE , ) return batch snake_case_ = args.per_replica_batch_size * strategy.num_replicas_in_sync snake_case_ = prepare_dataset( _SCREAMING_SNAKE_CASE , decode_fn=_SCREAMING_SNAKE_CASE , mask_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , shuffle_buffer_size=args.shuffle_buffer_size , ) snake_case_ = prepare_dataset( _SCREAMING_SNAKE_CASE , decode_fn=_SCREAMING_SNAKE_CASE , mask_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , ) snake_case_ = [] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=_SCREAMING_SNAKE_CASE ) ) model.fit( _SCREAMING_SNAKE_CASE , validation_data=_SCREAMING_SNAKE_CASE , epochs=args.num_epochs , callbacks=_SCREAMING_SNAKE_CASE , ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Union[str, Any] = parse_args() main(args)
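# Example invocation of the training script above (the script filename and the
# dataset paths are hypothetical placeholders; the flags come from parse_args):
#
#   python train_mlm_tpu.py \
#       --pretrained_model_config roberta-base \
#       --tokenizer unigram-tokenizer-wikitext \
#       --per_replica_batch_size 8 \
#       --train_dataset gs://my-bucket/train \
#       --eval_dataset gs://my-bucket/eval \
#       --bfloat16 \
#       --output_dir ./mlm-checkpoints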
347
0
"""simple docstring""" import absl # noqa: F401 # Here to have a nice missing dependency error message early on import nltk # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import six # noqa: F401 # Here to have a nice missing dependency error message early on from rouge_score import rouge_scorer, scoring import datasets lowerCAmelCase_ = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n' lowerCAmelCase_ = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n' lowerCAmelCase_ = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. 
`"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class __A ( datasets.Metric ): '''simple docstring''' def UpperCAmelCase ( self : int ) -> List[Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { '''predictions''': datasets.Value('''string''' ,id='''sequence''' ), '''references''': datasets.Value('''string''' ,id='''sequence''' ), } ) ,codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] ,reference_urls=[ '''https://en.wikipedia.org/wiki/ROUGE_(metric)''', '''https://github.com/google-research/google-research/tree/master/rouge''', ] ,) def UpperCAmelCase ( self : str ,_snake_case : Optional[int] ,_snake_case : Dict ,_snake_case : str=None ,_snake_case : Union[str, Any]=True ,_snake_case : Union[str, Any]=False ) -> Any: """simple docstring""" if rouge_types is None: lowercase__ : int = ['''rouge1''', '''rouge2''', '''rougeL''', '''rougeLsum'''] lowercase__ : List[str] = rouge_scorer.RougeScorer(rouge_types=UpperCAmelCase_ ,use_stemmer=UpperCAmelCase_ ) if use_aggregator: lowercase__ : List[str] = scoring.BootstrapAggregator() else: lowercase__ : List[Any] = [] for ref, pred in zip(UpperCAmelCase_ ,UpperCAmelCase_ ): lowercase__ : str = scorer.score(UpperCAmelCase_ ,UpperCAmelCase_ ) if use_aggregator: aggregator.add_scores(UpperCAmelCase_ ) else: scores.append(UpperCAmelCase_ ) if use_aggregator: lowercase__ : int = aggregator.aggregate() else: lowercase__ : Any = {} for key in scores[0]: lowercase__ : Optional[Any] = [score[key] for score in scores] return result
16
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> float: if density <= 0: raise ValueError("""Impossible fluid density""" ) if bulk_modulus <= 0: raise ValueError("""Impossible bulk modulus""" ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
347
0
'''simple docstring'''
import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class LauncherTester(unittest.TestCase):
    def test_launch_script(self):
        debug_launcher(test_script.main)

    def test_launch_ops(self):
        debug_launcher(test_ops.main)
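# Note (not in the original file): accelerate's debug_launcher runs a function
# under a lightweight multi-process CPU launcher, which is why these tests can
# exercise distributed code paths without GPUs/TPUs. It also accepts a process
# count, e.g. (sketch, assuming the num_processes keyword):
#
#   debug_launcher(test_script.main, num_processes=2)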
237
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE ) -> bool: if num < 0: return False snake_case_ = num snake_case_ = 0 while num > 0: snake_case_ = rev_num * 10 + (num % 10) num //= 10 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
347
0
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(ksize, sigma, theta, lambd, psi, gamma):
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(
                2 * np.pi * _x / lambd + psi
            )
    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
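# Quick sanity sketch for gabor_filter_kernel above (not in the original file):
# an even ksize is bumped to the next odd value so the kernel has a center.
kernel = gabor_filter_kernel(4, sigma=8, theta=0, lambd=10, psi=0, gamma=0)
assert kernel.shape == (5, 5)
# with theta=0 and gamma=0 the response only varies along x, so rows repeat
assert np.allclose(kernel[0], kernel[-1])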
139
"""simple docstring""" import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin __SCREAMING_SNAKE_CASE : Tuple = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model') @require_sentencepiece @require_tokenizers class __A (snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Tuple = SpeechTaTokenizer __lowercase: int = False __lowercase: List[str] = True def lowerCAmelCase ( self : Any ) ->str: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing snake_case_ = SpeechTaTokenizer(UpperCAmelCase_ ) snake_case_ = AddedToken("""<mask>""" , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) snake_case_ = mask_token tokenizer.add_special_tokens({"""mask_token""": mask_token} ) tokenizer.add_tokens(["""<ctc_blank>"""] ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict: """simple docstring""" snake_case_ = """this is a test""" snake_case_ = """this is a test""" return input_text, output_text def lowerCAmelCase ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Tuple=20 , UpperCAmelCase_ : Dict=5 ) ->List[Any]: """simple docstring""" snake_case_ , snake_case_ = self.get_input_output_texts(UpperCAmelCase_ ) snake_case_ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) snake_case_ = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ ) return text, ids def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]: """simple docstring""" snake_case_ = """<pad>""" snake_case_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ ) def lowerCAmelCase ( self : int ) ->str: """simple docstring""" snake_case_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(vocab_keys[-4] , """œ""" ) self.assertEqual(vocab_keys[-2] , """<mask>""" ) self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" ) self.assertEqual(len(UpperCAmelCase_ ) , 81 ) def lowerCAmelCase ( self : Optional[int] ) ->int: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = self.get_tokenizers(do_lower_case=UpperCAmelCase_ ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): snake_case_ = tokenizer.vocab_size snake_case_ = len(UpperCAmelCase_ ) self.assertNotEqual(UpperCAmelCase_ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) snake_case_ = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""] snake_case_ = tokenizer.add_tokens(UpperCAmelCase_ ) snake_case_ = tokenizer.vocab_size snake_case_ = len(UpperCAmelCase_ ) self.assertNotEqual(UpperCAmelCase_ , 0 ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) self.assertEqual(UpperCAmelCase_ , all_size + 
len(UpperCAmelCase_ ) ) snake_case_ = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=UpperCAmelCase_ ) self.assertGreaterEqual(len(UpperCAmelCase_ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) snake_case_ = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""} snake_case_ = tokenizer.add_special_tokens(UpperCAmelCase_ ) snake_case_ = tokenizer.vocab_size snake_case_ = len(UpperCAmelCase_ ) self.assertNotEqual(UpperCAmelCase_ , 0 ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) self.assertEqual(UpperCAmelCase_ , all_size_a + len(UpperCAmelCase_ ) ) snake_case_ = tokenizer.encode( """>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=UpperCAmelCase_ ) self.assertGreaterEqual(len(UpperCAmelCase_ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def lowerCAmelCase ( self : Optional[Any] ) ->Tuple: """simple docstring""" pass def lowerCAmelCase ( self : List[str] ) ->Optional[Any]: """simple docstring""" pass def lowerCAmelCase ( self : List[str] ) ->List[str]: """simple docstring""" snake_case_ = self.get_tokenizer() snake_case_ = tokenizer.tokenize("""This is a test""" ) # fmt: off self.assertListEqual(UpperCAmelCase_ , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) snake_case_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] ) snake_case_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) # fmt: off self.assertListEqual(UpperCAmelCase_ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on snake_case_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ ) self.assertListEqual( UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] ) @slow def lowerCAmelCase ( self : str ) ->Dict: """simple docstring""" snake_case_ = [ """Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """ """general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural """ """Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """ """models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""", """BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """ """conditioning on both left and right context in all layers.""", """The quick brown fox jumps over the lazy dog.""", ] # fmt: off snake_case_ = { """input_ids""": [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], """attention_mask""": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase_ , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=UpperCAmelCase_ , )
347
0
from __future__ import annotations from typing import Generic, TypeVar T = TypeVar("T") class DisjointSetTreeNode(Generic[T]): # disjoint-set node storing the element, its parent pointer and its rank def __init__(self, data: T) -> None: self.data = data self.parent = self self.rank = 0 class DisjointSetTree(Generic[T]): # union-find with path compression and union by rank def __init__(self) -> None: # map from node data to the node object self.map: dict[T, DisjointSetTreeNode[T]] = {} def make_set(self, data: T) -> None: # create a new singleton set containing `data` self.map[data] = DisjointSetTreeNode(data) def find_set(self, data: T) -> DisjointSetTreeNode[T]: # find the set representative, compressing the path on the way up elem_ref = self.map[data] if elem_ref != elem_ref.parent: elem_ref.parent = self.find_set(elem_ref.parent.data) return elem_ref.parent def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None: # attach the lower-rank root under the higher-rank root if node1.rank > node2.rank: node2.parent = node1 else: node1.parent = node2 if node1.rank == node2.rank: node2.rank += 1 def union(self, data1: T, data2: T) -> None: # merge the sets containing `data1` and `data2` self.link(self.find_set(data1), self.find_set(data2)) class GraphUndirectedWeighted(Generic[T]): def __init__(self) -> None: # map from a node to its neighbouring nodes with edge weights self.connections: dict[T, dict[T, int]] = {} def add_node(self, node: T) -> None: # add a node only if it is not already present in the graph if node not in self.connections: self.connections[node] = {} def add_edge(self, node1: T, node2: T, weight: int) -> None: # add an undirected edge with the given weight self.add_node(node1) self.add_node(node2) self.connections[node1][node2] = weight self.connections[node2][node1] = weight def kruskal(self) -> GraphUndirectedWeighted[T]: # collect each undirected edge exactly once edges = [] seen = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start)) edges.append((start, end, self.connections[start][end])) edges.sort(key=lambda x: x[2]) # creating the disjoint set disjoint_set = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(node) # MST generation num_edges = 0 index = 0 graph = GraphUndirectedWeighted[T]() while num_edges < len(self.connections) - 1: u, v, w = edges[index] index += 1 parent_u = disjoint_set.find_set(u) parent_v = disjoint_set.find_set(v) if parent_u != parent_v: num_edges += 1 graph.add_edge(u, v, w) disjoint_set.union(u, v) return graph
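# Minimal usage sketch for the Kruskal implementation above (the node
# labels and weights are made up for illustration, not from the source):
if __name__ == "__main__":
    graph = GraphUndirectedWeighted[int]()
    graph.add_edge(1, 2, 1)
    graph.add_edge(2, 3, 2)
    graph.add_edge(1, 3, 3)  # heaviest edge; Kruskal should drop it
    mst = graph.kruskal()
    print(mst.connections)  # only the (1, 2) and (2, 3) edges survive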
6
"""simple docstring""" import datasets __SCREAMING_SNAKE_CASE : Tuple = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n' __SCREAMING_SNAKE_CASE : Dict = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n' __SCREAMING_SNAKE_CASE : List[str] = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n' def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class __A (datasets.Metric): '''simple docstring''' def lowerCAmelCase ( self : str ) ->Any: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ), """references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ), } ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , ) def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any ) ->int: """simple docstring""" return {"accuracy": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_ )}
347
0
"""simple docstring""" import os from argparse import ArgumentParser from typing import List import torch.utils.data from datasets import Dataset, IterableDataset from datasets.distributed import split_dataset_by_node SCREAMING_SNAKE_CASE : str = 4 SCREAMING_SNAKE_CASE : Dict = 3 class _UpperCAmelCase ( snake_case__ ): '''simple docstring''' pass def lowercase ( _snake_case : List[str] ) ->int: """simple docstring""" for shard in shards: for i in range(_SCREAMING_SNAKE_CASE ): yield {"i": i, "shard": shard} def lowercase ( ) ->Optional[int]: """simple docstring""" __snake_case : Union[str, Any] = int(os.environ['''RANK'''] ) __snake_case : Tuple = int(os.environ['''WORLD_SIZE'''] ) __snake_case : Tuple = ArgumentParser() parser.add_argument('''--streaming''' , type=_SCREAMING_SNAKE_CASE ) parser.add_argument('''--local_rank''' , type=_SCREAMING_SNAKE_CASE ) parser.add_argument('''--num_workers''' , type=_SCREAMING_SNAKE_CASE , default=0 ) __snake_case : int = parser.parse_args() __snake_case : int = args.streaming __snake_case : Any = args.num_workers __snake_case : Any = {'''shards''': [f"""shard_{shard_idx}""" for shard_idx in range(_SCREAMING_SNAKE_CASE )]} __snake_case : Optional[Any] = IterableDataset.from_generator(_SCREAMING_SNAKE_CASE , gen_kwargs=_SCREAMING_SNAKE_CASE ) if not streaming: __snake_case : Any = Dataset.from_list(list(_SCREAMING_SNAKE_CASE ) ) __snake_case : Any = split_dataset_by_node(_SCREAMING_SNAKE_CASE , rank=_SCREAMING_SNAKE_CASE , world_size=_SCREAMING_SNAKE_CASE ) __snake_case : Any = torch.utils.data.DataLoader(_SCREAMING_SNAKE_CASE , num_workers=_SCREAMING_SNAKE_CASE ) __snake_case : Optional[int] = NUM_SHARDS * NUM_ITEMS_PER_SHARD __snake_case : List[Any] = full_size // world_size expected_local_size += int(rank < (full_size % world_size) ) __snake_case : Dict = sum(1 for _ in dataloader ) if local_size != expected_local_size: raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""" ) if __name__ == "__main__": main()
102
"""simple docstring""" from ..utils import DummyObject, requires_backends class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[str] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[str] = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[Any] = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : int ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Union[str, Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Tuple = ["""sentencepiece"""] def 
__init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[Any] ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[str] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Tuple = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Optional[Any] ) ->str: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[Any] ) ->int: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[str] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any] ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : Any , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[Any] ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[int] ) ->Optional[Any]: """simple docstring""" 
requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : str ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[str] ) ->Optional[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ) ->Any: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[str] = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Union[str, Any] ) ->Optional[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->str: """simple docstring""" requires_backends(self , ["""sentencepiece"""] )
347
0
'''simple docstring''' import argparse import json import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification def __lowerCamelCase ( _lowercase ) -> Optional[int]: UpperCAmelCase : Union[str, Any] = SwinConfig() UpperCAmelCase : List[str] = swin_name.split("""_""" ) UpperCAmelCase : str = name_split[1] UpperCAmelCase : List[Any] = int(name_split[4] ) UpperCAmelCase : Any = int(name_split[3][-1] ) if model_size == "tiny": UpperCAmelCase : Any = 9_6 UpperCAmelCase : Optional[Any] = (2, 2, 6, 2) UpperCAmelCase : Optional[Any] = (3, 6, 1_2, 2_4) elif model_size == "small": UpperCAmelCase : Union[str, Any] = 9_6 UpperCAmelCase : Any = (2, 2, 1_8, 2) UpperCAmelCase : Union[str, Any] = (3, 6, 1_2, 2_4) elif model_size == "base": UpperCAmelCase : Optional[Any] = 1_2_8 UpperCAmelCase : Optional[Any] = (2, 2, 1_8, 2) UpperCAmelCase : Any = (4, 8, 1_6, 3_2) else: UpperCAmelCase : Optional[int] = 1_9_2 UpperCAmelCase : str = (2, 2, 1_8, 2) UpperCAmelCase : List[str] = (6, 1_2, 2_4, 4_8) if "in22k" in swin_name: UpperCAmelCase : List[str] = 2_1_8_4_1 else: UpperCAmelCase : int = 1_0_0_0 UpperCAmelCase : List[Any] = """huggingface/label-files""" UpperCAmelCase : Tuple = """imagenet-1k-id2label.json""" UpperCAmelCase : Union[str, Any] = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) UpperCAmelCase : int = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} UpperCAmelCase : Dict = idalabel UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()} UpperCAmelCase : int = img_size UpperCAmelCase : Any = num_classes UpperCAmelCase : List[Any] = embed_dim UpperCAmelCase : List[str] = depths UpperCAmelCase : str = num_heads UpperCAmelCase : Optional[int] = window_size return config def __lowerCamelCase ( _lowercase ) -> Any: if "patch_embed.proj" in name: UpperCAmelCase : int = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: UpperCAmelCase : List[Any] = name.replace("""patch_embed.norm""" , """embeddings.norm""" ) if "layers" in name: UpperCAmelCase : Union[str, Any] = """encoder.""" + name if "attn.proj" in name: UpperCAmelCase : Optional[int] = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: UpperCAmelCase : Optional[int] = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: UpperCAmelCase : List[Any] = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: UpperCAmelCase : Dict = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: UpperCAmelCase : Any = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: UpperCAmelCase : str = name.replace("""mlp.fc2""" , """output.dense""" ) if name == "norm.weight": UpperCAmelCase : int = """layernorm.weight""" if name == "norm.bias": UpperCAmelCase : Optional[int] = """layernorm.bias""" if "head" in name: UpperCAmelCase : str = name.replace("""head""" , """classifier""" ) else: UpperCAmelCase : int = """swin.""" + name return name def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[Any]: for key in orig_state_dict.copy().keys(): UpperCAmelCase : Any = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if "mask" in key: continue elif "qkv" in key: UpperCAmelCase : str = key.split(""".""" ) UpperCAmelCase : Optional[Any] = int(key_split[1] ) UpperCAmelCase : Tuple = 
int(key_split[3] ) UpperCAmelCase : str = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: UpperCAmelCase : Dict = val[:dim, :] UpperCAmelCase : Optional[Any] = val[ dim : dim * 2, : ] UpperCAmelCase : Any = val[-dim:, :] else: UpperCAmelCase : int = val[ :dim ] UpperCAmelCase : Any = val[ dim : dim * 2 ] UpperCAmelCase : Dict = val[ -dim: ] else: UpperCAmelCase : List[str] = val return orig_state_dict def __lowerCamelCase ( _lowercase , _lowercase ) -> Any: UpperCAmelCase : Any = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ) timm_model.eval() UpperCAmelCase : int = get_swin_config(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Tuple = SwinForImageClassification(_SCREAMING_SNAKE_CASE ) model.eval() UpperCAmelCase : List[str] = convert_state_dict(timm_model.state_dict() , _SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" UpperCAmelCase : int = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""" , """-""" ) ) ) UpperCAmelCase : List[str] = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) UpperCAmelCase : List[Any] = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ) UpperCAmelCase : Optional[Any] = timm_model(inputs["""pixel_values"""] ) UpperCAmelCase : Tuple = model(**_SCREAMING_SNAKE_CASE ).logits assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) print(F'''Saving model {swin_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": a : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swin_name""", default="""swin_tiny_patch4_window7_224""", type=str, help="""Name of the Swin timm model you\'d like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) a : Any = parser.parse_args() convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
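# Example invocation, using the defaults declared in the argparse block
# above (the converter's file name is an assumption):
#   python convert_swin_timm_to_pytorch.py \
#       --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224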
265
"""simple docstring""" import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor __SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) class __A (snake_case__): '''simple docstring''' def __init__( self : str , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->None: """simple docstring""" warnings.warn( """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use MobileViTImageProcessor instead.""" , UpperCAmelCase_ , ) super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
347
0
import functools import gc import inspect import torch from .imports import is_npu_available, is_xpu_available def release_memory(*objects): # drop references to each object and empty the device cache if not isinstance(objects, list): objects = list(objects) for i in range(len(objects)): objects[i] = None gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() return objects def should_reduce_batch_size(exception: Exception) -> bool: _statements = [ "CUDA out of memory.", # CUDA OOM "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.", # CUDNN SNAFU "DefaultCPUAllocator: can't allocate memory", # CPU OOM ] if isinstance(exception, RuntimeError) and len(exception.args) == 1: return any(err in exception.args[0] for err in _statements) return False def find_executable_batch_size(function=None, starting_batch_size: int = 128): if function is None: return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size) batch_size = starting_batch_size def decorator(*args, **kwargs): nonlocal batch_size gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() params = list(inspect.signature(function).parameters.keys()) # Guard against user error if len(params) < (len(args) + 1): arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])]) raise TypeError( f"Batch size was passed into `{function.__name__}` as the first argument when called." f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`" ) while True: if batch_size == 0: raise RuntimeError("No executable batch size found, reached zero.") try: return function(batch_size, *args, **kwargs) except Exception as e: if should_reduce_batch_size(e): gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() batch_size //= 2 else: raise return decorator
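# Minimal usage sketch of the decorator above (the training function and
# batch sizes are illustrative, not from the source). The wrapped function
# must take the batch size as its first argument; the wrapper retries with
# a halved batch size whenever an OOM-style RuntimeError is caught.
@find_executable_batch_size(starting_batch_size=128)
def training_loop(batch_size):
    print(f"trying batch_size={batch_size}")
    # ... build the dataloader / run one epoch with `batch_size` here ...

# Called WITHOUT the batch size; the decorator supplies and adapts it.
training_loop()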
119
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Any: snake_case_ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """vit.embeddings.cls_token"""), ("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" snake_case_ = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Tuple: for i in range(config.num_hidden_layers ): if base_model: snake_case_ = """""" else: snake_case_ = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict snake_case_ = in_proj_weight[ : config.hidden_size, : ] snake_case_ = in_proj_bias[: config.hidden_size] snake_case_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] snake_case_ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] 
snake_case_ = in_proj_weight[ -config.hidden_size :, : ] snake_case_ = in_proj_bias[-config.hidden_size :] def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: snake_case_ = dct.pop(_SCREAMING_SNAKE_CASE ) snake_case_ = val def _a ( ) -> Any: snake_case_ = """http://images.cocodataset.org/val2017/000000039769.jpg""" snake_case_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = ViTConfig() snake_case_ = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": snake_case_ = True snake_case_ = int(vit_name[-12:-10] ) snake_case_ = int(vit_name[-9:-6] ) else: snake_case_ = 1_000 snake_case_ = """huggingface/label-files""" snake_case_ = """imagenet-1k-id2label.json""" snake_case_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) snake_case_ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} snake_case_ = int(vit_name[-6:-4] ) snake_case_ = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith("""tiny""" ): snake_case_ = 192 snake_case_ = 768 snake_case_ = 12 snake_case_ = 3 elif vit_name[9:].startswith("""small""" ): snake_case_ = 384 snake_case_ = 1_536 snake_case_ = 12 snake_case_ = 6 else: pass else: if vit_name[4:].startswith("""small""" ): snake_case_ = 768 snake_case_ = 2_304 snake_case_ = 8 snake_case_ = 8 elif vit_name[4:].startswith("""base""" ): pass elif vit_name[4:].startswith("""large""" ): snake_case_ = 1_024 snake_case_ = 4_096 snake_case_ = 24 snake_case_ = 16 elif vit_name[4:].startswith("""huge""" ): snake_case_ = 1_280 snake_case_ = 5_120 snake_case_ = 32 snake_case_ = 16 # load original model from timm snake_case_ = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ) timm_model.eval() # load state_dict of original model, remove and rename some keys snake_case_ = timm_model.state_dict() if base_model: remove_classification_head_(_SCREAMING_SNAKE_CASE ) snake_case_ = create_rename_keys(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) read_in_q_k_v(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # load HuggingFace model if vit_name[-5:] == "in21k": snake_case_ = ViTModel(_SCREAMING_SNAKE_CASE ).eval() else: snake_case_ = ViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval() model.load_state_dict(_SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: snake_case_ = DeiTImageProcessor(size=config.image_size ) else: snake_case_ = ViTImageProcessor(size=config.image_size ) snake_case_ = image_processor(images=prepare_img() , return_tensors="""pt""" ) snake_case_ = encoding["""pixel_values"""] snake_case_ = model(_SCREAMING_SNAKE_CASE ) if base_model: snake_case_ = timm_model.forward_features(_SCREAMING_SNAKE_CASE ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(_SCREAMING_SNAKE_CASE , 
outputs.pooler_output , atol=1E-3 ) else: snake_case_ = timm_model(_SCREAMING_SNAKE_CASE ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.logits , atol=1E-3 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--vit_name', default='vit_base_patch16_224', type=str, help='Name of the ViT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) __SCREAMING_SNAKE_CASE : int = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
347
0
def solution(limit: int = 50_000_000) -> int: # count the numbers below `limit` expressible as # prime**2 + prime**3 + prime**4 ret = set() prime_square_limit = int((limit - 24) ** (1 / 2)) # sieve the odd primes up to prime_square_limit primes = set(range(3, prime_square_limit + 1, 2)) primes.add(2) for p in range(3, prime_square_limit + 1, 2): if p not in primes: continue primes.difference_update(set(range(p * p, prime_square_limit + 1, p))) for prime1 in primes: square = prime1 * prime1 for prime2 in primes: cube = prime2 * prime2 * prime2 if square + cube >= limit - 16: break for prime3 in primes: tetr = prime3 * prime3 * prime3 * prime3 total = square + cube + tetr if total >= limit: break ret.add(total) return len(ret) if __name__ == "__main__": print(f"{solution() = }")
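# This is Project Euler problem 87: count the numbers below `limit` that
# are the sum of a prime square, a prime cube and a prime fourth power.
# The problem statement supplies the small-scale check used here: below
# fifty there are exactly four such numbers (28, 33, 47, 49), e.g.
# 28 = 2**2 + 2**3 + 2**4.
assert solution(50) == 4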
349
"""simple docstring""" import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __A (unittest.TestCase): '''simple docstring''' def __init__( self : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple=13 , UpperCAmelCase_ : List[Any]=7 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=99 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : Tuple=5 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Any=37 , UpperCAmelCase_ : int="gelu" , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=512 , UpperCAmelCase_ : Optional[Any]=16 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : str=4 , ) ->Tuple: """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_attention_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = num_choices def lowerCAmelCase ( self : Optional[int] ) ->str: """simple docstring""" snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_attention_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = None if self.use_token_type_ids: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCAmelCase ( self : List[str] ) ->Dict: """simple docstring""" snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class __A (snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Union[str, Any] = True __lowercase: int = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, 
FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def lowerCAmelCase ( self : Optional[Any] ) ->Tuple: """simple docstring""" snake_case_ = FlaxRoFormerModelTester(self ) @slow def lowerCAmelCase ( self : Any ) ->List[str]: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=UpperCAmelCase_ ) snake_case_ = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCAmelCase_ ) @require_flax class __A (unittest.TestCase): '''simple docstring''' @slow def lowerCAmelCase ( self : str ) ->Dict: """simple docstring""" snake_case_ = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) snake_case_ = jnp.array([[0, 1, 2, 3, 4, 5]] ) snake_case_ = model(UpperCAmelCase_ )[0] snake_case_ = 50_000 snake_case_ = (1, 6, vocab_size) self.assertEqual(output.shape , UpperCAmelCase_ ) snake_case_ = jnp.array( [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
347
0
from pathlib import Path import fire from tqdm import tqdm def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None: try: import datasets except (ModuleNotFoundError, ImportError): raise ImportError("run pip install datasets") pair = f"{src_lang}-{tgt_lang}" print(f"Converting {dataset}-{pair}") ds = datasets.load_dataset(dataset, pair) if save_dir is None: save_dir = f"{dataset}-{pair}" save_dir = Path(save_dir) save_dir.mkdir(exist_ok=True) for split in ds.keys(): print(f"Splitting {split} with {ds[split].num_rows} records") # to save to val.source, val.target like summary datasets fn = "val" if split == "validation" else split src_path = save_dir.joinpath(f"{fn}.source") tgt_path = save_dir.joinpath(f"{fn}.target") src_fp = src_path.open("w+") tgt_fp = tgt_path.open("w+") # reader is the bottleneck so writing one record at a time doesn't slow things down for x in tqdm(ds[split]): ex = x["translation"] src_fp.write(ex[src_lang] + "\n") tgt_fp.write(ex[tgt_lang] + "\n") print(f"Saved {dataset} dataset to {save_dir}") if __name__ == "__main__": fire.Fire(download_wmt_dataset)
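# `fire.Fire` exposes the function's keyword arguments as CLI flags; a
# plausible invocation (the script file name is an assumption) is:
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir ./wmt16-ro-en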
162
"""simple docstring""" from __future__ import annotations def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool: snake_case_ = get_failure_array(_SCREAMING_SNAKE_CASE ) # 2) Step through text searching for pattern snake_case_ , snake_case_ = 0, 0 # index into text, pattern while i < len(_SCREAMING_SNAKE_CASE ): if pattern[j] == text[i]: if j == (len(_SCREAMING_SNAKE_CASE ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: snake_case_ = failure[j - 1] continue i += 1 return False def _a ( _SCREAMING_SNAKE_CASE ) -> list[int]: snake_case_ = [0] snake_case_ = 0 snake_case_ = 1 while j < len(_SCREAMING_SNAKE_CASE ): if pattern[i] == pattern[j]: i += 1 elif i > 0: snake_case_ = failure[i - 1] continue j += 1 failure.append(_SCREAMING_SNAKE_CASE ) return failure if __name__ == "__main__": # Test 1) __SCREAMING_SNAKE_CASE : Optional[int] = 'abc1abc12' __SCREAMING_SNAKE_CASE : Optional[int] = 'alskfjaldsabc1abc1abc12k23adsfabcabc' __SCREAMING_SNAKE_CASE : List[str] = 'alskfjaldsk23adsfabcabc' assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) __SCREAMING_SNAKE_CASE : int = 'ABABX' __SCREAMING_SNAKE_CASE : Optional[Any] = 'ABABZABABYABABX' assert kmp(pattern, text) # Test 3) __SCREAMING_SNAKE_CASE : Any = 'AAAB' __SCREAMING_SNAKE_CASE : List[Any] = 'ABAAAAAB' assert kmp(pattern, text) # Test 4) __SCREAMING_SNAKE_CASE : Optional[int] = 'abcdabcy' __SCREAMING_SNAKE_CASE : str = 'abcxabcdabxabcdabcdabcy' assert kmp(pattern, text) # Test 5) __SCREAMING_SNAKE_CASE : Any = 'aabaabaaa' assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
347
0
import argparse import json import os import torch from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def UpperCamelCase ( __magic_name__ : Dict , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] ) -> Dict: """simple docstring""" with open(_SCREAMING_SNAKE_CASE ) as metadata_file: lowercase__ = json.load(_SCREAMING_SNAKE_CASE ) lowercase__ = LukeConfig(use_entity_aware_attention=_SCREAMING_SNAKE_CASE , **metadata["""model_config"""] ) # Load in the weights from the checkpoint_path lowercase__ = torch.load(_SCREAMING_SNAKE_CASE , map_location="""cpu""" ) # Load the entity vocab file lowercase__ = load_entity_vocab(_SCREAMING_SNAKE_CASE ) lowercase__ = RobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] ) # Add special tokens to the token vocabulary for downstream tasks lowercase__ = AddedToken("""<ent>""" , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) lowercase__ = AddedToken("""<ent2>""" , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' ) tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) with open(os.path.join(_SCREAMING_SNAKE_CASE , LukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowercase__ = LukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) # Initialize the embeddings of the special tokens lowercase__ = state_dict["""embeddings.word_embeddings.weight"""] lowercase__ = word_emb[tokenizer.convert_tokens_to_ids(["""@"""] )[0]].unsqueeze(0 ) lowercase__ = word_emb[tokenizer.convert_tokens_to_ids(["""#"""] )[0]].unsqueeze(0 ) lowercase__ = torch.cat([word_emb, ent_emb, enta_emb] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: lowercase__ = f'''encoder.layer.{layer_index}.attention.self.''' lowercase__ = state_dict[prefix + matrix_name] lowercase__ = state_dict[prefix + matrix_name] lowercase__ = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks lowercase__ = state_dict["""entity_embeddings.entity_embeddings.weight"""] lowercase__ = entity_emb[entity_vocab["""[MASK]"""]] lowercase__ = LukeModel(config=_SCREAMING_SNAKE_CASE ).eval() lowercase__ , lowercase__ = model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE ) if not (len(_SCREAMING_SNAKE_CASE ) == 1 and missing_keys[0] == "embeddings.position_ids"): raise ValueError(f'''Missing keys {", ".join(_SCREAMING_SNAKE_CASE )}. 
Expected only missing embeddings.position_ids''' ) if not (all(key.startswith("""entity_predictions""" ) or key.startswith("""lm_head""" ) for key in unexpected_keys )): raise ValueError( """Unexpected keys""" f''' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}''' ) # Check outputs lowercase__ = LukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE , task="""entity_classification""" ) lowercase__ = ( """Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the""" """ new world number one avoid a humiliating second- round exit at Wimbledon .""" ) lowercase__ = (39, 42) lowercase__ = tokenizer(_SCREAMING_SNAKE_CASE , entity_spans=[span] , add_prefix_space=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ) lowercase__ = model(**_SCREAMING_SNAKE_CASE ) # Verify word hidden states if model_size == "large": lowercase__ = torch.Size((1, 42, 1024) ) lowercase__ = torch.tensor( [[0.0_1_3_3, 0.0_8_6_5, 0.0_0_9_5], [0.3_0_9_3, -0.2_5_7_6, -0.7_4_1_8], [-0.1_7_2_0, -0.2_1_1_7, -0.2_8_6_9]] ) else: # base lowercase__ = torch.Size((1, 42, 768) ) lowercase__ = torch.tensor([[0.0_0_3_7, 0.1_3_6_8, -0.0_0_9_1], [0.1_0_9_9, 0.3_3_2_9, -0.1_0_9_5], [0.0_7_6_5, 0.5_3_3_5, 0.1_1_7_9]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": lowercase__ = torch.Size((1, 1, 1024) ) lowercase__ = torch.tensor([[0.0_4_6_6, -0.0_1_0_6, -0.0_1_7_9]] ) else: # base lowercase__ = torch.Size((1, 1, 768) ) lowercase__ = torch.tensor([[0.1_4_5_7, 0.1_0_4_4, 0.0_1_7_4]] ) if not (outputs.entity_last_hidden_state.shape != expected_shape): raise ValueError( f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is''' f''' {expected_shape}''' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ): raise ValueError # Finally, save our PyTorch model and tokenizer print("""Saving PyTorch model to {}""".format(_SCREAMING_SNAKE_CASE ) ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) def UpperCamelCase ( __magic_name__ : Optional[Any] ) -> List[str]: """simple docstring""" lowercase__ = {} with open(_SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" ) as f: for index, line in enumerate(_SCREAMING_SNAKE_CASE ): lowercase__ , lowercase__ = line.rstrip().split("""\t""" ) lowercase__ = index return entity_vocab if __name__ == "__main__": A : Any = argparse.ArgumentParser() # Required parameters parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.') parser.add_argument( '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.' ) parser.add_argument( '--entity_vocab_path', default=None, type=str, help='Path to an entity_vocab.tsv file, containing the entity vocabulary.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.' ) parser.add_argument( '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.' 
) A : int = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
305
"""simple docstring""" from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class __A (snake_case__): '''simple docstring''' @slow @require_torch def lowerCAmelCase ( self : Union[str, Any] ) ->Dict: """simple docstring""" snake_case_ = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" ) snake_case_ = BertTokenizer.from_pretrained("""bert-base-uncased""" ) snake_case_ = bertabert.config.encoder.vocab_size snake_case_ = tokenizer.sep_token_id snake_case_ = tokenizer.cls_token_id snake_case_ = 128 snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" ) snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" ) snake_case_ = train_dataset.select(range(32 ) ) snake_case_ = val_dataset.select(range(16 ) ) snake_case_ = 4 def _map_to_encoder_decoder_inputs(UpperCAmelCase_ : int ): # Tokenizer will automatically set [BOS] <text> [EOS] snake_case_ = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=512 ) snake_case_ = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=128 ) snake_case_ = inputs.input_ids snake_case_ = inputs.attention_mask snake_case_ = outputs.input_ids snake_case_ = outputs.input_ids.copy() snake_case_ = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""] ] snake_case_ = outputs.attention_mask assert all(len(UpperCAmelCase_ ) == 512 for x in inputs.input_ids ) assert all(len(UpperCAmelCase_ ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(UpperCAmelCase_ : Union[str, Any] ): snake_case_ = pred.label_ids snake_case_ = pred.predictions # all unnecessary tokens are removed snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCAmelCase_ ) )] ) / len(UpperCAmelCase_ ) return {"accuracy": accuracy} # map train dataset snake_case_ = train_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , ) train_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) # same for validation dataset snake_case_ = val_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , ) val_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) snake_case_ = self.get_auto_remove_tmp_dir() snake_case_ = SeqaSeqTrainingArguments( output_dir=UpperCAmelCase_ , per_device_train_batch_size=UpperCAmelCase_ , per_device_eval_batch_size=UpperCAmelCase_ , predict_with_generate=UpperCAmelCase_ , evaluation_strategy="""steps""" , do_train=UpperCAmelCase_ , do_eval=UpperCAmelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer snake_case_ = SeqaSeqTrainer( 
model=UpperCAmelCase_ , args=UpperCAmelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCAmelCase_ , eval_dataset=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , ) # start training trainer.train()
347
0
def z_function(input_str: str) -> list[int]:
    z_result = [0 for _ in range(len(input_str))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if the new index's result gives us a larger right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with the concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if the value is greater than or equal to the length of the pattern
        # string, this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1
    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
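# A minimal usage sketch of the Z-based pattern counter above; the example
# string is illustrative, not from the source. "abra" occurs at offsets 0
# and 7 of "abracadabra", so find_pattern should return 2: the Z-values of
# "abra" + "abracadabra" reach len("abra") exactly at those two positions.
assert find_pattern("abra", "abracadabra") == 2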
62
"""simple docstring""" # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys __SCREAMING_SNAKE_CASE : Tuple = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8') __SCREAMING_SNAKE_CASE : Tuple = subprocess.check_output(f"""git diff --name-only {fork_point_sha}""".split()).decode('utf-8').split() __SCREAMING_SNAKE_CASE : Any = '|'.join(sys.argv[1:]) __SCREAMING_SNAKE_CASE : Optional[Any] = re.compile(Rf"""^({joined_dirs}).*?\.py$""") __SCREAMING_SNAKE_CASE : List[str] = [x for x in modified_files if regex.match(x)] print(' '.join(relevant_modified_files), end='')
347
0
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase=False ) -> Any: lowercase__ : Dict = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ('''cls_token''', '''vit.embeddings.cls_token'''), ('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''), ('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''), ('''pos_embed''', '''vit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ('''pre_logits.fc.weight''', '''pooler.dense.weight'''), ('''pre_logits.fc.bias''', '''pooler.dense.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" lowercase__ : Optional[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=False ) -> Tuple: for i in range(config.num_hidden_layers ): if base_model: lowercase__ : List[Any] = '''''' else: lowercase__ : Optional[int] = '''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowercase__ : str = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) lowercase__ : int = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict lowercase__ : Optional[int] = in_proj_weight[ : config.hidden_size, : ] lowercase__ : str = in_proj_bias[: config.hidden_size] lowercase__ : Any = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] 
lowercase__ : Union[str, Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowercase__ : Any = in_proj_weight[ -config.hidden_size :, : ] lowercase__ : List[Any] = in_proj_bias[-config.hidden_size :] def __UpperCAmelCase ( __lowerCamelCase ) -> List[str]: lowercase__ : int = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str: lowercase__ : List[str] = dct.pop(_SCREAMING_SNAKE_CASE ) lowercase__ : Optional[Any] = val def __UpperCAmelCase ( ) -> Any: lowercase__ : Optional[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowercase__ : List[Any] = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Any: lowercase__ : Optional[Any] = ViTConfig() lowercase__ : List[str] = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": lowercase__ : Optional[Any] = True lowercase__ : Optional[int] = int(vit_name[-12:-10] ) lowercase__ : Union[str, Any] = int(vit_name[-9:-6] ) else: lowercase__ : Union[str, Any] = 10_00 lowercase__ : str = '''huggingface/label-files''' lowercase__ : Optional[int] = '''imagenet-1k-id2label.json''' lowercase__ : Optional[int] = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) , '''r''' ) ) lowercase__ : Any = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowercase__ : Optional[Any] = idalabel lowercase__ : List[Any] = {v: k for k, v in idalabel.items()} lowercase__ : int = int(vit_name[-6:-4] ) lowercase__ : Optional[int] = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith('''tiny''' ): lowercase__ : Tuple = 1_92 lowercase__ : Any = 7_68 lowercase__ : Any = 12 lowercase__ : List[str] = 3 elif vit_name[9:].startswith('''small''' ): lowercase__ : List[Any] = 3_84 lowercase__ : Any = 15_36 lowercase__ : str = 12 lowercase__ : int = 6 else: pass else: if vit_name[4:].startswith('''small''' ): lowercase__ : int = 7_68 lowercase__ : List[str] = 23_04 lowercase__ : int = 8 lowercase__ : Any = 8 elif vit_name[4:].startswith('''base''' ): pass elif vit_name[4:].startswith('''large''' ): lowercase__ : Union[str, Any] = 10_24 lowercase__ : Dict = 40_96 lowercase__ : Tuple = 24 lowercase__ : Union[str, Any] = 16 elif vit_name[4:].startswith('''huge''' ): lowercase__ : int = 12_80 lowercase__ : Tuple = 51_20 lowercase__ : str = 32 lowercase__ : Union[str, Any] = 16 # load original model from timm lowercase__ : Optional[int] = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ) timm_model.eval() # load state_dict of original model, remove and rename some keys lowercase__ : Union[str, Any] = timm_model.state_dict() if base_model: remove_classification_head_(_SCREAMING_SNAKE_CASE ) lowercase__ : Any = create_rename_keys(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) read_in_q_k_v(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # load HuggingFace model if vit_name[-5:] == "in21k": lowercase__ : Optional[Any] = ViTModel(_SCREAMING_SNAKE_CASE ).eval() else: lowercase__ : Dict = ViTForImageClassification(_SCREAMING_SNAKE_CASE 
).eval() model.load_state_dict(_SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: lowercase__ : int = DeiTImageProcessor(size=config.image_size ) else: lowercase__ : List[Any] = ViTImageProcessor(size=config.image_size ) lowercase__ : Optional[Any] = image_processor(images=prepare_img() , return_tensors='''pt''' ) lowercase__ : str = encoding['''pixel_values'''] lowercase__ : Optional[int] = model(_SCREAMING_SNAKE_CASE ) if base_model: lowercase__ : int = timm_model.forward_features(_SCREAMING_SNAKE_CASE ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.pooler_output , atol=1E-3 ) else: lowercase__ : str = timm_model(_SCREAMING_SNAKE_CASE ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.logits , atol=1E-3 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--vit_name', default='vit_base_patch16_224', type=str, help='Name of the ViT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) lowerCAmelCase_ = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
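# A minimal sketch of calling the converter above programmatically instead of
# through argparse; the function name and argument order (vit_name,
# pytorch_dump_folder_path) come from the script's own __main__ block, while
# the module name in the import is a hypothetical assumption.
from convert_vit_timm_to_pytorch import convert_vit_checkpoint  # hypothetical module name

convert_vit_checkpoint("vit_base_patch16_224", "./vit-base-patch16-224")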
16
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } __SCREAMING_SNAKE_CASE : List[Any] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = {} with open(_SCREAMING_SNAKE_CASE , """r""" ) as file: for line_number, line in enumerate(_SCREAMING_SNAKE_CASE ): snake_case_ = line.strip() if line: snake_case_ = line.split() snake_case_ = line_number snake_case_ = words[0] snake_case_ = value return result def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: for attribute in key.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_SCREAMING_SNAKE_CASE ): snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]] snake_case_ = """param""" if weight_type is not None and weight_type != "param": snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape elif weight_type is not None and weight_type == "param": snake_case_ = hf_pointer for attribute in hf_param_name.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = shape_pointer.shape # let's reduce dimension snake_case_ = value[0] else: snake_case_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": snake_case_ = value elif weight_type == "weight_g": snake_case_ = value elif weight_type == "weight_v": snake_case_ = value elif weight_type == "bias": snake_case_ = value elif weight_type == "param": for attribute in hf_param_name.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = value else: snake_case_ = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: snake_case_ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_SCREAMING_SNAKE_CASE ): snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]] snake_case_ = """param""" if weight_type is not None and weight_type != "param": snake_case_ = """.""".join([key, weight_type] ) elif weight_type is not None and weight_type == "param": snake_case_ = """.""".join([key, hf_param_name] ) else: snake_case_ = key snake_case_ = value if """lm_head""" in full_key else value[0] __SCREAMING_SNAKE_CASE : int = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> List[str]: snake_case_ = False for key, mapped_key in MAPPING.items(): snake_case_ = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: snake_case_ = True if "*" in mapped_key: snake_case_ = name.split(_SCREAMING_SNAKE_CASE )[0].split(""".""" )[-2] snake_case_ = mapped_key.replace("""*""" , _SCREAMING_SNAKE_CASE ) if "weight_g" in name: snake_case_ = """weight_g""" elif "weight_v" in name: snake_case_ = """weight_v""" elif "bias" in name: snake_case_ = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj snake_case_ = """weight""" else: snake_case_ = None if hf_dict is not None: rename_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return is_used return is_used def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = [] snake_case_ = fairseq_model.state_dict() snake_case_ = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): snake_case_ = False if "conv_layers" in name: load_conv_layer( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == """group""" , ) snake_case_ = True else: snake_case_ = load_wavaveca_layer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not is_used: unused_weights.append(_SCREAMING_SNAKE_CASE ) logger.warning(f"""Unused weights: {unused_weights}""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: snake_case_ = full_name.split("""conv_layers.""" )[-1] snake_case_ = name.split(""".""" ) snake_case_ = int(items[0] ) snake_case_ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size 
{value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_SCREAMING_SNAKE_CASE ) @torch.no_grad() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False ) -> int: if config_path is not None: snake_case_ = WavaVecaConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) else: snake_case_ = WavaVecaConfig() if is_seq_class: snake_case_ = read_txt_into_dict(_SCREAMING_SNAKE_CASE ) snake_case_ = idalabel snake_case_ = WavaVecaForSequenceClassification(_SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE ) elif is_finetuned: if dict_path: snake_case_ = Dictionary.load(_SCREAMING_SNAKE_CASE ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq snake_case_ = target_dict.pad_index snake_case_ = target_dict.bos_index snake_case_ = target_dict.eos_index snake_case_ = len(target_dict.symbols ) snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , """vocab.json""" ) if not os.path.isdir(_SCREAMING_SNAKE_CASE ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_SCREAMING_SNAKE_CASE ) ) return os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) snake_case_ = target_dict.indices # fairseq has the <pad> and <s> switched snake_case_ = 0 snake_case_ = 1 with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaCTCTokenizer( _SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_SCREAMING_SNAKE_CASE , ) snake_case_ = True if config.feat_extract_norm == """layer""" else False snake_case_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) snake_case_ = WavaVecaProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE ) processor.save_pretrained(_SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaForCTC(_SCREAMING_SNAKE_CASE ) else: snake_case_ = 
WavaVecaForPreTraining(_SCREAMING_SNAKE_CASE ) if is_finetuned or is_seq_class: snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: snake_case_ = argparse.Namespace(task="""audio_pretraining""" ) snake_case_ = fairseq.tasks.setup_task(_SCREAMING_SNAKE_CASE ) snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_SCREAMING_SNAKE_CASE ) snake_case_ = model[0].eval() recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , not is_finetuned ) hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) parser.add_argument( '--is_seq_class', action='store_true', help='Whether the model to convert is a fine-tuned sequence classification model or not', ) __SCREAMING_SNAKE_CASE : Any = parser.parse_args() __SCREAMING_SNAKE_CASE : List[Any] = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
347
0
'''simple docstring'''

from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa

if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self) -> "pa.StructType":
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self) -> "pa.StructType":
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
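# A minimal sketch of the Translation feature above in a dataset schema,
# using the public `datasets` API; the tiny in-memory example data is an
# illustrative assumption, not from the source.
from datasets import Dataset, Features, Translation

features = Features({"translation": Translation(languages=["en", "fr"])})
ds = Dataset.from_dict({"translation": [{"en": "the cat", "fr": "le chat"}]}, features=features)
print(ds[0]["translation"])  # {'en': 'the cat', 'fr': 'le chat'}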
237
"""simple docstring""" import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class __A : '''simple docstring''' def __init__( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=14 , UpperCAmelCase_ : Union[str, Any]=7 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : str=99 , UpperCAmelCase_ : Union[str, Any]=32 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : str=37 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : int=512 , UpperCAmelCase_ : Tuple=0.02 , ) ->List[str]: """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_input_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = rotary_dim snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = initializer_range snake_case_ = None snake_case_ = vocab_size - 1 snake_case_ = vocab_size - 1 snake_case_ = vocab_size - 1 def lowerCAmelCase ( self : int ) ->Optional[int]: """simple docstring""" snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_input_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=UpperCAmelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def lowerCAmelCase ( self : Dict ) ->Tuple: """simple docstring""" snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowerCAmelCase ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict ) ->Tuple: """simple docstring""" snake_case_ = 20 snake_case_ = model_class_name(UpperCAmelCase_ ) snake_case_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ ) snake_case_ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) snake_case_ = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) 
snake_case_ = model( input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , ) snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) snake_case_ = model( input_ids[:, -1:] , attention_mask=UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=UpperCAmelCase_ , ) snake_case_ = model(UpperCAmelCase_ ) snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict: """simple docstring""" snake_case_ = 20 snake_case_ = model_class_name(UpperCAmelCase_ ) snake_case_ = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) snake_case_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ ) snake_case_ = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) snake_case_ = model( input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , ) snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) snake_case_ = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , ) snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ) snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) @require_flax class __A (snake_case__ , snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Any = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () __lowercase: List[str] = (FlaxGPTJForCausalLM,) if is_flax_available() else () def lowerCAmelCase ( self : Tuple ) ->List[str]: """simple docstring""" snake_case_ = FlaxGPTJModelTester(self ) def lowerCAmelCase ( self : int ) ->List[Any]: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : List[str] ) ->Any: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) @tooslow def lowerCAmelCase ( self : List[str] ) ->Optional[Any]: """simple docstring""" snake_case_ = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" ) snake_case_ = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ ) snake_case_ = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" ) snake_case_ = False snake_case_ = model.config.eos_token_id snake_case_ = jax.jit(model.generate ) snake_case_ = jit_generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences snake_case_ = 
tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = [ """Hello this is a long string of text.\n\nI'm trying to get the text of the""", """Hey, I'm a little late to the party. I'm going to""", ] self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @is_pt_flax_cross_test def lowerCAmelCase ( self : int ) ->str: """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning snake_case_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ , snake_case_ = pt_inputs["""input_ids"""].shape snake_case_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(UpperCAmelCase_ ): snake_case_ = 0 snake_case_ = 1 snake_case_ = 0 snake_case_ = 1 snake_case_ = pt_model_class(UpperCAmelCase_ ).eval() snake_case_ = model_class(UpperCAmelCase_ , dtype=jnp.floataa ) snake_case_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCAmelCase_ ) snake_case_ = fx_state with torch.no_grad(): snake_case_ = pt_model(**UpperCAmelCase_ ).to_tuple() snake_case_ = fx_model(**UpperCAmelCase_ ).to_tuple() self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(UpperCAmelCase_ ) snake_case_ = model_class.from_pretrained(UpperCAmelCase_ , from_pt=UpperCAmelCase_ ) snake_case_ = fx_model_loaded(**UpperCAmelCase_ ).to_tuple() self.assertEqual( len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @is_pt_flax_cross_test def lowerCAmelCase ( self : List[Any] ) ->str: """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning snake_case_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = pt_model_class(UpperCAmelCase_ ).eval() snake_case_ = model_class(UpperCAmelCase_ , dtype=jnp.floataa ) snake_case_ = load_flax_weights_in_pytorch_model(UpperCAmelCase_ , fx_model.params ) snake_case_ , snake_case_ = pt_inputs["""input_ids"""].shape snake_case_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(UpperCAmelCase_ ): snake_case_ = 0 snake_case_ = 1 snake_case_ = 0 snake_case_ = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): snake_case_ = pt_model(**UpperCAmelCase_ ).to_tuple() snake_case_ = fx_model(**UpperCAmelCase_ 
).to_tuple() self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(UpperCAmelCase_ ) snake_case_ = pt_model_class.from_pretrained(UpperCAmelCase_ , from_flax=UpperCAmelCase_ ) with torch.no_grad(): snake_case_ = pt_model_loaded(**UpperCAmelCase_ ).to_tuple() self.assertEqual( len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @tooslow def lowerCAmelCase ( self : List[Any] ) ->str: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" ) snake_case_ = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCAmelCase_ )
347
0
'''simple docstring''' from typing import List, Optional, Union import numpy as np import PIL import torch from PIL import Image from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) A_ = logging.get_logger(__name__) # pylint: disable=invalid-name A_ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n' def A_ ( snake_case , snake_case , snake_case=8 ): SCREAMING_SNAKE_CASE:Optional[Any] = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 SCREAMING_SNAKE_CASE:Optional[Any] = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor def A_ ( snake_case , snake_case=512 , snake_case=512 ): SCREAMING_SNAKE_CASE:Optional[int] = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 ) SCREAMING_SNAKE_CASE:str = np.array(pil_image.convert("RGB" ) ) SCREAMING_SNAKE_CASE:Union[str, Any] = arr.astype(np.floataa ) / 127.5 - 1 SCREAMING_SNAKE_CASE:List[Any] = np.transpose(_SCREAMING_SNAKE_CASE , [2, 0, 1] ) SCREAMING_SNAKE_CASE:str = torch.from_numpy(_SCREAMING_SNAKE_CASE ).unsqueeze(0 ) return image class _snake_case ( snake_case__ ): def __init__( self : int ,SCREAMING_SNAKE_CASE__ : UNetaDConditionModel ,SCREAMING_SNAKE_CASE__ : DDPMScheduler ,SCREAMING_SNAKE_CASE__ : VQModel ,): super().__init__() self.register_modules( unet=UpperCAmelCase_ ,scheduler=UpperCAmelCase_ ,movq=UpperCAmelCase_ ,) SCREAMING_SNAKE_CASE:Any = 2 ** (len(self.movq.config.block_out_channels ) - 1) def __UpperCamelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : int ): SCREAMING_SNAKE_CASE:int = min(int(num_inference_steps * strength ) ,UpperCAmelCase_ ) SCREAMING_SNAKE_CASE:str = max(num_inference_steps - init_timestep ,0 ) SCREAMING_SNAKE_CASE:str = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def __UpperCamelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ): if not isinstance(UpperCAmelCase_ ,(torch.Tensor, PIL.Image.Image, 
list) ): raise ValueError( F'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(UpperCAmelCase_ )}''' ) SCREAMING_SNAKE_CASE:Optional[int] = image.to(device=UpperCAmelCase_ ,dtype=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE:Optional[int] = batch_size * num_images_per_prompt if image.shape[1] == 4: SCREAMING_SNAKE_CASE:Any = image else: if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ) and len(UpperCAmelCase_ ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(UpperCAmelCase_ )}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) elif isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ): SCREAMING_SNAKE_CASE:Optional[Any] = [ self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCAmelCase_ ) ] SCREAMING_SNAKE_CASE:Optional[int] = torch.cat(UpperCAmelCase_ ,dim=0 ) else: SCREAMING_SNAKE_CASE:Union[str, Any] = self.movq.encode(UpperCAmelCase_ ).latent_dist.sample(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE:str = self.movq.config.scaling_factor * init_latents SCREAMING_SNAKE_CASE:Tuple = torch.cat([init_latents] ,dim=0 ) SCREAMING_SNAKE_CASE:Optional[Any] = init_latents.shape SCREAMING_SNAKE_CASE:str = randn_tensor(UpperCAmelCase_ ,generator=UpperCAmelCase_ ,device=UpperCAmelCase_ ,dtype=UpperCAmelCase_ ) # get latents SCREAMING_SNAKE_CASE:Union[str, Any] = self.scheduler.add_noise(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) SCREAMING_SNAKE_CASE:Union[str, Any] = init_latents return latents def __UpperCamelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : int=0 ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) SCREAMING_SNAKE_CASE:Any = torch.device(F'''cuda:{gpu_id}''' ) SCREAMING_SNAKE_CASE:Optional[Any] = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(UpperCAmelCase_ ,UpperCAmelCase_ ) def __UpperCamelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[str]=0 ): if is_accelerate_available() and is_accelerate_version(">=" ,"0.17.0.dev0" ): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." ) SCREAMING_SNAKE_CASE:Optional[int] = torch.device(F'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to("cpu" ,silence_dtype_warnings=UpperCAmelCase_ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) SCREAMING_SNAKE_CASE:List[str] = None for cpu_offloaded_model in [self.unet, self.movq]: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Optional[Any] = cpu_offload_with_hook(UpperCAmelCase_ ,UpperCAmelCase_ ,prev_module_hook=UpperCAmelCase_ ) # We'll offload the last model manually. 
SCREAMING_SNAKE_CASE:List[str] = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def __UpperCamelCase ( self : int ): if not hasattr(self.unet ,"_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(UpperCAmelCase_ ,"_hf_hook" ) and hasattr(module._hf_hook ,"execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(UpperCAmelCase_ ) def __call__( self : Any ,SCREAMING_SNAKE_CASE__ : Union[torch.FloatTensor, List[torch.FloatTensor]] ,SCREAMING_SNAKE_CASE__ : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] ,SCREAMING_SNAKE_CASE__ : Union[torch.FloatTensor, List[torch.FloatTensor]] ,SCREAMING_SNAKE_CASE__ : int = 512 ,SCREAMING_SNAKE_CASE__ : int = 512 ,SCREAMING_SNAKE_CASE__ : int = 100 ,SCREAMING_SNAKE_CASE__ : float = 4.0 ,SCREAMING_SNAKE_CASE__ : float = 0.3 ,SCREAMING_SNAKE_CASE__ : int = 1 ,SCREAMING_SNAKE_CASE__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,SCREAMING_SNAKE_CASE__ : Optional[str] = "pil" ,SCREAMING_SNAKE_CASE__ : bool = True ,): SCREAMING_SNAKE_CASE:str = self._execution_device SCREAMING_SNAKE_CASE:Tuple = guidance_scale > 1.0 if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ): SCREAMING_SNAKE_CASE:int = torch.cat(UpperCAmelCase_ ,dim=0 ) SCREAMING_SNAKE_CASE:List[Any] = image_embeds.shape[0] if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ): SCREAMING_SNAKE_CASE:List[str] = torch.cat(UpperCAmelCase_ ,dim=0 ) if do_classifier_free_guidance: SCREAMING_SNAKE_CASE:List[str] = image_embeds.repeat_interleave(UpperCAmelCase_ ,dim=0 ) SCREAMING_SNAKE_CASE:int = negative_image_embeds.repeat_interleave(UpperCAmelCase_ ,dim=0 ) SCREAMING_SNAKE_CASE:List[str] = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(dtype=self.unet.dtype ,device=UpperCAmelCase_ ) if not isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ): SCREAMING_SNAKE_CASE:List[str] = [image] if not all(isinstance(UpperCAmelCase_ ,(PIL.Image.Image, torch.Tensor) ) for i in image ): raise ValueError( F'''Input is in incorrect format: {[type(UpperCAmelCase_ ) for i in image]}. 
Currently, we only support PIL image and pytorch tensor''' ) SCREAMING_SNAKE_CASE:Any = torch.cat([prepare_image(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) for i in image] ,dim=0 ) SCREAMING_SNAKE_CASE:Tuple = image.to(dtype=image_embeds.dtype ,device=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE:Any = self.movq.encode(UpperCAmelCase_ )["latents"] SCREAMING_SNAKE_CASE:Optional[Any] = latents.repeat_interleave(UpperCAmelCase_ ,dim=0 ) self.scheduler.set_timesteps(UpperCAmelCase_ ,device=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:List[Any] = self.get_timesteps(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) SCREAMING_SNAKE_CASE:Dict = timesteps[:1].repeat(batch_size * num_images_per_prompt ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:int = downscale_height_and_width(UpperCAmelCase_ ,UpperCAmelCase_ ,self.movq_scale_factor ) SCREAMING_SNAKE_CASE:str = self.prepare_latents( UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,image_embeds.dtype ,UpperCAmelCase_ ,UpperCAmelCase_ ) for i, t in enumerate(self.progress_bar(UpperCAmelCase_ ) ): # expand the latents if we are doing classifier free guidance SCREAMING_SNAKE_CASE:Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents SCREAMING_SNAKE_CASE:Any = {"image_embeds": image_embeds} SCREAMING_SNAKE_CASE:Dict = self.unet( sample=UpperCAmelCase_ ,timestep=UpperCAmelCase_ ,encoder_hidden_states=UpperCAmelCase_ ,added_cond_kwargs=UpperCAmelCase_ ,return_dict=UpperCAmelCase_ ,)[0] if do_classifier_free_guidance: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:List[Any] = noise_pred.split(latents.shape[1] ,dim=1 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Optional[int] = noise_pred.chunk(2 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Tuple = variance_pred.chunk(2 ) SCREAMING_SNAKE_CASE:Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) SCREAMING_SNAKE_CASE:str = torch.cat([noise_pred, variance_pred_text] ,dim=1 ) if not ( hasattr(self.scheduler.config ,"variance_type" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:List[Any] = noise_pred.split(latents.shape[1] ,dim=1 ) # compute the previous noisy sample x_t -> x_t-1 SCREAMING_SNAKE_CASE:Optional[Any] = self.scheduler.step( UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,generator=UpperCAmelCase_ ,)[0] # post-processing SCREAMING_SNAKE_CASE:Optional[Any] = self.movq.decode(UpperCAmelCase_ ,force_not_quantize=UpperCAmelCase_ )["sample"] if output_type not in ["pt", "np", "pil"]: raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: SCREAMING_SNAKE_CASE:Optional[Any] = image * 0.5 + 0.5 SCREAMING_SNAKE_CASE:Tuple = image.clamp(0 ,1 ) SCREAMING_SNAKE_CASE:int = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy() if output_type == "pil": SCREAMING_SNAKE_CASE:Any = self.numpy_to_pil(UpperCAmelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCAmelCase_ )
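# A quick worked check of the downscale helper defined at the top of this
# pipeline, using its upstream diffusers name `downscale_height_and_width`
# (an assumption; the name is obfuscated above): a 768x768 request with the
# default scale factor of 8 gives 768 // 8**2 = 12 latent cells per side,
# mapped back to 12 * 8 = 96.
new_h, new_w = downscale_height_and_width(768, 768)
assert (new_h, new_w) == (96, 96)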
139
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING __SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) class __A (snake_case__): '''simple docstring''' __lowercase: int = """upernet""" def __init__( self : str , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : str=512 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Optional[Any]=[1, 2, 3, 6] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Tuple=0.4 , UpperCAmelCase_ : Tuple=384 , UpperCAmelCase_ : Union[str, Any]=256 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : Tuple=255 , **UpperCAmelCase_ : Dict , ) ->Union[str, Any]: """simple docstring""" super().__init__(**UpperCAmelCase_ ) if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) snake_case_ = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): snake_case_ = backbone_config.get("""model_type""" ) snake_case_ = CONFIG_MAPPING[backbone_model_type] snake_case_ = config_class.from_dict(UpperCAmelCase_ ) snake_case_ = backbone_config snake_case_ = hidden_size snake_case_ = initializer_range snake_case_ = pool_scales snake_case_ = use_auxiliary_head snake_case_ = auxiliary_loss_weight snake_case_ = auxiliary_in_channels snake_case_ = auxiliary_channels snake_case_ = auxiliary_num_convs snake_case_ = auxiliary_concat_input snake_case_ = loss_ignore_index def lowerCAmelCase ( self : str ) ->Optional[Any]: """simple docstring""" snake_case_ = copy.deepcopy(self.__dict__ ) snake_case_ = self.backbone_config.to_dict() snake_case_ = self.__class__.model_type return output
347
0
import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": A : Union[str, Any] = argparse.ArgumentParser( description=( 'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned' ' Distillation' ) ) parser.add_argument('--model_type', default='bert', choices=['bert']) parser.add_argument('--model_name', default='bert-base-uncased', type=str) parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str) parser.add_argument('--vocab_transform', action='store_true') A : str = parser.parse_args() if args.model_type == "bert": A : Optional[int] = BertForMaskedLM.from_pretrained(args.model_name) A : Tuple = 'bert' else: raise ValueError('args.model_type should be "bert".') A : Tuple = model.state_dict() A : int = {} for w in ["word_embeddings", "position_embeddings"]: A : Optional[int] = state_dict[F"{prefix}.embeddings.{w}.weight"] for w in ["weight", "bias"]: A : str = state_dict[F"{prefix}.embeddings.LayerNorm.{w}"] A : Optional[int] = 0 for teacher_idx in [0, 2, 4, 7, 9, 1_1]: for w in ["weight", "bias"]: A : int = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}" ] A : Any = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}" ] A : Optional[int] = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}" ] A : int = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}" ] A : Tuple = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}" ] A : str = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}" ] A : List[str] = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}" ] A : List[str] = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}" ] std_idx += 1 A : Optional[Any] = state_dict['cls.predictions.decoder.weight'] A : Tuple = state_dict['cls.predictions.bias'] if args.vocab_transform: for w in ["weight", "bias"]: A : Tuple = state_dict[F"cls.predictions.transform.dense.{w}"] A : Optional[int] = state_dict[F"cls.predictions.transform.LayerNorm.{w}"] print(F"N layers selected for distillation: {std_idx}") print(F"Number of params transferred for distillation: {len(compressed_sd.keys())}") print(F"Save transferred checkpoint to {args.dump_checkpoint}.") torch.save(compressed_sd, args.dump_checkpoint)
6
"""simple docstring""" import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class __A (unittest.TestCase): '''simple docstring''' def lowerCAmelCase ( self : Optional[int] ) ->Dict: """simple docstring""" snake_case_ = """ylacombe/bark-small""" snake_case_ = tempfile.mkdtemp() snake_case_ = """en_speaker_1""" snake_case_ = """This is a test string""" snake_case_ = """speaker_embeddings_path.json""" snake_case_ = """speaker_embeddings""" def lowerCAmelCase ( self : List[str] , **UpperCAmelCase_ : str ) ->Optional[int]: """simple docstring""" return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase_ ) def lowerCAmelCase ( self : Any ) ->Dict: """simple docstring""" shutil.rmtree(self.tmpdirname ) def lowerCAmelCase ( self : List[Any] ) ->Dict: """simple docstring""" snake_case_ = self.get_tokenizer() snake_case_ = BarkProcessor(tokenizer=UpperCAmelCase_ ) processor.save_pretrained(self.tmpdirname ) snake_case_ = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def lowerCAmelCase ( self : Dict ) ->int: """simple docstring""" snake_case_ = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) snake_case_ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) snake_case_ = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def lowerCAmelCase ( self : Optional[Any] ) ->Any: """simple docstring""" snake_case_ = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) snake_case_ = 35 snake_case_ = 2 snake_case_ = 8 snake_case_ = { """semantic_prompt""": np.ones(UpperCAmelCase_ ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset snake_case_ = processor(text=self.input_string , voice_preset=UpperCAmelCase_ ) snake_case_ = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file snake_case_ = os.path.join(self.tmpdirname , """file.npz""" ) np.savez(UpperCAmelCase_ , **UpperCAmelCase_ ) snake_case_ = processor(text=self.input_string , voice_preset=UpperCAmelCase_ ) snake_case_ = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from the hub snake_case_ = processor(text=self.input_string , voice_preset=self.voice_preset ) def lowerCAmelCase ( self : Tuple ) ->Dict: """simple docstring""" snake_case_ = self.get_tokenizer() snake_case_ = BarkProcessor(tokenizer=UpperCAmelCase_ ) snake_case_ = processor(text=self.input_string ) snake_case_ = tokenizer( self.input_string , padding="""max_length""" 
, max_length=256 , add_special_tokens=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
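For reference, the processor exercised by the test record above can be used directly; a minimal sketch, assuming network access to the ylacombe/bark-small checkpoint (the preset name mirrors the test and is otherwise an assumption):

# Minimal BarkProcessor usage sketch; checkpoint and preset names come from the test above.
from transformers import BarkProcessor

processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
inputs = processor(text=["This is a test string"], voice_preset="en_speaker_1")
print(inputs["input_ids"].shape)  # PyTorch tensor of shape (batch, max_length)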
347
0
"""simple docstring""" import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _UpperCAmelCase ( snake_case__, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =KandinskyVaaPriorPipeline lowerCamelCase__ =["""prompt"""] lowerCamelCase__ =["""prompt""", """negative_prompt"""] lowerCamelCase__ =[ """num_images_per_prompt""", """generator""", """num_inference_steps""", """latents""", """negative_prompt""", """guidance_scale""", """output_type""", """return_dict""", ] lowerCamelCase__ =False @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return 32 @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return 32 @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.time_input_dim @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.time_input_dim * 4 @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return 1_00 @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) return tokenizer @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' torch.manual_seed(0 ) __snake_case : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) return CLIPTextModelWithProjection(UpperCAmelCase_ ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' torch.manual_seed(0 ) __snake_case : str = { '''num_attention_heads''': 2, '''attention_head_dim''': 12, '''embedding_dim''': self.text_embedder_hidden_size, '''num_layers''': 1, } __snake_case : List[str] = PriorTransformer(**UpperCAmelCase_ ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 __snake_case : Tuple = nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' torch.manual_seed(0 ) __snake_case : Optional[Any] = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=2_24 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , ) __snake_case : List[str] = CLIPVisionModelWithProjection(UpperCAmelCase_ ) return model @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = CLIPImageProcessor( crop_size=2_24 , do_center_crop=UpperCAmelCase_ , do_normalize=UpperCAmelCase_ , do_resize=UpperCAmelCase_ , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_24 , ) return image_processor def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.dummy_prior __snake_case : List[str] = self.dummy_image_encoder __snake_case : 
int = self.dummy_text_encoder __snake_case : int = self.dummy_tokenizer __snake_case : List[Any] = self.dummy_image_processor __snake_case : Tuple = UnCLIPScheduler( variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=10_00 , clip_sample=UpperCAmelCase_ , clip_sample_range=10.0 , ) __snake_case : Optional[Any] = { '''prior''': prior, '''image_encoder''': image_encoder, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''scheduler''': scheduler, '''image_processor''': image_processor, } return components def SCREAMING_SNAKE_CASE (self , a_ , a_=0 ): '''simple docstring''' if str(UpperCAmelCase_ ).startswith('''mps''' ): __snake_case : str = torch.manual_seed(UpperCAmelCase_ ) else: __snake_case : str = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ ) __snake_case : Optional[int] = { '''prompt''': '''horse''', '''generator''': generator, '''guidance_scale''': 4.0, '''num_inference_steps''': 2, '''output_type''': '''np''', } return inputs def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = '''cpu''' __snake_case : int = self.get_dummy_components() __snake_case : str = self.pipeline_class(**UpperCAmelCase_ ) __snake_case : Optional[int] = pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) __snake_case : int = pipe(**self.get_dummy_inputs(UpperCAmelCase_ ) ) __snake_case : Tuple = output.image_embeds __snake_case : List[str] = pipe( **self.get_dummy_inputs(UpperCAmelCase_ ) , return_dict=UpperCAmelCase_ , )[0] __snake_case : Dict = image[0, -10:] __snake_case : Optional[int] = image_from_tuple[0, -10:] assert image.shape == (1, 32) __snake_case : List[Any] = np.array( [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = torch_device == '''cpu''' __snake_case : Dict = True __snake_case : Dict = False self._test_inference_batch_single_identical( test_max_difference=UpperCAmelCase_ , relax_max_difference=UpperCAmelCase_ , test_mean_pixel_difference=UpperCAmelCase_ , ) @skip_mps def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = torch_device == '''cpu''' __snake_case : Tuple = False self._test_attention_slicing_forward_pass( test_max_difference=UpperCAmelCase_ , test_mean_pixel_difference=UpperCAmelCase_ , )
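The pipeline class under test corresponds to diffusers' public KandinskyV22PriorPipeline (the record's class name is obfuscated); a hedged end-to-end sketch, assuming the kandinsky-community/kandinsky-2-2-prior weights on the Hub:

import torch
from diffusers import KandinskyV22PriorPipeline

# Weights are downloaded from the Hub on first use; the repo id is an assumption.
pipe = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
generator = torch.Generator().manual_seed(0)
out = pipe("horse", guidance_scale=4.0, num_inference_steps=25, generator=generator)
print(out.image_embeds.shape)  # CLIP image embedding consumed by the decoder stage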
102
"""simple docstring""" import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 __SCREAMING_SNAKE_CASE : int = sys.version_info >= (3, 10) def _a ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE ) @dataclass class __A : '''simple docstring''' __lowercase: int __lowercase: float __lowercase: str __lowercase: bool @dataclass class __A : '''simple docstring''' __lowercase: int = 42 __lowercase: str = field(default="""toto""" , metadata={"""help""": """help message"""}) @dataclass class __A : '''simple docstring''' __lowercase: bool = False __lowercase: bool = True __lowercase: Optional[bool] = None class __A (snake_case__): '''simple docstring''' __lowercase: str = """titi""" __lowercase: Any = """toto""" class __A (snake_case__): '''simple docstring''' __lowercase: int = """titi""" __lowercase: Optional[Any] = """toto""" __lowercase: List[Any] = 42 @dataclass class __A : '''simple docstring''' __lowercase: BasicEnum = "toto" def lowerCAmelCase ( self : int ) ->List[Any]: """simple docstring""" snake_case_ = BasicEnum(self.foo ) @dataclass class __A : '''simple docstring''' __lowercase: MixedTypeEnum = "toto" def lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]: """simple docstring""" snake_case_ = MixedTypeEnum(self.foo ) @dataclass class __A : '''simple docstring''' __lowercase: Optional[int] = None __lowercase: Optional[float] = field(default=snake_case__ , metadata={"""help""": """help message"""}) __lowercase: Optional[str] = None __lowercase: Optional[List[str]] = list_field(default=[]) __lowercase: Optional[List[int]] = list_field(default=[]) @dataclass class __A : '''simple docstring''' __lowercase: List[int] = list_field(default=[]) __lowercase: List[int] = list_field(default=[1, 2, 3]) __lowercase: List[str] = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) __lowercase: List[float] = list_field(default=[0.1, 0.2, 0.3]) @dataclass class __A : '''simple docstring''' __lowercase: List[int] = field() __lowercase: str = field() __lowercase: BasicEnum = field() def lowerCAmelCase ( self : Any ) ->str: """simple docstring""" snake_case_ = BasicEnum(self.required_enum ) @dataclass class __A : '''simple docstring''' __lowercase: int __lowercase: "BasicEnum" = field() __lowercase: "Optional[bool]" = None __lowercase: "str" = field(default="""toto""" , metadata={"""help""": """help message"""}) __lowercase: "List[str]" = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) if is_python_no_less_than_3_10: @dataclass class __A : '''simple docstring''' __lowercase: bool = False __lowercase: bool = True __lowercase: bool | None = None @dataclass class __A : '''simple docstring''' __lowercase: int | None = None __lowercase: float | None = field(default=snake_case__ , metadata={"""help""": """help message"""}) __lowercase: str | None = None __lowercase: list[str] | None = list_field(default=[]) __lowercase: list[int] | None = list_field(default=[]) class __A (unittest.TestCase): '''simple docstring''' def lowerCAmelCase ( 
self : Optional[int] , UpperCAmelCase_ : argparse.ArgumentParser , UpperCAmelCase_ : argparse.ArgumentParser ) ->Optional[int]: """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""} snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get("""choices""" , UpperCAmelCase_ ) and yy.get("""choices""" , UpperCAmelCase_ ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx["""type"""](UpperCAmelCase_ ) , yy["""type"""](UpperCAmelCase_ ) ) del xx["type"], yy["type"] self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : int ) ->Any: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--bar""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--baz""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--flag""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""] ((snake_case_) , ) = parser.parse_args_into_dataclasses(UpperCAmelCase_ , look_for_args_file=UpperCAmelCase_ ) self.assertFalse(example.flag ) def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , default=42 , type=UpperCAmelCase_ ) expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]: """simple docstring""" snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) expected.add_argument("""--baz""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument("""--no_baz""" , action="""store_false""" , default=UpperCAmelCase_ , dest="""baz""" ) expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ ) snake_case_ = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCAmelCase_ ) for dataclass_type in dataclass_types: snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """--no_baz"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """--baz"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = 
parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) def lowerCAmelCase ( self : int ) ->List[str]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument( """--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(args.foo , """toto""" ) snake_case_ = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) snake_case_ = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo , """titi""" ) snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) snake_case_ = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo , 42 ) snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def lowerCAmelCase ( self : Dict ) ->str: """simple docstring""" @dataclass class __A : '''simple docstring''' __lowercase: Literal["titi", "toto", 42] = "toto" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument( """--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(args.foo , """toto""" ) snake_case_ = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo , """titi""" ) snake_case_ = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo , 42 ) def lowerCAmelCase ( self : Optional[int] ) ->Dict: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=UpperCAmelCase_ ) expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ ) expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual( UpperCAmelCase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , ) snake_case_ = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() ) self.assertEqual(UpperCAmelCase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ ) expected.add_argument("""--bar""" , 
default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="""help message""" ) expected.add_argument("""--baz""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ ) expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) snake_case_ = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCAmelCase_ ) for dataclass_type in dataclass_types: snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , bar=UpperCAmelCase_ , baz=UpperCAmelCase_ , ces=[] , des=[] ) ) snake_case_ = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) ) def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--required_list""" , nargs="""+""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--required_str""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument( """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument( """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , ) expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ ) expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" ) expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict ) ->Tuple: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } snake_case_ = parser.parse_dict(UpperCAmelCase_ )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, """extra""": 42, } self.assertRaises(UpperCAmelCase_ , parser.parse_dict , UpperCAmelCase_ , allow_extra_keys=UpperCAmelCase_ ) def lowerCAmelCase ( self : Any ) ->Dict: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ = os.path.join(UpperCAmelCase_ , """temp_json""" ) os.mkdir(UpperCAmelCase_ ) with open(temp_local_path + """.json""" , """w+""" ) as f: json.dump(UpperCAmelCase_ , UpperCAmelCase_ ) 
snake_case_ = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[int] ) ->List[str]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ = os.path.join(UpperCAmelCase_ , """temp_yaml""" ) os.mkdir(UpperCAmelCase_ ) with open(temp_local_path + """.yaml""" , """w+""" ) as f: yaml.dump(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict ) ->Any: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ )
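The parsing behaviour these tests assert reduces to a small, self-contained pattern (standard HfArgumentParser API; only the dataclass below is invented for illustration):

from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class ExampleArgs:  # hypothetical dataclass, mirroring the BasicExample fields above
    foo: int = 42
    bar: float = 3.14
    baz: str = field(default="toto", metadata={"help": "help message"})
    flag: bool = False

parser = HfArgumentParser(ExampleArgs)
(args,) = parser.parse_args_into_dataclasses(["--foo", "1", "--baz", "quux"])
print(args)  # ExampleArgs(foo=1, bar=3.14, baz='quux', flag=False)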
347
0
'''simple docstring''' import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class UpperCamelCase_ ( unittest.TestCase ): def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=4 , ) -> Tuple: UpperCAmelCase : Dict = parent UpperCAmelCase : List[str] = batch_size UpperCAmelCase : int = seq_length UpperCAmelCase : Tuple = is_training UpperCAmelCase : Any = use_attention_mask UpperCAmelCase : Any = use_token_type_ids UpperCAmelCase : List[str] = use_labels UpperCAmelCase : Tuple = vocab_size UpperCAmelCase : Optional[int] = hidden_size UpperCAmelCase : int = num_hidden_layers UpperCAmelCase : int = num_attention_heads UpperCAmelCase : Tuple = intermediate_size UpperCAmelCase : List[str] = hidden_act UpperCAmelCase : Tuple = hidden_dropout_prob UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob UpperCAmelCase : str = max_position_embeddings UpperCAmelCase : str = type_vocab_size UpperCAmelCase : Optional[Any] = type_sequence_label_size UpperCAmelCase : Union[str, Any] = initializer_range UpperCAmelCase : Any = num_choices def _lowercase( self ) -> str: UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Tuple = None if self.use_attention_mask: UpperCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : str = None if self.use_token_type_ids: UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase : str = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _lowercase( self ) -> Dict: UpperCAmelCase : Tuple = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = config_and_inputs UpperCAmelCase : Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class UpperCamelCase_ ( snake_case__ , unittest.TestCase ): lowercase = True lowercase = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def _lowercase( self ) -> Tuple: UpperCAmelCase : Any = FlaxRoFormerModelTester(self ) @slow def _lowercase( self ) -> List[str]: for model_class_name in self.all_model_classes: UpperCAmelCase : List[str] = 
model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=UpperCAmelCase_ ) UpperCAmelCase : int = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCAmelCase_ ) @require_flax class UpperCamelCase_ ( unittest.TestCase ): @slow def _lowercase( self ) -> Dict: UpperCAmelCase : Tuple = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) UpperCAmelCase : str = jnp.array([[0, 1, 2, 3, 4, 5]] ) UpperCAmelCase : Dict = model(UpperCAmelCase_ )[0] UpperCAmelCase : Optional[Any] = 50000 UpperCAmelCase : Optional[int] = (1, 6, vocab_size) self.assertEqual(output.shape , UpperCAmelCase_ ) UpperCAmelCase : Tuple = jnp.array( [[[-0.1_2_0_5, -1.0_2_6_5, 0.2_9_2_2], [-1.5_1_3_4, 0.1_9_7_4, 0.1_5_1_9], [-5.0_1_3_5, -3.9_0_0_3, -0.8_4_0_4]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1e-4 ) )
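The slow path above can be reproduced standalone; a sketch assuming the junnyu/roformer_chinese_base checkpoint referenced in the test is reachable:

import jax.numpy as jnp
from transformers import FlaxRoFormerForMaskedLM

model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
logits = model(input_ids)[0]
print(logits.shape)  # (1, 6, 50000), matching the expected shape in the test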
265
"""simple docstring""" import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy __SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , ) -> Optional[Any]: snake_case_ = bnb_quantization_config.load_in_abit snake_case_ = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,""" """ make sure you have the latest version of `bitsandbytes` installed.""" ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,""" """make sure you have the latest version of `bitsandbytes` installed.""" ) snake_case_ = [] # custom device map if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(device_map.keys() ) > 1: snake_case_ = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: snake_case_ = get_keys_to_not_convert(_SCREAMING_SNAKE_CASE ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(_SCREAMING_SNAKE_CASE ) snake_case_ = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: snake_case_ = [] snake_case_ = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(_SCREAMING_SNAKE_CASE ) # compatibility with peft snake_case_ = load_in_abit snake_case_ = load_in_abit snake_case_ = get_parameter_device(_SCREAMING_SNAKE_CASE ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( """It is not recommended to quantize a loaded model. 
""" """The model should be instantiated under the `init_empty_weights` context manager.""" ) snake_case_ = replace_with_bnb_layers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) # convert param to the right dtype snake_case_ = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: snake_case_ = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" ) snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(_SCREAMING_SNAKE_CASE ): param.to(_SCREAMING_SNAKE_CASE ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" ) logger.info( f"""The model device type is {model_device.type}. However, cuda is needed for quantization.""" """We move the model to cuda.""" ) return model elif weights_location is None: raise RuntimeError( f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): snake_case_ = replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) snake_case_ = get_quantized_model_device_map( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_memory=_SCREAMING_SNAKE_CASE , no_split_module_classes=_SCREAMING_SNAKE_CASE , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): snake_case_ = True snake_case_ = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] ) load_checkpoint_in_model( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=_SCREAMING_SNAKE_CASE , offload_state_dict=_SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(_SCREAMING_SNAKE_CASE , device_map=_SCREAMING_SNAKE_CASE , offload_dir=_SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: if device_map is None: if torch.cuda.is_available(): snake_case_ = {"""""": torch.cuda.current_device()} else: raise RuntimeError("""No GPU found. 
A GPU is needed for quantization.""" ) logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( """If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """ """'sequential'.""" ) snake_case_ = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) snake_case_ = {} snake_case_ = special_dtypes snake_case_ = no_split_module_classes snake_case_ = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": snake_case_ = get_balanced_memory( _SCREAMING_SNAKE_CASE , low_zero=(device_map == """balanced_low_0""") , max_memory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) snake_case_ = max_memory snake_case_ = infer_auto_device_map(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # check if don't have any quantized module on the cpu snake_case_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules snake_case_ = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( """ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. """ ) else: logger.info( """Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit""" ) del device_map_without_some_modules return device_map def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: if modules_to_not_convert is None: snake_case_ = [] snake_case_ , snake_case_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return model def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) -> List[Any]: snake_case_ = False for name, module in model.named_children(): if current_key_name is None: snake_case_ = [] current_key_name.append(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` snake_case_ = """.""".join(_SCREAMING_SNAKE_CASE ) snake_case_ = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: snake_case_ = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: snake_case_ = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: snake_case_ = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" ) snake_case_ = module.weight.data if module.bias is not None: snake_case_ = module.bias.data bnb_module.requires_grad_(_SCREAMING_SNAKE_CASE ) setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = True if len(list(module.children() ) ) > 0: snake_case_ , snake_case_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _a ( _SCREAMING_SNAKE_CASE ) -> Any: # Create a copy of the model with init_empty_weights(): snake_case_ = deepcopy(_SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside `init_empty_weights` context manager` snake_case_ = find_tied_parameters(_SCREAMING_SNAKE_CASE ) # For compatibility with Accelerate < 0.18 if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): snake_case_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: snake_case_ = sum(_SCREAMING_SNAKE_CASE , [] ) snake_case_ = len(_SCREAMING_SNAKE_CASE ) > 0 # Check if it is a base model snake_case_ = False if hasattr(_SCREAMING_SNAKE_CASE , """base_model_prefix""" ): snake_case_ = not hasattr(_SCREAMING_SNAKE_CASE , model.base_model_prefix 
) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head snake_case_ = list(model.named_children() ) snake_case_ = [list_modules[-1][0]] # add last module together with tied weights snake_case_ = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE ) snake_case_ = list(set(_SCREAMING_SNAKE_CASE ) ) + list(_SCREAMING_SNAKE_CASE ) # remove ".weight" from the keys snake_case_ = [""".weight""", """.bias"""] snake_case_ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: snake_case_ = name.replace(_SCREAMING_SNAKE_CASE , """""" ) filtered_module_names.append(_SCREAMING_SNAKE_CASE ) return filtered_module_names def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: for m in model.modules(): if isinstance(_SCREAMING_SNAKE_CASE , bnb.nn.Linearabit ): return True return False def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: return next(parameter.parameters() ).device def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0 , dtype=_SCREAMING_SNAKE_CASE , value=_SCREAMING_SNAKE_CASE ) snake_case_ = param_name snake_case_ = model if "." in tensor_name: snake_case_ = tensor_name.split(""".""" ) for split in splits[:-1]: snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if new_module is None: raise ValueError(f"""{module} has no attribute {split}.""" ) snake_case_ = new_module snake_case_ = splits[-1] # offload weights snake_case_ = False offload_weight(module._parameters[tensor_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) if hasattr(module._parameters[tensor_name] , """SCB""" ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE , ) else: offload_weight(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) offload_weight(_SCREAMING_SNAKE_CASE , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """meta""" , dtype=_SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
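These helpers sit behind accelerate's public quantization entry point; a hedged sketch of that entry point (it assumes a CUDA device, the 8-bit path, and a placeholder weights directory, and keyword names may differ across accelerate versions):

import torch
from transformers import AutoConfig, AutoModelForCausalLM
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model

# Build an empty (meta-device) model skeleton, as the warning above recommends.
config = AutoConfig.from_pretrained("facebook/opt-125m")
with init_empty_weights():
    empty_model = AutoModelForCausalLM.from_config(config)

bnb_config = BnbQuantizationConfig(load_in_8bit=True, torch_dtype=torch.float16)
model = load_and_quantize_model(
    empty_model,
    bnb_quantization_config=bnb_config,
    weights_location="path/to/opt-125m-weights",  # placeholder, not a real path
    device_map="auto",
)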
347
0
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): import torch if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm __UpperCAmelCase = logging.get_logger(__name__) @dataclass class lowerCAmelCase_ ( snake_case__ ): UpperCAmelCase__ : Any = [ """no_inference""", """no_cuda""", """no_tpu""", """no_speed""", """no_memory""", """no_env_print""", """no_multi_process""", ] def __init__( self, **SCREAMING_SNAKE_CASE_ ) -> Dict: for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: UpperCamelCase : Union[str, Any] = deprecated_arg[3:] setattr(self, UpperCAmelCase_, not kwargs.pop(UpperCAmelCase_ ) ) logger.warning( F"""{deprecated_arg} is depreciated. Please use --no_{positive_arg} or""" F""" {positive_arg}={kwargs[positive_arg]}""" ) UpperCamelCase : Union[str, Any] = kwargs.pop('torchscript', self.torchscript ) UpperCamelCase : int = kwargs.pop('torch_xla_tpu_print_metrics', self.torch_xla_tpu_print_metrics ) UpperCamelCase : Optional[int] = kwargs.pop('fp16_opt_level', self.fpaa_opt_level ) super().__init__(**UpperCAmelCase_ ) UpperCAmelCase__ : bool = field(default=snake_case__ , metadata={"help": "Trace the models using torchscript"} ) UpperCAmelCase__ : bool = field(default=snake_case__ , metadata={"help": "Print Xla/PyTorch tpu metrics"} ) UpperCAmelCase__ : str = field( default="O1" , metadata={ "help": ( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. " "See details at https://nvidia.github.io/apex/amp.html" ) } , ) @cached_property def snake_case_ ( self ) -> Tuple["torch.device", int]: requires_backends(self, ['torch'] ) logger.info('PyTorch: setting up devices' ) if not self.cuda: UpperCamelCase : Optional[int] = torch.device('cpu' ) UpperCamelCase : Any = 0 elif is_torch_tpu_available(): UpperCamelCase : Tuple = xm.xla_device() UpperCamelCase : Tuple = 0 else: UpperCamelCase : str = torch.device('cuda' if torch.cuda.is_available() else 'cpu' ) UpperCamelCase : Optional[int] = torch.cuda.device_count() return device, n_gpu @property def snake_case_ ( self ) -> Tuple: return is_torch_tpu_available() and self.tpu @property def snake_case_ ( self ) -> int: requires_backends(self, ['torch'] ) # TODO(PVP): currently only single GPU is supported return torch.cuda.current_device() @property def snake_case_ ( self ) -> "torch.device": requires_backends(self, ['torch'] ) return self._setup_devices[0] @property def snake_case_ ( self ) -> int: requires_backends(self, ['torch'] ) return self._setup_devices[1] @property def snake_case_ ( self ) -> List[Any]: return self.n_gpu > 0
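A minimal end-to-end use of these arguments with the matching runner; a sketch (the model name and sizes are illustrative, and the benchmark utilities are deprecated in recent transformers releases):

from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["bert-base-uncased"],
    batch_sizes=[1],
    sequence_lengths=[8],
    inference=True,
    training=False,
    multi_process=False,  # single process keeps the smoke test simple
)
results = PyTorchBenchmark(args).run()
print(results.time_inference_result)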
119
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { 'microsoft/beit-base-patch16-224-pt22k': ( 'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class __A (snake_case__): '''simple docstring''' __lowercase: Optional[int] = """beit""" def __init__( self : List[str] , UpperCAmelCase_ : List[Any]=8_192 , UpperCAmelCase_ : Dict=768 , UpperCAmelCase_ : int=12 , UpperCAmelCase_ : Tuple=12 , UpperCAmelCase_ : List[Any]=3_072 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : Dict=0.0 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : Optional[Any]=1E-12 , UpperCAmelCase_ : int=224 , UpperCAmelCase_ : Tuple=16 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Dict=[3, 5, 7, 11] , UpperCAmelCase_ : Tuple=[1, 2, 3, 6] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : List[Any]=0.4 , UpperCAmelCase_ : Optional[Any]=256 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Tuple=255 , **UpperCAmelCase_ : List[str] , ) ->Optional[Any]: """simple docstring""" super().__init__(**UpperCAmelCase_ ) snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = image_size snake_case_ = patch_size snake_case_ = num_channels snake_case_ = use_mask_token snake_case_ = use_absolute_position_embeddings snake_case_ = use_relative_position_bias snake_case_ = use_shared_relative_position_bias snake_case_ = layer_scale_init_value snake_case_ = drop_path_rate snake_case_ = use_mean_pooling # decode head attributes (semantic segmentation) snake_case_ = out_indices snake_case_ = pool_scales # auxiliary head attributes (semantic segmentation) snake_case_ = use_auxiliary_head snake_case_ = auxiliary_loss_weight snake_case_ = auxiliary_channels snake_case_ = auxiliary_num_convs snake_case_ = auxiliary_concat_input snake_case_ = semantic_loss_ignore_index class __A (snake_case__): '''simple docstring''' __lowercase: List[Any] = version.parse("""1.11""") @property def lowerCAmelCase ( self : Dict ) ->Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCAmelCase ( self : Any ) ->float: """simple docstring""" return 1E-4
347
0
'''simple docstring''' import sys from collections import defaultdict class UpperCAmelCase__ : def __init__( self ) -> Optional[int]: __UpperCamelCase = [] def __lowerCamelCase ( self , lowercase ) -> List[Any]: return self.node_position[vertex] def __lowerCamelCase ( self , lowercase , lowercase ) -> Union[str, Any]: __UpperCamelCase = pos def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]: if start > size // 2 - 1: return else: if 2 * start + 2 >= size: __UpperCamelCase = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: __UpperCamelCase = 2 * start + 1 else: __UpperCamelCase = 2 * start + 2 if heap[smallest_child] < heap[start]: __UpperCamelCase , __UpperCamelCase = heap[smallest_child], positions[smallest_child] __UpperCamelCase , __UpperCamelCase = ( heap[start], positions[start], ) __UpperCamelCase , __UpperCamelCase = temp, tempa __UpperCamelCase = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] , self.get_position(positions[start] ) ) self.set_position(positions[start] , UpperCAmelCase_ ) self.top_to_bottom(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase ) -> str: __UpperCamelCase = position[index] while index != 0: __UpperCamelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: __UpperCamelCase = heap[parent] __UpperCamelCase = position[parent] self.set_position(position[parent] , UpperCAmelCase_ ) else: __UpperCamelCase = val __UpperCamelCase = temp self.set_position(UpperCAmelCase_ , UpperCAmelCase_ ) break __UpperCamelCase = parent else: __UpperCamelCase = val __UpperCamelCase = temp self.set_position(UpperCAmelCase_ , 0 ) def __lowerCamelCase ( self , lowercase , lowercase ) -> int: __UpperCamelCase = len(UpperCAmelCase_ ) // 2 - 1 for i in range(UpperCAmelCase_ , -1 , -1 ): self.top_to_bottom(UpperCAmelCase_ , UpperCAmelCase_ , len(UpperCAmelCase_ ) , UpperCAmelCase_ ) def __lowerCamelCase ( self , lowercase , lowercase ) -> Optional[Any]: __UpperCamelCase = positions[0] __UpperCamelCase = sys.maxsize self.top_to_bottom(UpperCAmelCase_ , 0 , len(UpperCAmelCase_ ) , UpperCAmelCase_ ) return temp def _lowercase ( __A ): '''simple docstring''' __UpperCamelCase = Heap() __UpperCamelCase = [0] * len(_SCREAMING_SNAKE_CASE ) __UpperCamelCase = [-1] * len(_SCREAMING_SNAKE_CASE ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph __UpperCamelCase = [] # Heap of Distance of vertices from their neighboring vertex __UpperCamelCase = [] for vertex in range(len(_SCREAMING_SNAKE_CASE ) ): distance_tv.append(sys.maxsize ) positions.append(_SCREAMING_SNAKE_CASE ) heap.node_position.append(_SCREAMING_SNAKE_CASE ) __UpperCamelCase = [] __UpperCamelCase = 1 __UpperCamelCase = sys.maxsize for neighbor, distance in adjacency_list[0]: __UpperCamelCase = 0 __UpperCamelCase = distance heap.heapify(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) for _ in range(1 ,len(_SCREAMING_SNAKE_CASE ) ): __UpperCamelCase = heap.delete_minimum(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) __UpperCamelCase = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(_SCREAMING_SNAKE_CASE )] ): __UpperCamelCase = distance heap.bottom_to_top( 
_SCREAMING_SNAKE_CASE ,heap.get_position(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) __UpperCamelCase = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > edges_number = int(input('Enter number of edges: ').strip()) adjacency_list = defaultdict(list) for _ in range(edges_number): edge = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(_lowercase(adjacency_list))
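Because every Heap method above was collapsed to the same obfuscated name, the class is not executable as printed; for reference, a compact self-contained version of the same algorithm (Prim's MST), using heapq instead of the custom heap, with a hypothetical helper name:

import heapq
from collections import defaultdict

def prim_mst(adjacency_list, start=0):
    """Return the edges of a minimum spanning tree of a connected graph."""
    visited = {start}
    frontier = [(w, start, v) for v, w in adjacency_list[start]]
    heapq.heapify(frontier)
    tree_edges = []
    while frontier:
        w, u, v = heapq.heappop(frontier)
        if v in visited:
            continue  # stale entry: v was already reached by a cheaper edge
        visited.add(v)
        tree_edges.append((u, v))
        for nxt, nw in adjacency_list[v]:
            if nxt not in visited:
                heapq.heappush(frontier, (nw, v, nxt))
    return tree_edges

graph = defaultdict(list)
for u, v, w in [(0, 1, 4), (0, 2, 1), (2, 1, 2), (1, 3, 5)]:
    graph[u].append((v, w))
    graph[v].append((u, w))
print(prim_mst(graph))  # [(0, 2), (2, 1), (1, 3)]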
349
"""simple docstring""" import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging __SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : List[Any] = {'vocab_file': 'spiece.model'} __SCREAMING_SNAKE_CASE : int = { 'vocab_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model', 't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model', 't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model', } } # TODO(PVP) - this should be removed in Transformers v5 __SCREAMING_SNAKE_CASE : Dict = { 't5-small': 512, 't5-base': 512, 't5-large': 512, 't5-3b': 512, 't5-11b': 512, } __SCREAMING_SNAKE_CASE : Optional[int] = '▁' class __A (snake_case__): '''simple docstring''' __lowercase: Optional[int] = VOCAB_FILES_NAMES __lowercase: Any = PRETRAINED_VOCAB_FILES_MAP __lowercase: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase: List[str] = ["""input_ids""", """attention_mask"""] def __init__( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any]="</s>" , UpperCAmelCase_ : Optional[Any]="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Tuple=100 , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , UpperCAmelCase_ : Optional[int]=True , **UpperCAmelCase_ : Dict , ) ->None: """simple docstring""" if extra_ids > 0 and additional_special_tokens is None: snake_case_ = [F"""<extra_id_{i}>""" for i in range(UpperCAmelCase_ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens snake_case_ = len(set(filter(lambda UpperCAmelCase_ : bool("""extra_id""" in str(UpperCAmelCase_ ) ) , UpperCAmelCase_ ) ) ) if extra_tokens != extra_ids: raise ValueError( F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are""" """ provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids""" """ tokens""" ) if legacy: logger.warning_once( F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to""" """ read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" ) snake_case_ = legacy snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , extra_ids=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , legacy=UpperCAmelCase_ , **UpperCAmelCase_ , ) snake_case_ = vocab_file snake_case_ = extra_ids snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCAmelCase_ ) @staticmethod def lowerCAmelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] ) ->Union[str, Any]: """simple docstring""" if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: snake_case_ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( """This tokenizer was incorrectly instantiated with a model max length of""" F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this""" """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with""" """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on""" F""" {pretrained_model_name_or_path} automatically truncating your input to""" F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences""" F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with""" """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please""" """ instantiate this tokenizer with `model_max_length` set to your preferred value.""" , UpperCAmelCase_ , ) return max_model_length @property def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]: """simple docstring""" return self.sp_model.get_piece_size() + self._extra_ids def lowerCAmelCase ( self : Any ) ->Optional[int]: """simple docstring""" snake_case_ = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False ) ->List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(UpperCAmelCase_ )) + [1] return ([0] * len(UpperCAmelCase_ )) + [1] + ([0] * len(UpperCAmelCase_ )) + [1] def lowerCAmelCase ( self : Any ) ->List[str]: """simple docstring""" return list( set(filter(lambda UpperCAmelCase_ : bool(re.search(R"""<extra_id_\d+>""" , UpperCAmelCase_ ) ) is not None , self.additional_special_tokens ) ) ) def lowerCAmelCase ( self : Dict ) ->str: """simple docstring""" return [self._convert_token_to_id(UpperCAmelCase_ ) for token in self.get_sentinel_tokens()] def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : List[int] ) ->List[int]: """simple docstring""" if len(UpperCAmelCase_ ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F"""This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated""" """ eos tokens being added.""" ) return token_ids else: return token_ids + [self.eos_token_id] def lowerCAmelCase ( self : str , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]: """simple docstring""" snake_case_ = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]: """simple docstring""" snake_case_ = self._add_eos_if_not_present(UpperCAmelCase_ ) if token_ids_a is None: return token_ids_a else: snake_case_ = self._add_eos_if_not_present(UpperCAmelCase_ ) return token_ids_a + token_ids_a def __getstate__( self : Optional[Any] ) ->Tuple: """simple docstring""" snake_case_ = self.__dict__.copy() snake_case_ = None return state def __setstate__( self : Optional[Any] , UpperCAmelCase_ : List[Any] ) ->List[Any]: """simple docstring""" snake_case_ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): snake_case_ = {} snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCAmelCase ( self : int , UpperCAmelCase_ : "TextInput" , **UpperCAmelCase_ : Tuple ) ->List[str]: """simple docstring""" if not self.legacy: snake_case_ = SPIECE_UNDERLINE + text.replace(UpperCAmelCase_ , """ """ ) return super().tokenize(UpperCAmelCase_ , **UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any ) ->Tuple: """simple docstring""" if not self.legacy: snake_case_ = text.startswith(UpperCAmelCase_ ) if is_first: snake_case_ = text[1:] snake_case_ = self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ ) if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(UpperCAmelCase_ ): snake_case_ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" if token.startswith("""<extra_id_""" ): snake_case_ = re.match(R"""<extra_id_(\d+)>""" , UpperCAmelCase_ ) snake_case_ = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(UpperCAmelCase_ ) def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Optional[Any] ) ->List[Any]: """simple docstring""" if index < self.sp_model.get_piece_size(): snake_case_ = self.sp_model.IdToPiece(UpperCAmelCase_ ) else: snake_case_ = F"""<extra_id_{self.vocab_size - 1 - index}>""" return token def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : List[str] ) ->Optional[Any]: """simple docstring""" snake_case_ = [] snake_case_ = """""" snake_case_ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(UpperCAmelCase_ ) + token snake_case_ = True snake_case_ = [] else: current_sub_tokens.append(UpperCAmelCase_ ) snake_case_ = False out_string += self.sp_model.decode(UpperCAmelCase_ ) return out_string.strip() def lowerCAmelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ) ->Tuple[str]: """simple docstring""" if not os.path.isdir(UpperCAmelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return snake_case_ = 
os.path.join( UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase_ , """wb""" ) as fi: snake_case_ = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase_ ) return (out_vocab_file,)
347
0
'''simple docstring''' from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as 
megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
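As a quick illustration of one utility re-exported above, `find_executable_batch_size` retries its wrapped function with a halved batch size whenever it raises an out-of-memory error. This is a minimal sketch; apart from the import and the decorator's documented behavior, the names below are illustrative:

from accelerate.utils import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def training_loop(batch_size: int):
    # Build the dataloaders/model with `batch_size` here; on CUDA OOM the
    # decorator re-invokes this function with batch_size halved.
    ...


training_loop()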
162
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE = 1_000_000 ) -> int: snake_case_ = [i - 1 for i in range(limit + 1 )] for i in range(2 , limit + 1 ): if phi[i] == i - 1: for j in range(2 * i , limit + 1 , _SCREAMING_SNAKE_CASE ): phi[j] -= phi[j] // i return sum(phi[2 : limit + 1] ) if __name__ == "__main__": print(solution())
347
0
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class A ( snake_case__ ): '''simple docstring''' @slow @require_torch def lowerCamelCase__ (self : Union[str, Any] ) -> Dict: """simple docstring""" lowercase__ = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" ) lowercase__ = BertTokenizer.from_pretrained("""bert-base-uncased""" ) lowercase__ = bertabert.config.encoder.vocab_size lowercase__ = tokenizer.sep_token_id lowercase__ = tokenizer.cls_token_id lowercase__ = 128 lowercase__ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" ) lowercase__ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" ) lowercase__ = train_dataset.select(range(32 ) ) lowercase__ = val_dataset.select(range(16 ) ) lowercase__ = 4 def _map_to_encoder_decoder_inputs(_UpperCAmelCase : int ): # Tokenizer will automatically set [BOS] <text> [EOS] lowercase__ = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=512 ) lowercase__ = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=128 ) lowercase__ = inputs.input_ids lowercase__ = inputs.attention_mask lowercase__ = outputs.input_ids lowercase__ = outputs.input_ids.copy() lowercase__ = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""] ] lowercase__ = outputs.attention_mask assert all(len(UpperCAmelCase_ ) == 512 for x in inputs.input_ids ) assert all(len(UpperCAmelCase_ ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(_UpperCAmelCase : Union[str, Any] ): lowercase__ = pred.label_ids lowercase__ = pred.predictions # all unnecessary tokens are removed lowercase__ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) lowercase__ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) lowercase__ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCAmelCase_ ) )] ) / len(UpperCAmelCase_ ) return {"accuracy": accuracy} # map train dataset lowercase__ = train_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , ) train_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) # same for validation dataset lowercase__ = val_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , ) val_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = SeqaSeqTrainingArguments( output_dir=UpperCAmelCase_ , per_device_train_batch_size=UpperCAmelCase_ , per_device_eval_batch_size=UpperCAmelCase_ , predict_with_generate=UpperCAmelCase_ , evaluation_strategy="""steps""" , do_train=UpperCAmelCase_ , do_eval=UpperCAmelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer lowercase__ = SeqaSeqTrainer( model=UpperCAmelCase_ , 
args=UpperCAmelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCAmelCase_ , eval_dataset=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , ) # start training trainer.train()
305
"""simple docstring""" from __future__ import annotations def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: print(f"""Vertex\tShortest Distance from vertex {src}""" ) for i, d in enumerate(_SCREAMING_SNAKE_CASE ): print(f"""{i}\t\t{d}""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: for j in range(_SCREAMING_SNAKE_CASE ): snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: return True return False def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> list[float]: snake_case_ = [float("""inf""" )] * vertex_count snake_case_ = 0.0 for _ in range(vertex_count - 1 ): for j in range(_SCREAMING_SNAKE_CASE ): snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: snake_case_ = distance[u] + w snake_case_ = check_negative_cycle(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if negative_cycle_exists: raise Exception("""Negative cycle found""" ) return distance if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE : int = int(input('Enter number of vertices: ').strip()) __SCREAMING_SNAKE_CASE : Dict = int(input('Enter number of edges: ').strip()) __SCREAMING_SNAKE_CASE : list[dict[str, int]] = [{} for _ in range(E)] for i in range(E): print('Edge ', i + 1) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = ( int(x) for x in input('Enter source, destination, weight: ').strip().split(' ') ) __SCREAMING_SNAKE_CASE : Union[str, Any] = {'src': src, 'dst': dest, 'weight': weight} __SCREAMING_SNAKE_CASE : Union[str, Any] = int(input('\nEnter shortest path source:').strip()) __SCREAMING_SNAKE_CASE : str = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
347
0
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    # Sum all primes strictly below n.
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
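With its default limit this is the Project Euler problem 10 task (sum of primes below two million); a quick check against the worked example in that problem:

# The primes below ten are 2, 3, 5 and 7, whose sum is 17.
assert solution(10) == 17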
62
"""simple docstring""" import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) __SCREAMING_SNAKE_CASE : List[str] = logging.getLogger(__name__) __SCREAMING_SNAKE_CASE : str = tf.data.AUTOTUNE def _a ( ) -> List[str]: snake_case_ = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" ) parser.add_argument( """--pretrained_model_config""" , type=_SCREAMING_SNAKE_CASE , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , ) parser.add_argument( """--tokenizer""" , type=_SCREAMING_SNAKE_CASE , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , ) parser.add_argument( """--per_replica_batch_size""" , type=_SCREAMING_SNAKE_CASE , default=8 , help="""Batch size per TPU core.""" , ) parser.add_argument( """--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , ) parser.add_argument( """--tpu_name""" , type=_SCREAMING_SNAKE_CASE , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , ) parser.add_argument( """--tpu_zone""" , type=_SCREAMING_SNAKE_CASE , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , ) parser.add_argument( """--gcp_project""" , type=_SCREAMING_SNAKE_CASE , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" ) parser.add_argument( """--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , ) parser.add_argument( """--train_dataset""" , type=_SCREAMING_SNAKE_CASE , help="""Path to training dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , ) parser.add_argument( """--shuffle_buffer_size""" , type=_SCREAMING_SNAKE_CASE , default=2**18 , help="""Size of the shuffle buffer (in samples)""" , ) parser.add_argument( """--eval_dataset""" , type=_SCREAMING_SNAKE_CASE , help="""Path to evaluation dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , ) parser.add_argument( """--num_epochs""" , type=_SCREAMING_SNAKE_CASE , default=1 , help="""Number of epochs to train for.""" , ) parser.add_argument( """--learning_rate""" , type=_SCREAMING_SNAKE_CASE , default=1E-4 , help="""Learning rate to use for training.""" , ) parser.add_argument( """--weight_decay_rate""" , type=_SCREAMING_SNAKE_CASE , default=1E-3 , help="""Weight decay rate to use for training.""" , ) parser.add_argument( """--max_length""" , type=_SCREAMING_SNAKE_CASE , default=512 , help="""Maximum length of tokenized sequences. 
Should match the setting used in prepare_tfrecord_shards.py""" , ) parser.add_argument( """--mlm_probability""" , type=_SCREAMING_SNAKE_CASE , default=0.15 , help="""Fraction of tokens to mask during training.""" , ) parser.add_argument("""--output_dir""" , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""Path to save model checkpoints to.""" ) parser.add_argument("""--hub_model_id""" , type=_SCREAMING_SNAKE_CASE , help="""Model ID to upload to on the Hugging Face Hub.""" ) snake_case_ = parser.parse_args() return args def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]: try: if args.tpu_name: snake_case_ = tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name , zone=args.tpu_zone , project=args.gcp_project ) else: snake_case_ = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( """Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """ """--gcp_project. When running on a TPU VM, use --tpu_name local.""" ) tf.config.experimental_connect_to_cluster(_SCREAMING_SNAKE_CASE ) tf.tpu.experimental.initialize_tpu_system(_SCREAMING_SNAKE_CASE ) return tpu def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = 0 for file in file_list: snake_case_ = file.split("""/""" )[-1] snake_case_ = re.search(r"""-\d+-(\d+)\.tfrecord""" , _SCREAMING_SNAKE_CASE ).group(1 ) snake_case_ = int(_SCREAMING_SNAKE_CASE ) num_samples += sample_count return num_samples def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]: snake_case_ = count_samples(_SCREAMING_SNAKE_CASE ) snake_case_ = tf.data.Dataset.from_tensor_slices(_SCREAMING_SNAKE_CASE ) if shuffle: snake_case_ = dataset.shuffle(len(_SCREAMING_SNAKE_CASE ) ) snake_case_ = tf.data.TFRecordDataset(_SCREAMING_SNAKE_CASE , num_parallel_reads=_SCREAMING_SNAKE_CASE ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here snake_case_ = dataset.apply(tf.data.experimental.assert_cardinality(_SCREAMING_SNAKE_CASE ) ) snake_case_ = dataset.map(_SCREAMING_SNAKE_CASE , num_parallel_calls=_SCREAMING_SNAKE_CASE ) if shuffle: assert shuffle_buffer_size is not None snake_case_ = dataset.shuffle(args.shuffle_buffer_size ) snake_case_ = dataset.batch(_SCREAMING_SNAKE_CASE , drop_remainder=_SCREAMING_SNAKE_CASE ) snake_case_ = dataset.map(_SCREAMING_SNAKE_CASE , num_parallel_calls=_SCREAMING_SNAKE_CASE ) snake_case_ = dataset.prefetch(_SCREAMING_SNAKE_CASE ) return dataset def _a ( _SCREAMING_SNAKE_CASE ) -> List[Any]: if not args.no_tpu: snake_case_ = initialize_tpu(_SCREAMING_SNAKE_CASE ) snake_case_ = tf.distribute.TPUStrategy(_SCREAMING_SNAKE_CASE ) else: snake_case_ = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" ) snake_case_ = AutoTokenizer.from_pretrained(args.tokenizer ) snake_case_ = AutoConfig.from_pretrained(args.pretrained_model_config ) snake_case_ = tokenizer.vocab_size snake_case_ = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) ) if not training_records: raise ValueError(f"""No .tfrecord files found in {args.train_dataset}.""" ) snake_case_ = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) ) if not eval_records: raise ValueError(f"""No .tfrecord files found in {args.eval_dataset}.""" ) snake_case_ = count_samples(_SCREAMING_SNAKE_CASE ) snake_case_ = 
num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) snake_case_ = steps_per_epoch * args.num_epochs with strategy.scope(): snake_case_ = TFAutoModelForMaskedLM.from_config(_SCREAMING_SNAKE_CASE ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built snake_case_ , snake_case_ = create_optimizer( num_train_steps=_SCREAMING_SNAKE_CASE , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=_SCREAMING_SNAKE_CASE , metrics=["""accuracy"""] ) def decode_fn(_SCREAMING_SNAKE_CASE ): snake_case_ = { """input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), """attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), } return tf.io.parse_single_example(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. snake_case_ = DataCollatorForLanguageModeling( tokenizer=_SCREAMING_SNAKE_CASE , mlm_probability=args.mlm_probability , mlm=_SCREAMING_SNAKE_CASE , return_tensors="""tf""" ) def mask_with_collator(_SCREAMING_SNAKE_CASE ): # TF really needs an isin() function snake_case_ = ( ~tf.cast(batch["""attention_mask"""] , tf.bool ) | (batch["""input_ids"""] == tokenizer.cls_token_id) | (batch["""input_ids"""] == tokenizer.sep_token_id) ) snake_case_ , snake_case_ = data_collator.tf_mask_tokens( batch["""input_ids"""] , vocab_size=len(_SCREAMING_SNAKE_CASE ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=_SCREAMING_SNAKE_CASE , ) return batch snake_case_ = args.per_replica_batch_size * strategy.num_replicas_in_sync snake_case_ = prepare_dataset( _SCREAMING_SNAKE_CASE , decode_fn=_SCREAMING_SNAKE_CASE , mask_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , shuffle_buffer_size=args.shuffle_buffer_size , ) snake_case_ = prepare_dataset( _SCREAMING_SNAKE_CASE , decode_fn=_SCREAMING_SNAKE_CASE , mask_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , ) snake_case_ = [] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=_SCREAMING_SNAKE_CASE ) ) model.fit( _SCREAMING_SNAKE_CASE , validation_data=_SCREAMING_SNAKE_CASE , epochs=args.num_epochs , callbacks=_SCREAMING_SNAKE_CASE , ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Union[str, Any] = parse_args() main(args)
347
0
"""simple docstring""" import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class __A ( snake_case__ ,snake_case__ ,snake_case__ ): '''simple docstring''' @register_to_config def __init__( self : Tuple ,_snake_case : int ,_snake_case : int ,_snake_case : int ,_snake_case : float ,_snake_case : int ,_snake_case : int ,_snake_case : int ,_snake_case : int ,_snake_case : str ,_snake_case : bool = False ,) -> Any: """simple docstring""" super().__init__() lowercase__ : str = nn.Embedding(UpperCAmelCase_ ,UpperCAmelCase_ ) lowercase__ : Optional[int] = nn.Embedding(UpperCAmelCase_ ,UpperCAmelCase_ ) lowercase__ : List[Any] = False lowercase__ : Optional[Any] = nn.Dropout(p=UpperCAmelCase_ ) lowercase__ : Union[str, Any] = TaConfig( vocab_size=UpperCAmelCase_ ,d_model=UpperCAmelCase_ ,num_heads=UpperCAmelCase_ ,d_kv=UpperCAmelCase_ ,d_ff=UpperCAmelCase_ ,dropout_rate=UpperCAmelCase_ ,feed_forward_proj=UpperCAmelCase_ ,is_decoder=UpperCAmelCase_ ,is_encoder_decoder=UpperCAmelCase_ ,) lowercase__ : Dict = nn.ModuleList() for lyr_num in range(UpperCAmelCase_ ): lowercase__ : List[str] = TaBlock(UpperCAmelCase_ ) self.encoders.append(UpperCAmelCase_ ) lowercase__ : List[Any] = TaLayerNorm(UpperCAmelCase_ ) lowercase__ : List[Any] = nn.Dropout(p=UpperCAmelCase_ ) def UpperCAmelCase ( self : Any ,_snake_case : int ,_snake_case : int ) -> str: """simple docstring""" lowercase__ : Union[str, Any] = self.token_embedder(UpperCAmelCase_ ) lowercase__ : List[str] = encoder_input_tokens.shape[1] lowercase__ : List[Any] = torch.arange(UpperCAmelCase_ ,device=encoder_input_tokens.device ) x += self.position_encoding(UpperCAmelCase_ ) lowercase__ : List[str] = self.dropout_pre(UpperCAmelCase_ ) # inverted the attention mask lowercase__ : List[str] = encoder_input_tokens.size() lowercase__ : List[str] = self.get_extended_attention_mask(UpperCAmelCase_ ,UpperCAmelCase_ ) for lyr in self.encoders: lowercase__ : Any = lyr(UpperCAmelCase_ ,UpperCAmelCase_ )[0] lowercase__ : str = self.layer_norm(UpperCAmelCase_ ) return self.dropout_post(UpperCAmelCase_ ), encoder_inputs_mask
16
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> float: if density <= 0: raise ValueError("""Impossible fluid density""" ) if bulk_modulus <= 0: raise ValueError("""Impossible bulk modulus""" ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
347
0
'''Jacobi iteration method for a strictly diagonally dominant linear system.'''

from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        raise ValueError(f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}")
    if cols2 != 1:
        raise ValueError(f"Constant matrix must be nx1 but received {rows2}x{cols2}")
    if rows1 != rows2:
        raise ValueError(
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
    if len(init_val) != rows1:
        raise ValueError(
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for the given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(rows):
        total = 0
        for j in range(cols - 1):
            if i == j:
                continue
            total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")
    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
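A small worked call; the system below is invented for illustration but strictly diagonally dominant, so the iteration converges:

coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
constant = np.array([[2.0], [-6.0], [-4.0]])
print(jacobi_iteration_method(coefficient, constant, init_val=[0.5, -0.5, -0.5], iterations=3))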
237
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE ) -> bool: if num < 0: return False snake_case_ = num snake_case_ = 0 while num > 0: snake_case_ = rev_num * 10 + (num % 10) num //= 10 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
347
0
'''simple docstring''' from typing import List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { 'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json', } class _snake_case ( snake_case__ ): _A : Optional[Any] = """autoformer""" _A : Any = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", """num_hidden_layers""": """encoder_layers""", } def __init__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : str = "student_t" ,SCREAMING_SNAKE_CASE__ : str = "nll" ,SCREAMING_SNAKE_CASE__ : int = 1 ,SCREAMING_SNAKE_CASE__ : List[int] = [1, 2, 3, 4, 5, 6, 7] ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : int = 0 ,SCREAMING_SNAKE_CASE__ : int = 0 ,SCREAMING_SNAKE_CASE__ : int = 0 ,SCREAMING_SNAKE_CASE__ : int = 0 ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ,SCREAMING_SNAKE_CASE__ : int = 64 ,SCREAMING_SNAKE_CASE__ : int = 2 ,SCREAMING_SNAKE_CASE__ : int = 2 ,SCREAMING_SNAKE_CASE__ : int = 2 ,SCREAMING_SNAKE_CASE__ : int = 2 ,SCREAMING_SNAKE_CASE__ : int = 32 ,SCREAMING_SNAKE_CASE__ : int = 32 ,SCREAMING_SNAKE_CASE__ : str = "gelu" ,SCREAMING_SNAKE_CASE__ : float = 0.1 ,SCREAMING_SNAKE_CASE__ : float = 0.1 ,SCREAMING_SNAKE_CASE__ : float = 0.1 ,SCREAMING_SNAKE_CASE__ : float = 0.1 ,SCREAMING_SNAKE_CASE__ : float = 0.1 ,SCREAMING_SNAKE_CASE__ : int = 100 ,SCREAMING_SNAKE_CASE__ : float = 0.02 ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Dict=True ,SCREAMING_SNAKE_CASE__ : int = 10 ,SCREAMING_SNAKE_CASE__ : int = 25 ,SCREAMING_SNAKE_CASE__ : int = 3 ,**SCREAMING_SNAKE_CASE__ : int ,): SCREAMING_SNAKE_CASE:Dict = prediction_length SCREAMING_SNAKE_CASE:Optional[Any] = context_length if context_length is not None else prediction_length SCREAMING_SNAKE_CASE:Optional[Any] = distribution_output SCREAMING_SNAKE_CASE:Dict = loss SCREAMING_SNAKE_CASE:List[Any] = input_size SCREAMING_SNAKE_CASE:Optional[Any] = num_time_features SCREAMING_SNAKE_CASE:Any = lags_sequence SCREAMING_SNAKE_CASE:List[str] = scaling SCREAMING_SNAKE_CASE:Union[str, Any] = num_dynamic_real_features SCREAMING_SNAKE_CASE:Optional[Any] = num_static_real_features SCREAMING_SNAKE_CASE:Any = num_static_categorical_features if cardinality is not None and num_static_categorical_features > 0: if len(UpperCAmelCase_ ) != num_static_categorical_features: raise ValueError( "The cardinality should be a list of the same length as `num_static_categorical_features`" ) SCREAMING_SNAKE_CASE:Union[str, Any] = cardinality else: SCREAMING_SNAKE_CASE:Tuple = [0] if embedding_dimension is not None and num_static_categorical_features > 0: if len(UpperCAmelCase_ ) != num_static_categorical_features: raise ValueError( "The embedding dimension should be a list of the same length as `num_static_categorical_features`" ) SCREAMING_SNAKE_CASE:Tuple = embedding_dimension else: SCREAMING_SNAKE_CASE:Optional[int] = [min(50 ,(cat + 1) // 2 ) for cat in self.cardinality] SCREAMING_SNAKE_CASE:List[str] = num_parallel_samples # Transformer architecture configuration SCREAMING_SNAKE_CASE:Optional[Any] = input_size * len(self.lags_sequence ) + self._number_of_features SCREAMING_SNAKE_CASE:Optional[Any] = d_model SCREAMING_SNAKE_CASE:int = encoder_attention_heads 
SCREAMING_SNAKE_CASE:Optional[int] = decoder_attention_heads SCREAMING_SNAKE_CASE:Tuple = encoder_ffn_dim SCREAMING_SNAKE_CASE:int = decoder_ffn_dim SCREAMING_SNAKE_CASE:Tuple = encoder_layers SCREAMING_SNAKE_CASE:Any = decoder_layers SCREAMING_SNAKE_CASE:Optional[Any] = dropout SCREAMING_SNAKE_CASE:Optional[int] = attention_dropout SCREAMING_SNAKE_CASE:str = activation_dropout SCREAMING_SNAKE_CASE:List[Any] = encoder_layerdrop SCREAMING_SNAKE_CASE:str = decoder_layerdrop SCREAMING_SNAKE_CASE:Optional[Any] = activation_function SCREAMING_SNAKE_CASE:Optional[Any] = init_std SCREAMING_SNAKE_CASE:Any = use_cache # Autoformer SCREAMING_SNAKE_CASE:Optional[int] = label_length SCREAMING_SNAKE_CASE:Tuple = moving_average SCREAMING_SNAKE_CASE:Optional[Any] = autocorrelation_factor super().__init__(is_encoder_decoder=UpperCAmelCase_ ,**UpperCAmelCase_ ) @property def __UpperCamelCase ( self : str ): return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
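A minimal instantiation sketch for the config above; the field names come from the signature shown, while the concrete values are invented:

from transformers import AutoformerConfig

config = AutoformerConfig(
    prediction_length=24,  # forecast horizon
    context_length=48,     # conditioning window
    num_time_features=2,
    lags_sequence=[1, 2, 3, 4, 5, 6, 7],
)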
139
"""simple docstring""" import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin __SCREAMING_SNAKE_CASE : Tuple = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model') @require_sentencepiece @require_tokenizers class __A (snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Tuple = SpeechTaTokenizer __lowercase: int = False __lowercase: List[str] = True def lowerCAmelCase ( self : Any ) ->str: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing snake_case_ = SpeechTaTokenizer(UpperCAmelCase_ ) snake_case_ = AddedToken("""<mask>""" , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) snake_case_ = mask_token tokenizer.add_special_tokens({"""mask_token""": mask_token} ) tokenizer.add_tokens(["""<ctc_blank>"""] ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict: """simple docstring""" snake_case_ = """this is a test""" snake_case_ = """this is a test""" return input_text, output_text def lowerCAmelCase ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Tuple=20 , UpperCAmelCase_ : Dict=5 ) ->List[Any]: """simple docstring""" snake_case_ , snake_case_ = self.get_input_output_texts(UpperCAmelCase_ ) snake_case_ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) snake_case_ = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ ) return text, ids def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]: """simple docstring""" snake_case_ = """<pad>""" snake_case_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ ) def lowerCAmelCase ( self : int ) ->str: """simple docstring""" snake_case_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(vocab_keys[-4] , """œ""" ) self.assertEqual(vocab_keys[-2] , """<mask>""" ) self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" ) self.assertEqual(len(UpperCAmelCase_ ) , 81 ) def lowerCAmelCase ( self : Optional[int] ) ->int: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = self.get_tokenizers(do_lower_case=UpperCAmelCase_ ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): snake_case_ = tokenizer.vocab_size snake_case_ = len(UpperCAmelCase_ ) self.assertNotEqual(UpperCAmelCase_ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) snake_case_ = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""] snake_case_ = tokenizer.add_tokens(UpperCAmelCase_ ) snake_case_ = tokenizer.vocab_size snake_case_ = len(UpperCAmelCase_ ) self.assertNotEqual(UpperCAmelCase_ , 0 ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) self.assertEqual(UpperCAmelCase_ , all_size + 
len(UpperCAmelCase_ ) ) snake_case_ = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=UpperCAmelCase_ ) self.assertGreaterEqual(len(UpperCAmelCase_ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) snake_case_ = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""} snake_case_ = tokenizer.add_special_tokens(UpperCAmelCase_ ) snake_case_ = tokenizer.vocab_size snake_case_ = len(UpperCAmelCase_ ) self.assertNotEqual(UpperCAmelCase_ , 0 ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) self.assertEqual(UpperCAmelCase_ , all_size_a + len(UpperCAmelCase_ ) ) snake_case_ = tokenizer.encode( """>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=UpperCAmelCase_ ) self.assertGreaterEqual(len(UpperCAmelCase_ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def lowerCAmelCase ( self : Optional[Any] ) ->Tuple: """simple docstring""" pass def lowerCAmelCase ( self : List[str] ) ->Optional[Any]: """simple docstring""" pass def lowerCAmelCase ( self : List[str] ) ->List[str]: """simple docstring""" snake_case_ = self.get_tokenizer() snake_case_ = tokenizer.tokenize("""This is a test""" ) # fmt: off self.assertListEqual(UpperCAmelCase_ , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) snake_case_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] ) snake_case_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) # fmt: off self.assertListEqual(UpperCAmelCase_ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on snake_case_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ ) self.assertListEqual( UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] ) @slow def lowerCAmelCase ( self : str ) ->Dict: """simple docstring""" snake_case_ = [ """Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """ """general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural """ """Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """ """models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""", """BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """ """conditioning on both left and right context in all layers.""", """The quick brown fox jumps over the lazy dog.""", ] # fmt: off snake_case_ = { """input_ids""": [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], """attention_mask""": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase_ , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=UpperCAmelCase_ , )
347
0
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class __A: def __init__( self , _snake_case , _snake_case=2 , _snake_case=True , _snake_case=False , _snake_case=10 , _snake_case=3 , _snake_case=32 * 4 , _snake_case=32 * 6 , _snake_case=4 , _snake_case=32 , ) -> List[str]: '''simple docstring''' __a = parent __a = batch_size __a = is_training __a = use_auxiliary_loss __a = num_queries __a = num_channels __a = min_size __a = max_size __a = num_labels __a = mask_feature_size def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' __a = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( UpperCAmelCase_ ) __a = torch.ones([self.batch_size, self.min_size, self.max_size] , device=UpperCAmelCase_ ) __a = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=UpperCAmelCase_ ) > 0.5 ).float() __a = (torch.rand((self.batch_size, self.num_labels) , device=UpperCAmelCase_ ) > 0.5).long() __a = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]: '''simple docstring''' return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]: '''simple docstring''' __a , __a , __a , __a , __a = self.prepare_config_and_inputs() __a = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[Any]: '''simple docstring''' __a = output.encoder_hidden_states __a = output.pixel_decoder_hidden_states __a = output.transformer_decoder_hidden_states self.parent.assertTrue(len(UpperCAmelCase_ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(UpperCAmelCase_ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(UpperCAmelCase_ ) , config.decoder_config.decoder_layers ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case=False ) -> List[Any]: '''simple docstring''' with torch.no_grad(): __a = MaskFormerModel(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() __a = model(pixel_values=UpperCAmelCase_ , pixel_mask=UpperCAmelCase_ ) __a = model(UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( 
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(UpperCAmelCase_ , UpperCAmelCase_ ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) -> str: '''simple docstring''' __a = MaskFormerForInstanceSegmentation(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() def comm_check_on_output(_snake_case ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): __a = model(pixel_values=UpperCAmelCase_ , pixel_mask=UpperCAmelCase_ ) __a = model(UpperCAmelCase_ ) comm_check_on_output(UpperCAmelCase_ ) __a = model( pixel_values=UpperCAmelCase_ , pixel_mask=UpperCAmelCase_ , mask_labels=UpperCAmelCase_ , class_labels=UpperCAmelCase_ ) comm_check_on_output(UpperCAmelCase_ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class __A( snake_case__ , snake_case__ , unittest.TestCase ): snake_case_ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () snake_case_ = ( {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) snake_case_ = False snake_case_ = False snake_case_ = False snake_case_ = False def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' __a = MaskFormerModelTester(self ) __a = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ ) def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' __a , __a = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(UpperCAmelCase_ , **UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ ) def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*UpperCAmelCase_ ) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' ) def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' pass @unittest.skip(reason='''MaskFormer is not a generative model''' ) def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' pass @unittest.skip(reason='''MaskFormer does not use token embeddings''' ) def SCREAMING_SNAKE_CASE_ ( 
self ) -> Optional[Any]: '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip( reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]: '''simple docstring''' __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a = model_class(UpperCAmelCase_ ) __a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __a = [*signature.parameters.keys()] __a = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCAmelCase_ ) @slow def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' for model_name in ["facebook/maskformer-swin-small-coco"]: __a = MaskFormerModel.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' __a = (self.model_tester.min_size,) * 2 __a = { '''pixel_values''': torch.randn((2, 3, *size) , device=UpperCAmelCase_ ), '''mask_labels''': torch.randn((2, 10, *size) , device=UpperCAmelCase_ ), '''class_labels''': torch.zeros(2 , 10 , device=UpperCAmelCase_ ).long(), } __a = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(UpperCAmelCase_ ) __a = model(**UpperCAmelCase_ ) self.assertTrue(outputs.loss is not None ) def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' __a , __a = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(UpperCAmelCase_ , **UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ ) def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a = model_class(UpperCAmelCase_ ).to(UpperCAmelCase_ ) __a = model(**UpperCAmelCase_ , output_attentions=UpperCAmelCase_ ) self.assertTrue(outputs.attentions is not None ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss __a = self.all_model_classes[1] __a , __a , __a , __a , __a = self.model_tester.prepare_config_and_inputs() __a = model_class(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.train() __a = model(UpperCAmelCase_ , mask_labels=UpperCAmelCase_ , class_labels=UpperCAmelCase_ ).loss loss.backward() def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' __a = self.all_model_classes[1] __a , __a , __a , __a , __a = self.model_tester.prepare_config_and_inputs() __a = True __a = True __a = model_class(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.train() __a = model(UpperCAmelCase_ , mask_labels=UpperCAmelCase_ , class_labels=UpperCAmelCase_ ) __a = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() __a = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't __a = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() __a = outputs.attentions[0] attentions.retain_grad() 
outputs.loss.backward(retain_graph=UpperCAmelCase_ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) A : Tuple = 1E-4 def __lowerCAmelCase ( ) -> Optional[Any]: __a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class __A( unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' return ( MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' ) if is_vision_available() else None ) def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' __a = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(UpperCAmelCase_ ) __a = self.default_image_processor __a = prepare_img() __a = image_processor(UpperCAmelCase_ , return_tensors='''pt''' ).to(UpperCAmelCase_ ) __a = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(UpperCAmelCase_ , (1, 3, 800, 1_088) ) with torch.no_grad(): __a = model(**UpperCAmelCase_ ) __a = torch.tensor( [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(UpperCAmelCase_ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) ) __a = torch.tensor( [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(UpperCAmelCase_ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) ) __a = torch.tensor( [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(UpperCAmelCase_ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) ) def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' __a = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(UpperCAmelCase_ ) .eval() ) __a = self.default_image_processor __a = prepare_img() __a = image_processor(UpperCAmelCase_ , return_tensors='''pt''' ).to(UpperCAmelCase_ ) __a = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(UpperCAmelCase_ , (1, 3, 800, 1_088) ) with torch.no_grad(): __a = model(**UpperCAmelCase_ ) # masks_queries_logits __a = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) __a = [ [-1.373_7124, -1.772_4937, -1.936_4233], [-1.597_7281, -1.986_7939, -2.152_3695], [-1.579_5398, -1.926_9832, -2.09_3942], ] __a = torch.tensor(UpperCAmelCase_ ).to(UpperCAmelCase_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) ) # class_queries_logits __a = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) __a = torch.tensor( [ [1.6_512E00, -5.2_572E00, -3.3_519E00], [3.6_169E-02, -5.9_025E00, -2.9_313E00], [1.0_766E-04, -7.7_630E00, -5.1_263E00], ] ).to(UpperCAmelCase_ ) 
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' ) .to(UpperCAmelCase_ ) .eval() ) __a = self.default_image_processor __a = prepare_img() __a = image_processor(UpperCAmelCase_ , return_tensors='''pt''' ).to(UpperCAmelCase_ ) __a = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(UpperCAmelCase_ , (1, 3, 800, 1_088) ) with torch.no_grad(): __a = model(**UpperCAmelCase_ ) # masks_queries_logits __a = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) __a = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]] __a = torch.tensor(UpperCAmelCase_ ).to(UpperCAmelCase_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) ) # class_queries_logits __a = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) __a = torch.tensor( [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(UpperCAmelCase_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) ) def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' __a = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(UpperCAmelCase_ ) .eval() ) __a = self.default_image_processor __a = image_processor( [np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , ) __a = inputs['''pixel_values'''].to(UpperCAmelCase_ ) __a = [el.to(UpperCAmelCase_ ) for el in inputs['''mask_labels''']] __a = [el.to(UpperCAmelCase_ ) for el in inputs['''class_labels''']] with torch.no_grad(): __a = model(**UpperCAmelCase_ ) self.assertTrue(outputs.loss is not None )
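# A minimal, self-contained sketch of the retain_grad() pattern the gradient
# tests above rely on: PyTorch frees gradients of non-leaf tensors by default,
# so intermediate activations must opt in before backward(). The tiny model
# below is a hypothetical stand-in, not part of MaskFormer.
import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
x = torch.randn(3, 4)

hidden = model[0](x)   # intermediate (non-leaf) activation
hidden.retain_grad()   # keep its gradient after backward()
out = model[2](model[1](hidden))
out.sum().backward(retain_graph=True)

assert hidden.grad is not None  # mirrors the assertIsNotNone(...grad) checks above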
6
"""simple docstring""" import datasets __SCREAMING_SNAKE_CASE : Tuple = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n' __SCREAMING_SNAKE_CASE : Dict = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n' __SCREAMING_SNAKE_CASE : List[str] = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n' def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class __A (datasets.Metric): '''simple docstring''' def lowerCAmelCase ( self : str ) ->Any: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ), """references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ), } ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , ) def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any ) ->int: """simple docstring""" return {"accuracy": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_ )}
347
0
"""simple docstring""" import numpy as np import torch from torch.utils.data import Dataset from utils import logger class _UpperCAmelCase ( snake_case__ ): '''simple docstring''' def __init__(self , a_ , a_ ): '''simple docstring''' __snake_case : Any = params __snake_case : Dict = np.array(UpperCAmelCase_ ) __snake_case : List[Any] = np.array([len(UpperCAmelCase_ ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__(self , a_ ): '''simple docstring''' return (self.token_ids[index], self.lengths[index]) def __len__(self ): '''simple docstring''' return len(self.lengths ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.params.max_model_input_size __snake_case : List[Any] = self.lengths > max_len logger.info(f"""Splitting {sum(UpperCAmelCase_ )} too long sequences.""" ) def divide_chunks(a_ , a_ ): return [l[i : i + n] for i in range(0 , len(UpperCAmelCase_ ) , UpperCAmelCase_ )] __snake_case : Dict = [] __snake_case : List[Any] = [] if self.params.mlm: __snake_case , __snake_case : str = self.params.special_tok_ids['''cls_token'''], self.params.special_tok_ids['''sep_token'''] else: __snake_case , __snake_case : int = self.params.special_tok_ids['''bos_token'''], self.params.special_tok_ids['''eos_token'''] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: __snake_case : Union[str, Any] = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: __snake_case : List[Any] = np.insert(UpperCAmelCase_ , 0 , UpperCAmelCase_ ) if sub_s[-1] != sep_id: __snake_case : List[str] = np.insert(UpperCAmelCase_ , len(UpperCAmelCase_ ) , UpperCAmelCase_ ) assert len(UpperCAmelCase_ ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(UpperCAmelCase_ ) new_tok_ids.extend(UpperCAmelCase_ ) new_lengths.extend([len(UpperCAmelCase_ ) for l in sub_seqs] ) __snake_case : Union[str, Any] = np.array(UpperCAmelCase_ ) __snake_case : Union[str, Any] = np.array(UpperCAmelCase_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = len(self ) __snake_case : List[Any] = self.lengths > 11 __snake_case : Optional[int] = self.token_ids[indices] __snake_case : str = self.lengths[indices] __snake_case : Union[str, Any] = len(self ) logger.info(f"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' if "unk_token" not in self.params.special_tok_ids: return else: __snake_case : List[str] = self.params.special_tok_ids['''unk_token'''] __snake_case : str = len(self ) __snake_case : Any = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) __snake_case : List[str] = (unk_occs / self.lengths) < 0.5 __snake_case : Optional[Any] = self.token_ids[indices] __snake_case : int = self.lengths[indices] __snake_case : Optional[int] = len(self ) logger.info(f"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' if not self.params.is_master: return logger.info(f"""{len(self )} 
sequences""" ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : List[str] = [t[0] for t in batch] __snake_case : Dict = [t[1] for t in batch] assert len(UpperCAmelCase_ ) == len(UpperCAmelCase_ ) # Max for paddings __snake_case : List[str] = max(UpperCAmelCase_ ) # Pad token ids if self.params.mlm: __snake_case : Union[str, Any] = self.params.special_tok_ids['''pad_token'''] else: __snake_case : Dict = self.params.special_tok_ids['''unk_token'''] __snake_case : int = [list(t.astype(UpperCAmelCase_ ) ) + [pad_idx] * (max_seq_len_ - len(UpperCAmelCase_ )) for t in token_ids] assert len(tk_ ) == len(UpperCAmelCase_ ) assert all(len(UpperCAmelCase_ ) == max_seq_len_ for t in tk_ ) __snake_case : Tuple = torch.tensor(tk_ ) # (bs, max_seq_len_) __snake_case : int = torch.tensor(UpperCAmelCase_ ) # (bs) return tk_t, lg_t
102
"""simple docstring""" from ..utils import DummyObject, requires_backends class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[str] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[str] = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[Any] = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : int ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Union[str, Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Tuple = ["""sentencepiece"""] def 
__init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[Any] ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[str] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Tuple = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Optional[Any] ) ->str: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[Any] ) ->int: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[str] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any] ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : Any , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[Any] ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[int] ) ->Optional[Any]: """simple docstring""" 
requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : str ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[str] ) ->Optional[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ) ->Any: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[str] = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Union[str, Any] ) ->Optional[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->str: """simple docstring""" requires_backends(self , ["""sentencepiece"""] )
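# A simplified sketch (not the actual transformers internals) of the pattern
# behind the dummy classes above: a requires_backends-style check raises an
# ImportError the moment a placeholder is instantiated without the optional
# backend. All names below are illustrative.
def _requires_backends(obj, backends):
    # stand-in check; here we assume sentencepiece is not installed
    missing = [b for b in backends if b == "sentencepiece"]
    if missing:
        raise ImportError(f"{type(obj).__name__} requires the {missing[0]} library.")

class _DummySentencePieceTokenizer:
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        _requires_backends(self, self._backends)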
347
0
'''simple docstring'''


class Node:
    def __init__(self, data, previous=None, next_node=None) -> None:
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head) -> None:
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self) -> None:
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self) -> str:
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value) -> bool:
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node, node_to_insert) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node(self, node, node_to_insert) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position(self, position, value) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def remove(self, item) -> None:
        if (node := self.get_node(item)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node) -> None:
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty(self) -> bool:
        return self.head is None


def create_linked_list() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
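# A brief usage sketch for the list above, assuming the method names restored
# from the class's own internal references:
def _demo_linked_list() -> None:
    lst = LinkedList()
    for value in (1, 2, 3):
        lst.insert(value)        # appends via set_tail
    assert 2 in lst              # __contains__ walks the nodes
    assert str(lst) == "1 2 3"
    lst.remove(2)                # unlinks the node holding 2
    assert [v for v in lst] == [1, 3]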
265
"""simple docstring""" import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor __SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) class __A (snake_case__): '''simple docstring''' def __init__( self : str , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->None: """simple docstring""" warnings.warn( """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use MobileViTImageProcessor instead.""" , UpperCAmelCase_ , ) super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
347
0
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } __UpperCAmelCase = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def UpperCamelCase ( snake_case__ : Dict ) -> List[str]: UpperCamelCase : Any = {} with open(_SCREAMING_SNAKE_CASE , 'r' ) as file: for line_number, line in enumerate(_SCREAMING_SNAKE_CASE ): UpperCamelCase : int = line.strip() if line: UpperCamelCase : Any = line.split() UpperCamelCase : Tuple = line_number UpperCamelCase : int = words[0] UpperCamelCase : Dict = value return result def UpperCamelCase ( snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Any ) -> Tuple: for attribute in key.split('.' ): UpperCamelCase : Optional[int] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_SCREAMING_SNAKE_CASE ): UpperCamelCase : List[Any] = PARAM_MAPPING[full_name.split('.' )[-1]] UpperCamelCase : str = 'param' if weight_type is not None and weight_type != "param": UpperCamelCase : Dict = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape elif weight_type is not None and weight_type == "param": UpperCamelCase : List[str] = hf_pointer for attribute in hf_param_name.split('.' ): UpperCamelCase : List[str] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = shape_pointer.shape # let's reduce dimension UpperCamelCase : Tuple = value[0] else: UpperCamelCase : List[Any] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": UpperCamelCase : Dict = value elif weight_type == "weight_g": UpperCamelCase : List[Any] = value elif weight_type == "weight_v": UpperCamelCase : List[Any] = value elif weight_type == "bias": UpperCamelCase : List[str] = value elif weight_type == "param": for attribute in hf_param_name.split('.' 
): UpperCamelCase : Dict = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = value else: UpperCamelCase : Tuple = value logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def UpperCamelCase ( snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Dict ) -> Tuple: UpperCamelCase : Dict = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_SCREAMING_SNAKE_CASE ): UpperCamelCase : List[str] = PARAM_MAPPING[full_name.split('.' )[-1]] UpperCamelCase : Optional[Any] = 'param' if weight_type is not None and weight_type != "param": UpperCamelCase : List[str] = '.'.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": UpperCamelCase : List[str] = '.'.join([key, hf_param_name] ) else: UpperCamelCase : str = key UpperCamelCase : Tuple = value if 'lm_head' in full_key else value[0] __UpperCAmelCase = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : str=None , snake_case__ : List[str]=None ) -> List[str]: UpperCamelCase : List[str] = False for key, mapped_key in MAPPING.items(): UpperCamelCase : Optional[int] = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: UpperCamelCase : int = True if "*" in mapped_key: UpperCamelCase : str = name.split(_SCREAMING_SNAKE_CASE )[0].split('.' )[-2] UpperCamelCase : List[Any] = mapped_key.replace('*' , _SCREAMING_SNAKE_CASE ) if "weight_g" in name: UpperCamelCase : List[Any] = 'weight_g' elif "weight_v" in name: UpperCamelCase : Union[str, Any] = 'weight_v' elif "bias" in name: UpperCamelCase : List[str] = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCamelCase : Optional[int] = 'weight' else: UpperCamelCase : List[str] = None if hf_dict is not None: rename_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return is_used return is_used def UpperCamelCase ( snake_case__ : Any , snake_case__ : Dict , snake_case__ : Tuple ) -> Any: UpperCamelCase : List[str] = [] UpperCamelCase : List[Any] = fairseq_model.state_dict() UpperCamelCase : str = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): UpperCamelCase : Tuple = False if "conv_layers" in name: load_conv_layer( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , ) UpperCamelCase : Union[str, Any] = True else: UpperCamelCase : Optional[Any] = load_wavaveca_layer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not is_used: unused_weights.append(_SCREAMING_SNAKE_CASE ) logger.warning(F"""Unused weights: {unused_weights}""" ) def UpperCamelCase ( snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] ) -> Union[str, Any]: UpperCamelCase : Tuple = full_name.split('conv_layers.' )[-1] UpperCamelCase : Optional[Any] = name.split('.' 
) UpperCamelCase : str = int(items[0] ) UpperCamelCase : Union[str, Any] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) UpperCamelCase : int = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) UpperCamelCase : int = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) UpperCamelCase : Tuple = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) UpperCamelCase : str = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_SCREAMING_SNAKE_CASE ) @torch.no_grad() def UpperCamelCase ( snake_case__ : int , snake_case__ : str , snake_case__ : str=None , snake_case__ : int=None , snake_case__ : List[str]=True , snake_case__ : int=False ) -> int: if config_path is not None: UpperCamelCase : List[Any] = WavaVecaConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) else: UpperCamelCase : Union[str, Any] = WavaVecaConfig() if is_seq_class: UpperCamelCase : List[Any] = read_txt_into_dict(_SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = idalabel UpperCamelCase : Any = WavaVecaForSequenceClassification(_SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE ) elif is_finetuned: if dict_path: UpperCamelCase : Tuple = Dictionary.load(_SCREAMING_SNAKE_CASE ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq UpperCamelCase : Dict = target_dict.pad_index UpperCamelCase : int = target_dict.bos_index UpperCamelCase : Optional[Any] = target_dict.eos_index UpperCamelCase : List[Any] = len(target_dict.symbols ) UpperCamelCase : Optional[int] = os.path.join(_SCREAMING_SNAKE_CASE , 'vocab.json' ) if not os.path.isdir(_SCREAMING_SNAKE_CASE ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(_SCREAMING_SNAKE_CASE ) ) return os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = target_dict.indices # fairseq has the <pad> and <s> switched UpperCamelCase : List[Any] = 0 UpperCamelCase : Optional[int] = 1 with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as vocab_handle: 
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = WavaVecaCTCTokenizer( _SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=_SCREAMING_SNAKE_CASE , ) UpperCamelCase : List[str] = True if config.feat_extract_norm == 'layer' else False UpperCamelCase : Dict = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) UpperCamelCase : Optional[int] = WavaVecaProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE ) processor.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = WavaVecaForCTC(_SCREAMING_SNAKE_CASE ) else: UpperCamelCase : Optional[Any] = WavaVecaForPreTraining(_SCREAMING_SNAKE_CASE ) if is_finetuned or is_seq_class: UpperCamelCase , UpperCamelCase , UpperCamelCase : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: UpperCamelCase : Any = argparse.Namespace(task='audio_pretraining' ) UpperCamelCase : str = fairseq.tasks.setup_task(_SCREAMING_SNAKE_CASE ) UpperCamelCase , UpperCamelCase , UpperCamelCase : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = model[0].eval() recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , not is_finetuned ) hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) parser.add_argument( '''--is_seq_class''', action='''store_true''', help='''Whether the model to convert is a fine-tuned sequence classification model or not''', ) __UpperCAmelCase = parser.parse_args() __UpperCAmelCase = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
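# Example invocation of the conversion script above (hypothetical file name
# and local paths; the flags are the ones defined in the argparse block):
#
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path /path/to/wav2vec_small.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-converted
#
# Pass --not_finetuned for a pretraining-only checkpoint, or --is_seq_class
# (with --dict_path pointing at the id-to-label txt file) for a sequence
# classification head, as handled in convert_wavaveca_checkpoint above.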
119
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Any: snake_case_ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """vit.embeddings.cls_token"""), ("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" snake_case_ = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Tuple: for i in range(config.num_hidden_layers ): if base_model: snake_case_ = """""" else: snake_case_ = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict snake_case_ = in_proj_weight[ : config.hidden_size, : ] snake_case_ = in_proj_bias[: config.hidden_size] snake_case_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] snake_case_ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] 
snake_case_ = in_proj_weight[ -config.hidden_size :, : ] snake_case_ = in_proj_bias[-config.hidden_size :] def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: snake_case_ = dct.pop(_SCREAMING_SNAKE_CASE ) snake_case_ = val def _a ( ) -> Any: snake_case_ = """http://images.cocodataset.org/val2017/000000039769.jpg""" snake_case_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = ViTConfig() snake_case_ = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": snake_case_ = True snake_case_ = int(vit_name[-12:-10] ) snake_case_ = int(vit_name[-9:-6] ) else: snake_case_ = 1_000 snake_case_ = """huggingface/label-files""" snake_case_ = """imagenet-1k-id2label.json""" snake_case_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) snake_case_ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} snake_case_ = int(vit_name[-6:-4] ) snake_case_ = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith("""tiny""" ): snake_case_ = 192 snake_case_ = 768 snake_case_ = 12 snake_case_ = 3 elif vit_name[9:].startswith("""small""" ): snake_case_ = 384 snake_case_ = 1_536 snake_case_ = 12 snake_case_ = 6 else: pass else: if vit_name[4:].startswith("""small""" ): snake_case_ = 768 snake_case_ = 2_304 snake_case_ = 8 snake_case_ = 8 elif vit_name[4:].startswith("""base""" ): pass elif vit_name[4:].startswith("""large""" ): snake_case_ = 1_024 snake_case_ = 4_096 snake_case_ = 24 snake_case_ = 16 elif vit_name[4:].startswith("""huge""" ): snake_case_ = 1_280 snake_case_ = 5_120 snake_case_ = 32 snake_case_ = 16 # load original model from timm snake_case_ = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ) timm_model.eval() # load state_dict of original model, remove and rename some keys snake_case_ = timm_model.state_dict() if base_model: remove_classification_head_(_SCREAMING_SNAKE_CASE ) snake_case_ = create_rename_keys(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) read_in_q_k_v(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # load HuggingFace model if vit_name[-5:] == "in21k": snake_case_ = ViTModel(_SCREAMING_SNAKE_CASE ).eval() else: snake_case_ = ViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval() model.load_state_dict(_SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: snake_case_ = DeiTImageProcessor(size=config.image_size ) else: snake_case_ = ViTImageProcessor(size=config.image_size ) snake_case_ = image_processor(images=prepare_img() , return_tensors="""pt""" ) snake_case_ = encoding["""pixel_values"""] snake_case_ = model(_SCREAMING_SNAKE_CASE ) if base_model: snake_case_ = timm_model.forward_features(_SCREAMING_SNAKE_CASE ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(_SCREAMING_SNAKE_CASE , 
outputs.pooler_output , atol=1E-3 ) else: snake_case_ = timm_model(_SCREAMING_SNAKE_CASE ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.logits , atol=1E-3 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--vit_name', default='vit_base_patch16_224', type=str, help='Name of the ViT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) __SCREAMING_SNAKE_CASE : int = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
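# A small sketch of the fused-projection split performed by read_in_q_k_v
# above: timm stores a single qkv matrix of shape (3 * hidden, hidden), and the
# conversion slices it into separate query/key/value weights. Dimensions here
# are toy values.
import torch

hidden = 4
in_proj_weight = torch.randn(3 * hidden, hidden)
in_proj_bias = torch.randn(3 * hidden)

q_w = in_proj_weight[:hidden, :]
k_w = in_proj_weight[hidden : 2 * hidden, :]
v_w = in_proj_weight[-hidden:, :]
q_b, k_b, v_b = in_proj_bias[:hidden], in_proj_bias[hidden : 2 * hidden], in_proj_bias[-hidden:]

# the three slices reassemble into the original fused matrix
assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)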
347
0
'''simple docstring'''
from copy import deepcopy


# A Fenwick tree (binary indexed tree): point updates and prefix-sum queries.
class UpperCAmelCase__:
    def __init__(self, arr=None, size=None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index) -> int:
        return index - (index & (-index))

    def add(self, index, value) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index, value) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left, right) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
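# A brief usage sketch for the Fenwick tree above (method names as restored
# from the class's own internal calls); prefix(i) sums the first i elements.
def _demo_fenwick() -> None:
    tree = UpperCAmelCase__(arr=[1, 2, 3, 4, 5])
    assert tree.prefix(3) == 1 + 2 + 3
    assert tree.query(1, 4) == 2 + 3 + 4
    tree.add(2, 10)                          # arr[2] += 10
    assert tree.get(2) == 13
    assert tree.get_array() == [1, 2, 13, 4, 5]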
349
"""simple docstring""" import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __A (unittest.TestCase): '''simple docstring''' def __init__( self : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple=13 , UpperCAmelCase_ : List[Any]=7 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=99 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : Tuple=5 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Any=37 , UpperCAmelCase_ : int="gelu" , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=512 , UpperCAmelCase_ : Optional[Any]=16 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : str=4 , ) ->Tuple: """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_attention_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = num_choices def lowerCAmelCase ( self : Optional[int] ) ->str: """simple docstring""" snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_attention_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = None if self.use_token_type_ids: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCAmelCase ( self : List[str] ) ->Dict: """simple docstring""" snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class __A (snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Union[str, Any] = True __lowercase: int = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, 
FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def lowerCAmelCase ( self : Optional[Any] ) ->Tuple: """simple docstring""" snake_case_ = FlaxRoFormerModelTester(self ) @slow def lowerCAmelCase ( self : Any ) ->List[str]: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=UpperCAmelCase_ ) snake_case_ = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCAmelCase_ ) @require_flax class __A (unittest.TestCase): '''simple docstring''' @slow def lowerCAmelCase ( self : str ) ->Dict: """simple docstring""" snake_case_ = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) snake_case_ = jnp.array([[0, 1, 2, 3, 4, 5]] ) snake_case_ = model(UpperCAmelCase_ )[0] snake_case_ = 50_000 snake_case_ = (1, 6, vocab_size) self.assertEqual(output.shape , UpperCAmelCase_ ) snake_case_ = jnp.array( [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
347
0
'''simple docstring''' import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def UpperCAmelCase__ ( ) -> Dict: A_ = """https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg""" A_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE, stream=_SCREAMING_SNAKE_CASE ).raw ).convert("""RGB""" ) return image def UpperCAmelCase__ ( UpperCAmelCase__ ) -> List[Any]: A_ = [] # fmt: off # vision encoder rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") ) rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") ) rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") ) rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') ) # QFormer rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.embeddings.layernorm.weight""") ) rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.embeddings.layernorm.bias""") ) # fmt: on 
return rename_keys def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> str: A_ = dct.pop(_SCREAMING_SNAKE_CASE ) A_ = val def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Optional[int]: for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases A_ = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' ) A_ = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' ) # next, set bias in the state dict A_ = torch.cat((q_bias, torch.zeros_like(_SCREAMING_SNAKE_CASE, requires_grad=_SCREAMING_SNAKE_CASE ), v_bias) ) A_ = qkv_bias def UpperCAmelCase__ ( UpperCAmelCase__ ) -> List[str]: A_ = 3_64 if """coco""" in model_name else 2_24 A_ = InstructBlipVisionConfig(image_size=_SCREAMING_SNAKE_CASE ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: A_ = TaConfig.from_pretrained("""google/flan-t5-xl""", dense_act_fn="""gelu""", bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: A_ = TaConfig.from_pretrained("""google/flan-t5-xxl""", dense_act_fn="""gelu""", bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: A_ = LlamaConfig.from_pretrained("""decapoda-research/llama-7b-hf""", vocab_size=3_20_01 ).to_dict() elif "vicuna-13b" in model_name: A_ = LlamaConfig.from_pretrained("""decapoda-research/llama-13b-hf""", vocab_size=3_20_01 ).to_dict() else: raise ValueError("""Model name not supported""" ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 A_ = InstructBlipQFormerConfig(vocab_size=3_05_23 ).to_dict() A_ = InstructBlipConfig(vision_config=_SCREAMING_SNAKE_CASE, text_config=_SCREAMING_SNAKE_CASE, qformer_config=_SCREAMING_SNAKE_CASE ) return config, image_size @torch.no_grad() def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__=None, UpperCAmelCase__=False ) -> Tuple: A_ = AutoTokenizer.from_pretrained("""bert-base-uncased""", truncation_side="""left""" ) qformer_tokenizer.add_special_tokens({"""bos_token""": """[DEC]"""} ) if "t5" in model_name: A_ = TaTokenizerFast.from_pretrained("""google/flan-t5-xl""", truncation_side="""left""" ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) A_ = LlamaTokenizerFast.from_pretrained( """huggyllama/llama-7b""", truncation_side="""left""", bos_token="""</s>""", unk_token="""</s>""" ) tokenizer.add_special_tokens({"""pad_token""": """[PAD]"""} ) A_ , A_ = get_blipa_config(_SCREAMING_SNAKE_CASE ) A_ = InstructBlipForConditionalGeneration(_SCREAMING_SNAKE_CASE ).eval() A_ = { """instructblip-vicuna-7b""": ("""blip2_vicuna_instruct""", """vicuna7b"""), """instructblip-vicuna-13b""": ("""blip2_vicuna_instruct""", """vicuna13b"""), """instructblip-flan-t5-xl""": ("""blip2_t5_instruct""", """flant5xl"""), """instructblip-flan-t5-xxl""": ("""blip2_t5_instruct""", """flant5xxl"""), } A_ , A_ = model_name_to_original[model_name] # load original model print("""Loading original model...""" ) A_ = """cuda:1""" if torch.cuda.is_available() else """cpu""" A_ = """cuda:2""" if torch.cuda.is_available() else """cpu""" A_ , A_ 
, A_ = load_model_and_preprocess( name=_SCREAMING_SNAKE_CASE, model_type=_SCREAMING_SNAKE_CASE, is_eval=_SCREAMING_SNAKE_CASE, device=_SCREAMING_SNAKE_CASE ) original_model.eval() print("""Done!""" ) # update state dict keys A_ = original_model.state_dict() A_ = create_rename_keys(_SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): A_ = state_dict.pop(_SCREAMING_SNAKE_CASE ) if key.startswith("""Qformer.bert""" ): A_ = key.replace("""Qformer.bert""", """qformer""" ) if "attention.self" in key: A_ = key.replace("""self""", """attention""" ) if "llm_proj" in key: A_ = key.replace("""llm_proj""", """language_projection""" ) if "t5_proj" in key: A_ = key.replace("""t5_proj""", """language_projection""" ) if key.startswith("""llm_model""" ): A_ = key.replace("""llm_model""", """language_model""" ) if key.startswith("""t5""" ): A_ = key.replace("""t5""", """language""" ) A_ = val # read in qv biases read_in_q_v_bias(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(_SCREAMING_SNAKE_CASE, strict=_SCREAMING_SNAKE_CASE ) A_ = load_demo_image() A_ = """What is unusual about this image?""" # create processor A_ = BlipImageProcessor( size={"""height""": image_size, """width""": image_size}, image_mean=_SCREAMING_SNAKE_CASE, image_std=_SCREAMING_SNAKE_CASE ) A_ = InstructBlipProcessor( image_processor=_SCREAMING_SNAKE_CASE, tokenizer=_SCREAMING_SNAKE_CASE, qformer_tokenizer=_SCREAMING_SNAKE_CASE, ) A_ = processor(images=_SCREAMING_SNAKE_CASE, text=_SCREAMING_SNAKE_CASE, return_tensors="""pt""" ).to(_SCREAMING_SNAKE_CASE ) # make sure processor creates exact same pixel values A_ = vis_processors["""eval"""](_SCREAMING_SNAKE_CASE ).unsqueeze(0 ).to(_SCREAMING_SNAKE_CASE ) A_ = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ), _SCREAMING_SNAKE_CASE ) original_model.to(_SCREAMING_SNAKE_CASE ) hf_model.to(_SCREAMING_SNAKE_CASE ) with torch.no_grad(): if "vicuna" in model_name: A_ = original_model({"""image""": original_pixel_values, """text_input""": [prompt]} ).logits A_ = hf_model(**_SCREAMING_SNAKE_CASE ).logits else: A_ = original_model( {"""image""": original_pixel_values, """text_input""": [prompt], """text_output""": ["""\n"""]} ).logits A_ = tokenizer("""\n""", return_tensors="""pt""" ).input_ids.to(_SCREAMING_SNAKE_CASE ) A_ = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -1_00 ) A_ = hf_model(**_SCREAMING_SNAKE_CASE, labels=_SCREAMING_SNAKE_CASE ).logits print("""First values of original logits:""", original_logits[0, :3, :3] ) print("""First values of HF logits:""", logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape A_ = 1e-4 if """vicuna""" in model_name else 1e-5 assert torch.allclose(original_logits.to(logits.device ), _SCREAMING_SNAKE_CASE, atol=_SCREAMING_SNAKE_CASE ) print("""Looks ok!""" ) print("""Generating with original model...""" ) A_ = original_model.generate({"""image""": original_pixel_values, """prompt""": prompt}, num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print("""Generating with HF model...""" ) A_ = hf_model.generate( **_SCREAMING_SNAKE_CASE, do_sample=_SCREAMING_SNAKE_CASE, num_beams=5, max_length=2_56, min_length=1, top_p=0.9, repetition_penalty=1.5, length_penalty=1.0, temperature=1, ) if 
"vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? A_ = 2 print("""Original generation:""", _SCREAMING_SNAKE_CASE ) A_ = processor.batch_decode(_SCREAMING_SNAKE_CASE, skip_special_tokens=_SCREAMING_SNAKE_CASE ) A_ = [text.strip() for text in output_text] print("""HF generation:""", _SCREAMING_SNAKE_CASE ) if pytorch_dump_folder_path is not None: processor.save_pretrained(_SCREAMING_SNAKE_CASE ) hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: processor.push_to_hub(F'''Salesforce/{model_name}''' ) hf_model.push_to_hub(F'''Salesforce/{model_name}''' ) if __name__ == "__main__": __lowerCamelCase = argparse.ArgumentParser() __lowerCamelCase = [ 'instructblip-vicuna-7b', 'instructblip-vicuna-13b', 'instructblip-flan-t5-xl', 'instructblip-flan-t5-xxl', ] parser.add_argument( '''--model_name''', default='''instructblip-flan-t5-xl''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub after converting''', ) __lowerCamelCase = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
162
"""simple docstring""" from __future__ import annotations def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool: snake_case_ = get_failure_array(_SCREAMING_SNAKE_CASE ) # 2) Step through text searching for pattern snake_case_ , snake_case_ = 0, 0 # index into text, pattern while i < len(_SCREAMING_SNAKE_CASE ): if pattern[j] == text[i]: if j == (len(_SCREAMING_SNAKE_CASE ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: snake_case_ = failure[j - 1] continue i += 1 return False def _a ( _SCREAMING_SNAKE_CASE ) -> list[int]: snake_case_ = [0] snake_case_ = 0 snake_case_ = 1 while j < len(_SCREAMING_SNAKE_CASE ): if pattern[i] == pattern[j]: i += 1 elif i > 0: snake_case_ = failure[i - 1] continue j += 1 failure.append(_SCREAMING_SNAKE_CASE ) return failure if __name__ == "__main__": # Test 1) __SCREAMING_SNAKE_CASE : Optional[int] = 'abc1abc12' __SCREAMING_SNAKE_CASE : Optional[int] = 'alskfjaldsabc1abc1abc12k23adsfabcabc' __SCREAMING_SNAKE_CASE : List[str] = 'alskfjaldsk23adsfabcabc' assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) __SCREAMING_SNAKE_CASE : int = 'ABABX' __SCREAMING_SNAKE_CASE : Optional[Any] = 'ABABZABABYABABX' assert kmp(pattern, text) # Test 3) __SCREAMING_SNAKE_CASE : Any = 'AAAB' __SCREAMING_SNAKE_CASE : List[Any] = 'ABAAAAAB' assert kmp(pattern, text) # Test 4) __SCREAMING_SNAKE_CASE : Optional[int] = 'abcdabcy' __SCREAMING_SNAKE_CASE : str = 'abcxabcdabxabcdabcdabcy' assert kmp(pattern, text) # Test 5) __SCREAMING_SNAKE_CASE : Any = 'aabaabaaa' assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
347
0
import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging A : Union[str, Any] = logging.get_logger(__name__) A : Tuple = { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json', } class A ( snake_case__ ): '''simple docstring''' A__ = """mvp""" A__ = ["""past_key_values"""] A__ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__(self : Optional[Any] , _UpperCAmelCase : Optional[int]=5_0267 , _UpperCAmelCase : Union[str, Any]=1024 , _UpperCAmelCase : Tuple=12 , _UpperCAmelCase : str=4096 , _UpperCAmelCase : Optional[Any]=16 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : Dict=4096 , _UpperCAmelCase : Any=16 , _UpperCAmelCase : int=0.0 , _UpperCAmelCase : int=0.0 , _UpperCAmelCase : Optional[int]="gelu" , _UpperCAmelCase : Optional[int]=1024 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : List[str]=0.0 , _UpperCAmelCase : Optional[int]=0.0 , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Tuple=0.0 , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : List[Any]=1 , _UpperCAmelCase : Optional[Any]=0 , _UpperCAmelCase : str=2 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Dict=2 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : str=False , _UpperCAmelCase : Union[str, Any]=100 , _UpperCAmelCase : Any=800 , **_UpperCAmelCase : Optional[Any] , ) -> str: """simple docstring""" lowercase__ = vocab_size lowercase__ = max_position_embeddings lowercase__ = d_model lowercase__ = encoder_ffn_dim lowercase__ = encoder_layers lowercase__ = encoder_attention_heads lowercase__ = decoder_ffn_dim lowercase__ = decoder_layers lowercase__ = decoder_attention_heads lowercase__ = dropout lowercase__ = attention_dropout lowercase__ = activation_dropout lowercase__ = activation_function lowercase__ = init_std lowercase__ = encoder_layerdrop lowercase__ = decoder_layerdrop lowercase__ = classifier_dropout lowercase__ = use_cache lowercase__ = encoder_layers lowercase__ = scale_embedding # scale factor will be sqrt(d_model) if True lowercase__ = use_prompt lowercase__ = prompt_length lowercase__ = prompt_mid_dim super().__init__( pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , decoder_start_token_id=UpperCAmelCase_ , forced_eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ , ) if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , UpperCAmelCase_ ): lowercase__ = self.bos_token_id warnings.warn( f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. ''' """The config can simply be saved and uploaded again to be fixed.""" )
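The attribute_map on the config above lets callers read num_attention_heads or hidden_size while the stored fields are encoder_attention_heads and d_model. A simplified sketch of that aliasing idea (the real PretrainedConfig wiring is more involved; AliasedConfig here is hypothetical):

class AliasedConfig:
    attribute_map = {"hidden_size": "d_model"}

    def __init__(self, d_model=1024):
        self.d_model = d_model

    def __getattr__(self, name):  # only called when normal lookup fails
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)

assert AliasedConfig().hidden_size == 1024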
305
"""simple docstring""" from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class __A (snake_case__): '''simple docstring''' @slow @require_torch def lowerCAmelCase ( self : Union[str, Any] ) ->Dict: """simple docstring""" snake_case_ = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" ) snake_case_ = BertTokenizer.from_pretrained("""bert-base-uncased""" ) snake_case_ = bertabert.config.encoder.vocab_size snake_case_ = tokenizer.sep_token_id snake_case_ = tokenizer.cls_token_id snake_case_ = 128 snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" ) snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" ) snake_case_ = train_dataset.select(range(32 ) ) snake_case_ = val_dataset.select(range(16 ) ) snake_case_ = 4 def _map_to_encoder_decoder_inputs(UpperCAmelCase_ : int ): # Tokenizer will automatically set [BOS] <text> [EOS] snake_case_ = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=512 ) snake_case_ = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=128 ) snake_case_ = inputs.input_ids snake_case_ = inputs.attention_mask snake_case_ = outputs.input_ids snake_case_ = outputs.input_ids.copy() snake_case_ = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""] ] snake_case_ = outputs.attention_mask assert all(len(UpperCAmelCase_ ) == 512 for x in inputs.input_ids ) assert all(len(UpperCAmelCase_ ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(UpperCAmelCase_ : Union[str, Any] ): snake_case_ = pred.label_ids snake_case_ = pred.predictions # all unnecessary tokens are removed snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCAmelCase_ ) )] ) / len(UpperCAmelCase_ ) return {"accuracy": accuracy} # map train dataset snake_case_ = train_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , ) train_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) # same for validation dataset snake_case_ = val_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , ) val_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) snake_case_ = self.get_auto_remove_tmp_dir() snake_case_ = SeqaSeqTrainingArguments( output_dir=UpperCAmelCase_ , per_device_train_batch_size=UpperCAmelCase_ , per_device_eval_batch_size=UpperCAmelCase_ , predict_with_generate=UpperCAmelCase_ , evaluation_strategy="""steps""" , do_train=UpperCAmelCase_ , do_eval=UpperCAmelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer snake_case_ = SeqaSeqTrainer( 
model=UpperCAmelCase_ , args=UpperCAmelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCAmelCase_ , eval_dataset=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , ) # start training trainer.train()
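The mapping function in the test above rewrites label positions holding the pad token to -100, the index PyTorch's cross-entropy loss ignores. A minimal sketch with hypothetical token ids:

pad_token_id = 0  # stand-in; the real code uses tokenizer.pad_token_id
labels = [[5, 7, 9, pad_token_id, pad_token_id]]
masked = [[-100 if token == pad_token_id else token for token in row] for row in labels]
assert masked == [[5, 7, 9, -100, -100]]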
347
0
import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin _A = get_tests_dir('fixtures/test_sentencepiece_bpe.model') class UpperCAmelCase__ ( snake_case__ , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : Dict = BartphoTokenizer UpperCAmelCase__ : Dict = False UpperCAmelCase__ : Optional[int] = True def _a ( self ) -> Any: super().setUp() __UpperCamelCase =['▁This', '▁is', '▁a', '▁t', 'est'] __UpperCamelCase =dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) ) __UpperCamelCase ={'unk_token': '<unk>'} __UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['monolingual_vocab_file'] ) with open(self.monolingual_vocab_file , 'w' , encoding='utf-8' ) as fp: for token in vocab_tokens: fp.write(f'{token} {vocab_tokens[token]}\n' ) __UpperCamelCase =BartphoTokenizer(UpperCAmelCase_ , self.monolingual_vocab_file , **self.special_tokens_map ) tokenizer.save_pretrained(self.tmpdirname ) def _a ( self , **A_ ) -> Any: kwargs.update(self.special_tokens_map ) return BartphoTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ ) def _a ( self , A_ ) -> Optional[Any]: __UpperCamelCase ='This is a là test' __UpperCamelCase ='This is a<unk><unk> test' return input_text, output_text def _a ( self ) -> int: __UpperCamelCase =BartphoTokenizer(UpperCAmelCase_ , self.monolingual_vocab_file , **self.special_tokens_map ) __UpperCamelCase ='This is a là test' __UpperCamelCase ='▁This ▁is ▁a ▁l à ▁t est'.split() __UpperCamelCase =tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) __UpperCamelCase =tokens + [tokenizer.unk_token] __UpperCamelCase =[4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , UpperCAmelCase_ )
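The setup above writes a monolingual vocab file with one "token id" pair per line and builds the tokenizer from it. A sketch of just the file round trip, using a toy vocabulary:

import os
import tempfile

vocab = {"▁This": 0, "▁is": 1, "<unk>": 2}
path = os.path.join(tempfile.mkdtemp(), "vocab.txt")
with open(path, "w", encoding="utf-8") as fp:
    for token, idx in vocab.items():
        fp.write(f"{token} {idx}\n")
with open(path, encoding="utf-8") as fp:
    loaded = {tok: int(idx) for tok, idx in (line.split() for line in fp)}
assert loaded == vocab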
62
"""simple docstring""" # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys __SCREAMING_SNAKE_CASE : Tuple = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8') __SCREAMING_SNAKE_CASE : Tuple = subprocess.check_output(f"""git diff --name-only {fork_point_sha}""".split()).decode('utf-8').split() __SCREAMING_SNAKE_CASE : Any = '|'.join(sys.argv[1:]) __SCREAMING_SNAKE_CASE : Optional[Any] = re.compile(Rf"""^({joined_dirs}).*?\.py$""") __SCREAMING_SNAKE_CASE : List[str] = [x for x in modified_files if regex.match(x)] print(' '.join(relevant_modified_files), end='')
347
0
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device if is_torch_available(): from transformers import AutoModelForSeqaSeqLM, AutoTokenizer @require_torch @require_sentencepiece @require_tokenizers class __A ( unittest.TestCase ): '''simple docstring''' @slow def UpperCAmelCase ( self : Dict ) -> Dict: """simple docstring""" lowercase__ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' ,return_dict=UpperCAmelCase_ ).to(UpperCAmelCase_ ) lowercase__ : str = AutoTokenizer.from_pretrained('''google/mt5-small''' ) lowercase__ : List[Any] = tokenizer('''Hello there''' ,return_tensors='''pt''' ).input_ids lowercase__ : List[str] = tokenizer('''Hi I am''' ,return_tensors='''pt''' ).input_ids lowercase__ : List[Any] = model(input_ids.to(UpperCAmelCase_ ) ,labels=labels.to(UpperCAmelCase_ ) ).loss lowercase__ : Union[str, Any] = -(labels.shape[-1] * loss.item()) lowercase__ : int = -84.9127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
16
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } __SCREAMING_SNAKE_CASE : List[Any] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = {} with open(_SCREAMING_SNAKE_CASE , """r""" ) as file: for line_number, line in enumerate(_SCREAMING_SNAKE_CASE ): snake_case_ = line.strip() if line: snake_case_ = line.split() snake_case_ = line_number snake_case_ = words[0] snake_case_ = value return result def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: for attribute in key.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_SCREAMING_SNAKE_CASE ): snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]] snake_case_ = """param""" if weight_type is not None and weight_type != "param": snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape elif weight_type is not None and weight_type == "param": snake_case_ = hf_pointer for attribute in hf_param_name.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = shape_pointer.shape # let's reduce dimension snake_case_ = value[0] else: snake_case_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": snake_case_ = value elif weight_type == "weight_g": snake_case_ = value elif weight_type == "weight_v": snake_case_ = value elif weight_type == "bias": snake_case_ = value elif weight_type == "param": for attribute in hf_param_name.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = value else: snake_case_ = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: snake_case_ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_SCREAMING_SNAKE_CASE ): snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]] snake_case_ = """param""" if weight_type is not None and weight_type != "param": snake_case_ = """.""".join([key, weight_type] ) elif weight_type is not None and weight_type == "param": snake_case_ = """.""".join([key, hf_param_name] ) else: snake_case_ = key snake_case_ = value if """lm_head""" in full_key else value[0] __SCREAMING_SNAKE_CASE : int = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> List[str]: snake_case_ = False for key, mapped_key in MAPPING.items(): snake_case_ = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: snake_case_ = True if "*" in mapped_key: snake_case_ = name.split(_SCREAMING_SNAKE_CASE )[0].split(""".""" )[-2] snake_case_ = mapped_key.replace("""*""" , _SCREAMING_SNAKE_CASE ) if "weight_g" in name: snake_case_ = """weight_g""" elif "weight_v" in name: snake_case_ = """weight_v""" elif "bias" in name: snake_case_ = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj snake_case_ = """weight""" else: snake_case_ = None if hf_dict is not None: rename_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return is_used return is_used def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = [] snake_case_ = fairseq_model.state_dict() snake_case_ = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): snake_case_ = False if "conv_layers" in name: load_conv_layer( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == """group""" , ) snake_case_ = True else: snake_case_ = load_wavaveca_layer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not is_used: unused_weights.append(_SCREAMING_SNAKE_CASE ) logger.warning(f"""Unused weights: {unused_weights}""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: snake_case_ = full_name.split("""conv_layers.""" )[-1] snake_case_ = name.split(""".""" ) snake_case_ = int(items[0] ) snake_case_ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size 
{value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_SCREAMING_SNAKE_CASE ) @torch.no_grad() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False ) -> int: if config_path is not None: snake_case_ = WavaVecaConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) else: snake_case_ = WavaVecaConfig() if is_seq_class: snake_case_ = read_txt_into_dict(_SCREAMING_SNAKE_CASE ) snake_case_ = idalabel snake_case_ = WavaVecaForSequenceClassification(_SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE ) elif is_finetuned: if dict_path: snake_case_ = Dictionary.load(_SCREAMING_SNAKE_CASE ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq snake_case_ = target_dict.pad_index snake_case_ = target_dict.bos_index snake_case_ = target_dict.eos_index snake_case_ = len(target_dict.symbols ) snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , """vocab.json""" ) if not os.path.isdir(_SCREAMING_SNAKE_CASE ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_SCREAMING_SNAKE_CASE ) ) return os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) snake_case_ = target_dict.indices # fairseq has the <pad> and <s> switched snake_case_ = 0 snake_case_ = 1 with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaCTCTokenizer( _SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_SCREAMING_SNAKE_CASE , ) snake_case_ = True if config.feat_extract_norm == """layer""" else False snake_case_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) snake_case_ = WavaVecaProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE ) processor.save_pretrained(_SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaForCTC(_SCREAMING_SNAKE_CASE ) else: snake_case_ = 
WavaVecaForPreTraining(_SCREAMING_SNAKE_CASE ) if is_finetuned or is_seq_class: snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: snake_case_ = argparse.Namespace(task="""audio_pretraining""" ) snake_case_ = fairseq.tasks.setup_task(_SCREAMING_SNAKE_CASE ) snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_SCREAMING_SNAKE_CASE ) snake_case_ = model[0].eval() recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , not is_finetuned ) hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) parser.add_argument( '--is_seq_class', action='store_true', help='Whether the model to convert is a fine-tuned sequence classification model or not', ) __SCREAMING_SNAKE_CASE : Any = parser.parse_args() __SCREAMING_SNAKE_CASE : List[Any] = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
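Throughout the converter above, checkpoint keys like "feature_projection.projection.weight" are resolved one attribute at a time with getattr. A dependency-free sketch of that traversal:

from types import SimpleNamespace

model = SimpleNamespace(
    feature_projection=SimpleNamespace(projection=SimpleNamespace(weight=[1.0, 2.0]))
)
pointer = model
for attribute in "feature_projection.projection.weight".split("."):
    pointer = getattr(pointer, attribute)  # descend one level per segment
assert pointer == [1.0, 2.0]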
347
0
'''simple docstring'''
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow

if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
    @slow
    def UpperCAmelCase_ ( self :Optional[Any] )-> List[str]:
        A__ = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
        A__ = tf.convert_to_tensor(
            [[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.intaa , )  # J'aime le camembert !"
        A__ = model(UpperCAmelCase_ )["last_hidden_state"]
        A__ = tf.TensorShape((1, 10, 7_68) )
        self.assertEqual(output.shape , UpperCAmelCase_ )
        # compare the actual values for a slice.
        A__ = tf.convert_to_tensor(
            [[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]] , dtype=tf.floataa , )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
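Integration tests like the one above compare a small logits slice against hard-coded reference values within an absolute tolerance. A sketch of the tolerance check itself, with made-up numbers:

import numpy as np

reference = np.array([[-0.0254, 0.0235, 0.1027]])
candidate = reference + 5e-5  # pretend this slice came from another run
assert np.allclose(candidate, reference, atol=1e-4)
assert not np.allclose(reference + 1e-2, reference, atol=1e-4)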
237
"""simple docstring""" import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class __A : '''simple docstring''' def __init__( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=14 , UpperCAmelCase_ : Union[str, Any]=7 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : str=99 , UpperCAmelCase_ : Union[str, Any]=32 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : str=37 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : int=512 , UpperCAmelCase_ : Tuple=0.02 , ) ->List[str]: """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_input_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = rotary_dim snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = initializer_range snake_case_ = None snake_case_ = vocab_size - 1 snake_case_ = vocab_size - 1 snake_case_ = vocab_size - 1 def lowerCAmelCase ( self : int ) ->Optional[int]: """simple docstring""" snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_input_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=UpperCAmelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def lowerCAmelCase ( self : Dict ) ->Tuple: """simple docstring""" snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowerCAmelCase ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict ) ->Tuple: """simple docstring""" snake_case_ = 20 snake_case_ = model_class_name(UpperCAmelCase_ ) snake_case_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ ) snake_case_ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) snake_case_ = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) 
snake_case_ = model( input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , ) snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) snake_case_ = model( input_ids[:, -1:] , attention_mask=UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=UpperCAmelCase_ , ) snake_case_ = model(UpperCAmelCase_ ) snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict: """simple docstring""" snake_case_ = 20 snake_case_ = model_class_name(UpperCAmelCase_ ) snake_case_ = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) snake_case_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ ) snake_case_ = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) snake_case_ = model( input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , ) snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) snake_case_ = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , ) snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ) snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) @require_flax class __A (snake_case__ , snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Any = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () __lowercase: List[str] = (FlaxGPTJForCausalLM,) if is_flax_available() else () def lowerCAmelCase ( self : Tuple ) ->List[str]: """simple docstring""" snake_case_ = FlaxGPTJModelTester(self ) def lowerCAmelCase ( self : int ) ->List[Any]: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : List[str] ) ->Any: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) @tooslow def lowerCAmelCase ( self : List[str] ) ->Optional[Any]: """simple docstring""" snake_case_ = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" ) snake_case_ = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ ) snake_case_ = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" ) snake_case_ = False snake_case_ = model.config.eos_token_id snake_case_ = jax.jit(model.generate ) snake_case_ = jit_generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences snake_case_ = 
tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = [ """Hello this is a long string of text.\n\nI'm trying to get the text of the""", """Hey, I'm a little late to the party. I'm going to""", ] self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @is_pt_flax_cross_test def lowerCAmelCase ( self : int ) ->str: """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning snake_case_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ , snake_case_ = pt_inputs["""input_ids"""].shape snake_case_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(UpperCAmelCase_ ): snake_case_ = 0 snake_case_ = 1 snake_case_ = 0 snake_case_ = 1 snake_case_ = pt_model_class(UpperCAmelCase_ ).eval() snake_case_ = model_class(UpperCAmelCase_ , dtype=jnp.floataa ) snake_case_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCAmelCase_ ) snake_case_ = fx_state with torch.no_grad(): snake_case_ = pt_model(**UpperCAmelCase_ ).to_tuple() snake_case_ = fx_model(**UpperCAmelCase_ ).to_tuple() self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(UpperCAmelCase_ ) snake_case_ = model_class.from_pretrained(UpperCAmelCase_ , from_pt=UpperCAmelCase_ ) snake_case_ = fx_model_loaded(**UpperCAmelCase_ ).to_tuple() self.assertEqual( len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @is_pt_flax_cross_test def lowerCAmelCase ( self : List[Any] ) ->str: """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning snake_case_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = pt_model_class(UpperCAmelCase_ ).eval() snake_case_ = model_class(UpperCAmelCase_ , dtype=jnp.floataa ) snake_case_ = load_flax_weights_in_pytorch_model(UpperCAmelCase_ , fx_model.params ) snake_case_ , snake_case_ = pt_inputs["""input_ids"""].shape snake_case_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(UpperCAmelCase_ ): snake_case_ = 0 snake_case_ = 1 snake_case_ = 0 snake_case_ = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): snake_case_ = pt_model(**UpperCAmelCase_ ).to_tuple() snake_case_ = fx_model(**UpperCAmelCase_ 
).to_tuple() self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(UpperCAmelCase_ ) snake_case_ = pt_model_class.from_pretrained(UpperCAmelCase_ , from_flax=UpperCAmelCase_ ) with torch.no_grad(): snake_case_ = pt_model_loaded(**UpperCAmelCase_ ).to_tuple() self.assertEqual( len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @tooslow def lowerCAmelCase ( self : List[Any] ) ->str: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" ) snake_case_ = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCAmelCase_ )
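The cache tests above right-pad the attention mask with zeros out to the maximum decode length before priming the key/value cache. A NumPy sketch of that padding step:

import numpy as np

attention_mask = np.ones((2, 3), dtype="i4")
max_decoder_length = 5
padding = np.zeros(
    (attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]), dtype="i4"
)
extended_mask = np.concatenate([attention_mask, padding], axis=-1)
assert extended_mask[0].tolist() == [1, 1, 1, 0, 0]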
347
0
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class _snake_case ( unittest.TestCase ): def __UpperCamelCase ( self : List[Any] ): SCREAMING_SNAKE_CASE:Tuple = tempfile.mkdtemp() SCREAMING_SNAKE_CASE:List[Any] = BlipImageProcessor() SCREAMING_SNAKE_CASE:Any = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" ) SCREAMING_SNAKE_CASE:int = BlipProcessor(UpperCAmelCase_ ,UpperCAmelCase_ ) processor.save_pretrained(self.tmpdirname ) def __UpperCamelCase ( self : Optional[int] ,**SCREAMING_SNAKE_CASE__ : str ): return AutoProcessor.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ).tokenizer def __UpperCamelCase ( self : Tuple ,**SCREAMING_SNAKE_CASE__ : Optional[Any] ): return AutoProcessor.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ).image_processor def __UpperCamelCase ( self : str ): shutil.rmtree(self.tmpdirname ) def __UpperCamelCase ( self : Tuple ): SCREAMING_SNAKE_CASE:List[str] = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )] SCREAMING_SNAKE_CASE:Union[str, Any] = [Image.fromarray(np.moveaxis(UpperCAmelCase_ ,0 ,-1 ) ) for x in image_inputs] return image_inputs def __UpperCamelCase ( self : Optional[Any] ): SCREAMING_SNAKE_CASE:Dict = BlipProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE:Any = self.get_tokenizer(bos_token="(BOS)" ,eos_token="(EOS)" ) SCREAMING_SNAKE_CASE:Optional[int] = self.get_image_processor(do_normalize=UpperCAmelCase_ ,padding_value=1.0 ) SCREAMING_SNAKE_CASE:List[str] = BlipProcessor.from_pretrained( self.tmpdirname ,bos_token="(BOS)" ,eos_token="(EOS)" ,do_normalize=UpperCAmelCase_ ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,UpperCAmelCase_ ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,UpperCAmelCase_ ) def __UpperCamelCase ( self : Optional[Any] ): SCREAMING_SNAKE_CASE:Union[str, Any] = self.get_image_processor() SCREAMING_SNAKE_CASE:List[Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE:str = BlipProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE:List[Any] = self.prepare_image_inputs() SCREAMING_SNAKE_CASE:Optional[int] = image_processor(UpperCAmelCase_ ,return_tensors="np" ) SCREAMING_SNAKE_CASE:List[str] = processor(images=UpperCAmelCase_ ,return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def __UpperCamelCase ( self : Optional[Any] ): SCREAMING_SNAKE_CASE:Optional[int] = self.get_image_processor() SCREAMING_SNAKE_CASE:Optional[Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE:List[str] = BlipProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE:int = "lower newer" SCREAMING_SNAKE_CASE:Any = processor(text=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE:Tuple = tokenizer(UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] 
,encoded_processor[key] ) def __UpperCamelCase ( self : Tuple ): SCREAMING_SNAKE_CASE:Union[str, Any] = self.get_image_processor() SCREAMING_SNAKE_CASE:List[str] = self.get_tokenizer() SCREAMING_SNAKE_CASE:List[Any] = BlipProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE:Optional[Any] = "lower newer" SCREAMING_SNAKE_CASE:List[Any] = self.prepare_image_inputs() SCREAMING_SNAKE_CASE:List[Any] = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ ) self.assertListEqual(list(inputs.keys() ) ,["pixel_values", "input_ids", "attention_mask"] ) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase_ ): processor() def __UpperCamelCase ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE:List[str] = self.get_image_processor() SCREAMING_SNAKE_CASE:Tuple = self.get_tokenizer() SCREAMING_SNAKE_CASE:Optional[int] = BlipProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE:Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] SCREAMING_SNAKE_CASE:List[Any] = processor.batch_decode(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE:List[Any] = tokenizer.batch_decode(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ ) def __UpperCamelCase ( self : List[Any] ): SCREAMING_SNAKE_CASE:Union[str, Any] = self.get_image_processor() SCREAMING_SNAKE_CASE:Union[str, Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE:Union[str, Any] = BlipProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE:List[str] = "lower newer" SCREAMING_SNAKE_CASE:Dict = self.prepare_image_inputs() SCREAMING_SNAKE_CASE:Optional[int] = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) ,["pixel_values", "input_ids", "attention_mask"] )
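A processor of this kind is a thin wrapper that merges image-processor and tokenizer outputs and raises when given neither input, which is what the tests above assert. A toy sketch of that contract (all values are stand-ins, not real features):

def toy_processor(text=None, images=None):
    encoding = {}
    if images is not None:
        encoding["pixel_values"] = [[0.0]]    # stand-in for image features
    if text is not None:
        encoding["input_ids"] = [[101, 102]]  # stand-in for token ids
        encoding["attention_mask"] = [[1, 1]]
    if not encoding:
        raise ValueError("You have to specify either text or images.")
    return encoding

assert list(toy_processor(text="lower newer", images=object())) == ["pixel_values", "input_ids", "attention_mask"]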
139
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING __SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) class __A (snake_case__): '''simple docstring''' __lowercase: int = """upernet""" def __init__( self : str , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : str=512 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Optional[Any]=[1, 2, 3, 6] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Tuple=0.4 , UpperCAmelCase_ : Tuple=384 , UpperCAmelCase_ : Union[str, Any]=256 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : Tuple=255 , **UpperCAmelCase_ : Dict , ) ->Union[str, Any]: """simple docstring""" super().__init__(**UpperCAmelCase_ ) if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) snake_case_ = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): snake_case_ = backbone_config.get("""model_type""" ) snake_case_ = CONFIG_MAPPING[backbone_model_type] snake_case_ = config_class.from_dict(UpperCAmelCase_ ) snake_case_ = backbone_config snake_case_ = hidden_size snake_case_ = initializer_range snake_case_ = pool_scales snake_case_ = use_auxiliary_head snake_case_ = auxiliary_loss_weight snake_case_ = auxiliary_in_channels snake_case_ = auxiliary_channels snake_case_ = auxiliary_num_convs snake_case_ = auxiliary_concat_input snake_case_ = loss_ignore_index def lowerCAmelCase ( self : str ) ->Optional[Any]: """simple docstring""" snake_case_ = copy.deepcopy(self.__dict__ ) snake_case_ = self.backbone_config.to_dict() snake_case_ = self.__class__.model_type return output
347
0
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __A( unittest.TestCase ): def __init__( self , _snake_case , _snake_case=7 , _snake_case=3 , _snake_case=18 , _snake_case=30 , _snake_case=400 , _snake_case=True , _snake_case=None , _snake_case=True , ) -> Any: '''simple docstring''' __a = size if size is not None else {'''height''': 18, '''width''': 18} __a = parent __a = batch_size __a = num_channels __a = image_size __a = min_resolution __a = max_resolution __a = do_resize __a = size __a = apply_ocr def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class __A( snake_case__ , unittest.TestCase ): snake_case_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' __a = LayoutLMvaImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]: '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase_ , '''do_resize''' ) ) self.assertTrue(hasattr(UpperCAmelCase_ , '''size''' ) ) self.assertTrue(hasattr(UpperCAmelCase_ , '''apply_ocr''' ) ) def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' __a = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} ) __a = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , Image.Image ) # Test not batched input __a = image_processing(image_inputs[0] , return_tensors='''pt''' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) self.assertIsInstance(encoding.words , UpperCAmelCase_ ) self.assertIsInstance(encoding.boxes , UpperCAmelCase_ ) # Test batched __a = image_processing(UpperCAmelCase_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __a = prepare_image_inputs(self.image_processor_tester , 
equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , np.ndarray ) # Test not batched input __a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched __a = image_processing(UpperCAmelCase_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) # Test not batched input __a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched __a = image_processing(UpperCAmelCase_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' __a = LayoutLMvaImageProcessor() from datasets import load_dataset __a = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' ) __a = Image.open(ds[0]['''file'''] ).convert('''RGB''' ) __a = image_processing(UpperCAmelCase_ , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 __a = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', 
'''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231 __a = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 
541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , UpperCAmelCase_ ) self.assertListEqual(encoding.boxes , UpperCAmelCase_ ) # with apply_OCR = False __a = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase_ ) __a = image_processing(UpperCAmelCase_ , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
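# A minimal usage sketch for the processor exercised above. `LayoutLMvaImageProcessor`
# in this row appears to be a renamed `transformers.LayoutLMv3ImageProcessor`; the
# sketch assumes `transformers` plus `pytesseract` are installed, and "document.png"
# is a placeholder file name.
from PIL import Image
from transformers import LayoutLMv3ImageProcessor

image_processor = LayoutLMv3ImageProcessor(apply_ocr=True)
image = Image.open("document.png").convert("RGB")
encoding = image_processor(image, return_tensors="pt")
print(encoding.pixel_values.shape)        # torch.Size([1, 3, 224, 224]) at the default size
print(encoding.words[0][:5], encoding.boxes[0][:5])  # OCR words and their boxes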
6
"""simple docstring""" import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class __A (unittest.TestCase): '''simple docstring''' def lowerCAmelCase ( self : Optional[int] ) ->Dict: """simple docstring""" snake_case_ = """ylacombe/bark-small""" snake_case_ = tempfile.mkdtemp() snake_case_ = """en_speaker_1""" snake_case_ = """This is a test string""" snake_case_ = """speaker_embeddings_path.json""" snake_case_ = """speaker_embeddings""" def lowerCAmelCase ( self : List[str] , **UpperCAmelCase_ : str ) ->Optional[int]: """simple docstring""" return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase_ ) def lowerCAmelCase ( self : Any ) ->Dict: """simple docstring""" shutil.rmtree(self.tmpdirname ) def lowerCAmelCase ( self : List[Any] ) ->Dict: """simple docstring""" snake_case_ = self.get_tokenizer() snake_case_ = BarkProcessor(tokenizer=UpperCAmelCase_ ) processor.save_pretrained(self.tmpdirname ) snake_case_ = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def lowerCAmelCase ( self : Dict ) ->int: """simple docstring""" snake_case_ = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) snake_case_ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) snake_case_ = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def lowerCAmelCase ( self : Optional[Any] ) ->Any: """simple docstring""" snake_case_ = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) snake_case_ = 35 snake_case_ = 2 snake_case_ = 8 snake_case_ = { """semantic_prompt""": np.ones(UpperCAmelCase_ ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset snake_case_ = processor(text=self.input_string , voice_preset=UpperCAmelCase_ ) snake_case_ = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file snake_case_ = os.path.join(self.tmpdirname , """file.npz""" ) np.savez(UpperCAmelCase_ , **UpperCAmelCase_ ) snake_case_ = processor(text=self.input_string , voice_preset=UpperCAmelCase_ ) snake_case_ = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from the hub snake_case_ = processor(text=self.input_string , voice_preset=self.voice_preset ) def lowerCAmelCase ( self : Tuple ) ->Dict: """simple docstring""" snake_case_ = self.get_tokenizer() snake_case_ = BarkProcessor(tokenizer=UpperCAmelCase_ ) snake_case_ = processor(text=self.input_string ) snake_case_ = tokenizer( self.input_string , padding="""max_length""" 
, max_length=256 , add_special_tokens=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
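# A minimal usage sketch for the processor tested above, assuming the same
# "ylacombe/bark-small" checkpoint used by the test is reachable:
from transformers import BarkProcessor

processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
inputs = processor(text="This is a test string", voice_preset="en_speaker_1")
print(inputs["input_ids"].shape)  # padded to the fixed length of 256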
347
0
"""simple docstring""" from __future__ import annotations import string from itertools import cycle, product from pathlib import Path SCREAMING_SNAKE_CASE : str = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) SCREAMING_SNAKE_CASE : list[int] = [ord(letter) for letter in string.ascii_lowercase] SCREAMING_SNAKE_CASE : set[int] = {ord(char) for char in VALID_CHARS} SCREAMING_SNAKE_CASE : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"] def lowercase ( _snake_case : Union[str, Any] , _snake_case : Dict ) ->str | None: """simple docstring""" __snake_case : str = '''''' __snake_case : Optional[int] = 42 __snake_case : Any = 42 __snake_case : Dict = 42 for keychar, cipherchar in zip(cycle(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ): __snake_case : str = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(_SCREAMING_SNAKE_CASE ) return decoded def lowercase ( _snake_case : List[Any] ) ->list[str]: """simple docstring""" __snake_case : Union[str, Any] = [] for key in product(_SCREAMING_SNAKE_CASE , repeat=3 ): __snake_case : Union[str, Any] = try_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if encoded is not None: possibles.append(_SCREAMING_SNAKE_CASE ) return possibles def lowercase ( _snake_case : str , _snake_case : Union[str, Any] ) ->list[str]: """simple docstring""" return [possible for possible in possibles if common_word in possible.lower()] def lowercase ( _snake_case : List[str] = "p059_cipher.txt" ) ->int: """simple docstring""" __snake_case : int = 42 __snake_case : int = 42 __snake_case : str = 42 __snake_case : Dict = 42 __snake_case : Optional[Any] = Path(_SCREAMING_SNAKE_CASE ).parent.joinpath(_SCREAMING_SNAKE_CASE ).read_text(encoding='''utf-8''' ) __snake_case : Union[str, Any] = [int(_SCREAMING_SNAKE_CASE ) for number in data.strip().split(''',''' )] __snake_case : List[str] = filter_valid_chars(_SCREAMING_SNAKE_CASE ) for common_word in COMMON_WORDS: __snake_case : Optional[Any] = filter_common_word(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) == 1: break __snake_case : Optional[int] = possibles[0] return sum(ord(_SCREAMING_SNAKE_CASE ) for char in decoded_text ) if __name__ == "__main__": print(F'{solution() = }')
102
"""simple docstring""" import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 __SCREAMING_SNAKE_CASE : int = sys.version_info >= (3, 10) def _a ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE ) @dataclass class __A : '''simple docstring''' __lowercase: int __lowercase: float __lowercase: str __lowercase: bool @dataclass class __A : '''simple docstring''' __lowercase: int = 42 __lowercase: str = field(default="""toto""" , metadata={"""help""": """help message"""}) @dataclass class __A : '''simple docstring''' __lowercase: bool = False __lowercase: bool = True __lowercase: Optional[bool] = None class __A (snake_case__): '''simple docstring''' __lowercase: str = """titi""" __lowercase: Any = """toto""" class __A (snake_case__): '''simple docstring''' __lowercase: int = """titi""" __lowercase: Optional[Any] = """toto""" __lowercase: List[Any] = 42 @dataclass class __A : '''simple docstring''' __lowercase: BasicEnum = "toto" def lowerCAmelCase ( self : int ) ->List[Any]: """simple docstring""" snake_case_ = BasicEnum(self.foo ) @dataclass class __A : '''simple docstring''' __lowercase: MixedTypeEnum = "toto" def lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]: """simple docstring""" snake_case_ = MixedTypeEnum(self.foo ) @dataclass class __A : '''simple docstring''' __lowercase: Optional[int] = None __lowercase: Optional[float] = field(default=snake_case__ , metadata={"""help""": """help message"""}) __lowercase: Optional[str] = None __lowercase: Optional[List[str]] = list_field(default=[]) __lowercase: Optional[List[int]] = list_field(default=[]) @dataclass class __A : '''simple docstring''' __lowercase: List[int] = list_field(default=[]) __lowercase: List[int] = list_field(default=[1, 2, 3]) __lowercase: List[str] = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) __lowercase: List[float] = list_field(default=[0.1, 0.2, 0.3]) @dataclass class __A : '''simple docstring''' __lowercase: List[int] = field() __lowercase: str = field() __lowercase: BasicEnum = field() def lowerCAmelCase ( self : Any ) ->str: """simple docstring""" snake_case_ = BasicEnum(self.required_enum ) @dataclass class __A : '''simple docstring''' __lowercase: int __lowercase: "BasicEnum" = field() __lowercase: "Optional[bool]" = None __lowercase: "str" = field(default="""toto""" , metadata={"""help""": """help message"""}) __lowercase: "List[str]" = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) if is_python_no_less_than_3_10: @dataclass class __A : '''simple docstring''' __lowercase: bool = False __lowercase: bool = True __lowercase: bool | None = None @dataclass class __A : '''simple docstring''' __lowercase: int | None = None __lowercase: float | None = field(default=snake_case__ , metadata={"""help""": """help message"""}) __lowercase: str | None = None __lowercase: list[str] | None = list_field(default=[]) __lowercase: list[int] | None = list_field(default=[]) class __A (unittest.TestCase): '''simple docstring''' def lowerCAmelCase ( 
self : Optional[int] , UpperCAmelCase_ : argparse.ArgumentParser , UpperCAmelCase_ : argparse.ArgumentParser ) ->Optional[int]: """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""} snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get("""choices""" , UpperCAmelCase_ ) and yy.get("""choices""" , UpperCAmelCase_ ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx["""type"""](UpperCAmelCase_ ) , yy["""type"""](UpperCAmelCase_ ) ) del xx["type"], yy["type"] self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : int ) ->Any: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--bar""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--baz""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--flag""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""] ((snake_case_) , ) = parser.parse_args_into_dataclasses(UpperCAmelCase_ , look_for_args_file=UpperCAmelCase_ ) self.assertFalse(example.flag ) def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , default=42 , type=UpperCAmelCase_ ) expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]: """simple docstring""" snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) expected.add_argument("""--baz""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument("""--no_baz""" , action="""store_false""" , default=UpperCAmelCase_ , dest="""baz""" ) expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ ) snake_case_ = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCAmelCase_ ) for dataclass_type in dataclass_types: snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """--no_baz"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """--baz"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = 
parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) def lowerCAmelCase ( self : int ) ->List[str]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument( """--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(args.foo , """toto""" ) snake_case_ = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) snake_case_ = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo , """titi""" ) snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) snake_case_ = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo , 42 ) snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def lowerCAmelCase ( self : Dict ) ->str: """simple docstring""" @dataclass class __A : '''simple docstring''' __lowercase: Literal["titi", "toto", 42] = "toto" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument( """--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(args.foo , """toto""" ) snake_case_ = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo , """titi""" ) snake_case_ = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo , 42 ) def lowerCAmelCase ( self : Optional[int] ) ->Dict: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=UpperCAmelCase_ ) expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ ) expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual( UpperCAmelCase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , ) snake_case_ = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() ) self.assertEqual(UpperCAmelCase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ ) expected.add_argument("""--bar""" , 
default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="""help message""" ) expected.add_argument("""--baz""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ ) expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) snake_case_ = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCAmelCase_ ) for dataclass_type in dataclass_types: snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , bar=UpperCAmelCase_ , baz=UpperCAmelCase_ , ces=[] , des=[] ) ) snake_case_ = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) ) def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--required_list""" , nargs="""+""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--required_str""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument( """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument( """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , ) expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ ) expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" ) expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict ) ->Tuple: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } snake_case_ = parser.parse_dict(UpperCAmelCase_ )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, """extra""": 42, } self.assertRaises(UpperCAmelCase_ , parser.parse_dict , UpperCAmelCase_ , allow_extra_keys=UpperCAmelCase_ ) def lowerCAmelCase ( self : Any ) ->Dict: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ = os.path.join(UpperCAmelCase_ , """temp_json""" ) os.mkdir(UpperCAmelCase_ ) with open(temp_local_path + """.json""" , """w+""" ) as f: json.dump(UpperCAmelCase_ , UpperCAmelCase_ ) 
snake_case_ = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[int] ) ->List[str]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ = os.path.join(UpperCAmelCase_ , """temp_yaml""" ) os.mkdir(UpperCAmelCase_ ) with open(temp_local_path + """.yaml""" , """w+""" ) as f: yaml.dump(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict ) ->Any: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ )
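# The pattern the tests above exercise, in its simplest form: HfArgumentParser
# turns a dataclass into an argparse CLI (standard `transformers` API; the
# dataclass fields here are illustrative).
from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class TrainArgs:
    foo: int = 12
    bar: float = 3.14
    baz: str = field(default="42", metadata={"help": "help message"})


parser = HfArgumentParser(TrainArgs)
(args,) = parser.parse_args_into_dataclasses(["--foo", "7", "--baz", "quux"])
print(args)  # TrainArgs(foo=7, bar=3.14, baz='quux')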
347
0
from __future__ import annotations

import math


def ucal(u: float, p: int) -> float:
    # Return u * (u - 1) * ... * (u - p + 1), the product used by the
    # Newton forward-difference formula.
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
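# A non-interactive worked example of the same forward-difference computation
# on a tiny table (values are illustrative): f(x) = x**2 sampled at
# x = 0, 1, 2, 3, interpolated at x = 1.5, gives 2.25.
import math

x = [0, 1, 2, 3]
n = len(x)

# forward difference table, built column by column
table = [[0.0, 1.0, 4.0, 9.0]]
for i in range(1, n):
    table.append([table[i - 1][j + 1] - table[i - 1][j] for j in range(n - i)])

value = 1.5
u = (value - x[0]) / (x[1] - x[0])

summ = table[0][0]
temp = 1.0
for i in range(1, n):
    temp *= u - (i - 1)
    summ += temp * table[i][0] / math.factorial(i)

print(summ)  # 2.25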
265
"""simple docstring""" import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy __SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , ) -> Optional[Any]: snake_case_ = bnb_quantization_config.load_in_abit snake_case_ = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,""" """ make sure you have the latest version of `bitsandbytes` installed.""" ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,""" """make sure you have the latest version of `bitsandbytes` installed.""" ) snake_case_ = [] # custom device map if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(device_map.keys() ) > 1: snake_case_ = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: snake_case_ = get_keys_to_not_convert(_SCREAMING_SNAKE_CASE ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(_SCREAMING_SNAKE_CASE ) snake_case_ = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: snake_case_ = [] snake_case_ = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(_SCREAMING_SNAKE_CASE ) # compatibility with peft snake_case_ = load_in_abit snake_case_ = load_in_abit snake_case_ = get_parameter_device(_SCREAMING_SNAKE_CASE ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( """It is not recommended to quantize a loaded model. 
""" """The model should be instantiated under the `init_empty_weights` context manager.""" ) snake_case_ = replace_with_bnb_layers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) # convert param to the right dtype snake_case_ = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: snake_case_ = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" ) snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(_SCREAMING_SNAKE_CASE ): param.to(_SCREAMING_SNAKE_CASE ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" ) logger.info( f"""The model device type is {model_device.type}. However, cuda is needed for quantization.""" """We move the model to cuda.""" ) return model elif weights_location is None: raise RuntimeError( f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): snake_case_ = replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) snake_case_ = get_quantized_model_device_map( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_memory=_SCREAMING_SNAKE_CASE , no_split_module_classes=_SCREAMING_SNAKE_CASE , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): snake_case_ = True snake_case_ = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] ) load_checkpoint_in_model( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=_SCREAMING_SNAKE_CASE , offload_state_dict=_SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(_SCREAMING_SNAKE_CASE , device_map=_SCREAMING_SNAKE_CASE , offload_dir=_SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: if device_map is None: if torch.cuda.is_available(): snake_case_ = {"""""": torch.cuda.current_device()} else: raise RuntimeError("""No GPU found. 
A GPU is needed for quantization.""" ) logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( """If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """ """'sequential'.""" ) snake_case_ = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) snake_case_ = {} snake_case_ = special_dtypes snake_case_ = no_split_module_classes snake_case_ = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": snake_case_ = get_balanced_memory( _SCREAMING_SNAKE_CASE , low_zero=(device_map == """balanced_low_0""") , max_memory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) snake_case_ = max_memory snake_case_ = infer_auto_device_map(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # check if don't have any quantized module on the cpu snake_case_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules snake_case_ = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( """ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. """ ) else: logger.info( """Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit""" ) del device_map_without_some_modules return device_map def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: if modules_to_not_convert is None: snake_case_ = [] snake_case_ , snake_case_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return model def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) -> List[Any]: snake_case_ = False for name, module in model.named_children(): if current_key_name is None: snake_case_ = [] current_key_name.append(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` snake_case_ = """.""".join(_SCREAMING_SNAKE_CASE ) snake_case_ = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: snake_case_ = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: snake_case_ = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: snake_case_ = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" ) snake_case_ = module.weight.data if module.bias is not None: snake_case_ = module.bias.data bnb_module.requires_grad_(_SCREAMING_SNAKE_CASE ) setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = True if len(list(module.children() ) ) > 0: snake_case_ , snake_case_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _a ( _SCREAMING_SNAKE_CASE ) -> Any: # Create a copy of the model with init_empty_weights(): snake_case_ = deepcopy(_SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside `init_empty_weights` context manager` snake_case_ = find_tied_parameters(_SCREAMING_SNAKE_CASE ) # For compatibility with Accelerate < 0.18 if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): snake_case_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: snake_case_ = sum(_SCREAMING_SNAKE_CASE , [] ) snake_case_ = len(_SCREAMING_SNAKE_CASE ) > 0 # Check if it is a base model snake_case_ = False if hasattr(_SCREAMING_SNAKE_CASE , """base_model_prefix""" ): snake_case_ = not hasattr(_SCREAMING_SNAKE_CASE , model.base_model_prefix 
) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head snake_case_ = list(model.named_children() ) snake_case_ = [list_modules[-1][0]] # add last module together with tied weights snake_case_ = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE ) snake_case_ = list(set(_SCREAMING_SNAKE_CASE ) ) + list(_SCREAMING_SNAKE_CASE ) # remove ".weight" from the keys snake_case_ = [""".weight""", """.bias"""] snake_case_ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: snake_case_ = name.replace(_SCREAMING_SNAKE_CASE , """""" ) filtered_module_names.append(_SCREAMING_SNAKE_CASE ) return filtered_module_names def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: for m in model.modules(): if isinstance(_SCREAMING_SNAKE_CASE , bnb.nn.Linearabit ): return True return False def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: return next(parameter.parameters() ).device def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0 , dtype=_SCREAMING_SNAKE_CASE , value=_SCREAMING_SNAKE_CASE ) snake_case_ = param_name snake_case_ = model if "." in tensor_name: snake_case_ = tensor_name.split(""".""" ) for split in splits[:-1]: snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if new_module is None: raise ValueError(f"""{module} has no attribute {split}.""" ) snake_case_ = new_module snake_case_ = splits[-1] # offload weights snake_case_ = False offload_weight(module._parameters[tensor_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) if hasattr(module._parameters[tensor_name] , """SCB""" ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE , ) else: offload_weight(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) offload_weight(_SCREAMING_SNAKE_CASE , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """meta""" , dtype=_SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
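# A sketch of how the entry point above is typically called, mirroring the
# `accelerate` quantization API; the model id and checkpoint path are
# placeholders:
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("some-org/some-model")  # hypothetical id
with init_empty_weights():
    empty_model = AutoModelForCausalLM.from_config(config)

bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
model = load_and_quantize_model(
    empty_model,
    bnb_quantization_config=bnb_config,
    weights_location="path/to/checkpoint",  # folder holding the weights
    device_map="auto",
)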
347
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __UpperCAmelCase = { 'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'], 'tokenization_roberta': ['RobertaTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['RobertaTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'RobertaForCausalLM', 'RobertaForMaskedLM', 'RobertaForMultipleChoice', 'RobertaForQuestionAnswering', 'RobertaForSequenceClassification', 'RobertaForTokenClassification', 'RobertaModel', 'RobertaPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFRobertaForCausalLM', 'TFRobertaForMaskedLM', 'TFRobertaForMultipleChoice', 'TFRobertaForQuestionAnswering', 'TFRobertaForSequenceClassification', 'TFRobertaForTokenClassification', 'TFRobertaMainLayer', 'TFRobertaModel', 'TFRobertaPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'FlaxRobertaForCausalLM', 'FlaxRobertaForMaskedLM', 'FlaxRobertaForMultipleChoice', 'FlaxRobertaForQuestionAnswering', 'FlaxRobertaForSequenceClassification', 'FlaxRobertaForTokenClassification', 'FlaxRobertaModel', 'FlaxRobertaPreTrainedModel', ] if TYPE_CHECKING: from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig from .tokenization_roberta import RobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roberta_fast import RobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, FlaxRobertaPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
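# What the _LazyModule structure above buys: importing the package stays
# cheap, and the heavy submodule is only loaded on first attribute access.
from transformers import RobertaConfig  # resolved lazily via _LazyModule

config = RobertaConfig()
print(config.hidden_size)  # 768 by default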
119
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { 'microsoft/beit-base-patch16-224-pt22k': ( 'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class __A (snake_case__): '''simple docstring''' __lowercase: Optional[int] = """beit""" def __init__( self : List[str] , UpperCAmelCase_ : List[Any]=8_192 , UpperCAmelCase_ : Dict=768 , UpperCAmelCase_ : int=12 , UpperCAmelCase_ : Tuple=12 , UpperCAmelCase_ : List[Any]=3_072 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : Dict=0.0 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : Optional[Any]=1E-12 , UpperCAmelCase_ : int=224 , UpperCAmelCase_ : Tuple=16 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Dict=[3, 5, 7, 11] , UpperCAmelCase_ : Tuple=[1, 2, 3, 6] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : List[Any]=0.4 , UpperCAmelCase_ : Optional[Any]=256 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Tuple=255 , **UpperCAmelCase_ : List[str] , ) ->Optional[Any]: """simple docstring""" super().__init__(**UpperCAmelCase_ ) snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = image_size snake_case_ = patch_size snake_case_ = num_channels snake_case_ = use_mask_token snake_case_ = use_absolute_position_embeddings snake_case_ = use_relative_position_bias snake_case_ = use_shared_relative_position_bias snake_case_ = layer_scale_init_value snake_case_ = drop_path_rate snake_case_ = use_mean_pooling # decode head attributes (semantic segmentation) snake_case_ = out_indices snake_case_ = pool_scales # auxiliary head attributes (semantic segmentation) snake_case_ = use_auxiliary_head snake_case_ = auxiliary_loss_weight snake_case_ = auxiliary_channels snake_case_ = auxiliary_num_convs snake_case_ = auxiliary_concat_input snake_case_ = semantic_loss_ignore_index class __A (snake_case__): '''simple docstring''' __lowercase: List[Any] = version.parse("""1.11""") @property def lowerCAmelCase ( self : Dict ) ->Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCAmelCase ( self : Any ) ->float: """simple docstring""" return 1E-4
347
0
import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
349
"""simple docstring""" import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging __SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : List[Any] = {'vocab_file': 'spiece.model'} __SCREAMING_SNAKE_CASE : int = { 'vocab_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model', 't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model', 't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model', } } # TODO(PVP) - this should be removed in Transformers v5 __SCREAMING_SNAKE_CASE : Dict = { 't5-small': 512, 't5-base': 512, 't5-large': 512, 't5-3b': 512, 't5-11b': 512, } __SCREAMING_SNAKE_CASE : Optional[int] = '▁' class __A (snake_case__): '''simple docstring''' __lowercase: Optional[int] = VOCAB_FILES_NAMES __lowercase: Any = PRETRAINED_VOCAB_FILES_MAP __lowercase: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase: List[str] = ["""input_ids""", """attention_mask"""] def __init__( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any]="</s>" , UpperCAmelCase_ : Optional[Any]="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Tuple=100 , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , UpperCAmelCase_ : Optional[int]=True , **UpperCAmelCase_ : Dict , ) ->None: """simple docstring""" if extra_ids > 0 and additional_special_tokens is None: snake_case_ = [F"""<extra_id_{i}>""" for i in range(UpperCAmelCase_ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens snake_case_ = len(set(filter(lambda UpperCAmelCase_ : bool("""extra_id""" in str(UpperCAmelCase_ ) ) , UpperCAmelCase_ ) ) ) if extra_tokens != extra_ids: raise ValueError( F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are""" """ provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids""" """ tokens""" ) if legacy: logger.warning_once( F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to""" """ read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" ) snake_case_ = legacy snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , extra_ids=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , legacy=UpperCAmelCase_ , **UpperCAmelCase_ , ) snake_case_ = vocab_file snake_case_ = extra_ids snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCAmelCase_ ) @staticmethod def lowerCAmelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] ) ->Union[str, Any]: """simple docstring""" if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: snake_case_ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( """This tokenizer was incorrectly instantiated with a model max length of""" F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this""" """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with""" """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on""" F""" {pretrained_model_name_or_path} automatically truncating your input to""" F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences""" F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with""" """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please""" """ instantiate this tokenizer with `model_max_length` set to your preferred value.""" , UpperCAmelCase_ , ) return max_model_length @property def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]: """simple docstring""" return self.sp_model.get_piece_size() + self._extra_ids def lowerCAmelCase ( self : Any ) ->Optional[int]: """simple docstring""" snake_case_ = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False ) ->List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(UpperCAmelCase_ )) + [1] return ([0] * len(UpperCAmelCase_ )) + [1] + ([0] * len(UpperCAmelCase_ )) + [1] def lowerCAmelCase ( self : Any ) ->List[str]: """simple docstring""" return list( set(filter(lambda UpperCAmelCase_ : bool(re.search(R"""<extra_id_\d+>""" , UpperCAmelCase_ ) ) is not None , self.additional_special_tokens ) ) ) def lowerCAmelCase ( self : Dict ) ->str: """simple docstring""" return [self._convert_token_to_id(UpperCAmelCase_ ) for token in self.get_sentinel_tokens()] def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : List[int] ) ->List[int]: """simple docstring""" if len(UpperCAmelCase_ ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F"""This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated""" """ eos tokens being added.""" ) return token_ids else: return token_ids + [self.eos_token_id] def lowerCAmelCase ( self : str , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]: """simple docstring""" snake_case_ = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]: """simple docstring""" snake_case_ = self._add_eos_if_not_present(UpperCAmelCase_ ) if token_ids_a is None: return token_ids_a else: snake_case_ = self._add_eos_if_not_present(UpperCAmelCase_ ) return token_ids_a + token_ids_a def __getstate__( self : Optional[Any] ) ->Tuple: """simple docstring""" snake_case_ = self.__dict__.copy() snake_case_ = None return state def __setstate__( self : Optional[Any] , UpperCAmelCase_ : List[Any] ) ->List[Any]: """simple docstring""" snake_case_ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): snake_case_ = {} snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCAmelCase ( self : int , UpperCAmelCase_ : "TextInput" , **UpperCAmelCase_ : Tuple ) ->List[str]: """simple docstring""" if not self.legacy: snake_case_ = SPIECE_UNDERLINE + text.replace(UpperCAmelCase_ , """ """ ) return super().tokenize(UpperCAmelCase_ , **UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any ) ->Tuple: """simple docstring""" if not self.legacy: snake_case_ = text.startswith(UpperCAmelCase_ ) if is_first: snake_case_ = text[1:] snake_case_ = self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ ) if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(UpperCAmelCase_ ): snake_case_ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" if token.startswith("""<extra_id_""" ): snake_case_ = re.match(R"""<extra_id_(\d+)>""" , UpperCAmelCase_ ) snake_case_ = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(UpperCAmelCase_ ) def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Optional[Any] ) ->List[Any]: """simple docstring""" if index < self.sp_model.get_piece_size(): snake_case_ = self.sp_model.IdToPiece(UpperCAmelCase_ ) else: snake_case_ = F"""<extra_id_{self.vocab_size - 1 - index}>""" return token def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : List[str] ) ->Optional[Any]: """simple docstring""" snake_case_ = [] snake_case_ = """""" snake_case_ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(UpperCAmelCase_ ) + token snake_case_ = True snake_case_ = [] else: current_sub_tokens.append(UpperCAmelCase_ ) snake_case_ = False out_string += self.sp_model.decode(UpperCAmelCase_ ) return out_string.strip() def lowerCAmelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ) ->Tuple[str]: """simple docstring""" if not os.path.isdir(UpperCAmelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return snake_case_ = 
os.path.join( UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase_ , """wb""" ) as fi: snake_case_ = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase_ ) return (out_vocab_file,)
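A note on the sentinel branch above: `<extra_id_N>` ids count down from the top of the vocabulary. A self-contained sketch of that mapping, assuming the usual T5 figures of a 32,100-token vocab with 100 extra ids (both numbers are assumptions, not read from this file):

import re

VOCAB_SIZE = 32_100  # assumed: 32,000 sentencepiece tokens + 100 extra ids

def sentinel_token_to_id(token: str) -> int:
    # mirrors the regex branch above: <extra_id_N> -> vocab_size - N - 1
    match = re.match(r"<extra_id_(\d+)>", token)
    return VOCAB_SIZE - int(match.group(1)) - 1

assert sentinel_token_to_id("<extra_id_0>") == 32_099
assert sentinel_token_to_id("<extra_id_99>") == 32_000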
347
0
'''simple docstring''' def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> str: if number < 0 or shift_amount < 0: raise ValueError("""both inputs must be positive integers""" ) A_ = str(bin(_SCREAMING_SNAKE_CASE ) ) binary_number += "0" * shift_amount return binary_number def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> str: if number < 0 or shift_amount < 0: raise ValueError("""both inputs must be positive integers""" ) A_ = str(bin(_SCREAMING_SNAKE_CASE ) )[2:] if shift_amount >= len(_SCREAMING_SNAKE_CASE ): return "0b0" A_ = binary_number[: len(_SCREAMING_SNAKE_CASE ) - shift_amount] return "0b" + shifted_binary_number def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> str: if number >= 0: # Get binary representation of positive number A_ = """0""" + str(bin(_SCREAMING_SNAKE_CASE ) ).strip("""-""" )[2:] else: # Get binary (2's complement) representation of negative number A_ = len(bin(_SCREAMING_SNAKE_CASE )[3:] ) # Find 2's complement of number A_ = bin(abs(_SCREAMING_SNAKE_CASE ) - (1 << binary_number_length) )[3:] A_ = ( """1""" + """0""" * (binary_number_length - len(_SCREAMING_SNAKE_CASE )) + binary_number ) if shift_amount >= len(_SCREAMING_SNAKE_CASE ): return "0b" + binary_number[0] * len(_SCREAMING_SNAKE_CASE ) return ( "0b" + binary_number[0] * shift_amount + binary_number[: len(_SCREAMING_SNAKE_CASE ) - shift_amount] ) if __name__ == "__main__": import doctest doctest.testmod()
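All three shift helpers above are renamed to the same `UpperCAmelCase__` placeholder, so the later definitions shadow the earlier ones; the assumed original names are `logical_left_shift`, `logical_right_shift` and `arithmetic_right_shift`. Hand-checked expectations for their string outputs, verified against Python's native operators:

# logical_left_shift(17, 2)      should return "0b1000100"  (17 << 2 == 68)
# logical_right_shift(1983, 4)   should return "0b1111011"  (1983 >> 4 == 123)
# arithmetic_right_shift(-17, 2) should return "0b111011"   (-5 in 6-bit two's complement)
assert bin(17 << 2) == "0b1000100"
assert bin(1983 >> 4) == "0b1111011"
assert -17 >> 2 == -5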
162
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE = 1_000_000 ) -> int: snake_case_ = [i - 1 for i in range(limit + 1 )] for i in range(2 , limit + 1 ): if phi[i] == i - 1: for j in range(2 * i , limit + 1 , _SCREAMING_SNAKE_CASE ): phi[j] -= phi[j] // i return sum(phi[2 : limit + 1] ) if __name__ == "__main__": print(solution())
347
0
def UpperCamelCase ( __magic_name__ : int ) -> bool: """simple docstring""" if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowercase__ = f'''Input value of [number={number}] must be an integer''' raise TypeError(_SCREAMING_SNAKE_CASE ) if number < 0: return False lowercase__ = number * number while number > 0: if number % 10 != number_square % 10: return False number //= 10 number_square //= 10 return True if __name__ == "__main__": import doctest doctest.testmod()
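The predicate above tests for automorphic numbers (n² ends in the digits of n); note the obfuscation leaves its `__magic_name__` parameter unused while the body reads `number`. A readable, runnable restatement:

def is_automorphic(number: int) -> bool:
    # compare n and n*n digit by digit from the right
    if number < 0:
        return False
    square = number * number
    while number > 0:
        if number % 10 != square % 10:
            return False
        number //= 10
        square //= 10
    return True

assert is_automorphic(76)     # 76 * 76 == 5776
assert is_automorphic(625)    # 625 * 625 == 390625
assert not is_automorphic(7)  # 7 * 7 == 49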
305
"""simple docstring""" from __future__ import annotations def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: print(f"""Vertex\tShortest Distance from vertex {src}""" ) for i, d in enumerate(_SCREAMING_SNAKE_CASE ): print(f"""{i}\t\t{d}""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: for j in range(_SCREAMING_SNAKE_CASE ): snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: return True return False def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> list[float]: snake_case_ = [float("""inf""" )] * vertex_count snake_case_ = 0.0 for _ in range(vertex_count - 1 ): for j in range(_SCREAMING_SNAKE_CASE ): snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: snake_case_ = distance[u] + w snake_case_ = check_negative_cycle(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if negative_cycle_exists: raise Exception("""Negative cycle found""" ) return distance if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE : int = int(input('Enter number of vertices: ').strip()) __SCREAMING_SNAKE_CASE : Dict = int(input('Enter number of edges: ').strip()) __SCREAMING_SNAKE_CASE : list[dict[str, int]] = [{} for _ in range(E)] for i in range(E): print('Edge ', i + 1) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = ( int(x) for x in input('Enter source, destination, weight: ').strip().split(' ') ) __SCREAMING_SNAKE_CASE : Union[str, Any] = {'src': src, 'dst': dest, 'weight': weight} __SCREAMING_SNAKE_CASE : Union[str, Any] = int(input('\nEnter shortest path source:').strip()) __SCREAMING_SNAKE_CASE : str = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
347
0
from ..utils import DummyObject, requires_backends class UpperCAmelCase__ ( metaclass=snake_case__ ): """simple docstring""" UpperCAmelCase__ : List[Any] = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) -> List[Any]: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase__ ( metaclass=snake_case__ ): """simple docstring""" UpperCAmelCase__ : Optional[int] = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) -> Dict: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase__ ( metaclass=snake_case__ ): """simple docstring""" UpperCAmelCase__ : Any = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) -> List[Any]: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase__ ( metaclass=snake_case__ ): """simple docstring""" UpperCAmelCase__ : Dict = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) -> Optional[int]: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase__ ( metaclass=snake_case__ ): """simple docstring""" UpperCAmelCase__ : List[str] = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) -> List[str]: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase__ ( metaclass=snake_case__ ): """simple docstring""" UpperCAmelCase__ : int = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) -> Optional[int]: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase__ ( metaclass=snake_case__ ): """simple docstring""" UpperCAmelCase__ : int = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) -> Dict: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase__ ( metaclass=snake_case__ ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) -> Union[str, Any]: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase__ ( metaclass=snake_case__ ): """simple docstring""" UpperCAmelCase__ : Optional[int] = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) -> Tuple: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase__ ( metaclass=snake_case__ ): """simple docstring""" UpperCAmelCase__ : Any = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) -> List[Any]: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase__ ( metaclass=snake_case__ ): """simple docstring""" UpperCAmelCase__ : Any = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) -> Tuple: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase__ ( metaclass=snake_case__ ): """simple docstring""" UpperCAmelCase__ : int = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) -> Union[str, Any]: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase__ ( metaclass=snake_case__ ): """simple docstring""" UpperCAmelCase__ : Tuple = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) -> Dict: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase__ ( metaclass=snake_case__ ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) -> Optional[int]: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase__ ( metaclass=snake_case__ ): """simple docstring""" UpperCAmelCase__ : int = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) -> Union[str, Any]: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase__ ( metaclass=snake_case__ ): """simple docstring""" UpperCAmelCase__ : Tuple = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) -> str: requires_backends(self , ['sentencepiece'] ) class 
UpperCAmelCase__ ( metaclass=snake_case__ ): """simple docstring""" UpperCAmelCase__ : List[Any] = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) -> int: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase__ ( metaclass=snake_case__ ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) -> Optional[int]: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase__ ( metaclass=snake_case__ ): """simple docstring""" UpperCAmelCase__ : Any = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) -> Dict: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase__ ( metaclass=snake_case__ ): """simple docstring""" UpperCAmelCase__ : Any = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) -> List[Any]: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase__ ( metaclass=snake_case__ ): """simple docstring""" UpperCAmelCase__ : int = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) -> List[str]: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase__ ( metaclass=snake_case__ ): """simple docstring""" UpperCAmelCase__ : int = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) -> Union[str, Any]: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase__ ( metaclass=snake_case__ ): """simple docstring""" UpperCAmelCase__ : List[Any] = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) -> List[str]: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase__ ( metaclass=snake_case__ ): """simple docstring""" UpperCAmelCase__ : Dict = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) -> List[Any]: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase__ ( metaclass=snake_case__ ): """simple docstring""" UpperCAmelCase__ : List[Any] = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) -> Optional[Any]: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase__ ( metaclass=snake_case__ ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) -> Optional[int]: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase__ ( metaclass=snake_case__ ): """simple docstring""" UpperCAmelCase__ : Optional[int] = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) -> Dict: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase__ ( metaclass=snake_case__ ): """simple docstring""" UpperCAmelCase__ : Dict = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) -> Optional[Any]: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase__ ( metaclass=snake_case__ ): """simple docstring""" UpperCAmelCase__ : int = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) -> Any: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase__ ( metaclass=snake_case__ ): """simple docstring""" UpperCAmelCase__ : List[str] = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) -> Optional[Any]: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase__ ( metaclass=snake_case__ ): """simple docstring""" UpperCAmelCase__ : Any = ["""sentencepiece"""] def __init__( self , *A_ , **A_ ) -> str: requires_backends(self , ['sentencepiece'] )
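These classes are import-time placeholders: each records that it needs the `sentencepiece` backend and fails with a helpful error when used. A minimal sketch of the pattern (a reimplementation of the idea; the real `DummyObject` and `requires_backends` live in `transformers.utils`):

class DummyObject(type):
    # any class-level attribute access (e.g. from_pretrained) raises too
    def __getattr__(cls, key):
        raise ImportError(f"{cls.__name__} requires the sentencepiece backend")

class PlaceholderTokenizer(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        raise ImportError("PlaceholderTokenizer requires the sentencepiece backend")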
62
"""simple docstring""" import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) __SCREAMING_SNAKE_CASE : List[str] = logging.getLogger(__name__) __SCREAMING_SNAKE_CASE : str = tf.data.AUTOTUNE def _a ( ) -> List[str]: snake_case_ = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" ) parser.add_argument( """--pretrained_model_config""" , type=_SCREAMING_SNAKE_CASE , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , ) parser.add_argument( """--tokenizer""" , type=_SCREAMING_SNAKE_CASE , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , ) parser.add_argument( """--per_replica_batch_size""" , type=_SCREAMING_SNAKE_CASE , default=8 , help="""Batch size per TPU core.""" , ) parser.add_argument( """--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , ) parser.add_argument( """--tpu_name""" , type=_SCREAMING_SNAKE_CASE , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , ) parser.add_argument( """--tpu_zone""" , type=_SCREAMING_SNAKE_CASE , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , ) parser.add_argument( """--gcp_project""" , type=_SCREAMING_SNAKE_CASE , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" ) parser.add_argument( """--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , ) parser.add_argument( """--train_dataset""" , type=_SCREAMING_SNAKE_CASE , help="""Path to training dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , ) parser.add_argument( """--shuffle_buffer_size""" , type=_SCREAMING_SNAKE_CASE , default=2**18 , help="""Size of the shuffle buffer (in samples)""" , ) parser.add_argument( """--eval_dataset""" , type=_SCREAMING_SNAKE_CASE , help="""Path to evaluation dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , ) parser.add_argument( """--num_epochs""" , type=_SCREAMING_SNAKE_CASE , default=1 , help="""Number of epochs to train for.""" , ) parser.add_argument( """--learning_rate""" , type=_SCREAMING_SNAKE_CASE , default=1E-4 , help="""Learning rate to use for training.""" , ) parser.add_argument( """--weight_decay_rate""" , type=_SCREAMING_SNAKE_CASE , default=1E-3 , help="""Weight decay rate to use for training.""" , ) parser.add_argument( """--max_length""" , type=_SCREAMING_SNAKE_CASE , default=512 , help="""Maximum length of tokenized sequences. 
Should match the setting used in prepare_tfrecord_shards.py""" , ) parser.add_argument( """--mlm_probability""" , type=_SCREAMING_SNAKE_CASE , default=0.15 , help="""Fraction of tokens to mask during training.""" , ) parser.add_argument("""--output_dir""" , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""Path to save model checkpoints to.""" ) parser.add_argument("""--hub_model_id""" , type=_SCREAMING_SNAKE_CASE , help="""Model ID to upload to on the Hugging Face Hub.""" ) snake_case_ = parser.parse_args() return args def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]: try: if args.tpu_name: snake_case_ = tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name , zone=args.tpu_zone , project=args.gcp_project ) else: snake_case_ = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( """Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """ """--gcp_project. When running on a TPU VM, use --tpu_name local.""" ) tf.config.experimental_connect_to_cluster(_SCREAMING_SNAKE_CASE ) tf.tpu.experimental.initialize_tpu_system(_SCREAMING_SNAKE_CASE ) return tpu def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = 0 for file in file_list: snake_case_ = file.split("""/""" )[-1] snake_case_ = re.search(r"""-\d+-(\d+)\.tfrecord""" , _SCREAMING_SNAKE_CASE ).group(1 ) snake_case_ = int(_SCREAMING_SNAKE_CASE ) num_samples += sample_count return num_samples def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]: snake_case_ = count_samples(_SCREAMING_SNAKE_CASE ) snake_case_ = tf.data.Dataset.from_tensor_slices(_SCREAMING_SNAKE_CASE ) if shuffle: snake_case_ = dataset.shuffle(len(_SCREAMING_SNAKE_CASE ) ) snake_case_ = tf.data.TFRecordDataset(_SCREAMING_SNAKE_CASE , num_parallel_reads=_SCREAMING_SNAKE_CASE ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here snake_case_ = dataset.apply(tf.data.experimental.assert_cardinality(_SCREAMING_SNAKE_CASE ) ) snake_case_ = dataset.map(_SCREAMING_SNAKE_CASE , num_parallel_calls=_SCREAMING_SNAKE_CASE ) if shuffle: assert shuffle_buffer_size is not None snake_case_ = dataset.shuffle(args.shuffle_buffer_size ) snake_case_ = dataset.batch(_SCREAMING_SNAKE_CASE , drop_remainder=_SCREAMING_SNAKE_CASE ) snake_case_ = dataset.map(_SCREAMING_SNAKE_CASE , num_parallel_calls=_SCREAMING_SNAKE_CASE ) snake_case_ = dataset.prefetch(_SCREAMING_SNAKE_CASE ) return dataset def _a ( _SCREAMING_SNAKE_CASE ) -> List[Any]: if not args.no_tpu: snake_case_ = initialize_tpu(_SCREAMING_SNAKE_CASE ) snake_case_ = tf.distribute.TPUStrategy(_SCREAMING_SNAKE_CASE ) else: snake_case_ = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" ) snake_case_ = AutoTokenizer.from_pretrained(args.tokenizer ) snake_case_ = AutoConfig.from_pretrained(args.pretrained_model_config ) snake_case_ = tokenizer.vocab_size snake_case_ = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) ) if not training_records: raise ValueError(f"""No .tfrecord files found in {args.train_dataset}.""" ) snake_case_ = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) ) if not eval_records: raise ValueError(f"""No .tfrecord files found in {args.eval_dataset}.""" ) snake_case_ = count_samples(_SCREAMING_SNAKE_CASE ) snake_case_ = 
num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) snake_case_ = steps_per_epoch * args.num_epochs with strategy.scope(): snake_case_ = TFAutoModelForMaskedLM.from_config(_SCREAMING_SNAKE_CASE ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built snake_case_ , snake_case_ = create_optimizer( num_train_steps=_SCREAMING_SNAKE_CASE , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=_SCREAMING_SNAKE_CASE , metrics=["""accuracy"""] ) def decode_fn(_SCREAMING_SNAKE_CASE ): snake_case_ = { """input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), """attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), } return tf.io.parse_single_example(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. snake_case_ = DataCollatorForLanguageModeling( tokenizer=_SCREAMING_SNAKE_CASE , mlm_probability=args.mlm_probability , mlm=_SCREAMING_SNAKE_CASE , return_tensors="""tf""" ) def mask_with_collator(_SCREAMING_SNAKE_CASE ): # TF really needs an isin() function snake_case_ = ( ~tf.cast(batch["""attention_mask"""] , tf.bool ) | (batch["""input_ids"""] == tokenizer.cls_token_id) | (batch["""input_ids"""] == tokenizer.sep_token_id) ) snake_case_ , snake_case_ = data_collator.tf_mask_tokens( batch["""input_ids"""] , vocab_size=len(_SCREAMING_SNAKE_CASE ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=_SCREAMING_SNAKE_CASE , ) return batch snake_case_ = args.per_replica_batch_size * strategy.num_replicas_in_sync snake_case_ = prepare_dataset( _SCREAMING_SNAKE_CASE , decode_fn=_SCREAMING_SNAKE_CASE , mask_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , shuffle_buffer_size=args.shuffle_buffer_size , ) snake_case_ = prepare_dataset( _SCREAMING_SNAKE_CASE , decode_fn=_SCREAMING_SNAKE_CASE , mask_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , ) snake_case_ = [] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=_SCREAMING_SNAKE_CASE ) ) model.fit( _SCREAMING_SNAKE_CASE , validation_data=_SCREAMING_SNAKE_CASE , epochs=args.num_epochs , callbacks=_SCREAMING_SNAKE_CASE , ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Union[str, Any] = parse_args() main(args)
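The `count_samples` helper in the script above relies on a shard-naming convention that embeds the per-shard record count in the filename. A small sketch making that convention explicit (the paths are invented examples):

import re

def shard_sample_count(path: str) -> int:
    # e.g. "gs://bucket/train-00000-4096.tfrecord" -> 4096
    filename = path.split("/")[-1]
    return int(re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1))

assert shard_sample_count("gs://bucket/train-00000-4096.tfrecord") == 4096
assert shard_sample_count("train-00001-512.tfrecord") == 512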
347
0
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import GLPNImageProcessor class __A ( unittest.TestCase ): '''simple docstring''' def __init__( self : Optional[int] ,_snake_case : Dict ,_snake_case : Union[str, Any]=7 ,_snake_case : Optional[Any]=3 ,_snake_case : Tuple=18 ,_snake_case : List[str]=30 ,_snake_case : Optional[int]=400 ,_snake_case : Tuple=True ,_snake_case : int=32 ,_snake_case : List[Any]=True ,) -> Union[str, Any]: """simple docstring""" lowercase__ : List[Any] = parent lowercase__ : str = batch_size lowercase__ : Dict = num_channels lowercase__ : Optional[Any] = image_size lowercase__ : Any = min_resolution lowercase__ : Any = max_resolution lowercase__ : Any = do_resize lowercase__ : Optional[Any] = size_divisor lowercase__ : Tuple = do_rescale def UpperCAmelCase ( self : Union[str, Any] ) -> Dict: """simple docstring""" return { "do_resize": self.do_resize, "size_divisor": self.size_divisor, "do_rescale": self.do_rescale, } @require_torch @require_vision class __A ( snake_case__ ,unittest.TestCase ): '''simple docstring''' lowerCAmelCase : Dict = GLPNImageProcessor if is_vision_available() else None def UpperCAmelCase ( self : Any ) -> str: """simple docstring""" lowercase__ : str = GLPNImageProcessingTester(self ) @property def UpperCAmelCase ( self : Dict ) -> Union[str, Any]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" lowercase__ : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase_ ,'''do_resize''' ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,'''size_divisor''' ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,'''resample''' ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,'''do_rescale''' ) ) def UpperCAmelCase ( self : str ) -> str: """simple docstring""" pass def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" lowercase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ ,Image.Image ) # Test not batched input (GLPNImageProcessor doesn't support batching) lowercase__ : str = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def UpperCAmelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" lowercase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase__ : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=UpperCAmelCase_ ,numpify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ ,np.ndarray ) # Test not batched input (GLPNImageProcessor doesn't support batching) lowercase__ : Any = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values 
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def UpperCAmelCase ( self : Tuple ) -> Any: """simple docstring""" lowercase__ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase__ : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=UpperCAmelCase_ ,torchify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ ,torch.Tensor ) # Test not batched input (GLPNImageProcessor doesn't support batching) lowercase__ : int = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
16
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> float: if density <= 0: raise ValueError("""Impossible fluid density""" ) if bulk_modulus <= 0: raise ValueError("""Impossible bulk modulus""" ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
347
0
'''simple docstring''' import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def UpperCamelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[str] ): A__ = 1.5 A__ = int(factor * num_class_images ) A__ = ClipClient( url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=_SCREAMING_SNAKE_CASE , aesthetic_weight=0.1 ) os.makedirs(F"{class_data_dir}/images" , exist_ok=_SCREAMING_SNAKE_CASE ) if len(list(Path(F"{class_data_dir}/images" ).iterdir() ) ) >= num_class_images: return while True: A__ = client.query(text=_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) >= factor * num_class_images or num_images > 1e4: break else: A__ = int(factor * num_images ) A__ = ClipClient( url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=_SCREAMING_SNAKE_CASE , aesthetic_weight=0.1 , ) A__ = 0 A__ = 0 A__ = tqdm(desc="downloading real regularization images" , total=_SCREAMING_SNAKE_CASE ) with open(F"{class_data_dir}/caption.txt" , "w" ) as fa, open(F"{class_data_dir}/urls.txt" , "w" ) as fa, open( F"{class_data_dir}/images.txt" , "w" ) as fa: while total < num_class_images: A__ = class_images[count] count += 1 try: A__ = requests.get(images["url"] ) if img.status_code == 2_00: A__ = Image.open(BytesIO(img.content ) ) with open(F"{class_data_dir}/images/{total}.jpg" , "wb" ) as f: f.write(img.content ) fa.write(images["caption"] + "\n" ) fa.write(images["url"] + "\n" ) fa.write(F"{class_data_dir}/images/{total}.jpg" + "\n" ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def UpperCamelCase ( ): A__ = argparse.ArgumentParser("" , add_help=_SCREAMING_SNAKE_CASE ) parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE ) parser.add_argument("--class_data_dir" , help="path to save images" , required=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE ) parser.add_argument("--num_class_images" , help="number of images to download" , default=2_00 , type=_SCREAMING_SNAKE_CASE ) return parser.parse_args() if __name__ == "__main__": __lowerCAmelCase : str =parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
237
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE ) -> bool: if num < 0: return False snake_case_ = num snake_case_ = 0 while num > 0: snake_case_ = rev_num * 10 + (num % 10) num //= 10 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
347
0
'''simple docstring''' import re import subprocess import sys A_ = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8") A_ = subprocess.check_output(f'''git diff --name-only {fork_point_sha}'''.split()).decode("utf-8").split() A_ = '|'.join(sys.argv[1:]) A_ = re.compile(Rf'''^({joined_dirs}).*?\.py$''') A_ = [x for x in modified_files if regex.match(x)] print(" ".join(relevant_modified_files), end="")
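The script above narrows the files changed since the merge-base with `main` down to Python files under the directories passed as argv. A sketch of just the regex step, with invented paths:

import re

joined_dirs = "|".join(["src", "tests"])       # stands in for sys.argv[1:]
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
modified = ["src/a.py", "tests/b.py", "docs/c.md", "setup.py"]
assert [f for f in modified if regex.match(f)] == ["src/a.py", "tests/b.py"]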
139
"""simple docstring""" import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin __SCREAMING_SNAKE_CASE : Tuple = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model') @require_sentencepiece @require_tokenizers class __A (snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Tuple = SpeechTaTokenizer __lowercase: int = False __lowercase: List[str] = True def lowerCAmelCase ( self : Any ) ->str: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing snake_case_ = SpeechTaTokenizer(UpperCAmelCase_ ) snake_case_ = AddedToken("""<mask>""" , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) snake_case_ = mask_token tokenizer.add_special_tokens({"""mask_token""": mask_token} ) tokenizer.add_tokens(["""<ctc_blank>"""] ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict: """simple docstring""" snake_case_ = """this is a test""" snake_case_ = """this is a test""" return input_text, output_text def lowerCAmelCase ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Tuple=20 , UpperCAmelCase_ : Dict=5 ) ->List[Any]: """simple docstring""" snake_case_ , snake_case_ = self.get_input_output_texts(UpperCAmelCase_ ) snake_case_ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) snake_case_ = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ ) return text, ids def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]: """simple docstring""" snake_case_ = """<pad>""" snake_case_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ ) def lowerCAmelCase ( self : int ) ->str: """simple docstring""" snake_case_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(vocab_keys[-4] , """œ""" ) self.assertEqual(vocab_keys[-2] , """<mask>""" ) self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" ) self.assertEqual(len(UpperCAmelCase_ ) , 81 ) def lowerCAmelCase ( self : Optional[int] ) ->int: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = self.get_tokenizers(do_lower_case=UpperCAmelCase_ ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): snake_case_ = tokenizer.vocab_size snake_case_ = len(UpperCAmelCase_ ) self.assertNotEqual(UpperCAmelCase_ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) snake_case_ = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""] snake_case_ = tokenizer.add_tokens(UpperCAmelCase_ ) snake_case_ = tokenizer.vocab_size snake_case_ = len(UpperCAmelCase_ ) self.assertNotEqual(UpperCAmelCase_ , 0 ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) self.assertEqual(UpperCAmelCase_ , all_size + 
len(UpperCAmelCase_ ) ) snake_case_ = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=UpperCAmelCase_ ) self.assertGreaterEqual(len(UpperCAmelCase_ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) snake_case_ = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""} snake_case_ = tokenizer.add_special_tokens(UpperCAmelCase_ ) snake_case_ = tokenizer.vocab_size snake_case_ = len(UpperCAmelCase_ ) self.assertNotEqual(UpperCAmelCase_ , 0 ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) self.assertEqual(UpperCAmelCase_ , all_size_a + len(UpperCAmelCase_ ) ) snake_case_ = tokenizer.encode( """>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=UpperCAmelCase_ ) self.assertGreaterEqual(len(UpperCAmelCase_ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def lowerCAmelCase ( self : Optional[Any] ) ->Tuple: """simple docstring""" pass def lowerCAmelCase ( self : List[str] ) ->Optional[Any]: """simple docstring""" pass def lowerCAmelCase ( self : List[str] ) ->List[str]: """simple docstring""" snake_case_ = self.get_tokenizer() snake_case_ = tokenizer.tokenize("""This is a test""" ) # fmt: off self.assertListEqual(UpperCAmelCase_ , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) snake_case_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] ) snake_case_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) # fmt: off self.assertListEqual(UpperCAmelCase_ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on snake_case_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ ) self.assertListEqual( UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] ) @slow def lowerCAmelCase ( self : str ) ->Dict: """simple docstring""" snake_case_ = [ """Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """ """general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural """ """Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """ """models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""", """BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """ """conditioning on both left and right context in all layers.""", """The quick brown fox jumps over the lazy dog.""", ] # fmt: off snake_case_ = { """input_ids""": [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], """attention_mask""": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase_ , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=UpperCAmelCase_ , )
347
0
from maths.prime_check import is_prime def __lowerCAmelCase ( a__ ) -> int: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __a = F"""Input value of [number={number}] must be an integer""" raise TypeError(_SCREAMING_SNAKE_CASE ) if is_prime(_SCREAMING_SNAKE_CASE ) and is_prime(number + 2 ): return number + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
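This returns `number + 2` when `(number, number + 2)` is a twin-prime pair and -1 otherwise. `maths.prime_check` is not included here, so the sketch below inlines a trial-division `is_prime`:

def is_prime(n: int) -> bool:
    if n < 2:
        return False
    return all(n % d for d in range(2, int(n**0.5) + 1))

def twin_prime(number: int) -> int:
    return number + 2 if is_prime(number) and is_prime(number + 2) else -1

assert twin_prime(5) == 7    # (5, 7) is a twin-prime pair
assert twin_prime(4) == -1   # 4 is composite
assert twin_prime(7) == -1   # 9 == 3 * 3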
6
"""simple docstring""" import datasets __SCREAMING_SNAKE_CASE : Tuple = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n' __SCREAMING_SNAKE_CASE : Dict = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n' __SCREAMING_SNAKE_CASE : List[str] = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n' def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class __A (datasets.Metric): '''simple docstring''' def lowerCAmelCase ( self : str ) ->Any: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ), """references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ), } ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , ) def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any ) ->int: """simple docstring""" return {"accuracy": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_ )}
347
0
"""simple docstring""" import copy import fnmatch import json import os import pickle as pkl import shutil import sys import tarfile import tempfile from collections import OrderedDict from contextlib import contextmanager from functools import partial from hashlib import shaaaa from io import BytesIO from pathlib import Path from urllib.parse import urlparse from zipfile import ZipFile, is_zipfile import cva import numpy as np import requests import wget from filelock import FileLock from PIL import Image from tqdm.auto import tqdm from yaml import Loader, dump, load try: import torch SCREAMING_SNAKE_CASE : List[str] = True except ImportError: SCREAMING_SNAKE_CASE : int = False try: from torch.hub import _get_torch_home SCREAMING_SNAKE_CASE : str = _get_torch_home() except ImportError: SCREAMING_SNAKE_CASE : Tuple = os.path.expanduser( os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch""")) ) SCREAMING_SNAKE_CASE : List[Any] = os.path.join(torch_cache_home, """transformers""") SCREAMING_SNAKE_CASE : Tuple = 'https://cdn.huggingface.co' SCREAMING_SNAKE_CASE : Optional[Any] = 'https://s3.amazonaws.com/models.huggingface.co/bert' SCREAMING_SNAKE_CASE : List[str] = '/'.join(str(Path(__file__).resolve()).split("""/""")[:-1]) SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(PATH, """config.yaml""") SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(PATH, """attributes.txt""") SCREAMING_SNAKE_CASE : str = os.path.join(PATH, """objects.txt""") SCREAMING_SNAKE_CASE : str = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path) SCREAMING_SNAKE_CASE : int = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE) SCREAMING_SNAKE_CASE : Tuple = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE) SCREAMING_SNAKE_CASE : Union[str, Any] = 'pytorch_model.bin' SCREAMING_SNAKE_CASE : Optional[int] = 'config.yaml' def lowercase ( _snake_case : List[Any]=OBJECTS , _snake_case : Optional[Any]=ATTRIBUTES ) ->List[str]: """simple docstring""" __snake_case : Optional[Any] = [] with open(_SCREAMING_SNAKE_CASE ) as f: for object in f.readlines(): vg_classes.append(object.split(''',''' )[0].lower().strip() ) __snake_case : int = [] with open(_SCREAMING_SNAKE_CASE ) as f: for object in f.readlines(): vg_attrs.append(object.split(''',''' )[0].lower().strip() ) return vg_classes, vg_attrs def lowercase ( _snake_case : int ) ->List[Any]: """simple docstring""" __snake_case : Optional[Any] = OrderedDict() with open(_SCREAMING_SNAKE_CASE , '''rb''' ) as f: __snake_case : Dict = pkl.load(_SCREAMING_SNAKE_CASE )['''model'''] for k in copy.deepcopy(list(ckp.keys() ) ): __snake_case : Union[str, Any] = ckp.pop(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ): __snake_case : Tuple = torch.tensor(_SCREAMING_SNAKE_CASE ) else: assert isinstance(_SCREAMING_SNAKE_CASE , torch.tensor ), type(_SCREAMING_SNAKE_CASE ) __snake_case : Optional[int] = v return r class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ ={} def __init__(self , a_ , a_ = "root" , a_=0 ): '''simple docstring''' __snake_case : Optional[int] = name __snake_case : Dict = level __snake_case : Tuple = {} for k, v in dictionary.items(): if v is None: raise ValueError() __snake_case : int = copy.deepcopy(UpperCAmelCase_ ) __snake_case : List[Any] = copy.deepcopy(UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): __snake_case : Any = Config(UpperCAmelCase_ , name=UpperCAmelCase_ , level=level + 1 ) __snake_case : 
Union[str, Any] = v setattr(self , UpperCAmelCase_ , UpperCAmelCase_ ) __snake_case : Optional[int] = d def __repr__(self ): '''simple docstring''' return str(list((self._pointer.keys()) ) ) def __setattr__(self , a_ , a_ ): '''simple docstring''' __snake_case : List[Any] = val __snake_case : Optional[int] = val __snake_case : Union[str, Any] = key.split('''.''' ) __snake_case : Dict = len(UpperCAmelCase_ ) - 1 __snake_case : Any = self._pointer if len(UpperCAmelCase_ ) > 1: for i, l in enumerate(UpperCAmelCase_ ): if hasattr(self , UpperCAmelCase_ ) and isinstance(getattr(self , UpperCAmelCase_ ) , UpperCAmelCase_ ): setattr(getattr(self , UpperCAmelCase_ ) , '''.'''.join(levels[i:] ) , UpperCAmelCase_ ) if l == last_level: __snake_case : Any = val else: __snake_case : Optional[Any] = pointer[l] def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._pointer def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' with open(f"""{file_name}""" , '''w''' ) as stream: dump(UpperCAmelCase_ , UpperCAmelCase_ ) def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' with open(f"""{file_name}""" , '''w''' ) as stream: json.dump(UpperCAmelCase_ , UpperCAmelCase_ ) @staticmethod def SCREAMING_SNAKE_CASE (a_ ): '''simple docstring''' with open(UpperCAmelCase_ ) as stream: __snake_case : List[Any] = load(UpperCAmelCase_ , Loader=UpperCAmelCase_ ) return data def __str__(self ): '''simple docstring''' __snake_case : Any = ''' ''' if self._name != "root": __snake_case : List[str] = f"""{t * (self._level-1)}{self._name}:\n""" else: __snake_case : Union[str, Any] = '''''' __snake_case : Dict = self._level for i, (k, v) in enumerate(self._pointer.items() ): if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): r += f"""{t * (self._level)}{v}\n""" self._level += 1 else: r += f"""{t * (self._level)}{k}: {v} ({type(UpperCAmelCase_ ).__name__})\n""" __snake_case : Optional[int] = level return r[:-1] @classmethod def SCREAMING_SNAKE_CASE (cls , a_ , **a_ ): '''simple docstring''' __snake_case , __snake_case : List[Any] = cls.get_config_dict(UpperCAmelCase_ , **UpperCAmelCase_ ) return cls(UpperCAmelCase_ ) @classmethod def SCREAMING_SNAKE_CASE (cls , a_ , **a_ ): '''simple docstring''' __snake_case : Optional[int] = kwargs.pop('''cache_dir''' , UpperCAmelCase_ ) __snake_case : str = kwargs.pop('''force_download''' , UpperCAmelCase_ ) __snake_case : Any = kwargs.pop('''resume_download''' , UpperCAmelCase_ ) __snake_case : Tuple = kwargs.pop('''proxies''' , UpperCAmelCase_ ) __snake_case : Union[str, Any] = kwargs.pop('''local_files_only''' , UpperCAmelCase_ ) if os.path.isdir(UpperCAmelCase_ ): __snake_case : int = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) elif os.path.isfile(UpperCAmelCase_ ) or is_remote_url(UpperCAmelCase_ ): __snake_case : Union[str, Any] = pretrained_model_name_or_path else: __snake_case : Optional[Any] = hf_bucket_url(UpperCAmelCase_ , filename=UpperCAmelCase_ , use_cdn=UpperCAmelCase_ ) try: # Load from URL or cache if already cached __snake_case : List[str] = cached_path( UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , force_download=UpperCAmelCase_ , proxies=UpperCAmelCase_ , resume_download=UpperCAmelCase_ , local_files_only=UpperCAmelCase_ , ) # Load config dict if resolved_config_file is None: raise EnvironmentError __snake_case : str = Config.load_yaml(UpperCAmelCase_ ) except EnvironmentError: __snake_case : Optional[Any] = '''Can\'t load config for''' raise EnvironmentError(UpperCAmelCase_ ) if resolved_config_file == config_file: 
print('''loading configuration file from path''' ) else: print('''loading configuration file cache''' ) return Config.load_yaml(UpperCAmelCase_ ), kwargs def lowercase ( _snake_case : int ) ->int: """simple docstring""" __snake_case : Tuple = torch.load('''dump.pt''' , map_location=in_tensor.device ) __snake_case : str = in_tensor.numpy() __snake_case : List[str] = out_tensor.numpy()[0] print(na.shape , na[0, 0, :5] ) print(na.shape , na[0, 0, :5] ) assert np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , rtol=0.01 , atol=0.1 ), ( f"""{sum([1 for x in np.isclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %""" " element-wise mismatch" ) raise Exception('''tensors are all good''' ) # Hugging face functions below def lowercase ( _snake_case : Dict ) ->Any: """simple docstring""" __snake_case : Union[str, Any] = urlparse(_SCREAMING_SNAKE_CASE ) return parsed.scheme in ("http", "https") def lowercase ( _snake_case : List[str] , _snake_case : Tuple , _snake_case : Optional[Any]=True ) ->str: """simple docstring""" __snake_case : List[str] = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX __snake_case : Dict = '''/''' not in model_id if legacy_format: return f"""{endpoint}/{model_id}-{filename}""" else: return f"""{endpoint}/{model_id}/{filename}""" def lowercase ( _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : Optional[Any]=None , _snake_case : List[Any]=0 , _snake_case : Dict=None , ) ->Any: """simple docstring""" __snake_case : Any = '''python/{}'''.format(sys.version.split()[0] ) if _torch_available: ua += "; torch/{}".format(torch.__version__ ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): ua += "; " + "; ".join('''{}/{}'''.format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for k, v in user_agent.items() ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): ua += "; " + user_agent __snake_case : Optional[Any] = {'''user-agent''': ua} if resume_size > 0: __snake_case : List[str] = '''bytes=%d-''' % (resume_size,) __snake_case : Optional[Any] = requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ) if response.status_code == 416: # Range not satisfiable return __snake_case : Optional[Any] = response.headers.get('''Content-Length''' ) __snake_case : Union[str, Any] = resume_size + int(_SCREAMING_SNAKE_CASE ) if content_length is not None else None __snake_case : Dict = tqdm( unit='''B''' , unit_scale=_SCREAMING_SNAKE_CASE , total=_SCREAMING_SNAKE_CASE , initial=_SCREAMING_SNAKE_CASE , desc='''Downloading''' , ) for chunk in response.iter_content(chunk_size=1_024 ): if chunk: # filter out keep-alive new chunks progress.update(len(_SCREAMING_SNAKE_CASE ) ) temp_file.write(_SCREAMING_SNAKE_CASE ) progress.close() def lowercase ( _snake_case : Tuple , _snake_case : Dict=None , _snake_case : Any=False , _snake_case : Optional[Any]=None , _snake_case : Optional[int]=10 , _snake_case : List[Any]=False , _snake_case : Tuple=None , _snake_case : Union[str, Any]=False , ) ->Any: """simple docstring""" if cache_dir is None: __snake_case : Optional[int] = TRANSFORMERS_CACHE if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __snake_case : Union[str, Any] = str(_SCREAMING_SNAKE_CASE ) os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) __snake_case : Dict = None if not local_files_only: try: __snake_case : List[str] = 
requests.head(_SCREAMING_SNAKE_CASE , allow_redirects=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , timeout=_SCREAMING_SNAKE_CASE ) if response.status_code == 200: __snake_case : Tuple = response.headers.get('''ETag''' ) except (EnvironmentError, requests.exceptions.Timeout): # etag is already None pass __snake_case : List[str] = url_to_filename(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # get cache path to put the file __snake_case : str = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. # try to get the last downloaded one if etag is None: if os.path.exists(_SCREAMING_SNAKE_CASE ): return cache_path else: __snake_case : Dict = [ file for file in fnmatch.filter(os.listdir(_SCREAMING_SNAKE_CASE ) , filename + '''.*''' ) if not file.endswith('''.json''' ) and not file.endswith('''.lock''' ) ] if len(_SCREAMING_SNAKE_CASE ) > 0: return os.path.join(_SCREAMING_SNAKE_CASE , matching_files[-1] ) else: # If files cannot be found and local_files_only=True, # the models might've been found if local_files_only=False # Notify the user about that if local_files_only: raise ValueError( '''Cannot find the requested files in the cached path and outgoing traffic has been''' ''' disabled. To enable model look-ups and downloads online, set \'local_files_only\'''' ''' to False.''' ) return None # From now on, etag is not None. if os.path.exists(_SCREAMING_SNAKE_CASE ) and not force_download: return cache_path # Prevent parallel downloads of the same file with a lock. __snake_case : Union[str, Any] = cache_path + '''.lock''' with FileLock(_SCREAMING_SNAKE_CASE ): # If the download just completed while the lock was activated. if os.path.exists(_SCREAMING_SNAKE_CASE ) and not force_download: # Even if returning early like here, the lock will be released. return cache_path if resume_download: __snake_case : List[Any] = cache_path + '''.incomplete''' @contextmanager def _resumable_file_manager(): with open(_SCREAMING_SNAKE_CASE , '''a+b''' ) as f: yield f __snake_case : Optional[int] = _resumable_file_manager if os.path.exists(_SCREAMING_SNAKE_CASE ): __snake_case : Optional[Any] = os.stat(_SCREAMING_SNAKE_CASE ).st_size else: __snake_case : Any = 0 else: __snake_case : int = partial(tempfile.NamedTemporaryFile , dir=_SCREAMING_SNAKE_CASE , delete=_SCREAMING_SNAKE_CASE ) __snake_case : Optional[int] = 0 # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. 
with temp_file_manager() as temp_file: print( '''%s not found in cache or force_download set to True, downloading to %s''' , _SCREAMING_SNAKE_CASE , temp_file.name , ) http_get( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , resume_size=_SCREAMING_SNAKE_CASE , user_agent=_SCREAMING_SNAKE_CASE , ) os.replace(temp_file.name , _SCREAMING_SNAKE_CASE ) __snake_case : Union[str, Any] = {'''url''': url, '''etag''': etag} __snake_case : Optional[int] = cache_path + '''.json''' with open(_SCREAMING_SNAKE_CASE , '''w''' ) as meta_file: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return cache_path def lowercase ( _snake_case : Dict , _snake_case : List[str]=None ) ->Union[str, Any]: """simple docstring""" __snake_case : Any = url.encode('''utf-8''' ) __snake_case : Optional[int] = shaaaa(_SCREAMING_SNAKE_CASE ) __snake_case : Optional[int] = url_hash.hexdigest() if etag: __snake_case : str = etag.encode('''utf-8''' ) __snake_case : Dict = shaaaa(_SCREAMING_SNAKE_CASE ) filename += "." + etag_hash.hexdigest() if url.endswith('''.h5''' ): filename += ".h5" return filename def lowercase ( _snake_case : Tuple , _snake_case : Tuple=None , _snake_case : Optional[int]=False , _snake_case : Union[str, Any]=None , _snake_case : List[str]=False , _snake_case : str=None , _snake_case : Optional[Any]=False , _snake_case : Any=False , _snake_case : str=False , ) ->Any: """simple docstring""" if cache_dir is None: __snake_case : Dict = TRANSFORMERS_CACHE if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __snake_case : List[str] = str(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __snake_case : Any = str(_SCREAMING_SNAKE_CASE ) if is_remote_url(_SCREAMING_SNAKE_CASE ): # URL, so get it from the cache (downloading if necessary) __snake_case : List[Any] = get_from_cache( _SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , user_agent=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , ) elif os.path.exists(_SCREAMING_SNAKE_CASE ): # File, and it exists. __snake_case : List[str] = url_or_filename elif urlparse(_SCREAMING_SNAKE_CASE ).scheme == "": # File, but it doesn't exist. raise EnvironmentError('''file {} not found'''.format(_SCREAMING_SNAKE_CASE ) ) else: # Something unknown raise ValueError('''unable to parse {} as a URL or as a local path'''.format(_SCREAMING_SNAKE_CASE ) ) if extract_compressed_file: if not is_zipfile(_SCREAMING_SNAKE_CASE ) and not tarfile.is_tarfile(_SCREAMING_SNAKE_CASE ): return output_path # Path where we extract compressed archives # We avoid '.' 
in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/" __snake_case , __snake_case : Any = os.path.split(_SCREAMING_SNAKE_CASE ) __snake_case : int = output_file.replace('''.''' , '''-''' ) + '''-extracted''' __snake_case : int = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if os.path.isdir(_SCREAMING_SNAKE_CASE ) and os.listdir(_SCREAMING_SNAKE_CASE ) and not force_extract: return output_path_extracted # Prevent parallel extractions __snake_case : str = output_path + '''.lock''' with FileLock(_SCREAMING_SNAKE_CASE ): shutil.rmtree(_SCREAMING_SNAKE_CASE , ignore_errors=_SCREAMING_SNAKE_CASE ) os.makedirs(_SCREAMING_SNAKE_CASE ) if is_zipfile(_SCREAMING_SNAKE_CASE ): with ZipFile(_SCREAMING_SNAKE_CASE , '''r''' ) as zip_file: zip_file.extractall(_SCREAMING_SNAKE_CASE ) zip_file.close() elif tarfile.is_tarfile(_SCREAMING_SNAKE_CASE ): __snake_case : List[Any] = tarfile.open(_SCREAMING_SNAKE_CASE ) tar_file.extractall(_SCREAMING_SNAKE_CASE ) tar_file.close() else: raise EnvironmentError('''Archive format of {} could not be identified'''.format(_SCREAMING_SNAKE_CASE ) ) return output_path_extracted return output_path def lowercase ( _snake_case : List[str] , _snake_case : List[str]="," ) ->Dict: """simple docstring""" assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if os.path.isfile(_SCREAMING_SNAKE_CASE ): with open(_SCREAMING_SNAKE_CASE ) as f: __snake_case : Optional[Any] = eval(f.read() ) else: __snake_case : Union[str, Any] = requests.get(_SCREAMING_SNAKE_CASE ) try: __snake_case : Dict = requests.json() except Exception: __snake_case : int = req.content.decode() assert data is not None, "could not connect" try: __snake_case : Any = eval(_SCREAMING_SNAKE_CASE ) except Exception: __snake_case : Optional[int] = data.split('''\n''' ) req.close() return data def lowercase ( _snake_case : Any ) ->str: """simple docstring""" __snake_case : Optional[int] = requests.get(_SCREAMING_SNAKE_CASE ) __snake_case : List[str] = np.array(Image.open(BytesIO(response.content ) ) ) return img def lowercase ( _snake_case : Tuple ) ->int: """simple docstring""" __snake_case : List[Any] = url.split('''/''' )[-1] if fn not in os.listdir(os.getcwd() ): wget.download(_SCREAMING_SNAKE_CASE ) with open(_SCREAMING_SNAKE_CASE , '''rb''' ) as stream: __snake_case : Optional[Any] = pkl.load(_SCREAMING_SNAKE_CASE ) __snake_case : List[str] = weights.pop('''model''' ) __snake_case : List[str] = {} for k, v in model.items(): __snake_case : Union[str, Any] = torch.from_numpy(_SCREAMING_SNAKE_CASE ) if "running_var" in k: __snake_case : List[Any] = torch.tensor([0] ) __snake_case : Any = k.replace('''running_var''' , '''num_batches_tracked''' ) __snake_case : List[str] = zero return new def lowercase ( ) ->str: """simple docstring""" print(f"""{os.path.abspath(os.path.join(_SCREAMING_SNAKE_CASE , os.pardir ) )}/demo.ipynb""" ) def lowercase ( _snake_case : List[Any] , _snake_case : Any="RGB" ) ->List[Any]: """simple docstring""" assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if os.path.isfile(_SCREAMING_SNAKE_CASE ): __snake_case : List[str] = cva.imread(_SCREAMING_SNAKE_CASE ) else: __snake_case : int = get_image_from_url(_SCREAMING_SNAKE_CASE ) assert img is not None, f"""could not connect to: {im}""" __snake_case : Optional[Any] = cva.cvtColor(_SCREAMING_SNAKE_CASE , cva.COLOR_BGR2RGB ) if input_format == "RGB": __snake_case : int = img[:, :, ::-1] return img def lowercase ( _snake_case : List[str] , _snake_case : Union[str, Any]=1 ) 
->int: """simple docstring""" return (images[i : i + batch] for i in range(0 , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ))
"""simple docstring""" from ..utils import DummyObject, requires_backends class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[str] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[str] = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[Any] = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : int ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Union[str, Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Tuple = ["""sentencepiece"""] def 
__init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[Any] ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[str] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Tuple = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Optional[Any] ) ->str: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[Any] ) ->int: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[str] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any] ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : Any , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[Any] ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[int] ) ->Optional[Any]: """simple docstring""" 
requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : str ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[str] ) ->Optional[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ) ->Any: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[str] = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Union[str, Any] ) ->Optional[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->str: """simple docstring""" requires_backends(self , ["""sentencepiece"""] )
'''simple docstring'''


def longest_common_subsequence(x: str, y: str):
    """
    Finds the longest common subsequence between two strings.
    Returns (length, subsequence).
    """
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq


if __name__ == "__main__":
    a = 'AGGTAB'
    b = 'GXTXAYB'
    expected_ln = 4
    expected_subseq = 'GTAB'

    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)

    import doctest

    doctest.testmod()
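# Editor's addition: a complementary cross-check, assuming the function
# above; the same LCS length computed top-down with memoization.
from functools import lru_cache


def lcs_length(x: str, y: str) -> int:
    @lru_cache(maxsize=None)
    def go(i: int, j: int) -> int:
        if i == 0 or j == 0:
            return 0
        if x[i - 1] == y[j - 1]:
            return go(i - 1, j - 1) + 1
        return max(go(i - 1, j), go(i, j - 1))

    return go(len(x), len(y))


assert lcs_length("AGGTAB", "GXTXAYB") == 4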
"""simple docstring""" import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor __SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) class __A (snake_case__): '''simple docstring''' def __init__( self : str , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->None: """simple docstring""" warnings.warn( """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use MobileViTImageProcessor instead.""" , UpperCAmelCase_ , ) super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
def solution(length: int = 50) -> int:
    """
    Returns the number of ways a row measuring `length` units can be filled
    with grey squares (1 unit) and red (2), green (3) or blue (4) tiles.
    """
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]


if __name__ == "__main__":
    print(F"""{solution() = }""")
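# Editor's addition: a quick sanity check against the Project Euler 117
# statement, which gives 15 tilings for a row of length five (assuming
# the solution() above).
assert solution(5) == 15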
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Any: snake_case_ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """vit.embeddings.cls_token"""), ("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" snake_case_ = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Tuple: for i in range(config.num_hidden_layers ): if base_model: snake_case_ = """""" else: snake_case_ = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict snake_case_ = in_proj_weight[ : config.hidden_size, : ] snake_case_ = in_proj_bias[: config.hidden_size] snake_case_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] snake_case_ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] 
snake_case_ = in_proj_weight[ -config.hidden_size :, : ] snake_case_ = in_proj_bias[-config.hidden_size :] def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: snake_case_ = dct.pop(_SCREAMING_SNAKE_CASE ) snake_case_ = val def _a ( ) -> Any: snake_case_ = """http://images.cocodataset.org/val2017/000000039769.jpg""" snake_case_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = ViTConfig() snake_case_ = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": snake_case_ = True snake_case_ = int(vit_name[-12:-10] ) snake_case_ = int(vit_name[-9:-6] ) else: snake_case_ = 1_000 snake_case_ = """huggingface/label-files""" snake_case_ = """imagenet-1k-id2label.json""" snake_case_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) snake_case_ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} snake_case_ = int(vit_name[-6:-4] ) snake_case_ = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith("""tiny""" ): snake_case_ = 192 snake_case_ = 768 snake_case_ = 12 snake_case_ = 3 elif vit_name[9:].startswith("""small""" ): snake_case_ = 384 snake_case_ = 1_536 snake_case_ = 12 snake_case_ = 6 else: pass else: if vit_name[4:].startswith("""small""" ): snake_case_ = 768 snake_case_ = 2_304 snake_case_ = 8 snake_case_ = 8 elif vit_name[4:].startswith("""base""" ): pass elif vit_name[4:].startswith("""large""" ): snake_case_ = 1_024 snake_case_ = 4_096 snake_case_ = 24 snake_case_ = 16 elif vit_name[4:].startswith("""huge""" ): snake_case_ = 1_280 snake_case_ = 5_120 snake_case_ = 32 snake_case_ = 16 # load original model from timm snake_case_ = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ) timm_model.eval() # load state_dict of original model, remove and rename some keys snake_case_ = timm_model.state_dict() if base_model: remove_classification_head_(_SCREAMING_SNAKE_CASE ) snake_case_ = create_rename_keys(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) read_in_q_k_v(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # load HuggingFace model if vit_name[-5:] == "in21k": snake_case_ = ViTModel(_SCREAMING_SNAKE_CASE ).eval() else: snake_case_ = ViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval() model.load_state_dict(_SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: snake_case_ = DeiTImageProcessor(size=config.image_size ) else: snake_case_ = ViTImageProcessor(size=config.image_size ) snake_case_ = image_processor(images=prepare_img() , return_tensors="""pt""" ) snake_case_ = encoding["""pixel_values"""] snake_case_ = model(_SCREAMING_SNAKE_CASE ) if base_model: snake_case_ = timm_model.forward_features(_SCREAMING_SNAKE_CASE ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(_SCREAMING_SNAKE_CASE , 
outputs.pooler_output , atol=1E-3 ) else: snake_case_ = timm_model(_SCREAMING_SNAKE_CASE ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.logits , atol=1E-3 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--vit_name', default='vit_base_patch16_224', type=str, help='Name of the ViT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) __SCREAMING_SNAKE_CASE : int = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
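# Editor's addition: an example invocation, grounded in the argparse flags
# defined above. The script filename follows the usual transformers naming
# convention and the output path is illustrative:
#
#   python convert_vit_timm_to_pytorch.py \
#       --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit-base-patch16-224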
'''simple docstring''' from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxCrossAttnUpBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, FlaxUpBlockaD, ) @flax.struct.dataclass class UpperCAmelCase__ ( snake_case__): __SCREAMING_SNAKE_CASE = 42 @flax_register_to_config class UpperCAmelCase__ ( nn.Module , snake_case__ , snake_case__): __SCREAMING_SNAKE_CASE = 3_2 __SCREAMING_SNAKE_CASE = 4 __SCREAMING_SNAKE_CASE = 4 __SCREAMING_SNAKE_CASE = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) __SCREAMING_SNAKE_CASE = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0) __SCREAMING_SNAKE_CASE = 2 __SCREAMING_SNAKE_CASE = 8 __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = 1_2_8_0 __SCREAMING_SNAKE_CASE = 0.0 __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = jnp.floataa __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = False def __lowerCamelCase ( self , lowercase ) -> FrozenDict: __UpperCamelCase = (1, self.in_channels, self.sample_size, self.sample_size) __UpperCamelCase = jnp.zeros(UpperCAmelCase_ , dtype=jnp.floataa ) __UpperCamelCase = jnp.ones((1,) , dtype=jnp.intaa ) __UpperCamelCase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) __UpperCamelCase , __UpperCamelCase = jax.random.split(UpperCAmelCase_ ) __UpperCamelCase = {"""params""": params_rng, """dropout""": dropout_rng} return self.init(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )["params"] def __lowerCamelCase ( self ) -> List[Any]: __UpperCamelCase = self.block_out_channels __UpperCamelCase = block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError( """At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.""" ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. 
__UpperCamelCase = self.num_attention_heads or self.attention_head_dim # input __UpperCamelCase = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time __UpperCamelCase = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) __UpperCamelCase = FlaxTimestepEmbedding(UpperCAmelCase_ , dtype=self.dtype ) __UpperCamelCase = self.only_cross_attention if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): __UpperCamelCase = (only_cross_attention,) * len(self.down_block_types ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): __UpperCamelCase = (num_attention_heads,) * len(self.down_block_types ) # down __UpperCamelCase = [] __UpperCamelCase = block_out_channels[0] for i, down_block_type in enumerate(self.down_block_types ): __UpperCamelCase = output_channel __UpperCamelCase = block_out_channels[i] __UpperCamelCase = i == len(UpperCAmelCase_ ) - 1 if down_block_type == "CrossAttnDownBlock2D": __UpperCamelCase = FlaxCrossAttnDownBlockaD( in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: __UpperCamelCase = FlaxDownBlockaD( in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(UpperCAmelCase_ ) __UpperCamelCase = down_blocks # mid __UpperCamelCase = FlaxUNetMidBlockaDCrossAttn( in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) # up __UpperCamelCase = [] __UpperCamelCase = list(reversed(UpperCAmelCase_ ) ) __UpperCamelCase = list(reversed(UpperCAmelCase_ ) ) __UpperCamelCase = list(reversed(UpperCAmelCase_ ) ) __UpperCamelCase = reversed_block_out_channels[0] for i, up_block_type in enumerate(self.up_block_types ): __UpperCamelCase = output_channel __UpperCamelCase = reversed_block_out_channels[i] __UpperCamelCase = reversed_block_out_channels[min(i + 1 , len(UpperCAmelCase_ ) - 1 )] __UpperCamelCase = i == len(UpperCAmelCase_ ) - 1 if up_block_type == "CrossAttnUpBlock2D": __UpperCamelCase = FlaxCrossAttnUpBlockaD( in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , prev_output_channel=UpperCAmelCase_ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: __UpperCamelCase = FlaxUpBlockaD( in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , prev_output_channel=UpperCAmelCase_ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , ) up_blocks.append(UpperCAmelCase_ ) __UpperCamelCase = output_channel __UpperCamelCase = up_blocks # out __UpperCamelCase = nn.GroupNorm(num_groups=3_2 , epsilon=1E-5 ) __UpperCamelCase = nn.Conv( 
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , lowercase , lowercase , lowercase , lowercase=None , lowercase=None , lowercase = True , lowercase = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]: if not isinstance(UpperCAmelCase_ , jnp.ndarray ): __UpperCamelCase = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(UpperCAmelCase_ , jnp.ndarray ) and len(timesteps.shape ) == 0: __UpperCamelCase = timesteps.astype(dtype=jnp.floataa ) __UpperCamelCase = jnp.expand_dims(UpperCAmelCase_ , 0 ) __UpperCamelCase = self.time_proj(UpperCAmelCase_ ) __UpperCamelCase = self.time_embedding(UpperCAmelCase_ ) # 2. pre-process __UpperCamelCase = jnp.transpose(UpperCAmelCase_ , (0, 2, 3, 1) ) __UpperCamelCase = self.conv_in(UpperCAmelCase_ ) # 3. down __UpperCamelCase = (sample,) for down_block in self.down_blocks: if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): __UpperCamelCase , __UpperCamelCase = down_block(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , deterministic=not train ) else: __UpperCamelCase , __UpperCamelCase = down_block(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=not train ) down_block_res_samples += res_samples if down_block_additional_residuals is not None: __UpperCamelCase = () for down_block_res_sample, down_block_additional_residual in zip( UpperCAmelCase_ , UpperCAmelCase_ ): down_block_res_sample += down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) __UpperCamelCase = new_down_block_res_samples # 4. mid __UpperCamelCase = self.mid_block(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , deterministic=not train ) if mid_block_additional_residual is not None: sample += mid_block_additional_residual # 5. up for up_block in self.up_blocks: __UpperCamelCase = down_block_res_samples[-(self.layers_per_block + 1) :] __UpperCamelCase = down_block_res_samples[: -(self.layers_per_block + 1)] if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): __UpperCamelCase = up_block( UpperCAmelCase_ , temb=UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , res_hidden_states_tuple=UpperCAmelCase_ , deterministic=not train , ) else: __UpperCamelCase = up_block(UpperCAmelCase_ , temb=UpperCAmelCase_ , res_hidden_states_tuple=UpperCAmelCase_ , deterministic=not train ) # 6. post-process __UpperCamelCase = self.conv_norm_out(UpperCAmelCase_ ) __UpperCamelCase = nn.silu(UpperCAmelCase_ ) __UpperCamelCase = self.conv_out(UpperCAmelCase_ ) __UpperCamelCase = jnp.transpose(UpperCAmelCase_ , (0, 3, 1, 2) ) if not return_dict: return (sample,) return FlaxUNetaDConditionOutput(sample=UpperCAmelCase_ )
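# Editor's addition: a minimal init sketch. Under the un-mangled names this
# module corresponds to diffusers' FlaxUNet2DConditionModel, whose weights
# are created with init_weights(rng) as defined above; the sizes are the
# defaults from the config fields.
import jax

from diffusers import FlaxUNet2DConditionModel

unet = FlaxUNet2DConditionModel(sample_size=32, cross_attention_dim=1280)
params = unet.init_weights(jax.random.PRNGKey(0))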
"""simple docstring""" import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __A (unittest.TestCase): '''simple docstring''' def __init__( self : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple=13 , UpperCAmelCase_ : List[Any]=7 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=99 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : Tuple=5 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Any=37 , UpperCAmelCase_ : int="gelu" , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=512 , UpperCAmelCase_ : Optional[Any]=16 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : str=4 , ) ->Tuple: """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_attention_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = num_choices def lowerCAmelCase ( self : Optional[int] ) ->str: """simple docstring""" snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_attention_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = None if self.use_token_type_ids: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCAmelCase ( self : List[str] ) ->Dict: """simple docstring""" snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class __A (snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Union[str, Any] = True __lowercase: int = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, 
FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def lowerCAmelCase ( self : Optional[Any] ) ->Tuple: """simple docstring""" snake_case_ = FlaxRoFormerModelTester(self ) @slow def lowerCAmelCase ( self : Any ) ->List[str]: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=UpperCAmelCase_ ) snake_case_ = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCAmelCase_ ) @require_flax class __A (unittest.TestCase): '''simple docstring''' @slow def lowerCAmelCase ( self : str ) ->Dict: """simple docstring""" snake_case_ = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) snake_case_ = jnp.array([[0, 1, 2, 3, 4, 5]] ) snake_case_ = model(UpperCAmelCase_ )[0] snake_case_ = 50_000 snake_case_ = (1, 6, vocab_size) self.assertEqual(output.shape , UpperCAmelCase_ ) snake_case_ = jnp.array( [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
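# Editor's addition: the @slow tests above are skipped by default; in the
# usual transformers repository layout they run with
#
#   RUN_SLOW=1 pytest tests/models/roformer/test_modeling_flax_roformer.py -q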
'''simple docstring'''

from ..utils import DummyObject, requires_backends


# Class name restored on the assumption that this is transformers'
# dummy_keras_nlp_objects module, whose only entry is TFGPT2Tokenizer.
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
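# Editor's addition: a self-contained sketch of the dummy-backend pattern
# used above (not the transformers implementation, which lives in
# transformers.utils). Calling the placeholder class raises an ImportError
# that names the missing dependency.
class _Dummy(type):
    def __call__(cls, *args, **kwargs):
        raise ImportError(f"{cls.__name__} requires: {cls._backends}")


class NeedsKerasNLP(metaclass=_Dummy):
    _backends = ["keras_nlp"]


try:
    NeedsKerasNLP()
except ImportError as err:
    print(err)  # NeedsKerasNLP requires: ['keras_nlp']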
"""simple docstring""" from __future__ import annotations def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool: snake_case_ = get_failure_array(_SCREAMING_SNAKE_CASE ) # 2) Step through text searching for pattern snake_case_ , snake_case_ = 0, 0 # index into text, pattern while i < len(_SCREAMING_SNAKE_CASE ): if pattern[j] == text[i]: if j == (len(_SCREAMING_SNAKE_CASE ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: snake_case_ = failure[j - 1] continue i += 1 return False def _a ( _SCREAMING_SNAKE_CASE ) -> list[int]: snake_case_ = [0] snake_case_ = 0 snake_case_ = 1 while j < len(_SCREAMING_SNAKE_CASE ): if pattern[i] == pattern[j]: i += 1 elif i > 0: snake_case_ = failure[i - 1] continue j += 1 failure.append(_SCREAMING_SNAKE_CASE ) return failure if __name__ == "__main__": # Test 1) __SCREAMING_SNAKE_CASE : Optional[int] = 'abc1abc12' __SCREAMING_SNAKE_CASE : Optional[int] = 'alskfjaldsabc1abc1abc12k23adsfabcabc' __SCREAMING_SNAKE_CASE : List[str] = 'alskfjaldsk23adsfabcabc' assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) __SCREAMING_SNAKE_CASE : int = 'ABABX' __SCREAMING_SNAKE_CASE : Optional[Any] = 'ABABZABABYABABX' assert kmp(pattern, text) # Test 3) __SCREAMING_SNAKE_CASE : Any = 'AAAB' __SCREAMING_SNAKE_CASE : List[Any] = 'ABAAAAAB' assert kmp(pattern, text) # Test 4) __SCREAMING_SNAKE_CASE : Optional[int] = 'abcdabcy' __SCREAMING_SNAKE_CASE : str = 'abcxabcdabxabcdabcdabcy' assert kmp(pattern, text) # Test 5) __SCREAMING_SNAKE_CASE : Any = 'aabaabaaa' assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all image processors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...image_processing_utils import ImageProcessingMixin from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) A : List[Any] = logging.get_logger(__name__) A : str = OrderedDict( [ ('align', 'EfficientNetImageProcessor'), ('beit', 'BeitImageProcessor'), ('bit', 'BitImageProcessor'), ('blip', 'BlipImageProcessor'), ('blip-2', 'BlipImageProcessor'), ('bridgetower', 'BridgeTowerImageProcessor'), ('chinese_clip', 'ChineseCLIPImageProcessor'), ('clip', 'CLIPImageProcessor'), ('clipseg', 'ViTImageProcessor'), ('conditional_detr', 'ConditionalDetrImageProcessor'), ('convnext', 'ConvNextImageProcessor'), ('convnextv2', 'ConvNextImageProcessor'), ('cvt', 'ConvNextImageProcessor'), ('data2vec-vision', 'BeitImageProcessor'), ('deformable_detr', 'DeformableDetrImageProcessor'), ('deit', 'DeiTImageProcessor'), ('deta', 'DetaImageProcessor'), ('detr', 'DetrImageProcessor'), ('dinat', 'ViTImageProcessor'), ('donut-swin', 'DonutImageProcessor'), ('dpt', 'DPTImageProcessor'), ('efficientformer', 'EfficientFormerImageProcessor'), ('efficientnet', 'EfficientNetImageProcessor'), ('flava', 'FlavaImageProcessor'), ('focalnet', 'BitImageProcessor'), ('git', 'CLIPImageProcessor'), ('glpn', 'GLPNImageProcessor'), ('groupvit', 'CLIPImageProcessor'), ('imagegpt', 'ImageGPTImageProcessor'), ('instructblip', 'BlipImageProcessor'), ('layoutlmv2', 'LayoutLMv2ImageProcessor'), ('layoutlmv3', 'LayoutLMv3ImageProcessor'), ('levit', 'LevitImageProcessor'), ('mask2former', 'Mask2FormerImageProcessor'), ('maskformer', 'MaskFormerImageProcessor'), ('mgp-str', 'ViTImageProcessor'), ('mobilenet_v1', 'MobileNetV1ImageProcessor'), ('mobilenet_v2', 'MobileNetV2ImageProcessor'), ('mobilevit', 'MobileViTImageProcessor'), ('mobilevit', 'MobileViTImageProcessor'), ('mobilevitv2', 'MobileViTImageProcessor'), ('nat', 'ViTImageProcessor'), ('oneformer', 'OneFormerImageProcessor'), ('owlvit', 'OwlViTImageProcessor'), ('perceiver', 'PerceiverImageProcessor'), ('pix2struct', 'Pix2StructImageProcessor'), ('poolformer', 'PoolFormerImageProcessor'), ('regnet', 'ConvNextImageProcessor'), ('resnet', 'ConvNextImageProcessor'), ('sam', 'SamImageProcessor'), ('segformer', 'SegformerImageProcessor'), ('swiftformer', 'ViTImageProcessor'), ('swin', 'ViTImageProcessor'), ('swin2sr', 'Swin2SRImageProcessor'), ('swinv2', 'ViTImageProcessor'), ('table-transformer', 'DetrImageProcessor'), ('timesformer', 'VideoMAEImageProcessor'), ('tvlt', 'TvltImageProcessor'), ('upernet', 'SegformerImageProcessor'), ('van', 'ConvNextImageProcessor'), ('videomae', 'VideoMAEImageProcessor'), ('vilt', 'ViltImageProcessor'), ('vit', 'ViTImageProcessor'), ('vit_hybrid', 'ViTHybridImageProcessor'), ('vit_mae', 'ViTImageProcessor'), ('vit_msn', 'ViTImageProcessor'), ('xclip', 'CLIPImageProcessor'), ('yolos', 'YolosImageProcessor'), ] ) A : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES) def UpperCamelCase ( __magic_name__ : Union[str, Any] ) -> Optional[int]: """simple docstring""" for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items(): 
if class_name in extractors: lowercase__ = model_type_to_module_name(_SCREAMING_SNAKE_CASE ) lowercase__ = importlib.import_module(f'''.{module_name}''' , """transformers.models""" ) try: return getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) except AttributeError: continue for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items(): if getattr(_SCREAMING_SNAKE_CASE , """__name__""" , _SCREAMING_SNAKE_CASE ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. lowercase__ = importlib.import_module("""transformers""" ) if hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): return getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return None def UpperCamelCase ( __magic_name__ : int , __magic_name__ : Dict = None , __magic_name__ : Optional[Any] = False , __magic_name__ : List[Any] = False , __magic_name__ : Dict = None , __magic_name__ : int = None , __magic_name__ : Any = None , __magic_name__ : Tuple = False , **__magic_name__ : Union[str, Any] , ) -> Union[str, Any]: """simple docstring""" lowercase__ = get_file_from_repo( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , revision=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , ) if resolved_config_file is None: logger.info( """Could not locate the image processor configuration file, will try to use the model config instead.""" ) return {} with open(_SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as reader: return json.load(_SCREAMING_SNAKE_CASE ) class A : '''simple docstring''' def __init__(self : Tuple ) -> Optional[Any]: """simple docstring""" raise EnvironmentError( """AutoImageProcessor is designed to be instantiated """ """using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.""" ) @classmethod @replace_list_option_in_docstrings(UpperCAmelCase_ ) def lowerCamelCase__ (cls : Optional[int] , _UpperCAmelCase : List[Any] , **_UpperCAmelCase : List[Any] ) -> List[str]: """simple docstring""" lowercase__ = kwargs.pop("""config""" , UpperCAmelCase_ ) lowercase__ = kwargs.pop("""trust_remote_code""" , UpperCAmelCase_ ) lowercase__ = True lowercase__ , lowercase__ = ImageProcessingMixin.get_image_processor_dict(UpperCAmelCase_ , **UpperCAmelCase_ ) lowercase__ = config_dict.get("""image_processor_type""" , UpperCAmelCase_ ) lowercase__ = None if "AutoImageProcessor" in config_dict.get("""auto_map""" , {} ): lowercase__ = config_dict["""auto_map"""]["""AutoImageProcessor"""] # If we still don't have the image processor class, check if we're loading from a previous feature extractor config # and if so, infer the image processor class from there. if image_processor_class is None and image_processor_auto_map is None: lowercase__ = config_dict.pop("""feature_extractor_type""" , UpperCAmelCase_ ) if feature_extractor_class is not None: logger.warning( """Could not find image processor class in the image processor config or the model config. 
Loading""" """ based on pattern matching with the model's feature extractor configuration.""" ) lowercase__ = feature_extractor_class.replace("""FeatureExtractor""" , """ImageProcessor""" ) if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ): lowercase__ = config_dict["""auto_map"""]["""AutoFeatureExtractor"""] lowercase__ = feature_extractor_auto_map.replace("""FeatureExtractor""" , """ImageProcessor""" ) logger.warning( """Could not find image processor auto map in the image processor config or the model config.""" """ Loading based on pattern matching with the model's feature extractor configuration.""" ) # If we don't find the image processor class in the image processor config, let's try the model config. if image_processor_class is None and image_processor_auto_map is None: if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): lowercase__ = AutoConfig.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ ) # It could be in `config.image_processor_type`` lowercase__ = getattr(UpperCAmelCase_ , """image_processor_type""" , UpperCAmelCase_ ) if hasattr(UpperCAmelCase_ , """auto_map""" ) and "AutoImageProcessor" in config.auto_map: lowercase__ = config.auto_map["""AutoImageProcessor"""] if image_processor_class is not None: lowercase__ = image_processor_class_from_name(UpperCAmelCase_ ) lowercase__ = image_processor_auto_map is not None lowercase__ = image_processor_class is not None or type(UpperCAmelCase_ ) in IMAGE_PROCESSOR_MAPPING lowercase__ = resolve_trust_remote_code( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) if has_remote_code and trust_remote_code: lowercase__ = get_class_from_dynamic_module( UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ) lowercase__ = kwargs.pop("""code_revision""" , UpperCAmelCase_ ) if os.path.isdir(UpperCAmelCase_ ): image_processor_class.register_for_auto_class() return image_processor_class.from_dict(UpperCAmelCase_ , **UpperCAmelCase_ ) elif image_processor_class is not None: return image_processor_class.from_dict(UpperCAmelCase_ , **UpperCAmelCase_ ) # Last try: we use the IMAGE_PROCESSOR_MAPPING. elif type(UpperCAmelCase_ ) in IMAGE_PROCESSOR_MAPPING: lowercase__ = IMAGE_PROCESSOR_MAPPING[type(UpperCAmelCase_ )] return image_processor_class.from_dict(UpperCAmelCase_ , **UpperCAmelCase_ ) raise ValueError( f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a ''' f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following ''' f'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' ) @staticmethod def lowerCamelCase__ (_UpperCAmelCase : Dict , _UpperCAmelCase : List[str] ) -> Dict: """simple docstring""" IMAGE_PROCESSOR_MAPPING.register(UpperCAmelCase_ , UpperCAmelCase_ )
"""simple docstring""" from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class __A (snake_case__): '''simple docstring''' @slow @require_torch def lowerCAmelCase ( self : Union[str, Any] ) ->Dict: """simple docstring""" snake_case_ = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" ) snake_case_ = BertTokenizer.from_pretrained("""bert-base-uncased""" ) snake_case_ = bertabert.config.encoder.vocab_size snake_case_ = tokenizer.sep_token_id snake_case_ = tokenizer.cls_token_id snake_case_ = 128 snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" ) snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" ) snake_case_ = train_dataset.select(range(32 ) ) snake_case_ = val_dataset.select(range(16 ) ) snake_case_ = 4 def _map_to_encoder_decoder_inputs(UpperCAmelCase_ : int ): # Tokenizer will automatically set [BOS] <text> [EOS] snake_case_ = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=512 ) snake_case_ = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=128 ) snake_case_ = inputs.input_ids snake_case_ = inputs.attention_mask snake_case_ = outputs.input_ids snake_case_ = outputs.input_ids.copy() snake_case_ = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""] ] snake_case_ = outputs.attention_mask assert all(len(UpperCAmelCase_ ) == 512 for x in inputs.input_ids ) assert all(len(UpperCAmelCase_ ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(UpperCAmelCase_ : Union[str, Any] ): snake_case_ = pred.label_ids snake_case_ = pred.predictions # all unnecessary tokens are removed snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCAmelCase_ ) )] ) / len(UpperCAmelCase_ ) return {"accuracy": accuracy} # map train dataset snake_case_ = train_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , ) train_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) # same for validation dataset snake_case_ = val_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , ) val_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) snake_case_ = self.get_auto_remove_tmp_dir() snake_case_ = SeqaSeqTrainingArguments( output_dir=UpperCAmelCase_ , per_device_train_batch_size=UpperCAmelCase_ , per_device_eval_batch_size=UpperCAmelCase_ , predict_with_generate=UpperCAmelCase_ , evaluation_strategy="""steps""" , do_train=UpperCAmelCase_ , do_eval=UpperCAmelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer snake_case_ = SeqaSeqTrainer( 
model=UpperCAmelCase_ , args=UpperCAmelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCAmelCase_ , eval_dataset=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , ) # start training trainer.train()
347
0
import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCAmelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self , A_ , A_=3 , A_=32 , A_=3 , A_=10 , A_=[10, 20, 30, 40] , A_=[1, 1, 2, 1] , A_=True , A_=True , A_="relu" , A_=3 , A_=None , ) -> List[Any]: __UpperCamelCase =parent __UpperCamelCase =batch_size __UpperCamelCase =image_size __UpperCamelCase =num_channels __UpperCamelCase =embeddings_size __UpperCamelCase =hidden_sizes __UpperCamelCase =depths __UpperCamelCase =is_training __UpperCamelCase =use_labels __UpperCamelCase =hidden_act __UpperCamelCase =num_labels __UpperCamelCase =scope __UpperCamelCase =len(UpperCAmelCase_ ) def _a ( self ) -> Union[str, Any]: __UpperCamelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCamelCase =self.get_config() return config, pixel_values def _a ( self ) -> Dict: return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def _a ( self , A_ , A_ ) -> Union[str, Any]: __UpperCamelCase =FlaxRegNetModel(config=UpperCAmelCase_ ) __UpperCamelCase =model(UpperCAmelCase_ ) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _a ( self , A_ , A_ ) -> List[Any]: __UpperCamelCase =self.num_labels __UpperCamelCase =FlaxRegNetForImageClassification(config=UpperCAmelCase_ ) __UpperCamelCase =model(UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self ) -> Optional[int]: __UpperCamelCase =self.prepare_config_and_inputs() __UpperCamelCase , __UpperCamelCase =config_and_inputs __UpperCamelCase ={'pixel_values': pixel_values} return config, inputs_dict @require_flax class UpperCAmelCase__ ( snake_case__ , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : Dict = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () UpperCAmelCase__ : int = False UpperCAmelCase__ : Union[str, Any] = False UpperCAmelCase__ : Any = False def _a ( self ) -> None: __UpperCamelCase =FlaxRegNetModelTester(self ) __UpperCamelCase =ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ ) def _a ( self ) -> Union[str, Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _a ( self ) -> str: return def _a ( self ) -> str: __UpperCamelCase =self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*UpperCAmelCase_ ) def _a ( self ) -> Dict: __UpperCamelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ ) @unittest.skip(reason='RegNet does not use inputs_embeds' ) def _a ( self ) -> Dict: pass @unittest.skip(reason='RegNet does not support input and output embeddings' ) def _a ( self ) -> Optional[int]: pass def _a ( self ) -> int: __UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCamelCase =model_class(UpperCAmelCase_ ) __UpperCamelCase =inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCamelCase =[*signature.parameters.keys()] __UpperCamelCase =['pixel_values'] self.assertListEqual(arg_names[:1] , UpperCAmelCase_ ) def _a ( self ) -> int: def check_hidden_states_output(A_ , A_ , A_ ): __UpperCamelCase =model_class(UpperCAmelCase_ ) __UpperCamelCase =model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) ) __UpperCamelCase =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __UpperCamelCase =self.model_tester.num_stages self.assertEqual(len(UpperCAmelCase_ ) , expected_num_stages + 1 ) __UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCamelCase =True check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCamelCase =True check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) def _a ( self ) -> Optional[Any]: __UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCamelCase =self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) __UpperCamelCase =model_class(UpperCAmelCase_ ) @jax.jit def model_jitted(A_ , **A_ ): return model(pixel_values=UpperCAmelCase_ , **UpperCAmelCase_ ) with self.subTest('JIT Enabled' ): __UpperCamelCase =model_jitted(**UpperCAmelCase_ ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): __UpperCamelCase =model_jitted(**UpperCAmelCase_ ).to_tuple() self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) ) for jitted_output, output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assertEqual(jitted_output.shape , output.shape ) def _UpperCAmelCase ( ): __UpperCamelCase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_flax class UpperCAmelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def _a ( self ) -> Any: return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None @slow def _a ( self ) -> Optional[int]: __UpperCamelCase =FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' ) __UpperCamelCase =self.default_image_processor __UpperCamelCase =prepare_img() __UpperCamelCase =image_processor(images=UpperCAmelCase_ , return_tensors='np' ) __UpperCamelCase =model(**UpperCAmelCase_ ) # verify the logits __UpperCamelCase =(1, 1000) self.assertEqual(outputs.logits.shape , UpperCAmelCase_ ) __UpperCamelCase =jnp.array([-0.4180, -1.5051, -3.4836] ) self.assertTrue(jnp.allclose(outputs.logits[0, :3] , UpperCAmelCase_ 
, atol=1E-4 ) )
62
"""simple docstring""" # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys __SCREAMING_SNAKE_CASE : Tuple = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8') __SCREAMING_SNAKE_CASE : Tuple = subprocess.check_output(f"""git diff --name-only {fork_point_sha}""".split()).decode('utf-8').split() __SCREAMING_SNAKE_CASE : Any = '|'.join(sys.argv[1:]) __SCREAMING_SNAKE_CASE : Optional[Any] = re.compile(Rf"""^({joined_dirs}).*?\.py$""") __SCREAMING_SNAKE_CASE : List[str] = [x for x in modified_files if regex.match(x)] print(' '.join(relevant_modified_files), end='')
347
0
"""simple docstring""" from __future__ import annotations def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> bool: lowercase__ : Dict = get_failure_array(_SCREAMING_SNAKE_CASE ) # 2) Step through text searching for pattern lowercase__ , lowercase__ : Tuple = 0, 0 # index into text, pattern while i < len(_SCREAMING_SNAKE_CASE ): if pattern[j] == text[i]: if j == (len(_SCREAMING_SNAKE_CASE ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: lowercase__ : Tuple = failure[j - 1] continue i += 1 return False def __UpperCAmelCase ( __lowerCamelCase ) -> list[int]: lowercase__ : Dict = [0] lowercase__ : Union[str, Any] = 0 lowercase__ : int = 1 while j < len(_SCREAMING_SNAKE_CASE ): if pattern[i] == pattern[j]: i += 1 elif i > 0: lowercase__ : Tuple = failure[i - 1] continue j += 1 failure.append(_SCREAMING_SNAKE_CASE ) return failure if __name__ == "__main__": # Test 1) lowerCAmelCase_ = 'abc1abc12' lowerCAmelCase_ = 'alskfjaldsabc1abc1abc12k23adsfabcabc' lowerCAmelCase_ = 'alskfjaldsk23adsfabcabc' assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) lowerCAmelCase_ = 'ABABX' lowerCAmelCase_ = 'ABABZABABYABABX' assert kmp(pattern, text) # Test 3) lowerCAmelCase_ = 'AAAB' lowerCAmelCase_ = 'ABAAAAAB' assert kmp(pattern, text) # Test 4) lowerCAmelCase_ = 'abcdabcy' lowerCAmelCase_ = 'abcxabcdabxabcdabcdabcy' assert kmp(pattern, text) # Test 5) lowerCAmelCase_ = 'aabaabaaa' assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
16
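The sample above is the classic Knuth-Morris-Pratt matcher: the failure array records, for each prefix of the pattern, the length of its longest proper prefix that is also a suffix, which is what lets the scan resume after a mismatch without ever moving the text index backwards. A minimal sketch contrasting it with a naive scan; `naive_search` is our own illustrative helper (not part of the sample), and `kmp` refers to the function defined above.

def naive_search(pattern: str, text: str) -> bool:
    # Re-checks up to len(pattern) characters at every start position: O(n * m) worst case.
    return any(
        text[start : start + len(pattern)] == pattern
        for start in range(len(text) - len(pattern) + 1)
    )

# Worst case for the naive scan, still linear for KMP: the failure array for "aaab"
# is [0, 1, 2, 0], so kmp() falls back within the pattern instead of rescanning the text.
assert not naive_search("aaab", "a" * 20)
assert not kmp("aaab", "a" * 20)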
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } __SCREAMING_SNAKE_CASE : List[Any] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = {} with open(_SCREAMING_SNAKE_CASE , """r""" ) as file: for line_number, line in enumerate(_SCREAMING_SNAKE_CASE ): snake_case_ = line.strip() if line: snake_case_ = line.split() snake_case_ = line_number snake_case_ = words[0] snake_case_ = value return result def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: for attribute in key.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_SCREAMING_SNAKE_CASE ): snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]] snake_case_ = """param""" if weight_type is not None and weight_type != "param": snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape elif weight_type is not None and weight_type == "param": snake_case_ = hf_pointer for attribute in hf_param_name.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = shape_pointer.shape # let's reduce dimension snake_case_ = value[0] else: snake_case_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": snake_case_ = value elif weight_type == "weight_g": snake_case_ = value elif weight_type == "weight_v": snake_case_ = value elif weight_type == "bias": snake_case_ = value elif weight_type == "param": for attribute in hf_param_name.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = value else: snake_case_ = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: snake_case_ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_SCREAMING_SNAKE_CASE ): snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]] snake_case_ = """param""" if weight_type is not None and weight_type != "param": snake_case_ = """.""".join([key, weight_type] ) elif weight_type is not None and weight_type == "param": snake_case_ = """.""".join([key, hf_param_name] ) else: snake_case_ = key snake_case_ = value if """lm_head""" in full_key else value[0] __SCREAMING_SNAKE_CASE : int = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> List[str]: snake_case_ = False for key, mapped_key in MAPPING.items(): snake_case_ = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: snake_case_ = True if "*" in mapped_key: snake_case_ = name.split(_SCREAMING_SNAKE_CASE )[0].split(""".""" )[-2] snake_case_ = mapped_key.replace("""*""" , _SCREAMING_SNAKE_CASE ) if "weight_g" in name: snake_case_ = """weight_g""" elif "weight_v" in name: snake_case_ = """weight_v""" elif "bias" in name: snake_case_ = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj snake_case_ = """weight""" else: snake_case_ = None if hf_dict is not None: rename_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return is_used return is_used def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = [] snake_case_ = fairseq_model.state_dict() snake_case_ = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): snake_case_ = False if "conv_layers" in name: load_conv_layer( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == """group""" , ) snake_case_ = True else: snake_case_ = load_wavaveca_layer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not is_used: unused_weights.append(_SCREAMING_SNAKE_CASE ) logger.warning(f"""Unused weights: {unused_weights}""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: snake_case_ = full_name.split("""conv_layers.""" )[-1] snake_case_ = name.split(""".""" ) snake_case_ = int(items[0] ) snake_case_ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size 
{value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_SCREAMING_SNAKE_CASE ) @torch.no_grad() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False ) -> int: if config_path is not None: snake_case_ = WavaVecaConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) else: snake_case_ = WavaVecaConfig() if is_seq_class: snake_case_ = read_txt_into_dict(_SCREAMING_SNAKE_CASE ) snake_case_ = idalabel snake_case_ = WavaVecaForSequenceClassification(_SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE ) elif is_finetuned: if dict_path: snake_case_ = Dictionary.load(_SCREAMING_SNAKE_CASE ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq snake_case_ = target_dict.pad_index snake_case_ = target_dict.bos_index snake_case_ = target_dict.eos_index snake_case_ = len(target_dict.symbols ) snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , """vocab.json""" ) if not os.path.isdir(_SCREAMING_SNAKE_CASE ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_SCREAMING_SNAKE_CASE ) ) return os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) snake_case_ = target_dict.indices # fairseq has the <pad> and <s> switched snake_case_ = 0 snake_case_ = 1 with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaCTCTokenizer( _SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_SCREAMING_SNAKE_CASE , ) snake_case_ = True if config.feat_extract_norm == """layer""" else False snake_case_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) snake_case_ = WavaVecaProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE ) processor.save_pretrained(_SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaForCTC(_SCREAMING_SNAKE_CASE ) else: snake_case_ = 
WavaVecaForPreTraining(_SCREAMING_SNAKE_CASE ) if is_finetuned or is_seq_class: snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: snake_case_ = argparse.Namespace(task="""audio_pretraining""" ) snake_case_ = fairseq.tasks.setup_task(_SCREAMING_SNAKE_CASE ) snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_SCREAMING_SNAKE_CASE ) snake_case_ = model[0].eval() recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , not is_finetuned ) hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) parser.add_argument( '--is_seq_class', action='store_true', help='Whether the model to convert is a fine-tuned sequence classification model or not', ) __SCREAMING_SNAKE_CASE : Any = parser.parse_args() __SCREAMING_SNAKE_CASE : List[Any] = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
347
0
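Both conversion paths in the script above reduce to the same pattern: walk the fairseq state dict, translate each key through a rename table like MAPPING, and copy the tensor across. A self-contained sketch of that pattern; the two table entries are taken from MAPPING above, but the helper name and the fake state dict are illustrative only, not the real conversion code.

import torch

# Two entries from the MAPPING table above; real conversions carry many more.
RENAME = {
    "post_extract_proj": "feature_projection.projection",
    "quantizer.vars": "quantizer.codevectors",
}

def rename_state_dict(state_dict: dict) -> dict:
    renamed = {}
    for old_key, tensor in state_dict.items():
        new_key = old_key
        for src, dest in RENAME.items():
            if old_key.startswith(src):
                # Replace only the mapped prefix; the trailing ".weight"/".bias" is kept.
                new_key = dest + old_key[len(src):]
                break
        renamed[new_key] = tensor
    return renamed

fake = {"post_extract_proj.weight": torch.zeros(2, 2)}
print(list(rename_state_dict(fake)))  # ['feature_projection.projection.weight']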
'''simple docstring'''
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
237
"""simple docstring""" import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class __A : '''simple docstring''' def __init__( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=14 , UpperCAmelCase_ : Union[str, Any]=7 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : str=99 , UpperCAmelCase_ : Union[str, Any]=32 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : str=37 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : int=512 , UpperCAmelCase_ : Tuple=0.02 , ) ->List[str]: """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_input_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = rotary_dim snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = initializer_range snake_case_ = None snake_case_ = vocab_size - 1 snake_case_ = vocab_size - 1 snake_case_ = vocab_size - 1 def lowerCAmelCase ( self : int ) ->Optional[int]: """simple docstring""" snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_input_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=UpperCAmelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def lowerCAmelCase ( self : Dict ) ->Tuple: """simple docstring""" snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowerCAmelCase ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict ) ->Tuple: """simple docstring""" snake_case_ = 20 snake_case_ = model_class_name(UpperCAmelCase_ ) snake_case_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ ) snake_case_ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) snake_case_ = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) 
snake_case_ = model( input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , ) snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) snake_case_ = model( input_ids[:, -1:] , attention_mask=UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=UpperCAmelCase_ , ) snake_case_ = model(UpperCAmelCase_ ) snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict: """simple docstring""" snake_case_ = 20 snake_case_ = model_class_name(UpperCAmelCase_ ) snake_case_ = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) snake_case_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ ) snake_case_ = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) snake_case_ = model( input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , ) snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) snake_case_ = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , ) snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ) snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) @require_flax class __A (snake_case__ , snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Any = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () __lowercase: List[str] = (FlaxGPTJForCausalLM,) if is_flax_available() else () def lowerCAmelCase ( self : Tuple ) ->List[str]: """simple docstring""" snake_case_ = FlaxGPTJModelTester(self ) def lowerCAmelCase ( self : int ) ->List[Any]: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : List[str] ) ->Any: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) @tooslow def lowerCAmelCase ( self : List[str] ) ->Optional[Any]: """simple docstring""" snake_case_ = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" ) snake_case_ = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ ) snake_case_ = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" ) snake_case_ = False snake_case_ = model.config.eos_token_id snake_case_ = jax.jit(model.generate ) snake_case_ = jit_generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences snake_case_ = 
tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = [ """Hello this is a long string of text.\n\nI'm trying to get the text of the""", """Hey, I'm a little late to the party. I'm going to""", ] self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @is_pt_flax_cross_test def lowerCAmelCase ( self : int ) ->str: """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning snake_case_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ , snake_case_ = pt_inputs["""input_ids"""].shape snake_case_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(UpperCAmelCase_ ): snake_case_ = 0 snake_case_ = 1 snake_case_ = 0 snake_case_ = 1 snake_case_ = pt_model_class(UpperCAmelCase_ ).eval() snake_case_ = model_class(UpperCAmelCase_ , dtype=jnp.floataa ) snake_case_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCAmelCase_ ) snake_case_ = fx_state with torch.no_grad(): snake_case_ = pt_model(**UpperCAmelCase_ ).to_tuple() snake_case_ = fx_model(**UpperCAmelCase_ ).to_tuple() self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(UpperCAmelCase_ ) snake_case_ = model_class.from_pretrained(UpperCAmelCase_ , from_pt=UpperCAmelCase_ ) snake_case_ = fx_model_loaded(**UpperCAmelCase_ ).to_tuple() self.assertEqual( len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @is_pt_flax_cross_test def lowerCAmelCase ( self : List[Any] ) ->str: """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning snake_case_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = pt_model_class(UpperCAmelCase_ ).eval() snake_case_ = model_class(UpperCAmelCase_ , dtype=jnp.floataa ) snake_case_ = load_flax_weights_in_pytorch_model(UpperCAmelCase_ , fx_model.params ) snake_case_ , snake_case_ = pt_inputs["""input_ids"""].shape snake_case_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(UpperCAmelCase_ ): snake_case_ = 0 snake_case_ = 1 snake_case_ = 0 snake_case_ = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): snake_case_ = pt_model(**UpperCAmelCase_ ).to_tuple() snake_case_ = fx_model(**UpperCAmelCase_ 
).to_tuple() self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(UpperCAmelCase_ ) snake_case_ = pt_model_class.from_pretrained(UpperCAmelCase_ , from_flax=UpperCAmelCase_ ) with torch.no_grad(): snake_case_ = pt_model_loaded(**UpperCAmelCase_ ).to_tuple() self.assertEqual( len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @tooslow def lowerCAmelCase ( self : List[Any] ) ->str: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" ) snake_case_ = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCAmelCase_ )
347
0
'''simple docstring'''
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0

    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f'''{check_str} can {"" if status else "not "}be rearranged as a palindrome''')
139
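Both implementations above rest on the same counting argument: a string can be rearranged into a palindrome exactly when at most one character has an odd count, since paired characters mirror around the centre and the single odd one, if any, sits in the middle. A quick standalone check of that invariant on a few inputs:

from collections import Counter

for s in ("racecar", "aabb", "abc"):
    odd_counts = sum(count % 2 for count in Counter(s).values())
    print(s, "->", odd_counts, "odd count(s) ->", odd_counts < 2)
# racecar -> 1 odd count(s) -> True   ('e' sits in the middle)
# aabb    -> 0 odd count(s) -> True   (rearranges to "abba")
# abc     -> 3 odd count(s) -> False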
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING __SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) class __A (snake_case__): '''simple docstring''' __lowercase: int = """upernet""" def __init__( self : str , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : str=512 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Optional[Any]=[1, 2, 3, 6] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Tuple=0.4 , UpperCAmelCase_ : Tuple=384 , UpperCAmelCase_ : Union[str, Any]=256 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : Tuple=255 , **UpperCAmelCase_ : Dict , ) ->Union[str, Any]: """simple docstring""" super().__init__(**UpperCAmelCase_ ) if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) snake_case_ = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): snake_case_ = backbone_config.get("""model_type""" ) snake_case_ = CONFIG_MAPPING[backbone_model_type] snake_case_ = config_class.from_dict(UpperCAmelCase_ ) snake_case_ = backbone_config snake_case_ = hidden_size snake_case_ = initializer_range snake_case_ = pool_scales snake_case_ = use_auxiliary_head snake_case_ = auxiliary_loss_weight snake_case_ = auxiliary_in_channels snake_case_ = auxiliary_channels snake_case_ = auxiliary_num_convs snake_case_ = auxiliary_concat_input snake_case_ = loss_ignore_index def lowerCAmelCase ( self : str ) ->Optional[Any]: """simple docstring""" snake_case_ = copy.deepcopy(self.__dict__ ) snake_case_ = self.backbone_config.to_dict() snake_case_ = self.__class__.model_type return output
347
0
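The dict branch in the `__init__` above is what lets a serialized backbone config round-trip through CONFIG_MAPPING: a plain dict carrying a `model_type` key is rebuilt into the concrete config class. A hypothetical usage sketch, assuming the class is exposed as `transformers.UperNetConfig` and that a ConvNeXt backbone is available:

from transformers import UperNetConfig

# A dict with a `model_type` key is rebuilt via CONFIG_MAPPING into the concrete class.
config = UperNetConfig(backbone_config={"model_type": "convnext"})
print(type(config.backbone_config).__name__)  # e.g. ConvNextConfig
print(config.pool_scales)                     # [1, 2, 3, 6] (the pyramid pooling scales)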
import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def __lowerCAmelCase ( ) -> Dict: __a = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png''' __a = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ).convert('''RGB''' ) return image def __lowerCAmelCase ( a__ ) -> Tuple: __a = [] # fmt: off # vision encoder rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') ) rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') ) rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') ) rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') ) rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') ) rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") ) # QFormer rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') ) rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') ) # fmt: on return rename_keys def __lowerCAmelCase ( a__ , a__ , a__ ) -> Any: __a = dct.pop(_SCREAMING_SNAKE_CASE ) __a = val def __lowerCAmelCase ( a__ , a__ ) -> Tuple: for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases __a = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" ) __a = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" ) # next, set bias 
in the state dict __a = torch.cat((q_bias, torch.zeros_like(_SCREAMING_SNAKE_CASE , requires_grad=_SCREAMING_SNAKE_CASE ), v_bias) ) __a = qkv_bias def __lowerCAmelCase ( a__ , a__ ) -> List[str]: __a = 364 if '''coco''' in model_name else 224 __a = BlipaVisionConfig(image_size=_SCREAMING_SNAKE_CASE ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: __a = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=_SCREAMING_SNAKE_CASE ).to_dict() elif "opt-6.7b" in model_name: __a = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=_SCREAMING_SNAKE_CASE ).to_dict() elif "t5-xl" in model_name: __a = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: __a = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict() __a = BlipaConfig(vision_config=_SCREAMING_SNAKE_CASE , text_config=_SCREAMING_SNAKE_CASE ) return config, image_size @torch.no_grad() def __lowerCAmelCase ( a__ , a__=None , a__=False ) -> Optional[Any]: __a = ( AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' ) if '''opt''' in model_name else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' ) ) __a = tokenizer('''\n''' , add_special_tokens=_SCREAMING_SNAKE_CASE ).input_ids[0] __a , __a = get_blipa_config(_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE ) __a = BlipaForConditionalGeneration(_SCREAMING_SNAKE_CASE ).eval() __a = { '''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''), '''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''), '''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''), '''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''), '''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''), '''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''), '''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''), } __a , __a = model_name_to_original[model_name] # load original model print('''Loading original model...''' ) __a = '''cuda''' if torch.cuda.is_available() else '''cpu''' __a , __a , __a = load_model_and_preprocess( name=_SCREAMING_SNAKE_CASE , model_type=_SCREAMING_SNAKE_CASE , is_eval=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE ) original_model.eval() print('''Done!''' ) # update state dict keys __a = original_model.state_dict() __a = create_rename_keys(_SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): __a = state_dict.pop(_SCREAMING_SNAKE_CASE ) if key.startswith('''Qformer.bert''' ): __a = key.replace('''Qformer.bert''' , '''qformer''' ) if "attention.self" in key: __a = key.replace('''self''' , '''attention''' ) if "opt_proj" in key: __a = key.replace('''opt_proj''' , '''language_projection''' ) if "t5_proj" in key: __a = key.replace('''t5_proj''' , '''language_projection''' ) if key.startswith('''opt''' ): __a = key.replace('''opt''' , '''language''' ) if key.startswith('''t5''' ): __a = key.replace('''t5''' , '''language''' ) __a = val # read in qv biases read_in_q_v_bias(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a , __a = hf_model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE ) assert 
len(_SCREAMING_SNAKE_CASE ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] __a = load_demo_image() __a = vis_processors['''eval'''](_SCREAMING_SNAKE_CASE ).unsqueeze(0 ).to(_SCREAMING_SNAKE_CASE ) __a = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(_SCREAMING_SNAKE_CASE ) # create processor __a = BlipImageProcessor( size={'''height''': image_size, '''width''': image_size} , image_mean=_SCREAMING_SNAKE_CASE , image_std=_SCREAMING_SNAKE_CASE ) __a = BlipaProcessor(image_processor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE ) __a = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values.to(_SCREAMING_SNAKE_CASE ) # make sure processor creates exact same pixel values assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) original_model.to(_SCREAMING_SNAKE_CASE ) hf_model.to(_SCREAMING_SNAKE_CASE ) with torch.no_grad(): if "opt" in model_name: __a = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits __a = hf_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).logits else: __a = original_model( {'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits __a = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 ) __a = hf_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ).logits assert original_logits.shape == logits.shape print('''First values of original logits:''' , original_logits[0, :3, :3] ) print('''First values of HF logits:''' , logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": __a = torch.tensor( [[-41.5_850, -4.4_440, -8.9_922], [-47.4_322, -5.9_143, -1.7_340]] , device=_SCREAMING_SNAKE_CASE ) assert torch.allclose(logits[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) elif model_name == "blip2-flan-t5-xl-coco": __a = torch.tensor( [[-57.0_109, -9.8_967, -12.6_280], [-68.6_578, -12.7_191, -10.5_065]] , device=_SCREAMING_SNAKE_CASE ) else: # cast to same type __a = logits.dtype assert torch.allclose(original_logits.to(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , atol=1e-2 ) print('''Looks ok!''' ) print('''Generating a caption...''' ) __a = '''''' __a = tokenizer(_SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).input_ids.to(_SCREAMING_SNAKE_CASE ) __a = original_model.generate({'''image''': original_pixel_values} ) __a = hf_model.generate( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print('''Original generation:''' , _SCREAMING_SNAKE_CASE ) __a = input_ids.shape[1] __a = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_SCREAMING_SNAKE_CASE ) __a = [text.strip() for text in output_text] print('''HF generation:''' , _SCREAMING_SNAKE_CASE ) if pytorch_dump_folder_path is not None: processor.save_pretrained(_SCREAMING_SNAKE_CASE ) hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: processor.push_to_hub(F"""nielsr/{model_name}""" ) hf_model.push_to_hub(F"""nielsr/{model_name}""" ) if __name__ == "__main__": A : int = argparse.ArgumentParser() A : int = [ 'blip2-opt-2.7b', 'blip2-opt-6.7b', 'blip2-opt-2.7b-coco', 'blip2-opt-6.7b-coco', 'blip2-flan-t5-xl', 'blip2-flan-t5-xl-coco', 'blip2-flan-t5-xxl', ] parser.add_argument( '--model_name', default='blip2-opt-2.7b', choices=choices, type=str, help='Path to hf config.json of model to convert', ) 
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub after converting', ) A : int = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
6
"""simple docstring""" import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class __A (unittest.TestCase): '''simple docstring''' def lowerCAmelCase ( self : Optional[int] ) ->Dict: """simple docstring""" snake_case_ = """ylacombe/bark-small""" snake_case_ = tempfile.mkdtemp() snake_case_ = """en_speaker_1""" snake_case_ = """This is a test string""" snake_case_ = """speaker_embeddings_path.json""" snake_case_ = """speaker_embeddings""" def lowerCAmelCase ( self : List[str] , **UpperCAmelCase_ : str ) ->Optional[int]: """simple docstring""" return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase_ ) def lowerCAmelCase ( self : Any ) ->Dict: """simple docstring""" shutil.rmtree(self.tmpdirname ) def lowerCAmelCase ( self : List[Any] ) ->Dict: """simple docstring""" snake_case_ = self.get_tokenizer() snake_case_ = BarkProcessor(tokenizer=UpperCAmelCase_ ) processor.save_pretrained(self.tmpdirname ) snake_case_ = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def lowerCAmelCase ( self : Dict ) ->int: """simple docstring""" snake_case_ = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) snake_case_ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) snake_case_ = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def lowerCAmelCase ( self : Optional[Any] ) ->Any: """simple docstring""" snake_case_ = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) snake_case_ = 35 snake_case_ = 2 snake_case_ = 8 snake_case_ = { """semantic_prompt""": np.ones(UpperCAmelCase_ ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset snake_case_ = processor(text=self.input_string , voice_preset=UpperCAmelCase_ ) snake_case_ = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file snake_case_ = os.path.join(self.tmpdirname , """file.npz""" ) np.savez(UpperCAmelCase_ , **UpperCAmelCase_ ) snake_case_ = processor(text=self.input_string , voice_preset=UpperCAmelCase_ ) snake_case_ = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from the hub snake_case_ = processor(text=self.input_string , voice_preset=self.voice_preset ) def lowerCAmelCase ( self : Tuple ) ->Dict: """simple docstring""" snake_case_ = self.get_tokenizer() snake_case_ = BarkProcessor(tokenizer=UpperCAmelCase_ ) snake_case_ = processor(text=self.input_string ) snake_case_ = tokenizer( self.input_string , padding="""max_length""" 
, max_length=256 , add_special_tokens=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
347
0
"""simple docstring""" from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=32 , a_=2 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ): '''simple docstring''' __snake_case : int = parent __snake_case : str = 13 __snake_case : Union[str, Any] = 7 __snake_case : List[Any] = True __snake_case : Optional[Any] = True __snake_case : Union[str, Any] = True __snake_case : int = True __snake_case : List[Any] = 99 __snake_case : Union[str, Any] = 3_84 __snake_case : Optional[int] = 2 __snake_case : Optional[int] = 4 __snake_case : Optional[Any] = 37 __snake_case : Tuple = '''gelu''' __snake_case : List[Any] = 0.1 __snake_case : Union[str, Any] = 0.1 __snake_case : int = 5_12 __snake_case : Optional[Any] = 16 __snake_case : List[str] = 2 __snake_case : Optional[int] = 0.02 __snake_case : int = 3 __snake_case : Union[str, Any] = 4 __snake_case : Optional[int] = 1_28 __snake_case : str = 2 __snake_case : Union[str, Any] = 9 __snake_case : int = 1 __snake_case : Union[str, Any] = None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : Union[str, Any] = None if self.use_input_mask: __snake_case : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case : Optional[Any] = None if self.use_token_type_ids: __snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case : str = None __snake_case : int = None __snake_case : Tuple = None if self.use_labels: __snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) __snake_case : List[Any] = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase_ , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : List[str] = TFConvBertModel(config=UpperCAmelCase_ ) __snake_case : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} __snake_case : Tuple = [input_ids, 
input_mask] __snake_case : Optional[int] = model(UpperCAmelCase_ ) __snake_case : Any = model(UpperCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : int = TFConvBertForMaskedLM(config=UpperCAmelCase_ ) __snake_case : int = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } __snake_case : List[Any] = model(UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Dict = self.num_labels __snake_case : Tuple = TFConvBertForSequenceClassification(config=UpperCAmelCase_ ) __snake_case : Optional[int] = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } __snake_case : Any = model(UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Optional[int] = self.num_choices __snake_case : Optional[int] = TFConvBertForMultipleChoice(config=UpperCAmelCase_ ) __snake_case : Dict = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) ) __snake_case : int = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) ) __snake_case : List[Any] = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) ) __snake_case : Tuple = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } __snake_case : str = model(UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : List[str] = self.num_labels __snake_case : int = TFConvBertForTokenClassification(config=UpperCAmelCase_ ) __snake_case : List[Any] = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } __snake_case : Any = model(UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Dict = TFConvBertForQuestionAnswering(config=UpperCAmelCase_ ) __snake_case : Optional[int] = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } __snake_case : Union[str, Any] = model(UpperCAmelCase_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : List[Any] = config_and_inputs __snake_case : str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class _UpperCAmelCase ( snake_case__, snake_case__, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =( ( 
TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) lowerCamelCase__ =( { """feature-extraction""": TFConvBertModel, """fill-mask""": TFConvBertForMaskedLM, """question-answering""": TFConvBertForQuestionAnswering, """text-classification""": TFConvBertForSequenceClassification, """token-classification""": TFConvBertForTokenClassification, """zero-shot""": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) lowerCamelCase__ =False lowerCamelCase__ =False lowerCamelCase__ =False def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = TFConvBertModelTester(self ) __snake_case : Optional[Any] = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Tuple = True __snake_case : Optional[Any] = True if hasattr(UpperCAmelCase_ , '''use_cache''' ): __snake_case : List[Any] = True __snake_case : Tuple = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length ) __snake_case : int = getattr(self.model_tester , '''key_length''' , UpperCAmelCase_ ) for model_class in self.all_model_classes: __snake_case : List[Any] = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) __snake_case : List[Any] = model_class(UpperCAmelCase_ ) __snake_case : List[str] = len(model(UpperCAmelCase_ ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCAmelCase_ , saved_model=UpperCAmelCase_ ) __snake_case : Dict = os.path.join(UpperCAmelCase_ , '''saved_model''' , '''1''' ) __snake_case : Optional[Any] = tf.keras.models.load_model(UpperCAmelCase_ ) __snake_case : Any = model(UpperCAmelCase_ ) if self.is_encoder_decoder: __snake_case : Dict = outputs['''encoder_hidden_states'''] __snake_case : int = outputs['''encoder_attentions'''] else: __snake_case : Tuple = outputs['''hidden_states'''] __snake_case : List[Any] = outputs['''attentions'''] self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ ) __snake_case : 
Dict = getattr( self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(UpperCAmelCase_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' ) self.assertIsNotNone(UpperCAmelCase_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : int = True __snake_case : Tuple = getattr(self.model_tester , '''decoder_seq_length''' , self.model_tester.seq_length ) __snake_case : str = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length ) __snake_case : str = getattr(self.model_tester , '''key_length''' , UpperCAmelCase_ ) __snake_case : str = getattr(self.model_tester , '''key_length''' , UpperCAmelCase_ ) def check_decoder_attentions_output(a_ ): __snake_case : Optional[int] = len(UpperCAmelCase_ ) self.assertEqual(out_len % 2 , 0 ) __snake_case : Optional[Any] = outputs.decoder_attentions self.assertEqual(len(UpperCAmelCase_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(a_ ): __snake_case : Tuple = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(UpperCAmelCase_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: __snake_case : Dict = True __snake_case : List[Any] = False __snake_case : Any = model_class(UpperCAmelCase_ ) __snake_case : List[str] = model(self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) ) __snake_case : Any = len(UpperCAmelCase_ ) self.assertEqual(config.output_hidden_states , UpperCAmelCase_ ) check_encoder_attentions_output(UpperCAmelCase_ ) if self.is_encoder_decoder: __snake_case : str = model_class(UpperCAmelCase_ ) __snake_case : List[str] = model(self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) ) self.assertEqual(config.output_hidden_states , UpperCAmelCase_ ) check_decoder_attentions_output(UpperCAmelCase_ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] __snake_case : List[str] = True __snake_case : Tuple = model_class(UpperCAmelCase_ ) __snake_case : Union[str, Any] = model(self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) ) self.assertEqual(config.output_hidden_states , UpperCAmelCase_ ) check_encoder_attentions_output(UpperCAmelCase_ ) # Check attention is always last and order is fine __snake_case : List[Any] = True __snake_case : Union[str, Any] = True __snake_case : Optional[Any] = model_class(UpperCAmelCase_ ) __snake_case : List[str] = model(self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder 
else 1) , len(UpperCAmelCase_ ) ) self.assertEqual(model.config.output_hidden_states , UpperCAmelCase_ ) check_encoder_attentions_output(UpperCAmelCase_ ) @require_tf class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' ) __snake_case : Any = tf.constant([[0, 1, 2, 3, 4, 5]] ) __snake_case : Union[str, Any] = model(UpperCAmelCase_ )[0] __snake_case : Optional[int] = [1, 6, 7_68] self.assertEqual(output.shape , UpperCAmelCase_ ) __snake_case : Optional[Any] = tf.constant( [ [ [-0.0347_5493, -0.468_6034, -0.3063_8832], [0.2263_7248, -0.2698_8646, -0.742_3424], [0.1032_4868, -0.4501_3508, -0.5828_0784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 )
102
"""simple docstring""" import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 __SCREAMING_SNAKE_CASE : int = sys.version_info >= (3, 10) def _a ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE ) @dataclass class __A : '''simple docstring''' __lowercase: int __lowercase: float __lowercase: str __lowercase: bool @dataclass class __A : '''simple docstring''' __lowercase: int = 42 __lowercase: str = field(default="""toto""" , metadata={"""help""": """help message"""}) @dataclass class __A : '''simple docstring''' __lowercase: bool = False __lowercase: bool = True __lowercase: Optional[bool] = None class __A (snake_case__): '''simple docstring''' __lowercase: str = """titi""" __lowercase: Any = """toto""" class __A (snake_case__): '''simple docstring''' __lowercase: int = """titi""" __lowercase: Optional[Any] = """toto""" __lowercase: List[Any] = 42 @dataclass class __A : '''simple docstring''' __lowercase: BasicEnum = "toto" def lowerCAmelCase ( self : int ) ->List[Any]: """simple docstring""" snake_case_ = BasicEnum(self.foo ) @dataclass class __A : '''simple docstring''' __lowercase: MixedTypeEnum = "toto" def lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]: """simple docstring""" snake_case_ = MixedTypeEnum(self.foo ) @dataclass class __A : '''simple docstring''' __lowercase: Optional[int] = None __lowercase: Optional[float] = field(default=snake_case__ , metadata={"""help""": """help message"""}) __lowercase: Optional[str] = None __lowercase: Optional[List[str]] = list_field(default=[]) __lowercase: Optional[List[int]] = list_field(default=[]) @dataclass class __A : '''simple docstring''' __lowercase: List[int] = list_field(default=[]) __lowercase: List[int] = list_field(default=[1, 2, 3]) __lowercase: List[str] = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) __lowercase: List[float] = list_field(default=[0.1, 0.2, 0.3]) @dataclass class __A : '''simple docstring''' __lowercase: List[int] = field() __lowercase: str = field() __lowercase: BasicEnum = field() def lowerCAmelCase ( self : Any ) ->str: """simple docstring""" snake_case_ = BasicEnum(self.required_enum ) @dataclass class __A : '''simple docstring''' __lowercase: int __lowercase: "BasicEnum" = field() __lowercase: "Optional[bool]" = None __lowercase: "str" = field(default="""toto""" , metadata={"""help""": """help message"""}) __lowercase: "List[str]" = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) if is_python_no_less_than_3_10: @dataclass class __A : '''simple docstring''' __lowercase: bool = False __lowercase: bool = True __lowercase: bool | None = None @dataclass class __A : '''simple docstring''' __lowercase: int | None = None __lowercase: float | None = field(default=snake_case__ , metadata={"""help""": """help message"""}) __lowercase: str | None = None __lowercase: list[str] | None = list_field(default=[]) __lowercase: list[int] | None = list_field(default=[]) class __A (unittest.TestCase): '''simple docstring''' def lowerCAmelCase ( 
self : Optional[int] , UpperCAmelCase_ : argparse.ArgumentParser , UpperCAmelCase_ : argparse.ArgumentParser ) ->Optional[int]: """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""} snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get("""choices""" , UpperCAmelCase_ ) and yy.get("""choices""" , UpperCAmelCase_ ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx["""type"""](UpperCAmelCase_ ) , yy["""type"""](UpperCAmelCase_ ) ) del xx["type"], yy["type"] self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : int ) ->Any: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--bar""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--baz""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--flag""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""] ((snake_case_) , ) = parser.parse_args_into_dataclasses(UpperCAmelCase_ , look_for_args_file=UpperCAmelCase_ ) self.assertFalse(example.flag ) def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , default=42 , type=UpperCAmelCase_ ) expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]: """simple docstring""" snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) expected.add_argument("""--baz""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument("""--no_baz""" , action="""store_false""" , default=UpperCAmelCase_ , dest="""baz""" ) expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ ) snake_case_ = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCAmelCase_ ) for dataclass_type in dataclass_types: snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """--no_baz"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """--baz"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = 
parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) def lowerCAmelCase ( self : int ) ->List[str]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument( """--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(args.foo , """toto""" ) snake_case_ = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) snake_case_ = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo , """titi""" ) snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) snake_case_ = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo , 42 ) snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def lowerCAmelCase ( self : Dict ) ->str: """simple docstring""" @dataclass class __A : '''simple docstring''' __lowercase: Literal["titi", "toto", 42] = "toto" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument( """--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(args.foo , """toto""" ) snake_case_ = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo , """titi""" ) snake_case_ = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo , 42 ) def lowerCAmelCase ( self : Optional[int] ) ->Dict: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=UpperCAmelCase_ ) expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ ) expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual( UpperCAmelCase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , ) snake_case_ = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() ) self.assertEqual(UpperCAmelCase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ ) expected.add_argument("""--bar""" , 
default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="""help message""" ) expected.add_argument("""--baz""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ ) expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) snake_case_ = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCAmelCase_ ) for dataclass_type in dataclass_types: snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , bar=UpperCAmelCase_ , baz=UpperCAmelCase_ , ces=[] , des=[] ) ) snake_case_ = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) ) def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--required_list""" , nargs="""+""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--required_str""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument( """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument( """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , ) expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ ) expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" ) expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict ) ->Tuple: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } snake_case_ = parser.parse_dict(UpperCAmelCase_ )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, """extra""": 42, } self.assertRaises(UpperCAmelCase_ , parser.parse_dict , UpperCAmelCase_ , allow_extra_keys=UpperCAmelCase_ ) def lowerCAmelCase ( self : Any ) ->Dict: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ = os.path.join(UpperCAmelCase_ , """temp_json""" ) os.mkdir(UpperCAmelCase_ ) with open(temp_local_path + """.json""" , """w+""" ) as f: json.dump(UpperCAmelCase_ , UpperCAmelCase_ ) 
snake_case_ = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[int] ) ->List[str]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ = os.path.join(UpperCAmelCase_ , """temp_yaml""" ) os.mkdir(UpperCAmelCase_ ) with open(temp_local_path + """.yaml""" , """w+""" ) as f: yaml.dump(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict ) ->Any: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ )
347
0
'''simple docstring'''

import argparse
import json
from pathlib import Path

import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download

from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging


logging.set_verbosity_info()
a : Optional[int] = logging.get_logger(__name__)


def __lowerCamelCase(_lowercase) -> int:
    UpperCAmelCase : List[Any] = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        UpperCAmelCase : List[str] = 1_28
    elif "12-12" in model_name:
        UpperCAmelCase : Union[str, Any] = 1_2
        UpperCAmelCase : Optional[int] = 1_2
    elif "14-14" in model_name:
        UpperCAmelCase : Any = 1_4
        UpperCAmelCase : Any = 1_4
    elif "16-16" in model_name:
        UpperCAmelCase : Tuple = 1_6
        UpperCAmelCase : Dict = 1_6
    else:
        raise ValueError("""Model not supported""")

    UpperCAmelCase : Dict = """huggingface/label-files"""
    if "speech-commands" in model_name:
        UpperCAmelCase : List[str] = 3_5
        UpperCAmelCase : Optional[Any] = """speech-commands-v2-id2label.json"""
    else:
        UpperCAmelCase : List[str] = 5_27
        UpperCAmelCase : Tuple = """audioset-id2label.json"""

    UpperCAmelCase : Union[str, Any] = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, repo_type="""dataset"""), """r"""))
    UpperCAmelCase : str = {int(_SCREAMING_SNAKE_CASE): v for k, v in idalabel.items()}
    UpperCAmelCase : Union[str, Any] = idalabel
    UpperCAmelCase : Dict = {v: k for k, v in idalabel.items()}

    return config


def __lowerCamelCase(_lowercase) -> str:
    if "module.v" in name:
        UpperCAmelCase : int = name.replace("""module.v""", """audio_spectrogram_transformer""")
    if "cls_token" in name:
        UpperCAmelCase : str = name.replace("""cls_token""", """embeddings.cls_token""")
    if "dist_token" in name:
        UpperCAmelCase : str = name.replace("""dist_token""", """embeddings.distillation_token""")
    if "pos_embed" in name:
        UpperCAmelCase : List[Any] = name.replace("""pos_embed""", """embeddings.position_embeddings""")
    if "patch_embed.proj" in name:
        UpperCAmelCase : List[Any] = name.replace("""patch_embed.proj""", """embeddings.patch_embeddings.projection""")
    # transformer blocks
    if "blocks" in name:
        UpperCAmelCase : Tuple = name.replace("""blocks""", """encoder.layer""")
    if "attn.proj" in name:
        UpperCAmelCase : Optional[int] = name.replace("""attn.proj""", """attention.output.dense""")
    if "attn" in name:
        UpperCAmelCase : Dict = name.replace("""attn""", """attention.self""")
    if "norm1" in name:
        UpperCAmelCase : Tuple = name.replace("""norm1""", """layernorm_before""")
    if "norm2" in name:
        UpperCAmelCase : Tuple = name.replace("""norm2""", """layernorm_after""")
    if "mlp.fc1" in name:
        UpperCAmelCase : List[str] = name.replace("""mlp.fc1""", """intermediate.dense""")
    if "mlp.fc2" in name:
        UpperCAmelCase : Optional[int] = name.replace("""mlp.fc2""", """output.dense""")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        UpperCAmelCase : Union[str, Any] = name.replace("""audio_spectrogram_transformer.norm""", """audio_spectrogram_transformer.layernorm""")
    # classifier head
    if "module.mlp_head.0" in name:
        UpperCAmelCase : Dict = name.replace("""module.mlp_head.0""", """classifier.layernorm""")
    if "module.mlp_head.1" in name:
        UpperCAmelCase : Dict = name.replace("""module.mlp_head.1""", """classifier.dense""")
    return name


def __lowerCamelCase(_lowercase, _lowercase) -> str:
    for key in orig_state_dict.copy().keys():
        UpperCAmelCase : Optional[Any] = orig_state_dict.pop(_SCREAMING_SNAKE_CASE)

        if "qkv" in key:
            UpperCAmelCase : int = key.split(""".""")
            UpperCAmelCase : Tuple = int(key_split[3])
            UpperCAmelCase : int = config.hidden_size
            if "weight" in key:
                UpperCAmelCase : List[Any] = val[:dim, :]
                UpperCAmelCase : Dict = val[dim : dim * 2, :]
                UpperCAmelCase : List[Any] = val[-dim:, :]
            else:
                UpperCAmelCase : Tuple = val[:dim]
                UpperCAmelCase : Optional[Any] = val[dim : dim * 2]
                UpperCAmelCase : int = val[-dim:]
        else:
            UpperCAmelCase : Optional[int] = val

    return orig_state_dict


def __lowerCamelCase(_lowercase) -> Tuple:
    UpperCAmelCase : Optional[Any] = [
        """module.v.head.weight""",
        """module.v.head.bias""",
        """module.v.head_dist.weight""",
        """module.v.head_dist.bias""",
    ]
    for k in ignore_keys:
        state_dict.pop(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)


@torch.no_grad()
def __lowerCamelCase(_lowercase, _lowercase, _lowercase=False) -> Union[str, Any]:
    UpperCAmelCase : Union[str, Any] = get_audio_spectrogram_transformer_config(_SCREAMING_SNAKE_CASE)

    UpperCAmelCase : str = {
        """ast-finetuned-audioset-10-10-0.4593""": (
            """https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
        ),
        """ast-finetuned-audioset-10-10-0.450""": (
            """https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
        ),
        """ast-finetuned-audioset-10-10-0.448""": (
            """https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
        ),
        """ast-finetuned-audioset-10-10-0.448-v2""": (
            """https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
        ),
        """ast-finetuned-audioset-12-12-0.447""": (
            """https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
        ),
        """ast-finetuned-audioset-14-14-0.443""": (
            """https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
        ),
        """ast-finetuned-audioset-16-16-0.442""": (
            """https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
        ),
        """ast-finetuned-speech-commands-v2""": (
            """https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
        ),
    }

    # load original state_dict
    UpperCAmelCase : Any = model_name_to_url[model_name]
    UpperCAmelCase : Optional[int] = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE, map_location="""cpu""")
    # remove some keys
    remove_keys(_SCREAMING_SNAKE_CASE)
    # rename some keys
    UpperCAmelCase : Union[str, Any] = convert_state_dict(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)

    # load 🤗 model
    UpperCAmelCase : str = ASTForAudioClassification(_SCREAMING_SNAKE_CASE)
    model.eval()

    model.load_state_dict(_SCREAMING_SNAKE_CASE)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    UpperCAmelCase : Union[str, Any] = -4.267_7393 if """speech-commands""" not in model_name else -6.84_5978
    UpperCAmelCase : Dict = 4.568_9974 if """speech-commands""" not in model_name else 5.565_4526
    UpperCAmelCase : List[Any] = 1_0_2_4 if """speech-commands""" not in model_name else 1_2_8
    UpperCAmelCase : Union[str, Any] = ASTFeatureExtractor(mean=_SCREAMING_SNAKE_CASE, std=_SCREAMING_SNAKE_CASE, max_length=_SCREAMING_SNAKE_CASE)

    if "speech-commands" in model_name:
        UpperCAmelCase : Dict = load_dataset("""speech_commands""", """v0.02""", split="""validation""")
        UpperCAmelCase : List[Any] = dataset[0]["""audio"""]["""array"""]
    else:
        UpperCAmelCase : Tuple = hf_hub_download(
            repo_id="""nielsr/audio-spectogram-transformer-checkpoint""",
            filename="""sample_audio.flac""",
            repo_type="""dataset""",
        )

        UpperCAmelCase, UpperCAmelCase = torchaudio.load(_SCREAMING_SNAKE_CASE)
        UpperCAmelCase : Tuple = waveform.squeeze().numpy()

    UpperCAmelCase : Optional[int] = feature_extractor(_SCREAMING_SNAKE_CASE, sampling_rate=1_6_0_0_0, return_tensors="""pt""")

    # forward pass
    UpperCAmelCase : Optional[int] = model(**_SCREAMING_SNAKE_CASE)
    UpperCAmelCase : str = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        UpperCAmelCase : Any = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        UpperCAmelCase : List[Any] = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        UpperCAmelCase : List[str] = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        UpperCAmelCase : Union[str, Any] = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        UpperCAmelCase : Tuple = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        UpperCAmelCase : Dict = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        UpperCAmelCase : List[str] = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        UpperCAmelCase : int = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("""Unknown model name""")

    if not torch.allclose(logits[0, :3], _SCREAMING_SNAKE_CASE, atol=1e-4):
        raise ValueError("""Logits don't match""")
    print("""Looks ok!""")

    if pytorch_dump_folder_path is not None:
        Path(_SCREAMING_SNAKE_CASE).mkdir(exist_ok=_SCREAMING_SNAKE_CASE)
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''')
        model.save_pretrained(_SCREAMING_SNAKE_CASE)
        print(f'''Saving feature extractor to {pytorch_dump_folder_path}''')
        feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE)

    if push_to_hub:
        print("""Pushing model and feature extractor to the hub...""")
        model.push_to_hub(f'''MIT/{model_name}''')
        feature_extractor.push_to_hub(f'''MIT/{model_name}''')


if __name__ == "__main__":
    a : str = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""ast-finetuned-audioset-10-10-0.4593""",
        type=str,
        help="""Name of the Audio Spectrogram Transformer model you\'d like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
    )

    a : int = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
265
"""simple docstring""" import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy __SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , ) -> Optional[Any]: snake_case_ = bnb_quantization_config.load_in_abit snake_case_ = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,""" """ make sure you have the latest version of `bitsandbytes` installed.""" ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,""" """make sure you have the latest version of `bitsandbytes` installed.""" ) snake_case_ = [] # custom device map if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(device_map.keys() ) > 1: snake_case_ = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: snake_case_ = get_keys_to_not_convert(_SCREAMING_SNAKE_CASE ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(_SCREAMING_SNAKE_CASE ) snake_case_ = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: snake_case_ = [] snake_case_ = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(_SCREAMING_SNAKE_CASE ) # compatibility with peft snake_case_ = load_in_abit snake_case_ = load_in_abit snake_case_ = get_parameter_device(_SCREAMING_SNAKE_CASE ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( """It is not recommended to quantize a loaded model. 
""" """The model should be instantiated under the `init_empty_weights` context manager.""" ) snake_case_ = replace_with_bnb_layers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) # convert param to the right dtype snake_case_ = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: snake_case_ = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" ) snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(_SCREAMING_SNAKE_CASE ): param.to(_SCREAMING_SNAKE_CASE ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" ) logger.info( f"""The model device type is {model_device.type}. However, cuda is needed for quantization.""" """We move the model to cuda.""" ) return model elif weights_location is None: raise RuntimeError( f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): snake_case_ = replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) snake_case_ = get_quantized_model_device_map( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_memory=_SCREAMING_SNAKE_CASE , no_split_module_classes=_SCREAMING_SNAKE_CASE , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): snake_case_ = True snake_case_ = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] ) load_checkpoint_in_model( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=_SCREAMING_SNAKE_CASE , offload_state_dict=_SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(_SCREAMING_SNAKE_CASE , device_map=_SCREAMING_SNAKE_CASE , offload_dir=_SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: if device_map is None: if torch.cuda.is_available(): snake_case_ = {"""""": torch.cuda.current_device()} else: raise RuntimeError("""No GPU found. 
A GPU is needed for quantization.""" ) logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( """If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """ """'sequential'.""" ) snake_case_ = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) snake_case_ = {} snake_case_ = special_dtypes snake_case_ = no_split_module_classes snake_case_ = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": snake_case_ = get_balanced_memory( _SCREAMING_SNAKE_CASE , low_zero=(device_map == """balanced_low_0""") , max_memory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) snake_case_ = max_memory snake_case_ = infer_auto_device_map(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # check if don't have any quantized module on the cpu snake_case_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules snake_case_ = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( """ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. """ ) else: logger.info( """Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit""" ) del device_map_without_some_modules return device_map def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: if modules_to_not_convert is None: snake_case_ = [] snake_case_ , snake_case_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return model def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) -> List[Any]: snake_case_ = False for name, module in model.named_children(): if current_key_name is None: snake_case_ = [] current_key_name.append(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` snake_case_ = """.""".join(_SCREAMING_SNAKE_CASE ) snake_case_ = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: snake_case_ = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: snake_case_ = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: snake_case_ = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" ) snake_case_ = module.weight.data if module.bias is not None: snake_case_ = module.bias.data bnb_module.requires_grad_(_SCREAMING_SNAKE_CASE ) setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = True if len(list(module.children() ) ) > 0: snake_case_ , snake_case_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _a ( _SCREAMING_SNAKE_CASE ) -> Any: # Create a copy of the model with init_empty_weights(): snake_case_ = deepcopy(_SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside `init_empty_weights` context manager` snake_case_ = find_tied_parameters(_SCREAMING_SNAKE_CASE ) # For compatibility with Accelerate < 0.18 if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): snake_case_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: snake_case_ = sum(_SCREAMING_SNAKE_CASE , [] ) snake_case_ = len(_SCREAMING_SNAKE_CASE ) > 0 # Check if it is a base model snake_case_ = False if hasattr(_SCREAMING_SNAKE_CASE , """base_model_prefix""" ): snake_case_ = not hasattr(_SCREAMING_SNAKE_CASE , model.base_model_prefix 
) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head snake_case_ = list(model.named_children() ) snake_case_ = [list_modules[-1][0]] # add last module together with tied weights snake_case_ = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE ) snake_case_ = list(set(_SCREAMING_SNAKE_CASE ) ) + list(_SCREAMING_SNAKE_CASE ) # remove ".weight" from the keys snake_case_ = [""".weight""", """.bias"""] snake_case_ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: snake_case_ = name.replace(_SCREAMING_SNAKE_CASE , """""" ) filtered_module_names.append(_SCREAMING_SNAKE_CASE ) return filtered_module_names def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: for m in model.modules(): if isinstance(_SCREAMING_SNAKE_CASE , bnb.nn.Linearabit ): return True return False def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: return next(parameter.parameters() ).device def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0 , dtype=_SCREAMING_SNAKE_CASE , value=_SCREAMING_SNAKE_CASE ) snake_case_ = param_name snake_case_ = model if "." in tensor_name: snake_case_ = tensor_name.split(""".""" ) for split in splits[:-1]: snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if new_module is None: raise ValueError(f"""{module} has no attribute {split}.""" ) snake_case_ = new_module snake_case_ = splits[-1] # offload weights snake_case_ = False offload_weight(module._parameters[tensor_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) if hasattr(module._parameters[tensor_name] , """SCB""" ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE , ) else: offload_weight(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) offload_weight(_SCREAMING_SNAKE_CASE , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """meta""" , dtype=_SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
347
0
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_ta import TaTokenizer
else:
    __UpperCAmelCase = None

__UpperCAmelCase = logging.get_logger(__name__)

__UpperCAmelCase = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

__UpperCAmelCase = {
    'vocab_file': {
        't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
        't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
        't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
        't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
        't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
    },
    'tokenizer_file': {
        't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
        't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
        't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
        't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
        't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
    },
}

# TODO(PVP) - this should be removed in Transformers v5
__UpperCAmelCase = {
    't5-small': 512,
    't5-base': 512,
    't5-large': 512,
    't5-3b': 512,
    't5-11b': 512,
}


class lowerCAmelCase_(snake_case__):
    UpperCAmelCase__ : Dict = VOCAB_FILES_NAMES
    UpperCAmelCase__ : Any = PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase__ : Optional[Any] = ["""input_ids""", """attention_mask"""]
    UpperCAmelCase__ : str = TaTokenizer
    UpperCAmelCase__ : List[int] = []

    def __init__(self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_=100, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_) -> Optional[Any]:
        if extra_ids > 0 and additional_special_tokens is None:
            UpperCamelCase : int = [F"""<extra_id_{i}>""" for i in range(UpperCAmelCase_)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            UpperCamelCase : Union[str, Any] = len(set(filter(lambda SCREAMING_SNAKE_CASE_: bool('extra_id_' in str(UpperCAmelCase_)), UpperCAmelCase_)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
                    ' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
                    ' tokens'
                )

        super().__init__(
            UpperCAmelCase_,
            tokenizer_file=UpperCAmelCase_,
            eos_token=UpperCAmelCase_,
            unk_token=UpperCAmelCase_,
            pad_token=UpperCAmelCase_,
            extra_ids=UpperCAmelCase_,
            additional_special_tokens=UpperCAmelCase_,
            **UpperCAmelCase_,
        )

        UpperCamelCase : int = vocab_file
        UpperCamelCase : Dict = False if not self.vocab_file else True
        UpperCamelCase : Union[str, Any] = extra_ids

    @staticmethod
    def snake_case_(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_) -> int:
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            UpperCamelCase : List[str] = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    'This tokenizer was incorrectly instantiated with a model max length of'
                    F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
                    ' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
                    ' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
                    F""" {pretrained_model_name_or_path} automatically truncating your input to"""
                    F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
                    F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
                    ' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
                    ' instantiate this tokenizer with `model_max_length` set to your preferred value.',
                    UpperCAmelCase_,
                )

        return max_model_length

    def snake_case_(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.'
            )

        if not os.path.isdir(UpperCAmelCase_):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        UpperCamelCase : str = os.path.join(
            UpperCAmelCase_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCAmelCase_):
            copyfile(self.vocab_file, UpperCAmelCase_)
            logger.info(F"""Copy vocab file to {out_vocab_file}""")

        return (out_vocab_file,)

    def snake_case_(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None) -> List[int]:
        UpperCamelCase : Optional[Any] = token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a
        else:
            UpperCamelCase : Tuple = token_ids_a + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_a

    def snake_case_(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None) -> List[int]:
        UpperCamelCase : Optional[Any] = [self.eos_token_id]

        if token_ids_a is None:
            return len(token_ids_a + eos) * [0]
        return len(token_ids_a + eos + token_ids_a + eos) * [0]

    def snake_case_(self) -> Dict:
        return list(
            set(filter(lambda SCREAMING_SNAKE_CASE_: bool(re.search(R'<extra_id_\d+>', UpperCAmelCase_)) is not None, self.additional_special_tokens))
        )

    def snake_case_(self) -> Optional[Any]:
        return [self.convert_tokens_to_ids(UpperCAmelCase_) for token in self.get_sentinel_tokens()]
119
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { 'microsoft/beit-base-patch16-224-pt22k': ( 'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class __A (snake_case__): '''simple docstring''' __lowercase: Optional[int] = """beit""" def __init__( self : List[str] , UpperCAmelCase_ : List[Any]=8_192 , UpperCAmelCase_ : Dict=768 , UpperCAmelCase_ : int=12 , UpperCAmelCase_ : Tuple=12 , UpperCAmelCase_ : List[Any]=3_072 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : Dict=0.0 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : Optional[Any]=1E-12 , UpperCAmelCase_ : int=224 , UpperCAmelCase_ : Tuple=16 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Dict=[3, 5, 7, 11] , UpperCAmelCase_ : Tuple=[1, 2, 3, 6] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : List[Any]=0.4 , UpperCAmelCase_ : Optional[Any]=256 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Tuple=255 , **UpperCAmelCase_ : List[str] , ) ->Optional[Any]: """simple docstring""" super().__init__(**UpperCAmelCase_ ) snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = image_size snake_case_ = patch_size snake_case_ = num_channels snake_case_ = use_mask_token snake_case_ = use_absolute_position_embeddings snake_case_ = use_relative_position_bias snake_case_ = use_shared_relative_position_bias snake_case_ = layer_scale_init_value snake_case_ = drop_path_rate snake_case_ = use_mean_pooling # decode head attributes (semantic segmentation) snake_case_ = out_indices snake_case_ = pool_scales # auxiliary head attributes (semantic segmentation) snake_case_ = use_auxiliary_head snake_case_ = auxiliary_loss_weight snake_case_ = auxiliary_channels snake_case_ = auxiliary_num_convs snake_case_ = auxiliary_concat_input snake_case_ = semantic_loss_ignore_index class __A (snake_case__): '''simple docstring''' __lowercase: List[Any] = version.parse("""1.11""") @property def lowerCAmelCase ( self : Dict ) ->Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCAmelCase ( self : Any ) ->float: """simple docstring""" return 1E-4
347
0
'''simple docstring''' import argparse import os import subprocess from packaging.version import Version, parse from accelerate.commands.config.config_args import default_config_file, load_config_from_file a__ : Optional[Any] = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.' def _lowercase ( __A=None ): '''simple docstring''' if subparsers is not None: __UpperCamelCase = subparsers.add_parser("""tpu-config""" ,description=_description ) else: __UpperCamelCase = argparse.ArgumentParser("""Accelerate tpu-config command""" ,description=_description ) # Core arguments __UpperCamelCase = parser.add_argument_group( """Config Arguments""" ,"""Arguments that can be configured through `accelerate config`.""" ) config_args.add_argument( """--config_file""" ,type=_SCREAMING_SNAKE_CASE ,default=_SCREAMING_SNAKE_CASE ,help="""Path to the config file to use for accelerate.""" ,) config_args.add_argument( """--tpu_name""" ,default=_SCREAMING_SNAKE_CASE ,help="""The name of the TPU to use. If not specified, will use the TPU specified in the config file.""" ,) config_args.add_argument( """--tpu_zone""" ,default=_SCREAMING_SNAKE_CASE ,help="""The zone of the TPU to use. If not specified, will use the zone specified in the config file.""" ,) __UpperCamelCase = parser.add_argument_group("""TPU Arguments""" ,"""Arguments for options ran inside the TPU.""" ) pod_args.add_argument( """--use_alpha""" ,action="""store_true""" ,help="""Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.""" ,) pod_args.add_argument( """--command_file""" ,default=_SCREAMING_SNAKE_CASE ,help="""The path to the file containing the commands to run on the pod on startup.""" ,) pod_args.add_argument( """--command""" ,action="""append""" ,nargs="""+""" ,help="""A command to run on the pod. Can be passed multiple times.""" ,) pod_args.add_argument( """--install_accelerate""" ,action="""store_true""" ,help="""Whether to install accelerate on the pod. Defaults to False.""" ,) pod_args.add_argument( """--accelerate_version""" ,default="""latest""" ,help="""The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.""" ,) pod_args.add_argument( """--debug""" ,action="""store_true""" ,help="""If set, will print the command that would be run instead of running it.""" ) if subparsers is not None: parser.set_defaults(func=_SCREAMING_SNAKE_CASE ) return parser def _lowercase ( __A ): '''simple docstring''' __UpperCamelCase = None # Get the default from the config file if it exists. 
if args.config_file is not None or os.path.isfile(_SCREAMING_SNAKE_CASE ): __UpperCamelCase = load_config_from_file(args.config_file ) if not args.command_file and defaults.command_file is not None and not args.command: __UpperCamelCase = defaults.command_file if not args.command and defaults.commands is not None: __UpperCamelCase = defaults.commands if not args.tpu_name: __UpperCamelCase = defaults.tpu_name if not args.tpu_zone: __UpperCamelCase = defaults.tpu_zone if args.accelerate_version == "dev": __UpperCamelCase = """git+https://github.com/huggingface/accelerate.git""" elif args.accelerate_version == "latest": __UpperCamelCase = """accelerate -U""" elif isinstance(parse(args.accelerate_version ) ,_SCREAMING_SNAKE_CASE ): __UpperCamelCase = f"accelerate=={args.accelerate_version}" if not args.command_file and not args.command: raise ValueError("""You must specify either a command file or a command to run on the pod.""" ) if args.command_file: with open(args.command_file ,"""r""" ) as f: __UpperCamelCase = [f.read().splitlines()] # To turn list of lists into list of strings if isinstance(args.command[0] ,_SCREAMING_SNAKE_CASE ): __UpperCamelCase = [line for cmd in args.command for line in cmd] # Default to the shared folder and install accelerate __UpperCamelCase = ["""cd /usr/share"""] if args.install_accelerate: new_cmd += [f"pip install {args.accelerate_version}"] new_cmd += args.command __UpperCamelCase = """; """.join(_SCREAMING_SNAKE_CASE ) # Then send it to gcloud # Eventually try to use google-api-core to do this instead of subprocess __UpperCamelCase = ["""gcloud"""] if args.use_alpha: cmd += ["alpha"] cmd += [ "compute", "tpus", "tpu-vm", "ssh", args.tpu_name, "--zone", args.tpu_zone, "--command", args.command, "--worker", "all", ] if args.debug: print(f"Running {' '.join(_SCREAMING_SNAKE_CASE )}" ) return subprocess.run(_SCREAMING_SNAKE_CASE ) print("""Successfully setup pod.""" ) def _lowercase ( ): '''simple docstring''' __UpperCamelCase = tpu_command_parser() __UpperCamelCase = parser.parse_args() tpu_command_launcher(_SCREAMING_SNAKE_CASE )
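# Rough illustration of the command the launcher above assembles; all flag values are
# hypothetical, and the exact quoting is handled by subprocess.run on an argument list,
# not by a shell. For example,
#
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "pip install -r requirements.txt" --install_accelerate --debug
#
# would print something equivalent to:
#
#   gcloud compute tpus tpu-vm ssh my-tpu --zone us-central1-a \
#       --command "cd /usr/share; pip install accelerate -U; pip install -r requirements.txt" \
#       --worker all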
349
"""simple docstring""" import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging __SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : List[Any] = {'vocab_file': 'spiece.model'} __SCREAMING_SNAKE_CASE : int = { 'vocab_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model', 't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model', 't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model', } } # TODO(PVP) - this should be removed in Transformers v5 __SCREAMING_SNAKE_CASE : Dict = { 't5-small': 512, 't5-base': 512, 't5-large': 512, 't5-3b': 512, 't5-11b': 512, } __SCREAMING_SNAKE_CASE : Optional[int] = '▁' class __A (snake_case__): '''simple docstring''' __lowercase: Optional[int] = VOCAB_FILES_NAMES __lowercase: Any = PRETRAINED_VOCAB_FILES_MAP __lowercase: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase: List[str] = ["""input_ids""", """attention_mask"""] def __init__( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any]="</s>" , UpperCAmelCase_ : Optional[Any]="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Tuple=100 , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , UpperCAmelCase_ : Optional[int]=True , **UpperCAmelCase_ : Dict , ) ->None: """simple docstring""" if extra_ids > 0 and additional_special_tokens is None: snake_case_ = [F"""<extra_id_{i}>""" for i in range(UpperCAmelCase_ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens snake_case_ = len(set(filter(lambda UpperCAmelCase_ : bool("""extra_id""" in str(UpperCAmelCase_ ) ) , UpperCAmelCase_ ) ) ) if extra_tokens != extra_ids: raise ValueError( F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are""" """ provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids""" """ tokens""" ) if legacy: logger.warning_once( F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to""" """ read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" ) snake_case_ = legacy snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , extra_ids=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , legacy=UpperCAmelCase_ , **UpperCAmelCase_ , ) snake_case_ = vocab_file snake_case_ = extra_ids snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCAmelCase_ ) @staticmethod def lowerCAmelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] ) ->Union[str, Any]: """simple docstring""" if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: snake_case_ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( """This tokenizer was incorrectly instantiated with a model max length of""" F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this""" """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with""" """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on""" F""" {pretrained_model_name_or_path} automatically truncating your input to""" F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences""" F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with""" """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please""" """ instantiate this tokenizer with `model_max_length` set to your preferred value.""" , UpperCAmelCase_ , ) return max_model_length @property def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]: """simple docstring""" return self.sp_model.get_piece_size() + self._extra_ids def lowerCAmelCase ( self : Any ) ->Optional[int]: """simple docstring""" snake_case_ = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False ) ->List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(UpperCAmelCase_ )) + [1] return ([0] * len(UpperCAmelCase_ )) + [1] + ([0] * len(UpperCAmelCase_ )) + [1] def lowerCAmelCase ( self : Any ) ->List[str]: """simple docstring""" return list( set(filter(lambda UpperCAmelCase_ : bool(re.search(R"""<extra_id_\d+>""" , UpperCAmelCase_ ) ) is not None , self.additional_special_tokens ) ) ) def lowerCAmelCase ( self : Dict ) ->str: """simple docstring""" return [self._convert_token_to_id(UpperCAmelCase_ ) for token in self.get_sentinel_tokens()] def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : List[int] ) ->List[int]: """simple docstring""" if len(UpperCAmelCase_ ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F"""This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated""" """ eos tokens being added.""" ) return token_ids else: return token_ids + [self.eos_token_id] def lowerCAmelCase ( self : str , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]: """simple docstring""" snake_case_ = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]: """simple docstring""" snake_case_ = self._add_eos_if_not_present(UpperCAmelCase_ ) if token_ids_a is None: return token_ids_a else: snake_case_ = self._add_eos_if_not_present(UpperCAmelCase_ ) return token_ids_a + token_ids_a def __getstate__( self : Optional[Any] ) ->Tuple: """simple docstring""" snake_case_ = self.__dict__.copy() snake_case_ = None return state def __setstate__( self : Optional[Any] , UpperCAmelCase_ : List[Any] ) ->List[Any]: """simple docstring""" snake_case_ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): snake_case_ = {} snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCAmelCase ( self : int , UpperCAmelCase_ : "TextInput" , **UpperCAmelCase_ : Tuple ) ->List[str]: """simple docstring""" if not self.legacy: snake_case_ = SPIECE_UNDERLINE + text.replace(UpperCAmelCase_ , """ """ ) return super().tokenize(UpperCAmelCase_ , **UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any ) ->Tuple: """simple docstring""" if not self.legacy: snake_case_ = text.startswith(UpperCAmelCase_ ) if is_first: snake_case_ = text[1:] snake_case_ = self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ ) if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(UpperCAmelCase_ ): snake_case_ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" if token.startswith("""<extra_id_""" ): snake_case_ = re.match(R"""<extra_id_(\d+)>""" , UpperCAmelCase_ ) snake_case_ = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(UpperCAmelCase_ ) def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Optional[Any] ) ->List[Any]: """simple docstring""" if index < self.sp_model.get_piece_size(): snake_case_ = self.sp_model.IdToPiece(UpperCAmelCase_ ) else: snake_case_ = F"""<extra_id_{self.vocab_size - 1 - index}>""" return token def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : List[str] ) ->Optional[Any]: """simple docstring""" snake_case_ = [] snake_case_ = """""" snake_case_ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(UpperCAmelCase_ ) + token snake_case_ = True snake_case_ = [] else: current_sub_tokens.append(UpperCAmelCase_ ) snake_case_ = False out_string += self.sp_model.decode(UpperCAmelCase_ ) return out_string.strip() def lowerCAmelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ) ->Tuple[str]: """simple docstring""" if not os.path.isdir(UpperCAmelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return snake_case_ = 
os.path.join( UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase_ , """wb""" ) as fi: snake_case_ = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase_ ) return (out_vocab_file,)
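# Worked example of the sentinel-id arithmetic in the token<->id converters above
# (upstream names `_convert_token_to_id` / `_convert_id_to_token`; they appear above
# under placeholder names), assuming the stock t5-small vocabulary (sp_model size
# 32_000, extra_ids=100, so vocab_size == 32_100):
#
#   _convert_token_to_id("<extra_id_0>")  ->  32_100 - 0 - 1 == 32_099
#   _convert_id_to_token(32_099)          ->  "<extra_id_0>"  (since 32_099 >= 32_000)
#
# i.e. the sentinel tokens occupy the top of the vocabulary in reverse order.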
347
0
"""Project Euler 205: probability that Peter (nine 4-sided dice) beats Colin (six 6-sided dice)."""
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count how often each total occurs over all ordered throws of `dice_number` dice."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        # Peter wins whenever Colin's total is strictly smaller.
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f"""{solution() = }""")
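# Quick sanity check for total_frequency_distribution: two ordinary six-sided dice give
# 6**2 ordered outcomes, exactly six of which sum to 7.
if __name__ == "__main__":
    _freqs = total_frequency_distribution(sides_number=6, dice_number=2)
    assert _freqs[7] == 6
    assert sum(_freqs) == 6**2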
162
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE = 1_000_000 ) -> int: snake_case_ = [i - 1 for i in range(limit + 1 )] for i in range(2 , limit + 1 ): if phi[i] == i - 1: for j in range(2 * i , limit + 1 , _SCREAMING_SNAKE_CASE ): phi[j] -= phi[j] // i return sum(phi[2 : limit + 1] ) if __name__ == "__main__": print(solution())
347
0
import itertools import os import random import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import is_speech_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import WhisperFeatureExtractor if is_torch_available(): import torch A : List[Any] = random.Random() def UpperCamelCase ( __magic_name__ : Any , __magic_name__ : Tuple=1.0 , __magic_name__ : Optional[int]=None , __magic_name__ : str=None ) -> Dict: """simple docstring""" if rng is None: lowercase__ = global_rng lowercase__ = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class A ( unittest.TestCase ): '''simple docstring''' def __init__(self : int , _UpperCAmelCase : Dict , _UpperCAmelCase : int=7 , _UpperCAmelCase : Tuple=400 , _UpperCAmelCase : Tuple=2000 , _UpperCAmelCase : Dict=10 , _UpperCAmelCase : str=160 , _UpperCAmelCase : List[str]=8 , _UpperCAmelCase : List[str]=0.0 , _UpperCAmelCase : int=4000 , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Union[str, Any]=True , ) -> List[str]: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = min_seq_length lowercase__ = max_seq_length lowercase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) lowercase__ = padding_value lowercase__ = sampling_rate lowercase__ = return_attention_mask lowercase__ = do_normalize lowercase__ = feature_size lowercase__ = chunk_length lowercase__ = hop_length def lowerCamelCase__ (self : str ) -> Dict: """simple docstring""" return { "feature_size": self.feature_size, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : Dict=False ) -> Optional[Any]: """simple docstring""" def _flatten(_UpperCAmelCase : Tuple ): return list(itertools.chain(*UpperCAmelCase_ ) ) if equal_length: lowercase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size lowercase__ = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: lowercase__ = [np.asarray(UpperCAmelCase_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class A ( snake_case__ , unittest.TestCase ): '''simple docstring''' A__ = WhisperFeatureExtractor if is_speech_available() else None def lowerCamelCase__ (self : str ) -> List[Any]: """simple docstring""" lowercase__ = WhisperFeatureExtractionTester(self ) def lowerCamelCase__ (self : Dict ) -> List[Any]: """simple docstring""" lowercase__ = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowercase__ = feat_extract_first.save_pretrained(UpperCAmelCase_ )[0] check_json_file_has_correct_format(UpperCAmelCase_ ) lowercase__ = self.feature_extraction_class.from_pretrained(UpperCAmelCase_ ) lowercase__ = feat_extract_first.to_dict() 
lowercase__ = feat_extract_second.to_dict() lowercase__ = feat_extract_first.mel_filters lowercase__ = feat_extract_second.mel_filters self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ ) ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCamelCase__ (self : List[str] ) -> int: """simple docstring""" lowercase__ = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowercase__ = os.path.join(UpperCAmelCase_ , """feat_extract.json""" ) feat_extract_first.to_json_file(UpperCAmelCase_ ) lowercase__ = self.feature_extraction_class.from_json_file(UpperCAmelCase_ ) lowercase__ = feat_extract_first.to_dict() lowercase__ = feat_extract_second.to_dict() lowercase__ = feat_extract_first.mel_filters lowercase__ = feat_extract_second.mel_filters self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ ) ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCamelCase__ (self : Tuple ) -> Union[str, Any]: """simple docstring""" lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 lowercase__ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowercase__ = [np.asarray(UpperCAmelCase_ ) for speech_input in speech_inputs] # Test feature size lowercase__ = feature_extractor(UpperCAmelCase_ , padding="""max_length""" , return_tensors="""np""" ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames ) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size ) # Test not batched input lowercase__ = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features lowercase__ = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) ) # Test batched lowercase__ = feature_extractor(UpperCAmelCase_ , return_tensors="""np""" ).input_features lowercase__ = feature_extractor(UpperCAmelCase_ , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. 
lowercase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)] lowercase__ = np.asarray(UpperCAmelCase_ ) lowercase__ = feature_extractor(UpperCAmelCase_ , return_tensors="""np""" ).input_features lowercase__ = feature_extractor(UpperCAmelCase_ , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) ) # Test truncation required lowercase__ = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )] lowercase__ = [np.asarray(UpperCAmelCase_ ) for speech_input in speech_inputs] lowercase__ = [x[: feature_extractor.n_samples] for x in speech_inputs] lowercase__ = [np.asarray(UpperCAmelCase_ ) for speech_input in speech_inputs_truncated] lowercase__ = feature_extractor(UpperCAmelCase_ , return_tensors="""np""" ).input_features lowercase__ = feature_extractor(UpperCAmelCase_ , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) ) def lowerCamelCase__ (self : List[Any] ) -> Any: """simple docstring""" import torch lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowercase__ = np.random.rand(100 , 32 ).astype(np.floataa ) lowercase__ = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: lowercase__ = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) lowercase__ = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : Optional[int] ) -> Dict: """simple docstring""" lowercase__ = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" ) # automatic decoding with librispeech lowercase__ = ds.sort("""id""" ).select(range(UpperCAmelCase_ ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def lowerCamelCase__ (self : Any ) -> Any: """simple docstring""" lowercase__ = torch.tensor( [ 0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951, 0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678, 0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554, -0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854 ] ) # fmt: on lowercase__ = self._load_datasamples(1 ) lowercase__ = WhisperFeatureExtractor() lowercase__ = feature_extractor(UpperCAmelCase_ , return_tensors="""pt""" ).input_features self.assertEqual(input_features.shape , (1, 80, 3000) ) self.assertTrue(torch.allclose(input_features[0, 0, :30] , UpperCAmelCase_ , atol=1E-4 ) ) def lowerCamelCase__ (self : int ) -> Optional[Any]: """simple docstring""" lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowercase__ = self._load_datasamples(1 )[0] lowercase__ = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue lowercase__ = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=UpperCAmelCase_ )[0] self.assertTrue(np.all(np.mean(UpperCAmelCase_ ) < 1E-3 ) ) self.assertTrue(np.all(np.abs(np.var(UpperCAmelCase_ ) - 1 ) < 1E-3 ) )
305
"""simple docstring""" from __future__ import annotations def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: print(f"""Vertex\tShortest Distance from vertex {src}""" ) for i, d in enumerate(_SCREAMING_SNAKE_CASE ): print(f"""{i}\t\t{d}""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: for j in range(_SCREAMING_SNAKE_CASE ): snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: return True return False def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> list[float]: snake_case_ = [float("""inf""" )] * vertex_count snake_case_ = 0.0 for _ in range(vertex_count - 1 ): for j in range(_SCREAMING_SNAKE_CASE ): snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: snake_case_ = distance[u] + w snake_case_ = check_negative_cycle(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if negative_cycle_exists: raise Exception("""Negative cycle found""" ) return distance if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE : int = int(input('Enter number of vertices: ').strip()) __SCREAMING_SNAKE_CASE : Dict = int(input('Enter number of edges: ').strip()) __SCREAMING_SNAKE_CASE : list[dict[str, int]] = [{} for _ in range(E)] for i in range(E): print('Edge ', i + 1) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = ( int(x) for x in input('Enter source, destination, weight: ').strip().split(' ') ) __SCREAMING_SNAKE_CASE : Union[str, Any] = {'src': src, 'dst': dest, 'weight': weight} __SCREAMING_SNAKE_CASE : Union[str, Any] = int(input('\nEnter shortest path source:').strip()) __SCREAMING_SNAKE_CASE : str = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
347
0
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use MobileViTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
62
"""simple docstring""" import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) __SCREAMING_SNAKE_CASE : List[str] = logging.getLogger(__name__) __SCREAMING_SNAKE_CASE : str = tf.data.AUTOTUNE def _a ( ) -> List[str]: snake_case_ = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" ) parser.add_argument( """--pretrained_model_config""" , type=_SCREAMING_SNAKE_CASE , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , ) parser.add_argument( """--tokenizer""" , type=_SCREAMING_SNAKE_CASE , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , ) parser.add_argument( """--per_replica_batch_size""" , type=_SCREAMING_SNAKE_CASE , default=8 , help="""Batch size per TPU core.""" , ) parser.add_argument( """--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , ) parser.add_argument( """--tpu_name""" , type=_SCREAMING_SNAKE_CASE , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , ) parser.add_argument( """--tpu_zone""" , type=_SCREAMING_SNAKE_CASE , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , ) parser.add_argument( """--gcp_project""" , type=_SCREAMING_SNAKE_CASE , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" ) parser.add_argument( """--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , ) parser.add_argument( """--train_dataset""" , type=_SCREAMING_SNAKE_CASE , help="""Path to training dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , ) parser.add_argument( """--shuffle_buffer_size""" , type=_SCREAMING_SNAKE_CASE , default=2**18 , help="""Size of the shuffle buffer (in samples)""" , ) parser.add_argument( """--eval_dataset""" , type=_SCREAMING_SNAKE_CASE , help="""Path to evaluation dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , ) parser.add_argument( """--num_epochs""" , type=_SCREAMING_SNAKE_CASE , default=1 , help="""Number of epochs to train for.""" , ) parser.add_argument( """--learning_rate""" , type=_SCREAMING_SNAKE_CASE , default=1E-4 , help="""Learning rate to use for training.""" , ) parser.add_argument( """--weight_decay_rate""" , type=_SCREAMING_SNAKE_CASE , default=1E-3 , help="""Weight decay rate to use for training.""" , ) parser.add_argument( """--max_length""" , type=_SCREAMING_SNAKE_CASE , default=512 , help="""Maximum length of tokenized sequences. 
Should match the setting used in prepare_tfrecord_shards.py""" , ) parser.add_argument( """--mlm_probability""" , type=_SCREAMING_SNAKE_CASE , default=0.15 , help="""Fraction of tokens to mask during training.""" , ) parser.add_argument("""--output_dir""" , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""Path to save model checkpoints to.""" ) parser.add_argument("""--hub_model_id""" , type=_SCREAMING_SNAKE_CASE , help="""Model ID to upload to on the Hugging Face Hub.""" ) snake_case_ = parser.parse_args() return args def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]: try: if args.tpu_name: snake_case_ = tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name , zone=args.tpu_zone , project=args.gcp_project ) else: snake_case_ = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( """Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """ """--gcp_project. When running on a TPU VM, use --tpu_name local.""" ) tf.config.experimental_connect_to_cluster(_SCREAMING_SNAKE_CASE ) tf.tpu.experimental.initialize_tpu_system(_SCREAMING_SNAKE_CASE ) return tpu def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = 0 for file in file_list: snake_case_ = file.split("""/""" )[-1] snake_case_ = re.search(r"""-\d+-(\d+)\.tfrecord""" , _SCREAMING_SNAKE_CASE ).group(1 ) snake_case_ = int(_SCREAMING_SNAKE_CASE ) num_samples += sample_count return num_samples def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]: snake_case_ = count_samples(_SCREAMING_SNAKE_CASE ) snake_case_ = tf.data.Dataset.from_tensor_slices(_SCREAMING_SNAKE_CASE ) if shuffle: snake_case_ = dataset.shuffle(len(_SCREAMING_SNAKE_CASE ) ) snake_case_ = tf.data.TFRecordDataset(_SCREAMING_SNAKE_CASE , num_parallel_reads=_SCREAMING_SNAKE_CASE ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here snake_case_ = dataset.apply(tf.data.experimental.assert_cardinality(_SCREAMING_SNAKE_CASE ) ) snake_case_ = dataset.map(_SCREAMING_SNAKE_CASE , num_parallel_calls=_SCREAMING_SNAKE_CASE ) if shuffle: assert shuffle_buffer_size is not None snake_case_ = dataset.shuffle(args.shuffle_buffer_size ) snake_case_ = dataset.batch(_SCREAMING_SNAKE_CASE , drop_remainder=_SCREAMING_SNAKE_CASE ) snake_case_ = dataset.map(_SCREAMING_SNAKE_CASE , num_parallel_calls=_SCREAMING_SNAKE_CASE ) snake_case_ = dataset.prefetch(_SCREAMING_SNAKE_CASE ) return dataset def _a ( _SCREAMING_SNAKE_CASE ) -> List[Any]: if not args.no_tpu: snake_case_ = initialize_tpu(_SCREAMING_SNAKE_CASE ) snake_case_ = tf.distribute.TPUStrategy(_SCREAMING_SNAKE_CASE ) else: snake_case_ = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" ) snake_case_ = AutoTokenizer.from_pretrained(args.tokenizer ) snake_case_ = AutoConfig.from_pretrained(args.pretrained_model_config ) snake_case_ = tokenizer.vocab_size snake_case_ = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) ) if not training_records: raise ValueError(f"""No .tfrecord files found in {args.train_dataset}.""" ) snake_case_ = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) ) if not eval_records: raise ValueError(f"""No .tfrecord files found in {args.eval_dataset}.""" ) snake_case_ = count_samples(_SCREAMING_SNAKE_CASE ) snake_case_ = 
num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) snake_case_ = steps_per_epoch * args.num_epochs with strategy.scope(): snake_case_ = TFAutoModelForMaskedLM.from_config(_SCREAMING_SNAKE_CASE ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built snake_case_ , snake_case_ = create_optimizer( num_train_steps=_SCREAMING_SNAKE_CASE , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=_SCREAMING_SNAKE_CASE , metrics=["""accuracy"""] ) def decode_fn(_SCREAMING_SNAKE_CASE ): snake_case_ = { """input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), """attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), } return tf.io.parse_single_example(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. snake_case_ = DataCollatorForLanguageModeling( tokenizer=_SCREAMING_SNAKE_CASE , mlm_probability=args.mlm_probability , mlm=_SCREAMING_SNAKE_CASE , return_tensors="""tf""" ) def mask_with_collator(_SCREAMING_SNAKE_CASE ): # TF really needs an isin() function snake_case_ = ( ~tf.cast(batch["""attention_mask"""] , tf.bool ) | (batch["""input_ids"""] == tokenizer.cls_token_id) | (batch["""input_ids"""] == tokenizer.sep_token_id) ) snake_case_ , snake_case_ = data_collator.tf_mask_tokens( batch["""input_ids"""] , vocab_size=len(_SCREAMING_SNAKE_CASE ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=_SCREAMING_SNAKE_CASE , ) return batch snake_case_ = args.per_replica_batch_size * strategy.num_replicas_in_sync snake_case_ = prepare_dataset( _SCREAMING_SNAKE_CASE , decode_fn=_SCREAMING_SNAKE_CASE , mask_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , shuffle_buffer_size=args.shuffle_buffer_size , ) snake_case_ = prepare_dataset( _SCREAMING_SNAKE_CASE , decode_fn=_SCREAMING_SNAKE_CASE , mask_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , ) snake_case_ = [] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=_SCREAMING_SNAKE_CASE ) ) model.fit( _SCREAMING_SNAKE_CASE , validation_data=_SCREAMING_SNAKE_CASE , epochs=args.num_epochs , callbacks=_SCREAMING_SNAKE_CASE , ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Union[str, Any] = parse_args() main(args)
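# Hedged example of the shard-naming convention the sample-counting helper above relies on
# (it is invoked as count_samples at its call sites): files are assumed to be named
# "<prefix>-<shard_index>-<num_samples>.tfrecord", as written by the companion
# prepare_tfrecord_shards.py script, e.g.
#
#   count_samples(["wiki-0-5000.tfrecord", "wiki-1-5000.tfrecord"])  # -> 10000
#
# because re.search(r"-\d+-(\d+)\.tfrecord", name).group(1) extracts the trailing count.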
347
0
"""simple docstring""" from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { 't5-small': 'https://huggingface.co/t5-small/resolve/main/config.json', 't5-base': 'https://huggingface.co/t5-base/resolve/main/config.json', 't5-large': 'https://huggingface.co/t5-large/resolve/main/config.json', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/config.json', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/config.json', } class __A ( snake_case__ ): '''simple docstring''' lowerCAmelCase : Tuple = """t5""" lowerCAmelCase : Optional[Any] = ["""past_key_values"""] lowerCAmelCase : Optional[int] = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self : Optional[int] ,_snake_case : Any=32_128 ,_snake_case : Optional[int]=512 ,_snake_case : Dict=64 ,_snake_case : int=2_048 ,_snake_case : Optional[Any]=6 ,_snake_case : Any=None ,_snake_case : str=8 ,_snake_case : Tuple=32 ,_snake_case : Any=128 ,_snake_case : Optional[Any]=0.1 ,_snake_case : List[Any]=1e-6 ,_snake_case : List[Any]=1.0 ,_snake_case : Any="relu" ,_snake_case : Optional[Any]=True ,_snake_case : str=True ,_snake_case : Optional[Any]=0 ,_snake_case : List[Any]=1 ,**_snake_case : Optional[int] ,) -> Optional[int]: """simple docstring""" lowercase__ : int = vocab_size lowercase__ : int = d_model lowercase__ : List[Any] = d_kv lowercase__ : Optional[Any] = d_ff lowercase__ : Optional[int] = num_layers lowercase__ : Union[str, Any] = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry lowercase__ : Tuple = num_heads lowercase__ : Tuple = relative_attention_num_buckets lowercase__ : Any = relative_attention_max_distance lowercase__ : Optional[Any] = dropout_rate lowercase__ : int = layer_norm_epsilon lowercase__ : Tuple = initializer_factor lowercase__ : int = feed_forward_proj lowercase__ : Tuple = use_cache lowercase__ : List[Any] = self.feed_forward_proj.split('''-''' ) lowercase__ : Optional[Any] = act_info[-1] lowercase__ : Union[str, Any] = act_info[0] == '''gated''' if len(UpperCAmelCase_ ) > 1 and act_info[0] != "gated" or len(UpperCAmelCase_ ) > 2: raise ValueError( f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. 
''' '''\'gated-gelu\' or \'relu\'''' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": lowercase__ : Tuple = '''gelu_new''' super().__init__( pad_token_id=UpperCAmelCase_ ,eos_token_id=UpperCAmelCase_ ,is_encoder_decoder=UpperCAmelCase_ ,**UpperCAmelCase_ ,) class __A ( snake_case__ ): '''simple docstring''' @property def UpperCAmelCase ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" lowercase__ : str = { '''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''}, '''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''}, } if self.use_past: lowercase__ : Union[str, Any] = '''past_encoder_sequence + sequence''' lowercase__ : str = {0: '''batch'''} lowercase__ : Dict = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: lowercase__ : str = {0: '''batch''', 1: '''decoder_sequence'''} lowercase__ : Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(UpperCAmelCase_ ,direction='''inputs''' ) return common_inputs @property def UpperCAmelCase ( self : List[Any] ) -> int: """simple docstring""" return 13
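# How the activation string is parsed by T5Config above:
#
#   T5Config(feed_forward_proj="relu").dense_act_fn        # "relu"; is_gated_act is False
#   T5Config(feed_forward_proj="gated-gelu").dense_act_fn  # "gelu_new" (backwards-compat override)
#   T5Config(feed_forward_proj="gated-gelu").is_gated_act  # True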
16
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> float: if density <= 0: raise ValueError("""Impossible fluid density""" ) if bulk_modulus <= 0: raise ValueError("""Impossible bulk modulus""" ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
347
0